From 6e5343169dc0d94779178688cbb4f5ed1ac565fa Mon Sep 17 00:00:00 2001 From: "kirby@puppetlabs.com" Date: Wed, 20 Mar 2013 10:23:27 -0700 Subject: [PATCH] Adding gam, oh-my-zsh --- .gitignore | 1 + gam/atom/__init__.py | 1484 ++++ gam/atom/__init__.pyc | Bin 0 -> 53239 bytes gam/atom/auth.py | 43 + gam/atom/client.py | 221 + gam/atom/core.py | 550 ++ gam/atom/data.py | 338 + gam/atom/http.py | 364 + gam/atom/http.pyc | Bin 0 -> 10834 bytes gam/atom/http_core.py | 597 ++ gam/atom/http_core.pyc | Bin 0 -> 19330 bytes gam/atom/http_interface.py | 156 + gam/atom/http_interface.pyc | Bin 0 -> 7027 bytes gam/atom/mock_http.py | 132 + gam/atom/mock_http_core.py | 323 + gam/atom/mock_service.py | 243 + gam/atom/service.py | 740 ++ gam/atom/service.pyc | Bin 0 -> 26803 bytes gam/atom/token_store.py | 117 + gam/atom/token_store.pyc | Bin 0 -> 4398 bytes gam/atom/url.py | 139 + gam/atom/url.pyc | Bin 0 -> 4199 bytes gam/gam.py.symlink | 6748 +++++++++++++++++ gam/gdata/Crypto/Cipher/AES.pyd | Bin 0 -> 27648 bytes gam/gdata/Crypto/Cipher/ARC2.pyd | Bin 0 -> 15872 bytes gam/gdata/Crypto/Cipher/ARC4.pyd | Bin 0 -> 8704 bytes gam/gdata/Crypto/Cipher/Blowfish.pyd | Bin 0 -> 19968 bytes gam/gdata/Crypto/Cipher/CAST.pyd | Bin 0 -> 26112 bytes gam/gdata/Crypto/Cipher/DES.pyd | Bin 0 -> 20480 bytes gam/gdata/Crypto/Cipher/DES3.pyd | Bin 0 -> 20992 bytes gam/gdata/Crypto/Cipher/IDEA.pyd | Bin 0 -> 15360 bytes gam/gdata/Crypto/Cipher/RC5.pyd | Bin 0 -> 15872 bytes gam/gdata/Crypto/Cipher/XOR.pyd | Bin 0 -> 8704 bytes gam/gdata/Crypto/Cipher/__init__.py | 33 + gam/gdata/Crypto/Hash/HMAC.py | 108 + gam/gdata/Crypto/Hash/MD2.pyd | Bin 0 -> 8704 bytes gam/gdata/Crypto/Hash/MD4.pyd | Bin 0 -> 9728 bytes gam/gdata/Crypto/Hash/MD5.py | 13 + gam/gdata/Crypto/Hash/RIPEMD.pyd | Bin 0 -> 14336 bytes gam/gdata/Crypto/Hash/SHA.py | 11 + gam/gdata/Crypto/Hash/SHA256.pyd | Bin 0 -> 9216 bytes gam/gdata/Crypto/Hash/__init__.py | 24 + gam/gdata/Crypto/Protocol/AllOrNothing.py | 295 + gam/gdata/Crypto/Protocol/Chaffing.py | 229 + gam/gdata/Crypto/Protocol/__init__.py | 17 + gam/gdata/Crypto/PublicKey/DSA.py | 238 + gam/gdata/Crypto/PublicKey/ElGamal.py | 132 + gam/gdata/Crypto/PublicKey/RSA.py | 256 + gam/gdata/Crypto/PublicKey/__init__.py | 17 + gam/gdata/Crypto/PublicKey/pubkey.py | 172 + gam/gdata/Crypto/PublicKey/qNEW.py | 170 + gam/gdata/Crypto/Util/RFC1751.py | 342 + gam/gdata/Crypto/Util/__init__.py | 16 + gam/gdata/Crypto/Util/number.py | 201 + gam/gdata/Crypto/Util/randpool.py | 421 + gam/gdata/Crypto/Util/test.py | 453 ++ gam/gdata/Crypto/__init__.py | 25 + gam/gdata/Crypto/test.py | 38 + gam/gdata/__init__.py | 835 ++ gam/gdata/__init__.pyc | Bin 0 -> 33816 bytes gam/gdata/acl/__init__.py | 15 + gam/gdata/acl/data.py | 55 + gam/gdata/alt/__init__.py | 20 + gam/gdata/alt/app_engine.py | 101 + gam/gdata/alt/appengine.py | 321 + gam/gdata/analytics/Crypto/Cipher/AES.pyd | Bin 0 -> 27648 bytes gam/gdata/analytics/Crypto/Cipher/ARC2.pyd | Bin 0 -> 15872 bytes gam/gdata/analytics/Crypto/Cipher/ARC4.pyd | Bin 0 -> 8704 bytes .../analytics/Crypto/Cipher/Blowfish.pyd | Bin 0 -> 19968 bytes gam/gdata/analytics/Crypto/Cipher/CAST.pyd | Bin 0 -> 26112 bytes gam/gdata/analytics/Crypto/Cipher/DES.pyd | Bin 0 -> 20480 bytes gam/gdata/analytics/Crypto/Cipher/DES3.pyd | Bin 0 -> 20992 bytes gam/gdata/analytics/Crypto/Cipher/IDEA.pyd | Bin 0 -> 15360 bytes gam/gdata/analytics/Crypto/Cipher/RC5.pyd | Bin 0 -> 15872 bytes gam/gdata/analytics/Crypto/Cipher/XOR.pyd | Bin 0 -> 8704 bytes gam/gdata/analytics/Crypto/Cipher/__init__.py | 33 + 
gam/gdata/analytics/Crypto/Hash/HMAC.py | 108 + gam/gdata/analytics/Crypto/Hash/MD2.pyd | Bin 0 -> 8704 bytes gam/gdata/analytics/Crypto/Hash/MD4.pyd | Bin 0 -> 9728 bytes gam/gdata/analytics/Crypto/Hash/MD5.py | 13 + gam/gdata/analytics/Crypto/Hash/RIPEMD.pyd | Bin 0 -> 14336 bytes gam/gdata/analytics/Crypto/Hash/SHA.py | 11 + gam/gdata/analytics/Crypto/Hash/SHA256.pyd | Bin 0 -> 9216 bytes gam/gdata/analytics/Crypto/Hash/__init__.py | 24 + .../analytics/Crypto/Protocol/AllOrNothing.py | 295 + .../analytics/Crypto/Protocol/Chaffing.py | 229 + .../analytics/Crypto/Protocol/__init__.py | 17 + gam/gdata/analytics/Crypto/PublicKey/DSA.py | 238 + .../analytics/Crypto/PublicKey/ElGamal.py | 132 + gam/gdata/analytics/Crypto/PublicKey/RSA.py | 256 + .../analytics/Crypto/PublicKey/__init__.py | 17 + .../analytics/Crypto/PublicKey/pubkey.py | 172 + gam/gdata/analytics/Crypto/PublicKey/qNEW.py | 170 + gam/gdata/analytics/Crypto/Util/RFC1751.py | 342 + gam/gdata/analytics/Crypto/Util/__init__.py | 16 + gam/gdata/analytics/Crypto/Util/number.py | 201 + gam/gdata/analytics/Crypto/Util/randpool.py | 421 + gam/gdata/analytics/Crypto/Util/test.py | 453 ++ gam/gdata/analytics/Crypto/__init__.py | 25 + gam/gdata/analytics/Crypto/test.py | 38 + gam/gdata/analytics/__init__.py | 835 ++ gam/gdata/analytics/__init__.pyc | Bin 0 -> 32064 bytes gam/gdata/analytics/acl/__init__.py | 15 + gam/gdata/analytics/acl/data.py | 63 + gam/gdata/analytics/alt/__init__.py | 20 + gam/gdata/analytics/alt/app_engine.py | 101 + gam/gdata/analytics/alt/appengine.py | 321 + gam/gdata/analytics/analytics/__init__.py | 223 + gam/gdata/analytics/analytics/client.py | 313 + gam/gdata/analytics/analytics/data.py | 379 + gam/gdata/analytics/analytics/service.py | 331 + gam/gdata/analytics/apps/__init__.py | 526 ++ gam/gdata/analytics/apps/__init__.pyc | Bin 0 -> 18739 bytes .../analytics/apps/adminsettings/__init__.py | 16 + .../analytics/apps/adminsettings/__init__.pyc | Bin 0 -> 146 bytes .../analytics/apps/adminsettings/service.py | 471 ++ .../analytics/apps/adminsettings/service.pyc | Bin 0 -> 15462 bytes gam/gdata/analytics/apps/audit/__init__.py | 1 + gam/gdata/analytics/apps/audit/__init__.pyc | Bin 0 -> 138 bytes gam/gdata/analytics/apps/audit/service.py | 277 + gam/gdata/analytics/apps/audit/service.pyc | Bin 0 -> 9785 bytes .../analytics/apps/emailsettings/__init__.py | 15 + .../analytics/apps/emailsettings/__init__.pyc | Bin 0 -> 146 bytes .../analytics/apps/emailsettings/client.py | 557 ++ .../analytics/apps/emailsettings/data.py | 1174 +++ .../analytics/apps/emailsettings/service.py | 264 + .../analytics/apps/emailsettings/service.pyc | Bin 0 -> 9019 bytes gam/gdata/analytics/apps/groups/__init__.py | 0 gam/gdata/analytics/apps/groups/__init__.pyc | Bin 0 -> 139 bytes gam/gdata/analytics/apps/groups/service.py | 387 + gam/gdata/analytics/apps/groups/service.pyc | Bin 0 -> 13387 bytes .../analytics/apps/migration/__init__.py | 223 + gam/gdata/analytics/apps/migration/service.py | 218 + .../analytics/apps/multidomain/__init__.py | 16 + .../analytics/apps/multidomain/__init__.pyc | Bin 0 -> 144 bytes .../analytics/apps/multidomain/client.py | 336 + gam/gdata/analytics/apps/multidomain/data.py | 453 ++ .../analytics/apps/multidomain/service.py | 105 + .../analytics/apps/multidomain/service.pyc | Bin 0 -> 3885 bytes .../analytics/apps/organization/__init__.py | 0 .../analytics/apps/organization/service.py | 297 + gam/gdata/analytics/apps/orgs/__init__.py | 16 + gam/gdata/analytics/apps/orgs/__init__.pyc | Bin 0 -> 137 bytes 
gam/gdata/analytics/apps/orgs/service.py | 159 + gam/gdata/analytics/apps/orgs/service.pyc | Bin 0 -> 5650 bytes gam/gdata/analytics/apps/res_cal/__init__.py | 1 + gam/gdata/analytics/apps/res_cal/__init__.pyc | Bin 0 -> 140 bytes gam/gdata/analytics/apps/res_cal/service.py | 71 + gam/gdata/analytics/apps/res_cal/service.pyc | Bin 0 -> 2630 bytes gam/gdata/analytics/apps/service.py | 552 ++ gam/gdata/analytics/apps/service.pyc | Bin 0 -> 21350 bytes gam/gdata/analytics/apps_property.py | 39 + gam/gdata/analytics/auth.py | 952 +++ gam/gdata/analytics/auth.pyc | Bin 0 -> 37399 bytes gam/gdata/analytics/blogger/__init__.py | 202 + gam/gdata/analytics/blogger/client.py | 175 + gam/gdata/analytics/blogger/data.py | 168 + gam/gdata/analytics/blogger/service.py | 142 + gam/gdata/analytics/books/__init__.py | 473 ++ gam/gdata/analytics/books/data.py | 90 + gam/gdata/analytics/books/service.py | 266 + gam/gdata/analytics/calendar/__init__.py | 1044 +++ gam/gdata/analytics/calendar/__init__.pyc | Bin 0 -> 37831 bytes gam/gdata/analytics/calendar/client.py | 538 ++ gam/gdata/analytics/calendar/data.py | 327 + gam/gdata/analytics/calendar/service.py | 595 ++ .../analytics/calendar_resource/__init__.py | 1 + .../analytics/calendar_resource/client.py | 199 + gam/gdata/analytics/calendar_resource/data.py | 206 + gam/gdata/analytics/client.py | 1163 +++ gam/gdata/analytics/codesearch/__init__.py | 136 + gam/gdata/analytics/codesearch/service.py | 110 + gam/gdata/analytics/contacts/__init__.py | 740 ++ gam/gdata/analytics/contacts/__init__.pyc | Bin 0 -> 27620 bytes gam/gdata/analytics/contacts/client.py | 547 ++ gam/gdata/analytics/contacts/data.py | 493 ++ gam/gdata/analytics/contacts/service.py | 427 ++ gam/gdata/analytics/contacts/service.pyc | Bin 0 -> 18426 bytes .../analytics/contentforshopping/__init__.py | 21 + .../analytics/contentforshopping/client.py | 237 + .../analytics/contentforshopping/data.py | 1175 +++ gam/gdata/analytics/core.py | 279 + gam/gdata/analytics/data.py | 1219 +++ gam/gdata/analytics/docs/__init__.py | 269 + gam/gdata/analytics/docs/client.py | 1027 +++ gam/gdata/analytics/docs/data.py | 654 ++ gam/gdata/analytics/docs/service.py | 618 ++ gam/gdata/analytics/dublincore/__init__.py | 15 + gam/gdata/analytics/dublincore/data.py | 78 + gam/gdata/analytics/exif/__init__.py | 217 + gam/gdata/analytics/finance/__init__.py | 486 ++ gam/gdata/analytics/finance/data.py | 156 + gam/gdata/analytics/finance/service.py | 243 + gam/gdata/analytics/gauth.py | 1552 ++++ gam/gdata/analytics/gauth.pyc | Bin 0 -> 56592 bytes gam/gdata/analytics/geo/__init__.py | 185 + gam/gdata/analytics/geo/data.py | 92 + gam/gdata/analytics/health/__init__.py | 229 + gam/gdata/analytics/health/service.py | 263 + gam/gdata/analytics/marketplace/__init__.py | 1 + gam/gdata/analytics/marketplace/client.py | 160 + gam/gdata/analytics/marketplace/data.py | 115 + gam/gdata/analytics/media/__init__.py | 355 + gam/gdata/analytics/media/data.py | 159 + gam/gdata/analytics/notebook/__init__.py | 15 + gam/gdata/analytics/notebook/data.py | 55 + gam/gdata/analytics/oauth/CHANGES.txt | 17 + gam/gdata/analytics/oauth/__init__.py | 529 ++ gam/gdata/analytics/oauth/__init__.pyc | Bin 0 -> 20135 bytes gam/gdata/analytics/oauth/rsa.py | 120 + gam/gdata/analytics/oauth/rsa.pyc | Bin 0 -> 4470 bytes gam/gdata/analytics/opensearch/__init__.py | 15 + gam/gdata/analytics/opensearch/data.py | 48 + gam/gdata/analytics/photos/__init__.py | 1112 +++ gam/gdata/analytics/photos/service.py | 681 ++ .../analytics/projecthosting/__init__.py 
| 1 + gam/gdata/analytics/projecthosting/client.py | 201 + gam/gdata/analytics/projecthosting/data.py | 134 + gam/gdata/analytics/sample_util.py | 269 + gam/gdata/analytics/service.py | 1718 +++++ gam/gdata/analytics/service.pyc | Bin 0 -> 65673 bytes gam/gdata/analytics/sites/__init__.py | 0 gam/gdata/analytics/sites/client.py | 462 ++ gam/gdata/analytics/sites/data.py | 377 + gam/gdata/analytics/spreadsheet/__init__.py | 474 ++ gam/gdata/analytics/spreadsheet/service.py | 487 ++ gam/gdata/analytics/spreadsheet/text_db.py | 559 ++ gam/gdata/analytics/spreadsheets/__init__.py | 0 gam/gdata/analytics/spreadsheets/client.py | 592 ++ gam/gdata/analytics/spreadsheets/data.py | 346 + gam/gdata/analytics/test_config.py | 434 ++ gam/gdata/analytics/test_data.py | 5616 ++++++++++++++ gam/gdata/analytics/tlslite/BaseDB.py | 120 + gam/gdata/analytics/tlslite/Checker.py | 146 + gam/gdata/analytics/tlslite/FileObject.py | 220 + .../analytics/tlslite/HandshakeSettings.py | 159 + gam/gdata/analytics/tlslite/Session.py | 131 + gam/gdata/analytics/tlslite/SessionCache.py | 103 + gam/gdata/analytics/tlslite/SharedKeyDB.py | 58 + gam/gdata/analytics/tlslite/TLSConnection.py | 1600 ++++ gam/gdata/analytics/tlslite/TLSRecordLayer.py | 1123 +++ gam/gdata/analytics/tlslite/VerifierDB.py | 90 + gam/gdata/analytics/tlslite/X509.py | 133 + gam/gdata/analytics/tlslite/X509CertChain.py | 181 + gam/gdata/analytics/tlslite/__init__.py | 39 + gam/gdata/analytics/tlslite/__init__.pyc | Bin 0 -> 1214 bytes gam/gdata/analytics/tlslite/api.py | 75 + gam/gdata/analytics/tlslite/constants.py | 225 + gam/gdata/analytics/tlslite/errors.py | 149 + .../tlslite/integration/AsyncStateMachine.py | 235 + .../tlslite/integration/ClientHelper.py | 163 + .../tlslite/integration/HTTPTLSConnection.py | 169 + .../tlslite/integration/IMAP4_TLS.py | 132 + .../tlslite/integration/IntegrationHelper.py | 52 + .../analytics/tlslite/integration/POP3_TLS.py | 142 + .../analytics/tlslite/integration/SMTP_TLS.py | 114 + .../integration/TLSAsyncDispatcherMixIn.py | 139 + .../integration/TLSSocketServerMixIn.py | 59 + .../integration/TLSTwistedProtocolWrapper.py | 196 + .../tlslite/integration/XMLRPCTransport.py | 137 + .../analytics/tlslite/integration/__init__.py | 17 + gam/gdata/analytics/tlslite/mathtls.py | 170 + gam/gdata/analytics/tlslite/messages.py | 561 ++ gam/gdata/analytics/tlslite/utils/AES.py | 31 + .../analytics/tlslite/utils/ASN1Parser.py | 34 + .../analytics/tlslite/utils/ASN1Parser.pyc | Bin 0 -> 1428 bytes .../analytics/tlslite/utils/Cryptlib_AES.py | 34 + .../analytics/tlslite/utils/Cryptlib_RC4.py | 28 + .../tlslite/utils/Cryptlib_TripleDES.py | 35 + .../analytics/tlslite/utils/OpenSSL_AES.py | 49 + .../analytics/tlslite/utils/OpenSSL_RC4.py | 25 + .../analytics/tlslite/utils/OpenSSL_RSAKey.py | 148 + .../tlslite/utils/OpenSSL_TripleDES.py | 44 + .../analytics/tlslite/utils/PyCrypto_AES.py | 22 + .../analytics/tlslite/utils/PyCrypto_RC4.py | 22 + .../tlslite/utils/PyCrypto_RSAKey.py | 61 + .../tlslite/utils/PyCrypto_TripleDES.py | 22 + .../analytics/tlslite/utils/Python_AES.py | 68 + .../analytics/tlslite/utils/Python_RC4.py | 39 + .../analytics/tlslite/utils/Python_RSAKey.py | 209 + .../analytics/tlslite/utils/Python_RSAKey.pyc | Bin 0 -> 7978 bytes gam/gdata/analytics/tlslite/utils/RC4.py | 17 + gam/gdata/analytics/tlslite/utils/RSAKey.py | 264 + gam/gdata/analytics/tlslite/utils/RSAKey.pyc | Bin 0 -> 9727 bytes .../analytics/tlslite/utils/TripleDES.py | 26 + gam/gdata/analytics/tlslite/utils/__init__.py | 31 + 
.../analytics/tlslite/utils/__init__.pyc | Bin 0 -> 825 bytes .../analytics/tlslite/utils/cipherfactory.py | 111 + gam/gdata/analytics/tlslite/utils/codec.py | 94 + gam/gdata/analytics/tlslite/utils/codec.pyc | Bin 0 -> 4285 bytes gam/gdata/analytics/tlslite/utils/compat.py | 140 + gam/gdata/analytics/tlslite/utils/compat.pyc | Bin 0 -> 6584 bytes .../analytics/tlslite/utils/cryptomath.py | 404 + .../analytics/tlslite/utils/cryptomath.pyc | Bin 0 -> 11026 bytes .../analytics/tlslite/utils/dateFuncs.py | 75 + gam/gdata/analytics/tlslite/utils/entropy.c | 173 + gam/gdata/analytics/tlslite/utils/hmac.py | 104 + .../analytics/tlslite/utils/jython_compat.py | 195 + .../analytics/tlslite/utils/keyfactory.py | 243 + .../analytics/tlslite/utils/keyfactory.pyc | Bin 0 -> 9043 bytes gam/gdata/analytics/tlslite/utils/rijndael.py | 392 + gam/gdata/analytics/tlslite/utils/win32prng.c | 63 + gam/gdata/analytics/tlslite/utils/xmltools.py | 202 + .../analytics/tlslite/utils/xmltools.pyc | Bin 0 -> 8173 bytes gam/gdata/analytics/urlfetch.py | 247 + .../analytics/webmastertools/__init__.py | 544 ++ gam/gdata/analytics/webmastertools/data.py | 217 + gam/gdata/analytics/webmastertools/service.py | 516 ++ gam/gdata/analytics/youtube/__init__.py | 684 ++ gam/gdata/analytics/youtube/client.py | 264 + gam/gdata/analytics/youtube/data.py | 502 ++ gam/gdata/analytics/youtube/service.py | 1563 ++++ gam/gdata/apps/__init__.py | 526 ++ gam/gdata/apps/__init__.pyc | Bin 0 -> 19987 bytes gam/gdata/apps/adminaudit/__init__.py | 0 gam/gdata/apps/adminaudit/__init__.pyc | Bin 0 -> 167 bytes gam/gdata/apps/adminaudit/service.py | 82 + gam/gdata/apps/adminaudit/service.pyc | Bin 0 -> 2977 bytes gam/gdata/apps/adminaudit/service.py~ | 83 + gam/gdata/apps/adminsettings/__init__.py | 16 + gam/gdata/apps/adminsettings/__init__.pyc | Bin 0 -> 170 bytes gam/gdata/apps/adminsettings/service.py | 474 ++ gam/gdata/apps/adminsettings/service.pyc | Bin 0 -> 16372 bytes gam/gdata/apps/adminsettings/service.py~ | 474 ++ gam/gdata/apps/audit/__init__.py | 1 + gam/gdata/apps/audit/__init__.pyc | Bin 0 -> 162 bytes gam/gdata/apps/audit/service.py | 277 + gam/gdata/apps/audit/service.pyc | Bin 0 -> 10145 bytes gam/gdata/apps/emailsettings/__init__.py | 15 + gam/gdata/apps/emailsettings/__init__.pyc | Bin 0 -> 170 bytes gam/gdata/apps/emailsettings/client.py | 386 + gam/gdata/apps/emailsettings/data.py | 1130 +++ gam/gdata/apps/emailsettings/service.py | 394 + gam/gdata/apps/emailsettings/service.pyc | Bin 0 -> 14732 bytes gam/gdata/apps/emailsettings/service.py~ | 394 + gam/gdata/apps/groups/__init__.py | 0 gam/gdata/apps/groups/__init__.pyc | Bin 0 -> 163 bytes gam/gdata/apps/groups/service.py | 392 + gam/gdata/apps/groups/service.pyc | Bin 0 -> 14110 bytes gam/gdata/apps/groups/service.py~ | 393 + gam/gdata/apps/groupsettings/__init__.py | 0 gam/gdata/apps/groupsettings/__init__.pyc | Bin 0 -> 170 bytes gam/gdata/apps/groupsettings/service.py | 172 + gam/gdata/apps/groupsettings/service.pyc | Bin 0 -> 6359 bytes gam/gdata/apps/migration/__init__.py | 212 + gam/gdata/apps/migration/service.py | 129 + gam/gdata/apps/multidomain/__init__.py | 16 + gam/gdata/apps/multidomain/__init__.pyc | Bin 0 -> 168 bytes gam/gdata/apps/multidomain/service.py | 155 + gam/gdata/apps/multidomain/service.pyc | Bin 0 -> 6353 bytes gam/gdata/apps/multidomain/service.py~ | 155 + gam/gdata/apps/orgs/__init__.py | 16 + gam/gdata/apps/orgs/__init__.pyc | Bin 0 -> 161 bytes gam/gdata/apps/orgs/service.py | 170 + gam/gdata/apps/orgs/service.pyc | Bin 0 -> 6395 bytes 
gam/gdata/apps/orgs/service.py~ | 170 + gam/gdata/apps/reporting/__init__.py | 0 gam/gdata/apps/reporting/__init__.pyc | Bin 0 -> 166 bytes gam/gdata/apps/reporting/service.py | 96 + gam/gdata/apps/reporting/service.pyc | Bin 0 -> 3581 bytes gam/gdata/apps/reporting/service.py~ | 96 + gam/gdata/apps/res_cal/__init__.py | 1 + gam/gdata/apps/res_cal/__init__.pyc | Bin 0 -> 164 bytes gam/gdata/apps/res_cal/service.py | 71 + gam/gdata/apps/res_cal/service.pyc | Bin 0 -> 2822 bytes gam/gdata/apps/service.py | 553 ++ gam/gdata/apps/service.pyc | Bin 0 -> 22578 bytes gam/gdata/apps/service.py~ | 552 ++ gam/gdata/apps_property.py | 39 + gam/gdata/auth.py | 952 +++ gam/gdata/auth.pyc | Bin 0 -> 38695 bytes gam/gdata/base/__init__.py | 697 ++ gam/gdata/base/service.py | 256 + gam/gdata/blogger/__init__.py | 202 + gam/gdata/blogger/client.py | 175 + gam/gdata/blogger/data.py | 167 + gam/gdata/blogger/service.py | 142 + gam/gdata/books/__init__.py | 473 ++ gam/gdata/books/data.py | 90 + gam/gdata/books/service.py | 266 + gam/gdata/calendar/__init__.py | 1044 +++ gam/gdata/calendar/__init__.pyc | Bin 0 -> 40015 bytes gam/gdata/calendar/data.py | 300 + gam/gdata/calendar/service.py | 598 ++ gam/gdata/calendar/service.pyc | Bin 0 -> 27615 bytes gam/gdata/calendar_resource/__init__.py | 1 + gam/gdata/calendar_resource/client.py | 199 + gam/gdata/calendar_resource/data.py | 193 + gam/gdata/client.py | 1126 +++ gam/gdata/codesearch/__init__.py | 136 + gam/gdata/codesearch/service.py | 109 + gam/gdata/contacts/__init__.py | 741 ++ gam/gdata/contacts/__init__.pyc | Bin 0 -> 29275 bytes gam/gdata/contacts/client.py | 474 ++ gam/gdata/contacts/data.py | 474 ++ gam/gdata/contacts/service.py | 427 ++ gam/gdata/contacts/service.pyc | Bin 0 -> 19267 bytes gam/gdata/core.py | 279 + gam/gdata/data.py | 1186 +++ gam/gdata/docs/__init__.py | 269 + gam/gdata/docs/client.py | 608 ++ gam/gdata/docs/data.py | 280 + gam/gdata/docs/service.py | 611 ++ gam/gdata/dublincore/__init__.py | 15 + gam/gdata/dublincore/data.py | 78 + gam/gdata/exif/__init__.py | 217 + gam/gdata/finance/__init__.py | 486 ++ gam/gdata/finance/data.py | 156 + gam/gdata/finance/service.py | 243 + gam/gdata/gauth.py | 1306 ++++ gam/gdata/gauth.pyc | Bin 0 -> 49693 bytes gam/gdata/geo/__init__.py | 185 + gam/gdata/geo/data.py | 92 + gam/gdata/health/__init__.py | 229 + gam/gdata/health/service.py | 263 + gam/gdata/maps/__init__.py | 0 gam/gdata/maps/client.py | 179 + gam/gdata/maps/data.py | 125 + gam/gdata/media/__init__.py | 355 + gam/gdata/media/data.py | 159 + gam/gdata/notebook/__init__.py | 15 + gam/gdata/notebook/data.py | 55 + gam/gdata/oauth/CHANGES.txt | 17 + gam/gdata/oauth/__init__.py | 524 ++ gam/gdata/oauth/__init__.pyc | Bin 0 -> 21845 bytes gam/gdata/oauth/rsa.py | 120 + gam/gdata/oauth/rsa.pyc | Bin 0 -> 4734 bytes gam/gdata/opensearch/__init__.py | 15 + gam/gdata/opensearch/data.py | 48 + gam/gdata/photos/__init__.py | 1112 +++ gam/gdata/photos/service.py | 680 ++ gam/gdata/projecthosting/__init__.py | 1 + gam/gdata/projecthosting/client.py | 200 + gam/gdata/projecthosting/data.py | 134 + gam/gdata/sample_util.py | 269 + gam/gdata/service.py | 1717 +++++ gam/gdata/service.pyc | Bin 0 -> 67714 bytes gam/gdata/sites/__init__.py | 0 gam/gdata/sites/client.py | 461 ++ gam/gdata/sites/data.py | 376 + gam/gdata/spreadsheet/__init__.py | 474 ++ gam/gdata/spreadsheet/service.py | 484 ++ gam/gdata/spreadsheet/text_db.py | 559 ++ gam/gdata/spreadsheets/__init__.py | 0 gam/gdata/spreadsheets/client.py | 451 ++ gam/gdata/spreadsheets/data.py | 317 + 
gam/gdata/test_config.py | 408 + gam/gdata/test_config_template.py | 96 + gam/gdata/test_data.py | 5397 +++++++++++++ gam/gdata/tlslite/BaseDB.py | 120 + gam/gdata/tlslite/Checker.py | 146 + gam/gdata/tlslite/FileObject.py | 220 + gam/gdata/tlslite/HandshakeSettings.py | 159 + gam/gdata/tlslite/Session.py | 131 + gam/gdata/tlslite/SessionCache.py | 103 + gam/gdata/tlslite/SharedKeyDB.py | 58 + gam/gdata/tlslite/TLSConnection.py | 1600 ++++ gam/gdata/tlslite/TLSRecordLayer.py | 1123 +++ gam/gdata/tlslite/VerifierDB.py | 90 + gam/gdata/tlslite/X509.py | 133 + gam/gdata/tlslite/X509CertChain.py | 181 + gam/gdata/tlslite/__init__.py | 39 + gam/gdata/tlslite/__init__.pyc | Bin 0 -> 1238 bytes gam/gdata/tlslite/api.py | 75 + gam/gdata/tlslite/constants.py | 225 + gam/gdata/tlslite/errors.py | 149 + .../tlslite/integration/AsyncStateMachine.py | 235 + gam/gdata/tlslite/integration/ClientHelper.py | 163 + .../tlslite/integration/HTTPTLSConnection.py | 169 + gam/gdata/tlslite/integration/IMAP4_TLS.py | 132 + .../tlslite/integration/IntegrationHelper.py | 52 + gam/gdata/tlslite/integration/POP3_TLS.py | 142 + gam/gdata/tlslite/integration/SMTP_TLS.py | 114 + .../integration/TLSAsyncDispatcherMixIn.py | 139 + .../integration/TLSSocketServerMixIn.py | 59 + .../integration/TLSTwistedProtocolWrapper.py | 196 + .../tlslite/integration/XMLRPCTransport.py | 137 + gam/gdata/tlslite/integration/__init__.py | 17 + gam/gdata/tlslite/mathtls.py | 170 + gam/gdata/tlslite/messages.py | 561 ++ gam/gdata/tlslite/utils/AES.py | 31 + gam/gdata/tlslite/utils/ASN1Parser.py | 34 + gam/gdata/tlslite/utils/ASN1Parser.pyc | Bin 0 -> 1548 bytes gam/gdata/tlslite/utils/Cryptlib_AES.py | 34 + gam/gdata/tlslite/utils/Cryptlib_RC4.py | 28 + gam/gdata/tlslite/utils/Cryptlib_TripleDES.py | 35 + gam/gdata/tlslite/utils/OpenSSL_AES.py | 49 + gam/gdata/tlslite/utils/OpenSSL_RC4.py | 25 + gam/gdata/tlslite/utils/OpenSSL_RSAKey.py | 148 + gam/gdata/tlslite/utils/OpenSSL_TripleDES.py | 44 + gam/gdata/tlslite/utils/PyCrypto_AES.py | 22 + gam/gdata/tlslite/utils/PyCrypto_RC4.py | 22 + gam/gdata/tlslite/utils/PyCrypto_RSAKey.py | 61 + gam/gdata/tlslite/utils/PyCrypto_RSAKey.pyc | Bin 0 -> 3058 bytes gam/gdata/tlslite/utils/PyCrypto_TripleDES.py | 22 + gam/gdata/tlslite/utils/Python_AES.py | 68 + gam/gdata/tlslite/utils/Python_RC4.py | 39 + gam/gdata/tlslite/utils/Python_RSAKey.py | 209 + gam/gdata/tlslite/utils/Python_RSAKey.pyc | Bin 0 -> 8410 bytes gam/gdata/tlslite/utils/RC4.py | 17 + gam/gdata/tlslite/utils/RSAKey.py | 264 + gam/gdata/tlslite/utils/RSAKey.pyc | Bin 0 -> 10231 bytes gam/gdata/tlslite/utils/TripleDES.py | 26 + gam/gdata/tlslite/utils/__init__.py | 31 + gam/gdata/tlslite/utils/__init__.pyc | Bin 0 -> 849 bytes gam/gdata/tlslite/utils/cipherfactory.py | 111 + gam/gdata/tlslite/utils/codec.py | 94 + gam/gdata/tlslite/utils/codec.pyc | Bin 0 -> 4693 bytes gam/gdata/tlslite/utils/compat.py | 140 + gam/gdata/tlslite/utils/compat.pyc | Bin 0 -> 7208 bytes gam/gdata/tlslite/utils/cryptomath.py | 404 + gam/gdata/tlslite/utils/cryptomath.pyc | Bin 0 -> 11770 bytes gam/gdata/tlslite/utils/dateFuncs.py | 75 + gam/gdata/tlslite/utils/entropy.c | 173 + gam/gdata/tlslite/utils/hmac.py | 104 + gam/gdata/tlslite/utils/jython_compat.py | 195 + gam/gdata/tlslite/utils/keyfactory.py | 243 + gam/gdata/tlslite/utils/keyfactory.pyc | Bin 0 -> 9307 bytes gam/gdata/tlslite/utils/rijndael.py | 392 + gam/gdata/tlslite/utils/win32prng.c | 63 + gam/gdata/tlslite/utils/xmltools.py | 202 + gam/gdata/tlslite/utils/xmltools.pyc | Bin 0 -> 8653 bytes 
gam/gdata/urlfetch.py | 247 + gam/gdata/webmastertools/__init__.py | 544 ++ gam/gdata/webmastertools/data.py | 217 + gam/gdata/webmastertools/service.py | 516 ++ gam/gdata/youtube/__init__.py | 684 ++ gam/gdata/youtube/data.py | 477 ++ gam/gdata/youtube/service.py | 1563 ++++ gam/lastupdatecheck.txt | 1 + gam/oauth.txt | 88 + gam/whatsnew.txt | 154 + oh-my-zsh/.oh-my-zsh | 1 + vim/vim.symlink/plugin/gnupg.vim | 512 +- vim/vimrc.symlink | 1 + zsh/zshrc.symlink | 6 +- 536 files changed, 129765 insertions(+), 203 deletions(-) create mode 100755 gam/atom/__init__.py create mode 100644 gam/atom/__init__.pyc create mode 100755 gam/atom/auth.py create mode 100755 gam/atom/client.py create mode 100755 gam/atom/core.py create mode 100755 gam/atom/data.py create mode 100755 gam/atom/http.py create mode 100644 gam/atom/http.pyc create mode 100755 gam/atom/http_core.py create mode 100644 gam/atom/http_core.pyc create mode 100755 gam/atom/http_interface.py create mode 100644 gam/atom/http_interface.pyc create mode 100755 gam/atom/mock_http.py create mode 100755 gam/atom/mock_http_core.py create mode 100755 gam/atom/mock_service.py create mode 100755 gam/atom/service.py create mode 100644 gam/atom/service.pyc create mode 100755 gam/atom/token_store.py create mode 100644 gam/atom/token_store.pyc create mode 100755 gam/atom/url.py create mode 100644 gam/atom/url.pyc create mode 100755 gam/gam.py.symlink create mode 100755 gam/gdata/Crypto/Cipher/AES.pyd create mode 100755 gam/gdata/Crypto/Cipher/ARC2.pyd create mode 100755 gam/gdata/Crypto/Cipher/ARC4.pyd create mode 100755 gam/gdata/Crypto/Cipher/Blowfish.pyd create mode 100755 gam/gdata/Crypto/Cipher/CAST.pyd create mode 100755 gam/gdata/Crypto/Cipher/DES.pyd create mode 100755 gam/gdata/Crypto/Cipher/DES3.pyd create mode 100755 gam/gdata/Crypto/Cipher/IDEA.pyd create mode 100755 gam/gdata/Crypto/Cipher/RC5.pyd create mode 100755 gam/gdata/Crypto/Cipher/XOR.pyd create mode 100755 gam/gdata/Crypto/Cipher/__init__.py create mode 100755 gam/gdata/Crypto/Hash/HMAC.py create mode 100755 gam/gdata/Crypto/Hash/MD2.pyd create mode 100755 gam/gdata/Crypto/Hash/MD4.pyd create mode 100755 gam/gdata/Crypto/Hash/MD5.py create mode 100755 gam/gdata/Crypto/Hash/RIPEMD.pyd create mode 100755 gam/gdata/Crypto/Hash/SHA.py create mode 100755 gam/gdata/Crypto/Hash/SHA256.pyd create mode 100755 gam/gdata/Crypto/Hash/__init__.py create mode 100755 gam/gdata/Crypto/Protocol/AllOrNothing.py create mode 100755 gam/gdata/Crypto/Protocol/Chaffing.py create mode 100755 gam/gdata/Crypto/Protocol/__init__.py create mode 100755 gam/gdata/Crypto/PublicKey/DSA.py create mode 100755 gam/gdata/Crypto/PublicKey/ElGamal.py create mode 100755 gam/gdata/Crypto/PublicKey/RSA.py create mode 100755 gam/gdata/Crypto/PublicKey/__init__.py create mode 100755 gam/gdata/Crypto/PublicKey/pubkey.py create mode 100755 gam/gdata/Crypto/PublicKey/qNEW.py create mode 100755 gam/gdata/Crypto/Util/RFC1751.py create mode 100755 gam/gdata/Crypto/Util/__init__.py create mode 100755 gam/gdata/Crypto/Util/number.py create mode 100755 gam/gdata/Crypto/Util/randpool.py create mode 100755 gam/gdata/Crypto/Util/test.py create mode 100755 gam/gdata/Crypto/__init__.py create mode 100755 gam/gdata/Crypto/test.py create mode 100755 gam/gdata/__init__.py create mode 100644 gam/gdata/__init__.pyc create mode 100755 gam/gdata/acl/__init__.py create mode 100755 gam/gdata/acl/data.py create mode 100755 gam/gdata/alt/__init__.py create mode 100755 gam/gdata/alt/app_engine.py create mode 100755 gam/gdata/alt/appengine.py 
create mode 100755 gam/gdata/analytics/Crypto/Cipher/AES.pyd create mode 100755 gam/gdata/analytics/Crypto/Cipher/ARC2.pyd create mode 100755 gam/gdata/analytics/Crypto/Cipher/ARC4.pyd create mode 100755 gam/gdata/analytics/Crypto/Cipher/Blowfish.pyd create mode 100755 gam/gdata/analytics/Crypto/Cipher/CAST.pyd create mode 100755 gam/gdata/analytics/Crypto/Cipher/DES.pyd create mode 100755 gam/gdata/analytics/Crypto/Cipher/DES3.pyd create mode 100755 gam/gdata/analytics/Crypto/Cipher/IDEA.pyd create mode 100755 gam/gdata/analytics/Crypto/Cipher/RC5.pyd create mode 100755 gam/gdata/analytics/Crypto/Cipher/XOR.pyd create mode 100755 gam/gdata/analytics/Crypto/Cipher/__init__.py create mode 100755 gam/gdata/analytics/Crypto/Hash/HMAC.py create mode 100755 gam/gdata/analytics/Crypto/Hash/MD2.pyd create mode 100755 gam/gdata/analytics/Crypto/Hash/MD4.pyd create mode 100755 gam/gdata/analytics/Crypto/Hash/MD5.py create mode 100755 gam/gdata/analytics/Crypto/Hash/RIPEMD.pyd create mode 100755 gam/gdata/analytics/Crypto/Hash/SHA.py create mode 100755 gam/gdata/analytics/Crypto/Hash/SHA256.pyd create mode 100755 gam/gdata/analytics/Crypto/Hash/__init__.py create mode 100755 gam/gdata/analytics/Crypto/Protocol/AllOrNothing.py create mode 100755 gam/gdata/analytics/Crypto/Protocol/Chaffing.py create mode 100755 gam/gdata/analytics/Crypto/Protocol/__init__.py create mode 100755 gam/gdata/analytics/Crypto/PublicKey/DSA.py create mode 100755 gam/gdata/analytics/Crypto/PublicKey/ElGamal.py create mode 100755 gam/gdata/analytics/Crypto/PublicKey/RSA.py create mode 100755 gam/gdata/analytics/Crypto/PublicKey/__init__.py create mode 100755 gam/gdata/analytics/Crypto/PublicKey/pubkey.py create mode 100755 gam/gdata/analytics/Crypto/PublicKey/qNEW.py create mode 100755 gam/gdata/analytics/Crypto/Util/RFC1751.py create mode 100755 gam/gdata/analytics/Crypto/Util/__init__.py create mode 100755 gam/gdata/analytics/Crypto/Util/number.py create mode 100755 gam/gdata/analytics/Crypto/Util/randpool.py create mode 100755 gam/gdata/analytics/Crypto/Util/test.py create mode 100755 gam/gdata/analytics/Crypto/__init__.py create mode 100755 gam/gdata/analytics/Crypto/test.py create mode 100755 gam/gdata/analytics/__init__.py create mode 100755 gam/gdata/analytics/__init__.pyc create mode 100755 gam/gdata/analytics/acl/__init__.py create mode 100755 gam/gdata/analytics/acl/data.py create mode 100755 gam/gdata/analytics/alt/__init__.py create mode 100755 gam/gdata/analytics/alt/app_engine.py create mode 100755 gam/gdata/analytics/alt/appengine.py create mode 100755 gam/gdata/analytics/analytics/__init__.py create mode 100755 gam/gdata/analytics/analytics/client.py create mode 100755 gam/gdata/analytics/analytics/data.py create mode 100755 gam/gdata/analytics/analytics/service.py create mode 100755 gam/gdata/analytics/apps/__init__.py create mode 100755 gam/gdata/analytics/apps/__init__.pyc create mode 100755 gam/gdata/analytics/apps/adminsettings/__init__.py create mode 100755 gam/gdata/analytics/apps/adminsettings/__init__.pyc create mode 100755 gam/gdata/analytics/apps/adminsettings/service.py create mode 100755 gam/gdata/analytics/apps/adminsettings/service.pyc create mode 100755 gam/gdata/analytics/apps/audit/__init__.py create mode 100755 gam/gdata/analytics/apps/audit/__init__.pyc create mode 100755 gam/gdata/analytics/apps/audit/service.py create mode 100755 gam/gdata/analytics/apps/audit/service.pyc create mode 100755 gam/gdata/analytics/apps/emailsettings/__init__.py create mode 100755 
gam/gdata/analytics/apps/emailsettings/__init__.pyc create mode 100755 gam/gdata/analytics/apps/emailsettings/client.py create mode 100755 gam/gdata/analytics/apps/emailsettings/data.py create mode 100755 gam/gdata/analytics/apps/emailsettings/service.py create mode 100755 gam/gdata/analytics/apps/emailsettings/service.pyc create mode 100755 gam/gdata/analytics/apps/groups/__init__.py create mode 100755 gam/gdata/analytics/apps/groups/__init__.pyc create mode 100755 gam/gdata/analytics/apps/groups/service.py create mode 100755 gam/gdata/analytics/apps/groups/service.pyc create mode 100755 gam/gdata/analytics/apps/migration/__init__.py create mode 100755 gam/gdata/analytics/apps/migration/service.py create mode 100755 gam/gdata/analytics/apps/multidomain/__init__.py create mode 100755 gam/gdata/analytics/apps/multidomain/__init__.pyc create mode 100755 gam/gdata/analytics/apps/multidomain/client.py create mode 100755 gam/gdata/analytics/apps/multidomain/data.py create mode 100755 gam/gdata/analytics/apps/multidomain/service.py create mode 100755 gam/gdata/analytics/apps/multidomain/service.pyc create mode 100755 gam/gdata/analytics/apps/organization/__init__.py create mode 100755 gam/gdata/analytics/apps/organization/service.py create mode 100755 gam/gdata/analytics/apps/orgs/__init__.py create mode 100755 gam/gdata/analytics/apps/orgs/__init__.pyc create mode 100755 gam/gdata/analytics/apps/orgs/service.py create mode 100755 gam/gdata/analytics/apps/orgs/service.pyc create mode 100755 gam/gdata/analytics/apps/res_cal/__init__.py create mode 100755 gam/gdata/analytics/apps/res_cal/__init__.pyc create mode 100755 gam/gdata/analytics/apps/res_cal/service.py create mode 100755 gam/gdata/analytics/apps/res_cal/service.pyc create mode 100755 gam/gdata/analytics/apps/service.py create mode 100755 gam/gdata/analytics/apps/service.pyc create mode 100755 gam/gdata/analytics/apps_property.py create mode 100755 gam/gdata/analytics/auth.py create mode 100755 gam/gdata/analytics/auth.pyc create mode 100755 gam/gdata/analytics/blogger/__init__.py create mode 100755 gam/gdata/analytics/blogger/client.py create mode 100755 gam/gdata/analytics/blogger/data.py create mode 100755 gam/gdata/analytics/blogger/service.py create mode 100755 gam/gdata/analytics/books/__init__.py create mode 100755 gam/gdata/analytics/books/data.py create mode 100755 gam/gdata/analytics/books/service.py create mode 100755 gam/gdata/analytics/calendar/__init__.py create mode 100755 gam/gdata/analytics/calendar/__init__.pyc create mode 100755 gam/gdata/analytics/calendar/client.py create mode 100755 gam/gdata/analytics/calendar/data.py create mode 100755 gam/gdata/analytics/calendar/service.py create mode 100755 gam/gdata/analytics/calendar_resource/__init__.py create mode 100755 gam/gdata/analytics/calendar_resource/client.py create mode 100755 gam/gdata/analytics/calendar_resource/data.py create mode 100755 gam/gdata/analytics/client.py create mode 100755 gam/gdata/analytics/codesearch/__init__.py create mode 100755 gam/gdata/analytics/codesearch/service.py create mode 100755 gam/gdata/analytics/contacts/__init__.py create mode 100755 gam/gdata/analytics/contacts/__init__.pyc create mode 100755 gam/gdata/analytics/contacts/client.py create mode 100755 gam/gdata/analytics/contacts/data.py create mode 100755 gam/gdata/analytics/contacts/service.py create mode 100755 gam/gdata/analytics/contacts/service.pyc create mode 100755 gam/gdata/analytics/contentforshopping/__init__.py create mode 100755 
gam/gdata/analytics/contentforshopping/client.py create mode 100755 gam/gdata/analytics/contentforshopping/data.py create mode 100755 gam/gdata/analytics/core.py create mode 100755 gam/gdata/analytics/data.py create mode 100755 gam/gdata/analytics/docs/__init__.py create mode 100755 gam/gdata/analytics/docs/client.py create mode 100755 gam/gdata/analytics/docs/data.py create mode 100755 gam/gdata/analytics/docs/service.py create mode 100755 gam/gdata/analytics/dublincore/__init__.py create mode 100755 gam/gdata/analytics/dublincore/data.py create mode 100755 gam/gdata/analytics/exif/__init__.py create mode 100755 gam/gdata/analytics/finance/__init__.py create mode 100755 gam/gdata/analytics/finance/data.py create mode 100755 gam/gdata/analytics/finance/service.py create mode 100755 gam/gdata/analytics/gauth.py create mode 100755 gam/gdata/analytics/gauth.pyc create mode 100755 gam/gdata/analytics/geo/__init__.py create mode 100755 gam/gdata/analytics/geo/data.py create mode 100755 gam/gdata/analytics/health/__init__.py create mode 100755 gam/gdata/analytics/health/service.py create mode 100755 gam/gdata/analytics/marketplace/__init__.py create mode 100755 gam/gdata/analytics/marketplace/client.py create mode 100755 gam/gdata/analytics/marketplace/data.py create mode 100755 gam/gdata/analytics/media/__init__.py create mode 100755 gam/gdata/analytics/media/data.py create mode 100755 gam/gdata/analytics/notebook/__init__.py create mode 100755 gam/gdata/analytics/notebook/data.py create mode 100755 gam/gdata/analytics/oauth/CHANGES.txt create mode 100755 gam/gdata/analytics/oauth/__init__.py create mode 100755 gam/gdata/analytics/oauth/__init__.pyc create mode 100755 gam/gdata/analytics/oauth/rsa.py create mode 100755 gam/gdata/analytics/oauth/rsa.pyc create mode 100755 gam/gdata/analytics/opensearch/__init__.py create mode 100755 gam/gdata/analytics/opensearch/data.py create mode 100755 gam/gdata/analytics/photos/__init__.py create mode 100755 gam/gdata/analytics/photos/service.py create mode 100755 gam/gdata/analytics/projecthosting/__init__.py create mode 100755 gam/gdata/analytics/projecthosting/client.py create mode 100755 gam/gdata/analytics/projecthosting/data.py create mode 100755 gam/gdata/analytics/sample_util.py create mode 100755 gam/gdata/analytics/service.py create mode 100755 gam/gdata/analytics/service.pyc create mode 100755 gam/gdata/analytics/sites/__init__.py create mode 100755 gam/gdata/analytics/sites/client.py create mode 100755 gam/gdata/analytics/sites/data.py create mode 100755 gam/gdata/analytics/spreadsheet/__init__.py create mode 100755 gam/gdata/analytics/spreadsheet/service.py create mode 100755 gam/gdata/analytics/spreadsheet/text_db.py create mode 100755 gam/gdata/analytics/spreadsheets/__init__.py create mode 100755 gam/gdata/analytics/spreadsheets/client.py create mode 100755 gam/gdata/analytics/spreadsheets/data.py create mode 100755 gam/gdata/analytics/test_config.py create mode 100755 gam/gdata/analytics/test_data.py create mode 100755 gam/gdata/analytics/tlslite/BaseDB.py create mode 100755 gam/gdata/analytics/tlslite/Checker.py create mode 100755 gam/gdata/analytics/tlslite/FileObject.py create mode 100755 gam/gdata/analytics/tlslite/HandshakeSettings.py create mode 100755 gam/gdata/analytics/tlslite/Session.py create mode 100755 gam/gdata/analytics/tlslite/SessionCache.py create mode 100755 gam/gdata/analytics/tlslite/SharedKeyDB.py create mode 100755 gam/gdata/analytics/tlslite/TLSConnection.py create mode 100755 
gam/gdata/analytics/tlslite/TLSRecordLayer.py create mode 100755 gam/gdata/analytics/tlslite/VerifierDB.py create mode 100755 gam/gdata/analytics/tlslite/X509.py create mode 100755 gam/gdata/analytics/tlslite/X509CertChain.py create mode 100755 gam/gdata/analytics/tlslite/__init__.py create mode 100755 gam/gdata/analytics/tlslite/__init__.pyc create mode 100755 gam/gdata/analytics/tlslite/api.py create mode 100755 gam/gdata/analytics/tlslite/constants.py create mode 100755 gam/gdata/analytics/tlslite/errors.py create mode 100755 gam/gdata/analytics/tlslite/integration/AsyncStateMachine.py create mode 100755 gam/gdata/analytics/tlslite/integration/ClientHelper.py create mode 100755 gam/gdata/analytics/tlslite/integration/HTTPTLSConnection.py create mode 100755 gam/gdata/analytics/tlslite/integration/IMAP4_TLS.py create mode 100755 gam/gdata/analytics/tlslite/integration/IntegrationHelper.py create mode 100755 gam/gdata/analytics/tlslite/integration/POP3_TLS.py create mode 100755 gam/gdata/analytics/tlslite/integration/SMTP_TLS.py create mode 100755 gam/gdata/analytics/tlslite/integration/TLSAsyncDispatcherMixIn.py create mode 100755 gam/gdata/analytics/tlslite/integration/TLSSocketServerMixIn.py create mode 100755 gam/gdata/analytics/tlslite/integration/TLSTwistedProtocolWrapper.py create mode 100755 gam/gdata/analytics/tlslite/integration/XMLRPCTransport.py create mode 100755 gam/gdata/analytics/tlslite/integration/__init__.py create mode 100755 gam/gdata/analytics/tlslite/mathtls.py create mode 100755 gam/gdata/analytics/tlslite/messages.py create mode 100755 gam/gdata/analytics/tlslite/utils/AES.py create mode 100755 gam/gdata/analytics/tlslite/utils/ASN1Parser.py create mode 100755 gam/gdata/analytics/tlslite/utils/ASN1Parser.pyc create mode 100755 gam/gdata/analytics/tlslite/utils/Cryptlib_AES.py create mode 100755 gam/gdata/analytics/tlslite/utils/Cryptlib_RC4.py create mode 100755 gam/gdata/analytics/tlslite/utils/Cryptlib_TripleDES.py create mode 100755 gam/gdata/analytics/tlslite/utils/OpenSSL_AES.py create mode 100755 gam/gdata/analytics/tlslite/utils/OpenSSL_RC4.py create mode 100755 gam/gdata/analytics/tlslite/utils/OpenSSL_RSAKey.py create mode 100755 gam/gdata/analytics/tlslite/utils/OpenSSL_TripleDES.py create mode 100755 gam/gdata/analytics/tlslite/utils/PyCrypto_AES.py create mode 100755 gam/gdata/analytics/tlslite/utils/PyCrypto_RC4.py create mode 100755 gam/gdata/analytics/tlslite/utils/PyCrypto_RSAKey.py create mode 100755 gam/gdata/analytics/tlslite/utils/PyCrypto_TripleDES.py create mode 100755 gam/gdata/analytics/tlslite/utils/Python_AES.py create mode 100755 gam/gdata/analytics/tlslite/utils/Python_RC4.py create mode 100755 gam/gdata/analytics/tlslite/utils/Python_RSAKey.py create mode 100755 gam/gdata/analytics/tlslite/utils/Python_RSAKey.pyc create mode 100755 gam/gdata/analytics/tlslite/utils/RC4.py create mode 100755 gam/gdata/analytics/tlslite/utils/RSAKey.py create mode 100755 gam/gdata/analytics/tlslite/utils/RSAKey.pyc create mode 100755 gam/gdata/analytics/tlslite/utils/TripleDES.py create mode 100755 gam/gdata/analytics/tlslite/utils/__init__.py create mode 100755 gam/gdata/analytics/tlslite/utils/__init__.pyc create mode 100755 gam/gdata/analytics/tlslite/utils/cipherfactory.py create mode 100755 gam/gdata/analytics/tlslite/utils/codec.py create mode 100755 gam/gdata/analytics/tlslite/utils/codec.pyc create mode 100755 gam/gdata/analytics/tlslite/utils/compat.py create mode 100755 gam/gdata/analytics/tlslite/utils/compat.pyc create mode 100755 
gam/gdata/analytics/tlslite/utils/cryptomath.py create mode 100755 gam/gdata/analytics/tlslite/utils/cryptomath.pyc create mode 100755 gam/gdata/analytics/tlslite/utils/dateFuncs.py create mode 100755 gam/gdata/analytics/tlslite/utils/entropy.c create mode 100755 gam/gdata/analytics/tlslite/utils/hmac.py create mode 100755 gam/gdata/analytics/tlslite/utils/jython_compat.py create mode 100755 gam/gdata/analytics/tlslite/utils/keyfactory.py create mode 100755 gam/gdata/analytics/tlslite/utils/keyfactory.pyc create mode 100755 gam/gdata/analytics/tlslite/utils/rijndael.py create mode 100755 gam/gdata/analytics/tlslite/utils/win32prng.c create mode 100755 gam/gdata/analytics/tlslite/utils/xmltools.py create mode 100755 gam/gdata/analytics/tlslite/utils/xmltools.pyc create mode 100755 gam/gdata/analytics/urlfetch.py create mode 100755 gam/gdata/analytics/webmastertools/__init__.py create mode 100755 gam/gdata/analytics/webmastertools/data.py create mode 100755 gam/gdata/analytics/webmastertools/service.py create mode 100755 gam/gdata/analytics/youtube/__init__.py create mode 100755 gam/gdata/analytics/youtube/client.py create mode 100755 gam/gdata/analytics/youtube/data.py create mode 100755 gam/gdata/analytics/youtube/service.py create mode 100755 gam/gdata/apps/__init__.py create mode 100644 gam/gdata/apps/__init__.pyc create mode 100755 gam/gdata/apps/adminaudit/__init__.py create mode 100644 gam/gdata/apps/adminaudit/__init__.pyc create mode 100755 gam/gdata/apps/adminaudit/service.py create mode 100644 gam/gdata/apps/adminaudit/service.pyc create mode 100755 gam/gdata/apps/adminaudit/service.py~ create mode 100755 gam/gdata/apps/adminsettings/__init__.py create mode 100644 gam/gdata/apps/adminsettings/__init__.pyc create mode 100755 gam/gdata/apps/adminsettings/service.py create mode 100644 gam/gdata/apps/adminsettings/service.pyc create mode 100755 gam/gdata/apps/adminsettings/service.py~ create mode 100755 gam/gdata/apps/audit/__init__.py create mode 100644 gam/gdata/apps/audit/__init__.pyc create mode 100755 gam/gdata/apps/audit/service.py create mode 100644 gam/gdata/apps/audit/service.pyc create mode 100755 gam/gdata/apps/emailsettings/__init__.py create mode 100644 gam/gdata/apps/emailsettings/__init__.pyc create mode 100755 gam/gdata/apps/emailsettings/client.py create mode 100755 gam/gdata/apps/emailsettings/data.py create mode 100755 gam/gdata/apps/emailsettings/service.py create mode 100644 gam/gdata/apps/emailsettings/service.pyc create mode 100755 gam/gdata/apps/emailsettings/service.py~ create mode 100755 gam/gdata/apps/groups/__init__.py create mode 100644 gam/gdata/apps/groups/__init__.pyc create mode 100755 gam/gdata/apps/groups/service.py create mode 100644 gam/gdata/apps/groups/service.pyc create mode 100755 gam/gdata/apps/groups/service.py~ create mode 100755 gam/gdata/apps/groupsettings/__init__.py create mode 100644 gam/gdata/apps/groupsettings/__init__.pyc create mode 100755 gam/gdata/apps/groupsettings/service.py create mode 100644 gam/gdata/apps/groupsettings/service.pyc create mode 100755 gam/gdata/apps/migration/__init__.py create mode 100755 gam/gdata/apps/migration/service.py create mode 100755 gam/gdata/apps/multidomain/__init__.py create mode 100644 gam/gdata/apps/multidomain/__init__.pyc create mode 100755 gam/gdata/apps/multidomain/service.py create mode 100644 gam/gdata/apps/multidomain/service.pyc create mode 100755 gam/gdata/apps/multidomain/service.py~ create mode 100755 gam/gdata/apps/orgs/__init__.py create mode 100644 gam/gdata/apps/orgs/__init__.pyc 
create mode 100755 gam/gdata/apps/orgs/service.py create mode 100644 gam/gdata/apps/orgs/service.pyc create mode 100755 gam/gdata/apps/orgs/service.py~ create mode 100755 gam/gdata/apps/reporting/__init__.py create mode 100644 gam/gdata/apps/reporting/__init__.pyc create mode 100755 gam/gdata/apps/reporting/service.py create mode 100644 gam/gdata/apps/reporting/service.pyc create mode 100755 gam/gdata/apps/reporting/service.py~ create mode 100755 gam/gdata/apps/res_cal/__init__.py create mode 100644 gam/gdata/apps/res_cal/__init__.pyc create mode 100755 gam/gdata/apps/res_cal/service.py create mode 100644 gam/gdata/apps/res_cal/service.pyc create mode 100755 gam/gdata/apps/service.py create mode 100644 gam/gdata/apps/service.pyc create mode 100755 gam/gdata/apps/service.py~ create mode 100755 gam/gdata/apps_property.py create mode 100755 gam/gdata/auth.py create mode 100644 gam/gdata/auth.pyc create mode 100755 gam/gdata/base/__init__.py create mode 100755 gam/gdata/base/service.py create mode 100755 gam/gdata/blogger/__init__.py create mode 100755 gam/gdata/blogger/client.py create mode 100755 gam/gdata/blogger/data.py create mode 100755 gam/gdata/blogger/service.py create mode 100755 gam/gdata/books/__init__.py create mode 100755 gam/gdata/books/data.py create mode 100755 gam/gdata/books/service.py create mode 100755 gam/gdata/calendar/__init__.py create mode 100644 gam/gdata/calendar/__init__.pyc create mode 100755 gam/gdata/calendar/data.py create mode 100755 gam/gdata/calendar/service.py create mode 100644 gam/gdata/calendar/service.pyc create mode 100755 gam/gdata/calendar_resource/__init__.py create mode 100755 gam/gdata/calendar_resource/client.py create mode 100755 gam/gdata/calendar_resource/data.py create mode 100755 gam/gdata/client.py create mode 100755 gam/gdata/codesearch/__init__.py create mode 100755 gam/gdata/codesearch/service.py create mode 100755 gam/gdata/contacts/__init__.py create mode 100644 gam/gdata/contacts/__init__.pyc create mode 100755 gam/gdata/contacts/client.py create mode 100755 gam/gdata/contacts/data.py create mode 100755 gam/gdata/contacts/service.py create mode 100644 gam/gdata/contacts/service.pyc create mode 100755 gam/gdata/core.py create mode 100755 gam/gdata/data.py create mode 100755 gam/gdata/docs/__init__.py create mode 100755 gam/gdata/docs/client.py create mode 100755 gam/gdata/docs/data.py create mode 100755 gam/gdata/docs/service.py create mode 100755 gam/gdata/dublincore/__init__.py create mode 100755 gam/gdata/dublincore/data.py create mode 100755 gam/gdata/exif/__init__.py create mode 100755 gam/gdata/finance/__init__.py create mode 100755 gam/gdata/finance/data.py create mode 100755 gam/gdata/finance/service.py create mode 100755 gam/gdata/gauth.py create mode 100644 gam/gdata/gauth.pyc create mode 100755 gam/gdata/geo/__init__.py create mode 100755 gam/gdata/geo/data.py create mode 100755 gam/gdata/health/__init__.py create mode 100755 gam/gdata/health/service.py create mode 100755 gam/gdata/maps/__init__.py create mode 100755 gam/gdata/maps/client.py create mode 100755 gam/gdata/maps/data.py create mode 100755 gam/gdata/media/__init__.py create mode 100755 gam/gdata/media/data.py create mode 100755 gam/gdata/notebook/__init__.py create mode 100755 gam/gdata/notebook/data.py create mode 100755 gam/gdata/oauth/CHANGES.txt create mode 100755 gam/gdata/oauth/__init__.py create mode 100644 gam/gdata/oauth/__init__.pyc create mode 100755 gam/gdata/oauth/rsa.py create mode 100644 gam/gdata/oauth/rsa.pyc create mode 100755 
gam/gdata/opensearch/__init__.py create mode 100755 gam/gdata/opensearch/data.py create mode 100755 gam/gdata/photos/__init__.py create mode 100755 gam/gdata/photos/service.py create mode 100755 gam/gdata/projecthosting/__init__.py create mode 100755 gam/gdata/projecthosting/client.py create mode 100755 gam/gdata/projecthosting/data.py create mode 100755 gam/gdata/sample_util.py create mode 100755 gam/gdata/service.py create mode 100644 gam/gdata/service.pyc create mode 100755 gam/gdata/sites/__init__.py create mode 100755 gam/gdata/sites/client.py create mode 100755 gam/gdata/sites/data.py create mode 100755 gam/gdata/spreadsheet/__init__.py create mode 100755 gam/gdata/spreadsheet/service.py create mode 100755 gam/gdata/spreadsheet/text_db.py create mode 100755 gam/gdata/spreadsheets/__init__.py create mode 100755 gam/gdata/spreadsheets/client.py create mode 100755 gam/gdata/spreadsheets/data.py create mode 100755 gam/gdata/test_config.py create mode 100755 gam/gdata/test_config_template.py create mode 100755 gam/gdata/test_data.py create mode 100755 gam/gdata/tlslite/BaseDB.py create mode 100755 gam/gdata/tlslite/Checker.py create mode 100755 gam/gdata/tlslite/FileObject.py create mode 100755 gam/gdata/tlslite/HandshakeSettings.py create mode 100755 gam/gdata/tlslite/Session.py create mode 100755 gam/gdata/tlslite/SessionCache.py create mode 100755 gam/gdata/tlslite/SharedKeyDB.py create mode 100755 gam/gdata/tlslite/TLSConnection.py create mode 100755 gam/gdata/tlslite/TLSRecordLayer.py create mode 100755 gam/gdata/tlslite/VerifierDB.py create mode 100755 gam/gdata/tlslite/X509.py create mode 100755 gam/gdata/tlslite/X509CertChain.py create mode 100755 gam/gdata/tlslite/__init__.py create mode 100644 gam/gdata/tlslite/__init__.pyc create mode 100755 gam/gdata/tlslite/api.py create mode 100755 gam/gdata/tlslite/constants.py create mode 100755 gam/gdata/tlslite/errors.py create mode 100755 gam/gdata/tlslite/integration/AsyncStateMachine.py create mode 100755 gam/gdata/tlslite/integration/ClientHelper.py create mode 100755 gam/gdata/tlslite/integration/HTTPTLSConnection.py create mode 100755 gam/gdata/tlslite/integration/IMAP4_TLS.py create mode 100755 gam/gdata/tlslite/integration/IntegrationHelper.py create mode 100755 gam/gdata/tlslite/integration/POP3_TLS.py create mode 100755 gam/gdata/tlslite/integration/SMTP_TLS.py create mode 100755 gam/gdata/tlslite/integration/TLSAsyncDispatcherMixIn.py create mode 100755 gam/gdata/tlslite/integration/TLSSocketServerMixIn.py create mode 100755 gam/gdata/tlslite/integration/TLSTwistedProtocolWrapper.py create mode 100755 gam/gdata/tlslite/integration/XMLRPCTransport.py create mode 100755 gam/gdata/tlslite/integration/__init__.py create mode 100755 gam/gdata/tlslite/mathtls.py create mode 100755 gam/gdata/tlslite/messages.py create mode 100755 gam/gdata/tlslite/utils/AES.py create mode 100755 gam/gdata/tlslite/utils/ASN1Parser.py create mode 100644 gam/gdata/tlslite/utils/ASN1Parser.pyc create mode 100755 gam/gdata/tlslite/utils/Cryptlib_AES.py create mode 100755 gam/gdata/tlslite/utils/Cryptlib_RC4.py create mode 100755 gam/gdata/tlslite/utils/Cryptlib_TripleDES.py create mode 100755 gam/gdata/tlslite/utils/OpenSSL_AES.py create mode 100755 gam/gdata/tlslite/utils/OpenSSL_RC4.py create mode 100755 gam/gdata/tlslite/utils/OpenSSL_RSAKey.py create mode 100755 gam/gdata/tlslite/utils/OpenSSL_TripleDES.py create mode 100755 gam/gdata/tlslite/utils/PyCrypto_AES.py create mode 100755 gam/gdata/tlslite/utils/PyCrypto_RC4.py create mode 100755 
gam/gdata/tlslite/utils/PyCrypto_RSAKey.py create mode 100755 gam/gdata/tlslite/utils/PyCrypto_RSAKey.pyc create mode 100755 gam/gdata/tlslite/utils/PyCrypto_TripleDES.py create mode 100755 gam/gdata/tlslite/utils/Python_AES.py create mode 100755 gam/gdata/tlslite/utils/Python_RC4.py create mode 100755 gam/gdata/tlslite/utils/Python_RSAKey.py create mode 100644 gam/gdata/tlslite/utils/Python_RSAKey.pyc create mode 100755 gam/gdata/tlslite/utils/RC4.py create mode 100755 gam/gdata/tlslite/utils/RSAKey.py create mode 100644 gam/gdata/tlslite/utils/RSAKey.pyc create mode 100755 gam/gdata/tlslite/utils/TripleDES.py create mode 100755 gam/gdata/tlslite/utils/__init__.py create mode 100644 gam/gdata/tlslite/utils/__init__.pyc create mode 100755 gam/gdata/tlslite/utils/cipherfactory.py create mode 100755 gam/gdata/tlslite/utils/codec.py create mode 100644 gam/gdata/tlslite/utils/codec.pyc create mode 100755 gam/gdata/tlslite/utils/compat.py create mode 100644 gam/gdata/tlslite/utils/compat.pyc create mode 100755 gam/gdata/tlslite/utils/cryptomath.py create mode 100644 gam/gdata/tlslite/utils/cryptomath.pyc create mode 100755 gam/gdata/tlslite/utils/dateFuncs.py create mode 100755 gam/gdata/tlslite/utils/entropy.c create mode 100755 gam/gdata/tlslite/utils/hmac.py create mode 100755 gam/gdata/tlslite/utils/jython_compat.py create mode 100755 gam/gdata/tlslite/utils/keyfactory.py create mode 100644 gam/gdata/tlslite/utils/keyfactory.pyc create mode 100755 gam/gdata/tlslite/utils/rijndael.py create mode 100755 gam/gdata/tlslite/utils/win32prng.c create mode 100755 gam/gdata/tlslite/utils/xmltools.py create mode 100644 gam/gdata/tlslite/utils/xmltools.pyc create mode 100755 gam/gdata/urlfetch.py create mode 100755 gam/gdata/webmastertools/__init__.py create mode 100755 gam/gdata/webmastertools/data.py create mode 100755 gam/gdata/webmastertools/service.py create mode 100755 gam/gdata/youtube/__init__.py create mode 100755 gam/gdata/youtube/data.py create mode 100755 gam/gdata/youtube/service.py create mode 100644 gam/lastupdatecheck.txt create mode 100644 gam/oauth.txt create mode 100755 gam/whatsnew.txt create mode 120000 oh-my-zsh/.oh-my-zsh diff --git a/.gitignore b/.gitignore index 8608f94029b..217e7084e81 100644 --- a/.gitignore +++ b/.gitignore @@ -8,3 +8,4 @@ vim/vim.symlink/.netrwhist vim/syntasticthing.txt X/awesome/current_theme X/awesome/themes +*un~ diff --git a/gam/atom/__init__.py b/gam/atom/__init__.py new file mode 100755 index 00000000000..6aa96c1534f --- /dev/null +++ b/gam/atom/__init__.py @@ -0,0 +1,1484 @@ +#!/usr/bin/python +# +# Copyright (C) 2006 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +"""Contains classes representing Atom elements. + + Module objective: provide data classes for Atom constructs. These classes hide + the XML-ness of Atom and provide a set of native Python classes to interact + with. + + Conversions to and from XML should only be necessary when the Atom classes + "touch the wire" and are sent over HTTP. 
For this reason this module + provides methods and functions to convert Atom classes to and from strings. + + For more information on the Atom data model, see RFC 4287 + (http://www.ietf.org/rfc/rfc4287.txt) + + AtomBase: A foundation class on which Atom classes are built. It + handles the parsing of attributes and children which are common to all + Atom classes. By default, the AtomBase class translates all XML child + nodes into ExtensionElements. + + ExtensionElement: Atom allows Atom objects to contain XML which is not part + of the Atom specification, these are called extension elements. If a + classes parser encounters an unexpected XML construct, it is translated + into an ExtensionElement instance. ExtensionElement is designed to fully + capture the information in the XML. Child nodes in an XML extension are + turned into ExtensionElements as well. +""" + + +__author__ = 'api.jscudder (Jeffrey Scudder)' + +try: + from xml.etree import cElementTree as ElementTree +except ImportError: + try: + import cElementTree as ElementTree + except ImportError: + try: + from xml.etree import ElementTree + except ImportError: + from elementtree import ElementTree +import warnings + + +# XML namespaces which are often used in Atom entities. +ATOM_NAMESPACE = 'http://www.w3.org/2005/Atom' +ELEMENT_TEMPLATE = '{http://www.w3.org/2005/Atom}%s' +APP_NAMESPACE = 'http://purl.org/atom/app#' +APP_TEMPLATE = '{http://purl.org/atom/app#}%s' + +# This encoding is used for converting strings before translating the XML +# into an object. +XML_STRING_ENCODING = 'utf-8' +# The desired string encoding for object members. set or monkey-patch to +# unicode if you want object members to be Python unicode strings, instead of +# encoded strings +MEMBER_STRING_ENCODING = 'utf-8' +#MEMBER_STRING_ENCODING = unicode + +# If True, all methods which are exclusive to v1 will raise a +# DeprecationWarning +ENABLE_V1_WARNINGS = False + + +def v1_deprecated(warning=None): + """Shows a warning if ENABLE_V1_WARNINGS is True. + + Function decorator used to mark methods used in v1 classes which + may be removed in future versions of the library. + """ + warning = warning or '' + # This closure is what is returned from the deprecated function. + def mark_deprecated(f): + # The deprecated_function wraps the actual call to f. + def optional_warn_function(*args, **kwargs): + if ENABLE_V1_WARNINGS: + warnings.warn(warning, DeprecationWarning, stacklevel=2) + return f(*args, **kwargs) + # Preserve the original name to avoid masking all decorated functions as + # 'deprecated_function' + try: + optional_warn_function.func_name = f.func_name + except TypeError: + pass # In Python2.3 we can't set the func_name + return optional_warn_function + return mark_deprecated + + +def CreateClassFromXMLString(target_class, xml_string, string_encoding=None): + """Creates an instance of the target class from the string contents. + + Args: + target_class: class The class which will be instantiated and populated + with the contents of the XML. This class must have a _tag and a + _namespace class variable. + xml_string: str A string which contains valid XML. The root element + of the XML string should match the tag and namespace of the desired + class. + string_encoding: str The character encoding which the xml_string should + be converted to before it is interpreted and translated into + objects. The default is None in which case the string encoding + is not changed. 
+ + Returns: + An instance of the target class with members assigned according to the + contents of the XML - or None if the root XML tag and namespace did not + match those of the target class. + """ + encoding = string_encoding or XML_STRING_ENCODING + if encoding and isinstance(xml_string, unicode): + xml_string = xml_string.encode(encoding) + tree = ElementTree.fromstring(xml_string) + return _CreateClassFromElementTree(target_class, tree) + + +CreateClassFromXMLString = v1_deprecated( + 'Please use atom.core.parse with atom.data classes instead.')( + CreateClassFromXMLString) + + +def _CreateClassFromElementTree(target_class, tree, namespace=None, tag=None): + """Instantiates the class and populates members according to the tree. + + Note: Only use this function with classes that have _namespace and _tag + class members. + + Args: + target_class: class The class which will be instantiated and populated + with the contents of the XML. + tree: ElementTree An element tree whose contents will be converted into + members of the new target_class instance. + namespace: str (optional) The namespace which the XML tree's root node must + match. If omitted, the namespace defaults to the _namespace of the + target class. + tag: str (optional) The tag which the XML tree's root node must match. If + omitted, the tag defaults to the _tag class member of the target + class. + + Returns: + An instance of the target class - or None if the tag and namespace of + the XML tree's root node did not match the desired namespace and tag. + """ + if namespace is None: + namespace = target_class._namespace + if tag is None: + tag = target_class._tag + if tree.tag == '{%s}%s' % (namespace, tag): + target = target_class() + target._HarvestElementTree(tree) + return target + else: + return None + + +class ExtensionContainer(object): + + def __init__(self, extension_elements=None, extension_attributes=None, + text=None): + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + self.text = text + + __init__ = v1_deprecated( + 'Please use data model classes in atom.data instead.')( + __init__) + + # Three methods to create an object from an ElementTree + def _HarvestElementTree(self, tree): + # Fill in the instance members from the contents of the XML tree. + for child in tree: + self._ConvertElementTreeToMember(child) + for attribute, value in tree.attrib.iteritems(): + self._ConvertElementAttributeToMember(attribute, value) + # Encode the text string according to the desired encoding type. 
(UTF-8) + if tree.text: + if MEMBER_STRING_ENCODING is unicode: + self.text = tree.text + else: + self.text = tree.text.encode(MEMBER_STRING_ENCODING) + + def _ConvertElementTreeToMember(self, child_tree, current_class=None): + self.extension_elements.append(_ExtensionElementFromElementTree( + child_tree)) + + def _ConvertElementAttributeToMember(self, attribute, value): + # Encode the attribute value's string with the desired type Default UTF-8 + if value: + if MEMBER_STRING_ENCODING is unicode: + self.extension_attributes[attribute] = value + else: + self.extension_attributes[attribute] = value.encode( + MEMBER_STRING_ENCODING) + + # One method to create an ElementTree from an object + def _AddMembersToElementTree(self, tree): + for child in self.extension_elements: + child._BecomeChildElement(tree) + for attribute, value in self.extension_attributes.iteritems(): + if value: + if isinstance(value, unicode) or MEMBER_STRING_ENCODING is unicode: + tree.attrib[attribute] = value + else: + # Decode the value from the desired encoding (default UTF-8). + tree.attrib[attribute] = value.decode(MEMBER_STRING_ENCODING) + if self.text: + if isinstance(self.text, unicode) or MEMBER_STRING_ENCODING is unicode: + tree.text = self.text + else: + tree.text = self.text.decode(MEMBER_STRING_ENCODING) + + def FindExtensions(self, tag=None, namespace=None): + """Searches extension elements for child nodes with the desired name. + + Returns a list of extension elements within this object whose tag + and/or namespace match those passed in. To find all extensions in + a particular namespace, specify the namespace but not the tag name. + If you specify only the tag, the result list may contain extension + elements in multiple namespaces. + + Args: + tag: str (optional) The desired tag + namespace: str (optional) The desired namespace + + Returns: + A list of elements whose tag and/or namespace match the parameters + values + """ + + results = [] + + if tag and namespace: + for element in self.extension_elements: + if element.tag == tag and element.namespace == namespace: + results.append(element) + elif tag and not namespace: + for element in self.extension_elements: + if element.tag == tag: + results.append(element) + elif namespace and not tag: + for element in self.extension_elements: + if element.namespace == namespace: + results.append(element) + else: + for element in self.extension_elements: + results.append(element) + + return results + + +class AtomBase(ExtensionContainer): + + _children = {} + _attributes = {} + + def __init__(self, extension_elements=None, extension_attributes=None, + text=None): + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + self.text = text + + __init__ = v1_deprecated( + 'Please use data model classes in atom.data instead.')( + __init__) + + def _ConvertElementTreeToMember(self, child_tree): + # Find the element's tag in this class's list of child members + if self.__class__._children.has_key(child_tree.tag): + member_name = self.__class__._children[child_tree.tag][0] + member_class = self.__class__._children[child_tree.tag][1] + # If the class member is supposed to contain a list, make sure the + # matching member is set to a list, then append the new member + # instance to the list. 
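+      # For example, Feed's _children maps '{%s}entry' % ATOM_NAMESPACE to
+      # ('entry', [Entry]), so each atom:entry child parsed here is appended
+      # to feed.entry rather than replacing it.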
+ if isinstance(member_class, list): + if getattr(self, member_name) is None: + setattr(self, member_name, []) + getattr(self, member_name).append(_CreateClassFromElementTree( + member_class[0], child_tree)) + else: + setattr(self, member_name, + _CreateClassFromElementTree(member_class, child_tree)) + else: + ExtensionContainer._ConvertElementTreeToMember(self, child_tree) + + def _ConvertElementAttributeToMember(self, attribute, value): + # Find the attribute in this class's list of attributes. + if self.__class__._attributes.has_key(attribute): + # Find the member of this class which corresponds to the XML attribute + # (lookup in current_class._attributes) and set this member to the + # desired value (using self.__dict__). + if value: + # Encode the string to capture non-ascii characters (default UTF-8) + if MEMBER_STRING_ENCODING is unicode: + setattr(self, self.__class__._attributes[attribute], value) + else: + setattr(self, self.__class__._attributes[attribute], + value.encode(MEMBER_STRING_ENCODING)) + else: + ExtensionContainer._ConvertElementAttributeToMember( + self, attribute, value) + + # Three methods to create an ElementTree from an object + def _AddMembersToElementTree(self, tree): + # Convert the members of this class which are XML child nodes. + # This uses the class's _children dictionary to find the members which + # should become XML child nodes. + member_node_names = [values[0] for tag, values in + self.__class__._children.iteritems()] + for member_name in member_node_names: + member = getattr(self, member_name) + if member is None: + pass + elif isinstance(member, list): + for instance in member: + instance._BecomeChildElement(tree) + else: + member._BecomeChildElement(tree) + # Convert the members of this class which are XML attributes. + for xml_attribute, member_name in self.__class__._attributes.iteritems(): + member = getattr(self, member_name) + if member is not None: + if isinstance(member, unicode) or MEMBER_STRING_ENCODING is unicode: + tree.attrib[xml_attribute] = member + else: + tree.attrib[xml_attribute] = member.decode(MEMBER_STRING_ENCODING) + # Lastly, call the ExtensionContainers's _AddMembersToElementTree to + # convert any extension attributes. + ExtensionContainer._AddMembersToElementTree(self, tree) + + + def _BecomeChildElement(self, tree): + """ + + Note: Only for use with classes that have a _tag and _namespace class + member. It is in AtomBase so that it can be inherited but it should + not be called on instances of AtomBase. + + """ + new_child = ElementTree.Element('') + tree.append(new_child) + new_child.tag = '{%s}%s' % (self.__class__._namespace, + self.__class__._tag) + self._AddMembersToElementTree(new_child) + + def _ToElementTree(self): + """ + + Note, this method is designed to be used only with classes that have a + _tag and _namespace. It is placed in AtomBase for inheritance but should + not be called on this class. 
+ + """ + new_tree = ElementTree.Element('{%s}%s' % (self.__class__._namespace, + self.__class__._tag)) + self._AddMembersToElementTree(new_tree) + return new_tree + + def ToString(self, string_encoding='UTF-8'): + """Converts the Atom object to a string containing XML.""" + return ElementTree.tostring(self._ToElementTree(), encoding=string_encoding) + + def __str__(self): + return self.ToString() + + +class Name(AtomBase): + """The atom:name element""" + + _tag = 'name' + _namespace = ATOM_NAMESPACE + _children = AtomBase._children.copy() + _attributes = AtomBase._attributes.copy() + + def __init__(self, text=None, extension_elements=None, + extension_attributes=None): + """Constructor for Name + + Args: + text: str The text data in the this element + extension_elements: list A list of ExtensionElement instances + extension_attributes: dict A dictionary of attribute value string pairs + """ + + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + +def NameFromString(xml_string): + return CreateClassFromXMLString(Name, xml_string) + + +class Email(AtomBase): + """The atom:email element""" + + _tag = 'email' + _namespace = ATOM_NAMESPACE + _children = AtomBase._children.copy() + _attributes = AtomBase._attributes.copy() + + def __init__(self, text=None, extension_elements=None, + extension_attributes=None): + """Constructor for Email + + Args: + extension_elements: list A list of ExtensionElement instances + extension_attributes: dict A dictionary of attribute value string pairs + text: str The text data in the this element + """ + + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + +def EmailFromString(xml_string): + return CreateClassFromXMLString(Email, xml_string) + + +class Uri(AtomBase): + """The atom:uri element""" + + _tag = 'uri' + _namespace = ATOM_NAMESPACE + _children = AtomBase._children.copy() + _attributes = AtomBase._attributes.copy() + + def __init__(self, text=None, extension_elements=None, + extension_attributes=None): + """Constructor for Uri + + Args: + extension_elements: list A list of ExtensionElement instances + extension_attributes: dict A dictionary of attribute value string pairs + text: str The text data in the this element + """ + + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + +def UriFromString(xml_string): + return CreateClassFromXMLString(Uri, xml_string) + + +class Person(AtomBase): + """A foundation class from which atom:author and atom:contributor extend. + + A person contains information like name, email address, and web page URI for + an author or contributor to an Atom feed. + """ + + _children = AtomBase._children.copy() + _attributes = AtomBase._attributes.copy() + _children['{%s}name' % (ATOM_NAMESPACE)] = ('name', Name) + _children['{%s}email' % (ATOM_NAMESPACE)] = ('email', Email) + _children['{%s}uri' % (ATOM_NAMESPACE)] = ('uri', Uri) + + def __init__(self, name=None, email=None, uri=None, + extension_elements=None, extension_attributes=None, text=None): + """Foundation from which author and contributor are derived. + + The constructor is provided for illustrative purposes, you should not + need to instantiate a Person. 
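+
+    For illustration, a subclass such as Author (defined below) would
+    typically be built instead, e.g.:
+      author = Author(name=Name(text='Jo'),
+                      email=Email(text='jo@example.com'))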
+ + Args: + name: Name The person's name + email: Email The person's email address + uri: Uri The URI of the person's webpage + extension_elements: list A list of ExtensionElement instances which are + children of this element. + extension_attributes: dict A dictionary of strings which are the values + for additional XML attributes of this element. + text: String The text contents of the element. This is the contents + of the Entry's XML text node. (Example: This is the text) + """ + + self.name = name + self.email = email + self.uri = uri + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + self.text = text + + +class Author(Person): + """The atom:author element + + An author is a required element in Feed. + """ + + _tag = 'author' + _namespace = ATOM_NAMESPACE + _children = Person._children.copy() + _attributes = Person._attributes.copy() + #_children = {} + #_attributes = {} + + def __init__(self, name=None, email=None, uri=None, + extension_elements=None, extension_attributes=None, text=None): + """Constructor for Author + + Args: + name: Name + email: Email + uri: Uri + extension_elements: list A list of ExtensionElement instances + extension_attributes: dict A dictionary of attribute value string pairs + text: str The text data in the this element + """ + + self.name = name + self.email = email + self.uri = uri + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + self.text = text + + +def AuthorFromString(xml_string): + return CreateClassFromXMLString(Author, xml_string) + + +class Contributor(Person): + """The atom:contributor element""" + + _tag = 'contributor' + _namespace = ATOM_NAMESPACE + _children = Person._children.copy() + _attributes = Person._attributes.copy() + + def __init__(self, name=None, email=None, uri=None, + extension_elements=None, extension_attributes=None, text=None): + """Constructor for Contributor + + Args: + name: Name + email: Email + uri: Uri + extension_elements: list A list of ExtensionElement instances + extension_attributes: dict A dictionary of attribute value string pairs + text: str The text data in the this element + """ + + self.name = name + self.email = email + self.uri = uri + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + self.text = text + + +def ContributorFromString(xml_string): + return CreateClassFromXMLString(Contributor, xml_string) + + +class Link(AtomBase): + """The atom:link element""" + + _tag = 'link' + _namespace = ATOM_NAMESPACE + _children = AtomBase._children.copy() + _attributes = AtomBase._attributes.copy() + _attributes['rel'] = 'rel' + _attributes['href'] = 'href' + _attributes['type'] = 'type' + _attributes['title'] = 'title' + _attributes['length'] = 'length' + _attributes['hreflang'] = 'hreflang' + + def __init__(self, href=None, rel=None, link_type=None, hreflang=None, + title=None, length=None, text=None, extension_elements=None, + extension_attributes=None): + """Constructor for Link + + Args: + href: string The href attribute of the link + rel: string + type: string + hreflang: string The language for the href + title: string + length: string The length of the href's destination + extension_elements: list A list of ExtensionElement instances + extension_attributes: dict A dictionary of attribute value string pairs + text: str The text data in the this element + """ + + self.href = href + self.rel = rel + self.type = link_type + 
self.hreflang = hreflang + self.title = title + self.length = length + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + +def LinkFromString(xml_string): + return CreateClassFromXMLString(Link, xml_string) + + +class Generator(AtomBase): + """The atom:generator element""" + + _tag = 'generator' + _namespace = ATOM_NAMESPACE + _children = AtomBase._children.copy() + _attributes = AtomBase._attributes.copy() + _attributes['uri'] = 'uri' + _attributes['version'] = 'version' + + def __init__(self, uri=None, version=None, text=None, + extension_elements=None, extension_attributes=None): + """Constructor for Generator + + Args: + uri: string + version: string + text: str The text data in the this element + extension_elements: list A list of ExtensionElement instances + extension_attributes: dict A dictionary of attribute value string pairs + """ + + self.uri = uri + self.version = version + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + +def GeneratorFromString(xml_string): + return CreateClassFromXMLString(Generator, xml_string) + + +class Text(AtomBase): + """A foundation class from which atom:title, summary, etc. extend. + + This class should never be instantiated. + """ + + _children = AtomBase._children.copy() + _attributes = AtomBase._attributes.copy() + _attributes['type'] = 'type' + + def __init__(self, text_type=None, text=None, extension_elements=None, + extension_attributes=None): + """Constructor for Text + + Args: + text_type: string + text: str The text data in the this element + extension_elements: list A list of ExtensionElement instances + extension_attributes: dict A dictionary of attribute value string pairs + """ + + self.type = text_type + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + +class Title(Text): + """The atom:title element""" + + _tag = 'title' + _namespace = ATOM_NAMESPACE + _children = Text._children.copy() + _attributes = Text._attributes.copy() + + def __init__(self, title_type=None, text=None, extension_elements=None, + extension_attributes=None): + """Constructor for Title + + Args: + title_type: string + text: str The text data in the this element + extension_elements: list A list of ExtensionElement instances + extension_attributes: dict A dictionary of attribute value string pairs + """ + + self.type = title_type + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + +def TitleFromString(xml_string): + return CreateClassFromXMLString(Title, xml_string) + + +class Subtitle(Text): + """The atom:subtitle element""" + + _tag = 'subtitle' + _namespace = ATOM_NAMESPACE + _children = Text._children.copy() + _attributes = Text._attributes.copy() + + def __init__(self, subtitle_type=None, text=None, extension_elements=None, + extension_attributes=None): + """Constructor for Subtitle + + Args: + subtitle_type: string + text: str The text data in the this element + extension_elements: list A list of ExtensionElement instances + extension_attributes: dict A dictionary of attribute value string pairs + """ + + self.type = subtitle_type + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + +def SubtitleFromString(xml_string): + return CreateClassFromXMLString(Subtitle, 
xml_string) + + +class Rights(Text): + """The atom:rights element""" + + _tag = 'rights' + _namespace = ATOM_NAMESPACE + _children = Text._children.copy() + _attributes = Text._attributes.copy() + + def __init__(self, rights_type=None, text=None, extension_elements=None, + extension_attributes=None): + """Constructor for Rights + + Args: + rights_type: string + text: str The text data in the this element + extension_elements: list A list of ExtensionElement instances + extension_attributes: dict A dictionary of attribute value string pairs + """ + + self.type = rights_type + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + +def RightsFromString(xml_string): + return CreateClassFromXMLString(Rights, xml_string) + + +class Summary(Text): + """The atom:summary element""" + + _tag = 'summary' + _namespace = ATOM_NAMESPACE + _children = Text._children.copy() + _attributes = Text._attributes.copy() + + def __init__(self, summary_type=None, text=None, extension_elements=None, + extension_attributes=None): + """Constructor for Summary + + Args: + summary_type: string + text: str The text data in the this element + extension_elements: list A list of ExtensionElement instances + extension_attributes: dict A dictionary of attribute value string pairs + """ + + self.type = summary_type + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + +def SummaryFromString(xml_string): + return CreateClassFromXMLString(Summary, xml_string) + + +class Content(Text): + """The atom:content element""" + + _tag = 'content' + _namespace = ATOM_NAMESPACE + _children = Text._children.copy() + _attributes = Text._attributes.copy() + _attributes['src'] = 'src' + + def __init__(self, content_type=None, src=None, text=None, + extension_elements=None, extension_attributes=None): + """Constructor for Content + + Args: + content_type: string + src: string + text: str The text data in the this element + extension_elements: list A list of ExtensionElement instances + extension_attributes: dict A dictionary of attribute value string pairs + """ + + self.type = content_type + self.src = src + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + +def ContentFromString(xml_string): + return CreateClassFromXMLString(Content, xml_string) + + +class Category(AtomBase): + """The atom:category element""" + + _tag = 'category' + _namespace = ATOM_NAMESPACE + _children = AtomBase._children.copy() + _attributes = AtomBase._attributes.copy() + _attributes['term'] = 'term' + _attributes['scheme'] = 'scheme' + _attributes['label'] = 'label' + + def __init__(self, term=None, scheme=None, label=None, text=None, + extension_elements=None, extension_attributes=None): + """Constructor for Category + + Args: + term: str + scheme: str + label: str + text: str The text data in the this element + extension_elements: list A list of ExtensionElement instances + extension_attributes: dict A dictionary of attribute value string pairs + """ + + self.term = term + self.scheme = scheme + self.label = label + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + +def CategoryFromString(xml_string): + return CreateClassFromXMLString(Category, xml_string) + + +class Id(AtomBase): + """The atom:id element.""" + + _tag = 'id' + _namespace = ATOM_NAMESPACE + 
_children = AtomBase._children.copy() + _attributes = AtomBase._attributes.copy() + + def __init__(self, text=None, extension_elements=None, + extension_attributes=None): + """Constructor for Id + + Args: + text: str The text data in the this element + extension_elements: list A list of ExtensionElement instances + extension_attributes: dict A dictionary of attribute value string pairs + """ + + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + +def IdFromString(xml_string): + return CreateClassFromXMLString(Id, xml_string) + + +class Icon(AtomBase): + """The atom:icon element.""" + + _tag = 'icon' + _namespace = ATOM_NAMESPACE + _children = AtomBase._children.copy() + _attributes = AtomBase._attributes.copy() + + def __init__(self, text=None, extension_elements=None, + extension_attributes=None): + """Constructor for Icon + + Args: + text: str The text data in the this element + extension_elements: list A list of ExtensionElement instances + extension_attributes: dict A dictionary of attribute value string pairs + """ + + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + +def IconFromString(xml_string): + return CreateClassFromXMLString(Icon, xml_string) + + +class Logo(AtomBase): + """The atom:logo element.""" + + _tag = 'logo' + _namespace = ATOM_NAMESPACE + _children = AtomBase._children.copy() + _attributes = AtomBase._attributes.copy() + + def __init__(self, text=None, extension_elements=None, + extension_attributes=None): + """Constructor for Logo + + Args: + text: str The text data in the this element + extension_elements: list A list of ExtensionElement instances + extension_attributes: dict A dictionary of attribute value string pairs + """ + + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + +def LogoFromString(xml_string): + return CreateClassFromXMLString(Logo, xml_string) + + +class Draft(AtomBase): + """The app:draft element which indicates if this entry should be public.""" + + _tag = 'draft' + _namespace = APP_NAMESPACE + _children = AtomBase._children.copy() + _attributes = AtomBase._attributes.copy() + + def __init__(self, text=None, extension_elements=None, + extension_attributes=None): + """Constructor for app:draft + + Args: + text: str The text data in the this element + extension_elements: list A list of ExtensionElement instances + extension_attributes: dict A dictionary of attribute value string pairs + """ + + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + +def DraftFromString(xml_string): + return CreateClassFromXMLString(Draft, xml_string) + + +class Control(AtomBase): + """The app:control element indicating restrictions on publication. + + The APP control element may contain a draft element indicating whether or + not this entry should be publicly available. 
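+
+  An illustrative sketch of marking an entry as a draft:
+    entry.control = Control(draft=Draft(text='yes'))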
+ """ + + _tag = 'control' + _namespace = APP_NAMESPACE + _children = AtomBase._children.copy() + _attributes = AtomBase._attributes.copy() + _children['{%s}draft' % APP_NAMESPACE] = ('draft', Draft) + + def __init__(self, draft=None, text=None, extension_elements=None, + extension_attributes=None): + """Constructor for app:control""" + + self.draft = draft + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + +def ControlFromString(xml_string): + return CreateClassFromXMLString(Control, xml_string) + + +class Date(AtomBase): + """A parent class for atom:updated, published, etc.""" + + #TODO Add text to and from time conversion methods to allow users to set + # the contents of a Date to a python DateTime object. + + _children = AtomBase._children.copy() + _attributes = AtomBase._attributes.copy() + + def __init__(self, text=None, extension_elements=None, + extension_attributes=None): + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + +class Updated(Date): + """The atom:updated element.""" + + _tag = 'updated' + _namespace = ATOM_NAMESPACE + _children = Date._children.copy() + _attributes = Date._attributes.copy() + + def __init__(self, text=None, extension_elements=None, + extension_attributes=None): + """Constructor for Updated + + Args: + text: str The text data in the this element + extension_elements: list A list of ExtensionElement instances + extension_attributes: dict A dictionary of attribute value string pairs + """ + + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + +def UpdatedFromString(xml_string): + return CreateClassFromXMLString(Updated, xml_string) + + +class Published(Date): + """The atom:published element.""" + + _tag = 'published' + _namespace = ATOM_NAMESPACE + _children = Date._children.copy() + _attributes = Date._attributes.copy() + + def __init__(self, text=None, extension_elements=None, + extension_attributes=None): + """Constructor for Published + + Args: + text: str The text data in the this element + extension_elements: list A list of ExtensionElement instances + extension_attributes: dict A dictionary of attribute value string pairs + """ + + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + +def PublishedFromString(xml_string): + return CreateClassFromXMLString(Published, xml_string) + + +class LinkFinder(object): + """An "interface" providing methods to find link elements + + Entry elements often contain multiple links which differ in the rel + attribute or content type. Often, developers are interested in a specific + type of link so this class provides methods to find specific classes of + links. + + This class is used as a mixin in Atom entries and feeds. 
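+
+  An illustrative sketch, given a parsed entry or feed:
+    edit_link = entry.GetEditLink()
+    if edit_link is not None:
+      edit_uri = edit_link.href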
+ """ + + def GetSelfLink(self): + """Find the first link with rel set to 'self' + + Returns: + An atom.Link or none if none of the links had rel equal to 'self' + """ + + for a_link in self.link: + if a_link.rel == 'self': + return a_link + return None + + def GetEditLink(self): + for a_link in self.link: + if a_link.rel == 'edit': + return a_link + return None + + def GetEditMediaLink(self): + for a_link in self.link: + if a_link.rel == 'edit-media': + return a_link + return None + + def GetNextLink(self): + for a_link in self.link: + if a_link.rel == 'next': + return a_link + return None + + def GetLicenseLink(self): + for a_link in self.link: + if a_link.rel == 'license': + return a_link + return None + + def GetAlternateLink(self): + for a_link in self.link: + if a_link.rel == 'alternate': + return a_link + return None + + +class FeedEntryParent(AtomBase, LinkFinder): + """A super class for atom:feed and entry, contains shared attributes""" + + _children = AtomBase._children.copy() + _attributes = AtomBase._attributes.copy() + _children['{%s}author' % ATOM_NAMESPACE] = ('author', [Author]) + _children['{%s}category' % ATOM_NAMESPACE] = ('category', [Category]) + _children['{%s}contributor' % ATOM_NAMESPACE] = ('contributor', [Contributor]) + _children['{%s}id' % ATOM_NAMESPACE] = ('id', Id) + _children['{%s}link' % ATOM_NAMESPACE] = ('link', [Link]) + _children['{%s}rights' % ATOM_NAMESPACE] = ('rights', Rights) + _children['{%s}title' % ATOM_NAMESPACE] = ('title', Title) + _children['{%s}updated' % ATOM_NAMESPACE] = ('updated', Updated) + + def __init__(self, author=None, category=None, contributor=None, + atom_id=None, link=None, rights=None, title=None, updated=None, + text=None, extension_elements=None, extension_attributes=None): + self.author = author or [] + self.category = category or [] + self.contributor = contributor or [] + self.id = atom_id + self.link = link or [] + self.rights = rights + self.title = title + self.updated = updated + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + +class Source(FeedEntryParent): + """The atom:source element""" + + _tag = 'source' + _namespace = ATOM_NAMESPACE + _children = FeedEntryParent._children.copy() + _attributes = FeedEntryParent._attributes.copy() + _children['{%s}generator' % ATOM_NAMESPACE] = ('generator', Generator) + _children['{%s}icon' % ATOM_NAMESPACE] = ('icon', Icon) + _children['{%s}logo' % ATOM_NAMESPACE] = ('logo', Logo) + _children['{%s}subtitle' % ATOM_NAMESPACE] = ('subtitle', Subtitle) + + def __init__(self, author=None, category=None, contributor=None, + generator=None, icon=None, atom_id=None, link=None, logo=None, + rights=None, subtitle=None, title=None, updated=None, text=None, + extension_elements=None, extension_attributes=None): + """Constructor for Source + + Args: + author: list (optional) A list of Author instances which belong to this + class. + category: list (optional) A list of Category instances + contributor: list (optional) A list on Contributor instances + generator: Generator (optional) + icon: Icon (optional) + id: Id (optional) The entry's Id element + link: list (optional) A list of Link instances + logo: Logo (optional) + rights: Rights (optional) The entry's Rights element + subtitle: Subtitle (optional) The entry's subtitle element + title: Title (optional) the entry's title element + updated: Updated (optional) the entry's updated element + text: String (optional) The text contents of the element. 
This is the + contents of the Entry's XML text node. + (Example: This is the text) + extension_elements: list (optional) A list of ExtensionElement instances + which are children of this element. + extension_attributes: dict (optional) A dictionary of strings which are + the values for additional XML attributes of this element. + """ + + self.author = author or [] + self.category = category or [] + self.contributor = contributor or [] + self.generator = generator + self.icon = icon + self.id = atom_id + self.link = link or [] + self.logo = logo + self.rights = rights + self.subtitle = subtitle + self.title = title + self.updated = updated + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + +def SourceFromString(xml_string): + return CreateClassFromXMLString(Source, xml_string) + + +class Entry(FeedEntryParent): + """The atom:entry element""" + + _tag = 'entry' + _namespace = ATOM_NAMESPACE + _children = FeedEntryParent._children.copy() + _attributes = FeedEntryParent._attributes.copy() + _children['{%s}content' % ATOM_NAMESPACE] = ('content', Content) + _children['{%s}published' % ATOM_NAMESPACE] = ('published', Published) + _children['{%s}source' % ATOM_NAMESPACE] = ('source', Source) + _children['{%s}summary' % ATOM_NAMESPACE] = ('summary', Summary) + _children['{%s}control' % APP_NAMESPACE] = ('control', Control) + + def __init__(self, author=None, category=None, content=None, + contributor=None, atom_id=None, link=None, published=None, rights=None, + source=None, summary=None, control=None, title=None, updated=None, + extension_elements=None, extension_attributes=None, text=None): + """Constructor for atom:entry + + Args: + author: list A list of Author instances which belong to this class. + category: list A list of Category instances + content: Content The entry's Content + contributor: list A list on Contributor instances + id: Id The entry's Id element + link: list A list of Link instances + published: Published The entry's Published element + rights: Rights The entry's Rights element + source: Source the entry's source element + summary: Summary the entry's summary element + title: Title the entry's title element + updated: Updated the entry's updated element + control: The entry's app:control element which can be used to mark an + entry as a draft which should not be publicly viewable. + text: String The text contents of the element. This is the contents + of the Entry's XML text node. (Example: This is the text) + extension_elements: list A list of ExtensionElement instances which are + children of this element. + extension_attributes: dict A dictionary of strings which are the values + for additional XML attributes of this element. 
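+
+    Example (a minimal sketch using classes defined above):
+      entry = Entry(title=Title(text='Hello'),
+                    summary=Summary(text='An example entry'))
+      xml = entry.ToString()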
+ """ + + self.author = author or [] + self.category = category or [] + self.content = content + self.contributor = contributor or [] + self.id = atom_id + self.link = link or [] + self.published = published + self.rights = rights + self.source = source + self.summary = summary + self.title = title + self.updated = updated + self.control = control + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + __init__ = v1_deprecated('Please use atom.data.Entry instead.')(__init__) + + +def EntryFromString(xml_string): + return CreateClassFromXMLString(Entry, xml_string) + + +class Feed(Source): + """The atom:feed element""" + + _tag = 'feed' + _namespace = ATOM_NAMESPACE + _children = Source._children.copy() + _attributes = Source._attributes.copy() + _children['{%s}entry' % ATOM_NAMESPACE] = ('entry', [Entry]) + + def __init__(self, author=None, category=None, contributor=None, + generator=None, icon=None, atom_id=None, link=None, logo=None, + rights=None, subtitle=None, title=None, updated=None, entry=None, + text=None, extension_elements=None, extension_attributes=None): + """Constructor for Source + + Args: + author: list (optional) A list of Author instances which belong to this + class. + category: list (optional) A list of Category instances + contributor: list (optional) A list on Contributor instances + generator: Generator (optional) + icon: Icon (optional) + id: Id (optional) The entry's Id element + link: list (optional) A list of Link instances + logo: Logo (optional) + rights: Rights (optional) The entry's Rights element + subtitle: Subtitle (optional) The entry's subtitle element + title: Title (optional) the entry's title element + updated: Updated (optional) the entry's updated element + entry: list (optional) A list of the Entry instances contained in the + feed. + text: String (optional) The text contents of the element. This is the + contents of the Entry's XML text node. + (Example: This is the text) + extension_elements: list (optional) A list of ExtensionElement instances + which are children of this element. + extension_attributes: dict (optional) A dictionary of strings which are + the values for additional XML attributes of this element. + """ + + self.author = author or [] + self.category = category or [] + self.contributor = contributor or [] + self.generator = generator + self.icon = icon + self.id = atom_id + self.link = link or [] + self.logo = logo + self.rights = rights + self.subtitle = subtitle + self.title = title + self.updated = updated + self.entry = entry or [] + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + __init__ = v1_deprecated('Please use atom.data.Feed instead.')(__init__) + + +def FeedFromString(xml_string): + return CreateClassFromXMLString(Feed, xml_string) + + +class ExtensionElement(object): + """Represents extra XML elements contained in Atom classes.""" + + def __init__(self, tag, namespace=None, attributes=None, + children=None, text=None): + """Constructor for EtensionElement + + Args: + namespace: string (optional) The XML namespace for this element. + tag: string (optional) The tag (without the namespace qualifier) for + this element. To reconstruct the full qualified name of the element, + combine this tag with the namespace. + attributes: dict (optinal) The attribute value string pairs for the XML + attributes of this element. 
+ children: list (optional) A list of ExtensionElements which represent + the XML child nodes of this element. + """ + + self.namespace = namespace + self.tag = tag + self.attributes = attributes or {} + self.children = children or [] + self.text = text + + def ToString(self): + element_tree = self._TransferToElementTree(ElementTree.Element('')) + return ElementTree.tostring(element_tree, encoding="UTF-8") + + def _TransferToElementTree(self, element_tree): + if self.tag is None: + return None + + if self.namespace is not None: + element_tree.tag = '{%s}%s' % (self.namespace, self.tag) + else: + element_tree.tag = self.tag + + for key, value in self.attributes.iteritems(): + element_tree.attrib[key] = value + + for child in self.children: + child._BecomeChildElement(element_tree) + + element_tree.text = self.text + + return element_tree + + def _BecomeChildElement(self, element_tree): + """Converts this object into an etree element and adds it as a child node. + + Adds self to the ElementTree. This method is required to avoid verbose XML + which constantly redefines the namespace. + + Args: + element_tree: ElementTree._Element The element to which this object's XML + will be added. + """ + new_element = ElementTree.Element('') + element_tree.append(new_element) + self._TransferToElementTree(new_element) + + def FindChildren(self, tag=None, namespace=None): + """Searches child nodes for objects with the desired tag/namespace. + + Returns a list of extension elements within this object whose tag + and/or namespace match those passed in. To find all children in + a particular namespace, specify the namespace but not the tag name. + If you specify only the tag, the result list may contain extension + elements in multiple namespaces. + + Args: + tag: str (optional) The desired tag + namespace: str (optional) The desired namespace + + Returns: + A list of elements whose tag and/or namespace match the parameters + values + """ + + results = [] + + if tag and namespace: + for element in self.children: + if element.tag == tag and element.namespace == namespace: + results.append(element) + elif tag and not namespace: + for element in self.children: + if element.tag == tag: + results.append(element) + elif namespace and not tag: + for element in self.children: + if element.namespace == namespace: + results.append(element) + else: + for element in self.children: + results.append(element) + + return results + + +def ExtensionElementFromString(xml_string): + element_tree = ElementTree.fromstring(xml_string) + return _ExtensionElementFromElementTree(element_tree) + + +def _ExtensionElementFromElementTree(element_tree): + element_tag = element_tree.tag + if '}' in element_tag: + namespace = element_tag[1:element_tag.index('}')] + tag = element_tag[element_tag.index('}')+1:] + else: + namespace = None + tag = element_tag + extension = ExtensionElement(namespace=namespace, tag=tag) + for key, value in element_tree.attrib.iteritems(): + extension.attributes[key] = value + for child in element_tree: + extension.children.append(_ExtensionElementFromElementTree(child)) + extension.text = element_tree.text + return extension + + +def deprecated(warning=None): + """Decorator to raise warning each time the function is called. + + Args: + warning: The warning message to be displayed as a string (optinoal). + """ + warning = warning or '' + # This closure is what is returned from the deprecated function. + def mark_deprecated(f): + # The deprecated_function wraps the actual call to f. 
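+    # (Same closure pattern as v1_deprecated above, except the warning is
+    # emitted unconditionally.)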
+ def deprecated_function(*args, **kwargs): + warnings.warn(warning, DeprecationWarning, stacklevel=2) + return f(*args, **kwargs) + # Preserve the original name to avoid masking all decorated functions as + # 'deprecated_function' + try: + deprecated_function.func_name = f.func_name + except TypeError: + # Setting the func_name is not allowed in Python2.3. + pass + return deprecated_function + return mark_deprecated diff --git a/gam/atom/__init__.pyc b/gam/atom/__init__.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1c84aef762d0c590e68f1d6a21e08ab0be72c3af GIT binary patch literal 53239 zcmeHwdvILWdEeQ^g8&H<35-5?NC{eNuQ!+silqr)CeF2#=ZFzmU_X1dQ zvAfuNK>)U7$0lRRcH$(C8?|a;$95dsnb;krPMp{Hk-Dv$X{OV3`bXOFWTySYbtlv5 zOr|sKw4L_%`_ALuT`Wk7+NC60687-kbIv`_@B7a0`_8ND@7J&0v+$iWMP>f$!M{iG zi+`(IsZgnNaC7QhPA%V}!n_K*R5`Eay41ODl{;IRQ{7RIy0})=e?{eTx#f)#r(2aJ z!rA#>b>W|paGgevq|vR@nExdg=LN=LCWz3h=5z#vKZA6hspQl%3`62;)VVb(>{aLb zREXz(751rfYgO2<&aG47T6Jzf-1REjpw12QlW?87f?8~n=K*=%tRlRwSC?}t+D!M{ zW(h^)4eCl>o!g?qK^5JirgJLXsIDk=S*iD(yH$moRCKHOZg##~Rk%e(TgCSl=etdX zxANuOb`@^5!P{K$4i#>f;M-KV!v^2xf_JKLrv&d(;Vv6|y9>Tug}YUByJUHX^WCjN zPd>Usg?HNEJucW&;aw7ZrwZ@3Y3_BwdsKLj1mC5?du{NL3%*;0`y_a;3h%SQ_q*VG zRJdP7_ehBjINy6!I4mCxsqml;e!vCqQ{f>AzE6b@+Te#=@ck-0EW!I#_^=Ir#04Kv z;oBs5ScQ+;;J3TrgDM=6;0ILr4qMEKg9K>-Hd-bSRkK(A3 zlqxgcXi}Z?qH;8c_jq{iTF*OO4HwFhS3Q3rDkh~%(TG>8S1*;q$P0rcaH&jJ>-ytj zwGt=wg(5!pre=|_i!zIkkYF+!dC#0ab)XW(u~(heUj>!Wr5kv0lrW?cFhlRmaxz=3 zxMY&5SE?jYJt!tfZLyThN;y%lOHn;8A$t!|nBa6Bl|fcsJX>8ThhDW(UiQvMUL`6b z@1VZyEzU+2DUj6LvR>tWOvD zW0c`!DcQ#kXHy&tVs!MVH(gz*AWdc?jl;|qXG>_BY=e1hnez*!ax(0lWT)`IS@b{| zQo z&6M79Et?cn7*&esB5(tn(_5%SOGrLJG9-J)iu=7%;-OfsWkQphv;sbCt}>b@PJ&7? z8g3C3qxIv`Oa-~14W}2%~S=Z?gW;aMNMmsrZA}? zDdy3-r@SEc7Nc@`cx}8JqAjSEhA+g$g)l^;3_Tf5PuHVmZ&G{gEB!zGH-}cB0Rr0j(4PK)E4SxNg=?; z2ZLJeuK4y$3M+&8J<;VtGJW9DB1@jfugm--`xlj}0nO#qr<8g@$&is(pH%AkoO&(? 
ziOHp5xjtVtidO~c0tBl1NeALqEM#lTtJGeb;GA_euCIO)aPMMR&8bpWYg zI$2}VKu-?gQN{0sbMR>hgZSXZQvLk$!Q<7%O1T<@@xhs3?!be?4?ldMCPV0fxL)*z z4(&TAR9GmKDy5`Q7)Ib$q*1L=&IjcJOIVmTBmk90#Vm=+_2zcubV>U0$_Rx@fPsx~ zQ_Hnzyk4)?C4$dZ^2ut#T|T)>KS}eoqr`(qlC)R|O*2LSVJIpyRr$@mUAc|9^||hx zw2>|pOX2f7?Z@VnlC`A;*CVw{hccx(0}tJBy#x8d^+mEHiI2ZAhCK!6xjcCoWcYlS zdag^=A0jb=Y7^2RU-5iFf}nGOu|brej|)BYr%$&6m5@fe8GlmRIQ}5A8Ur5DW2_V9 z9DON;6HpZJl^)LpBGFgF3=SC>5)Jel1?i25FxKA`VA zNeTIfGRD`cwFM5xcF@tFP0C`kvt`3jK-36Xq*I<-h!Y?QfWW{jB*BcNXNN-16Z(s5 zLD81vQcy1i=gW{{gdj_E<$|u)2 z>Np-`h$sQ?Y6K+$d!bT72S*~(rJW*Q&vtUE?k-~@s)sAPz;Ll8`s zA0@w(A`(itCbE)I$o7v6FbSZK7K4c0o-t%Q5^5?tT4z$2WtN$+vg($B+^xCIxxt(a z>u1VPn<9b>C;rn8rs&A)Q zyZ{lIr_h9KHU1BXf1U!D{vuv6=;K4gfhxcCBIIt7kuF0jcT=)nfGgrQhnGpDBR`F= zUV^MWDLpL$Ppdmxc8N-BL|>fJaTEChlSdkKO;i);Tu)I)WqXJwX-8q*8j`N4RDqF- z83D-bC?Y|gI)B4C!gsHaBo}sK{YJbD6?@=&BVDEP5GQ0c$HI0TN#z(d07;92*a`&0 znw4nLjA(jvaw0{`S0{_LWF4{~XCG}7Ffdq2tw#jPRuK>=^S#jNIS^1&@i-2pC7{HI zraSFb=Sm5Lsn$Z%#Ei%k^^i$sdP$esC6yhEb%D`?EsIK-bsdES?ly&Uc{CJ~$*d?I zJ+m~^DE3@J5=F?#^BM#>YzEg5Ffk@T?{)c?Uzm)8ScLnuJQJiEbS5on}A<(C!7U1hD z;Cu8UQr4Z?Ug_!+p}&YOG^yDI5d$YFq8_(P0S<>DAQA%a%w=Zpje zBR}Hg7Q{%sQ{$2zQPKgiMUA-rcs zyVOvpIZrH7nc>b2b5sc%hB@e>YH25%@%Xb_IJ|M3lXgUx<>uF@vr7kLTobMKvO=_7 z>H{+1aX3R{(I>RVkeeS?XE_LVtNN!^(nA8~=vkK(BzmD+&41dwURc98$cKH@%>gvi zP3&@mU^D{FaL!cqwBR`558X?`JtY8K{D6Nvd|d$D61tiXN3E&Qel2|_y6Wli)5pgB zX5{6QZ+()4=2UjC=2W(z*Z?J0M<%EYw6>tU&}J;;--&d;gI~N6j>@mkZOZM+-IDJI z*AC`I6{M))Px#_CJm_u&Vu9|b;RgFN)4q%?^s5`OR*Nd3ZnkmD_om^RVWWJ>#t^|L z?NH#boKR^kF4SRvP4wv5UWV}RL;63&FDCyfRDNaEX$yc1sK0|+Fzq*>zC8lRK&jFa zN9NzbcaSY&XxcLYjr7Mn{9!sm2Eyb*3$FV90}MR`r_J>|fFyr}pN6>kUet6W+@fV( zl9_b$kMXdy+w`^NU(`*Me&OG>;NKaEAXUCh{(WzTf5E7SXafNMk`?nR{veqa&uqC0 z{mIX3C`pgZzaaRvk1^PCX}}>he+Zn9HttvTzmWvx*`)agnKXYOrRnF_g?=^vH|F(1 zCRO-1_64E7rd9Su;TX(_bCFnn4T)+QOdFif0Zn(AbRNeDp>WzB!$`|%HlWW=6L$6w z@~K0enNW`@jE13>Sn*UfBgg(4VKIkd1@jVlx;M8ox3TMXjKaGuXS1vn{L(Ce4R)NIDgn z>M0dr#Fe^2 z+GV!qwFb!6ShYuF5|P$s7=t8Lk7m1tNWTcm)8Nhn>-l@=JP0QP1YCevWOQ4`@@!hH zy@Nus0ok7b)wt0){!j3USq!zgH;*9#GGiOi`$phH_ir9@B|kAl0c!9hTeDEmI!i%M zEf)$tr;q%7bO7rSlFrX`SS$9j0#oZlwEr_{AJudpX+mbU_gIclECy z=l2j)E1depb3IiPP6M=4HEGr_``b7LZGb=`CQE=Sk?=6C>s2F2$+0Y&<{|P(Q{I=7db~sf>jGCjC%{ zn=q!ZQ6=ltg#k%dzR*az3-y`+y(v|qZaG8%EGj`k<6rPuKfDTpkqL{};B7XD3m2nh z-@~(@6;UoZ?np2uQ>ah~M~VkluZRcbkLc~fF~Xf%P6?1Xj8K#jvsj`Ti8(@SyXIZ2 zEh+o9g>#9=x07ZB!sNF@>}=~Egh6%-OtMr@wxr4lVT?c+a8ugnnifb1bAldzfs>|a z4|e2nqEL1ukR<*D(ClDWqeJE|tP*$u*i|S?V*P@uY*A=LfJ-KXB=qMX6>AM(|M=7z zr11()my7*Th6sif1>iVeDWY`qQ!a`rLGxyZ(4&ao``N@4%PN0sHzob;`7ODvRv};o zT;m=>R8YNZGPLhyLjJ7+`9*6tg#W?3nuosMt(Hmzh{vA|)br5S#a{B9XadiJw;(b# zSDjPyk4TfFeF6RfBtNZ|?q>v1JD?w+mEnqj&CS=;S;R+Z#_tiRA|JCw>!UU%MY||P z>svi{XcXVFS@(;I^54n8;4D&!${`nIdtse=t`9284!(m^5d&nE3HX+p|CTxn9reNn zLky^eW@7+J!^T6L?GSSCkMc=03X-iJAUE@ggO2|QohRrBaX3}Oe;fVoro%yVNXSbV zlqNpmdC^8R*hn_P(=sfy6KzEF$KflnbdZdHZvKZdhSM{SQrpbbEcRohxIK7K-Ceo; zz!ZZ}LA&6t2U~B--y!eNRPwiX71?-Q=Es(bPr*rqVfAoJVUGS0>`y_9QJ3@yU=LZY zA9`e}1k70wc}}7Mbucc`em7J7=WsOs%S>z#;&XCCpuI@AhUttTo#ai;v)Vu=m`^t^ z7nyN768P4F3@_$fAZA4{(OJZjBAJGs728sXm1*Ly7XeB-oR;KFsa|n~#b&m%+=4_R z7prQp!DN)R#^gSD==nPi4@Nx{xTz_T3zGah=y-I5uGr$K1}mcsrY`hA(`JGoslSf| zK@h3>NCX3r@I`pZ5*T)mbUY)vj%Uw`$-z8yOBbV0xt<7NcFjQ!v@t0QD&p_s`?QOx zqo7yxN=Nn&)d9Oo46TiRH%l;A>wW1PH_e|?fbH;F88iK|mdN!x393|)p5}5j>FcX? 
zv=lKrk&)@xA;KT&SN}AhILP>EUrINU7Ni@6$Jv7&m`SDPP-w6_%&SbddiXwYIS7Hs|R+agi$fM z`~VYU!#*vQdK4u!Nh2gxJ=^S`VVG+mpT%e|WlMHbRXwE)TcL`Xo5~AivpoC>Ws~w* zd6~tv$bq504@WOWV%geUK8k|^mz)#|ORjjii6M5Ve}v9sbdJ)Iv8YLlyN>|`Dq4Y) zF^jD#rS>QBlonW?+JXunN4oti?k+q~pUdcU`Ti~aoBMb5Z=h}`xOW2TXb}BhIh znSzxh0Bw#?3$~UWDF;1PRpZe21yBIymNwg|CP=X^RlCzpHAxW-H~bVCfg8|oShfy< zLZQNHYT4FcDS@daU89rWO|OC%E0dYCH`W9ZW?pKWadTE9+Ws->IXj565SXDP9gbEc z;y^?^;)SIm){V@b752--Yi`M4I8jNL@fr-@lL)m$@r$-QWdOaR_A(Y1xt z_93+^BsGd!wYDF)WMXIlk12PE3RNhC)gr7M97M&CGCK9t>B7Y5>G8=kqhsS*DwA%7 zmxLaQ)!K3sQ+0w_rjRK_sfx+dnpJvHi$S7$c#8l?5xuy@2$S%;E&_a^j z<}*1hj<)j&zxK7ZG#u%e^Fdl@{yIx8LNf)4qk5uY~k z^M6Hp&HOAiUCGekV7&w2jl!O=#=c4!^SU$i7q8CH{(I3K8V;Z3lOQU`yZ$ruYa9$y zHfY}<$92pvwU2L^T*vs9I<3%a%DCUkFfPT*)AiELVcT1fnPc09ddV>@OM&+`vFv|A zYRxQ*I=;oQZ5|WEZ2YHh28MkW6!8b`V^}8FF^1(h4{A->^J8xpOdTF!-9aQoEOBf!p4zxBfx;s?yV*824t zKg^Xh{jE_~uq}Lmc$-!)vDl2R-10ZgU6gw1p&;>K0b3dCdL=%QIaNg2(g#l&PW4jn zQLiSIwzHJ9*8o_XO^fByMLj9G-_uwi2(b+?j`vHti_v+^sm?^+)BZ`Kall|0bxg*{ zCYvR-Rc#3JrXwyh$Ash4tWF;sT5PB&`0u9=Ye^r;(f=U5*c0@kB;zxGPLV!=7hpZ7 zOz=U}2tL3l)~#xHTlE5V*dA4T$f_5xl!;}mfJrN7FZmIW*NJpTWwv3{HqF+J+XAtn z4Vz$fn|iWp6)~Ku0yB%*WQy&sXc_Fmz*1Fg+2Y1oY%8l_CL4_^^U8V?tX<|_L1S_M zo;H2S>SZtRG@%;HS61_3D~^a(BxTTji#4+Jw^|B89hUkEw&R%LA~kfeiH#N?@xU~a z9Q)ZU=ye}~2uK4q@zsHvT>-GV8fD4XZ7t-qw4LM88KWVM;-)9#vznXJsa8|E(BiO= zV8MZGjb*PTsD5UCn)!LsL7?-J-e)B%jnkx8D~5)H8oo><4%=O4Y`tFnDCbYGT zoF>dA?3C(_df5)SNgF+{9uLvlr}oGY_a7*LsaUEl{aBnsMU$!~1$C-wYGm39ic&4} z1+5b3`5Zjb8TU@W(+?qszhap;;-R;vR~C)llEeR)m6NH3Q4tX$k6wNhvAoJ-_9sGLy{8{3WB6u_L<3Jot0!CPn|%)lKT1qG<9Vqhew$ z4}G9<_Nm&ttV#;cepNebRZ?jGP80oJ3&F;2S}D<3T$%CW6fmVTJuFlL*4;&$Dt&h3SWF-oFufSfq#`j30 z&3f9Cloed1r8a4xXWf+(gDv4+E9K2is&_(Bn?X^Ju+T&+nfbrm+Vp>{_`_Bwu-L1e z4P@dSlh+(l$m6M?h9%KKKN8auQ3c1UVFuhuYbpsMtvQmkQCh<)mi@yb?jECdGRK&! zOdJxNv88h2mt}RnVt9~AV{@`lkxB691nXmXxw>GziXAT%np;~4P1j^AC8sEj+A?IM zjwzrOSXQz@7>Sn%FtP5)?$9^Ynt~oQu{Ht4d}@+A>?A$Sb?vKHoWa?~nX**=QK+h3$z}?zOZSv#F*CnOAb)X#_>Daj71tR`XzLDsEWcBb;W(Yr&)K=9R8i zyuu2%U>17+JaY@ROz9Dg@te2`fo*1&Z?0sQ>#*FCm~X1(b`cqVl|^ljv4zQXY|Q17 zAmQh9y!|eVjvo!TlueTh=cSWx8pm^iw?pZ%$=x!&6v=wFoB6p~8Kx(I>vX}ct^P{> zx25=b76F|2Y-oqh51N1Z=8ys(M$7y~`=kJq>zEXvp~TX8DhU2#Mi7XmRhpSi;+rN0 zwxLi?4AdoWCj?j=?bU7((9=Q$_>%cb0ia9p^UVI-XPjw;&NKf{-W<&TQMAk7wvYLl zT*sK7lee0}Q`Y~V8P;Ee@JX%O-Zb9lrUl3QMz40v&jK00c60xl7Tj;@zLNb-8GfGm zpYS`|44oGM-CfABQDuH3>HwcW!~CE22>>S7F#*86=Z45r32-ah85N)wK!)RtEEWbj z3BVEf7=AGqvfZHCpU$^p-EsC0r?6PNJq-?VgE%Pak~D||<0tc`IE=nyrurQ%>*9lXrW%6<^HBy z5I%__?qFd#HaA%AZ_sYf1Uo7}3=^dC<8VfPP+X7Up#GU^ec1^R3KbC`oKtws0z`Hz zG%eVGfxqbwXF1trr~)DyEDc3dLMK>QZ0o5PU_|vfS#KKSs5zWRKyoh!=cDqCtQh0M z3ca}hfHmRE%8S|&Yr@Yi@%LH@9#i0zk_U&G&53+5Q-Qj?Ssy8O)`M1Wd4Ds{7K()V zW%P>wSvtQ$r%4F0-}RK?)#D*ro2AbfIf7V4nYE{_b}7FxW)44tLO#mka+h9%{NZ8* zEJL`FxkDz}F&RV)h7~cX5PBvfgotHMhEDu&GDyVFlX$p6@#Cfrn@<>M*xZOKAnYy* zP93XK*rvUWKS5%UyA9ULlVK}q@H4>L=*bnlEE9S|Edt=xX6D7va<#(+-WY8AYpC`I zSkUH)hZ{RXfXQ`?VJQethAFGYEH~Lh*D;O5Zzs0heTgl38xidz#YJKd@z_Oo5jThU;vP`aHEK8|EwoIA!vrSBUsyb7B z9hkO^Y|}B8!h${>WLnnqEvD@R)4u#>VA_{JVV`Rs(=xe^GA+q9W!i6Kn3fvs@p>?w z+Vwg*)Q%%NG3(cABO$Z3Tx)hX<5X*2V9QHOc(oe$PIE)CT{VZp{%Q;7%cbJ5sNRx7 zo4EYHp=8ZNGp=F1?GZ?=!`nOw*Co(p?~ zvs1?ZVTSQ(1_8Y%^=f)d7lnbS!~q^|P$h;*U!M>nXQ|^n5u7B_v?3oXY~^ea8f)V3 z!m$zo(1PW0GQmg#ZVMZZ`))*J40+-pH8}Oac_55Aet&k-g0z@ES!GD+ zujS?G!L0<9DZzw3agXI_wtTS=+Va&Z6oGA4ng2b>jW$$;Ez8>0#nnR1$0W#kw!g`M z-EviogCz| zP{TeG-d-qN8qe~POX6SKP>nfCY0%gGSh~MsLsNJuOvb!nmTZ3s<@wJn8Rxe^y*;2_ zP_am-r*(&{q=rt|J{#s#3@KQVJ}kvCp0AA@+~$VALw%bCqN(s(xUCbo?SDJ^Z+XsY zOH(MRvstQlbUWpjk>TI7pzSe*GP#b5b3?JIIA05qZSA216lbXTP?p{tE+)(nT*i~w 
zoE%U2m_>5lZQ|rsI+&~WTfE!}UfxP7ld;%>o^{=5DokIu3%f0-WnyxH)1l1KG3W5Emc_Mpl_*y(xq>y z5}-yMFMdDL#c^7DaMyKIpT<20d(6RpRIBuz3UZ&c+(XCx`j*FIa-5v!=6rDc0oH)o zgBR0F;P{z6q%JH?PeYS3n~ZU015$B^(LuqohsSw57GT(WiYe~LNeY*ua+OE)c|7G# z3JaA_U&;`etLsWdq{Yv841*L^Zs&lNNbjvQ2Nv3^BGPK<3-d}vcO|ae2`!~I+M`PY z4|1PGz;nmuN=v8*{ziF=0ZmaQn5K9YG*5)YjdwWgkKLBX4`Yp1@9xLL(%t5kfaA~Z zkb5q0?Fmk>$1S9|F9%mn0lduPLQY`oNxAeO{zZJmJKf}TifJ6F&fECV7jmNo8qveu zPdrrdUS2kQua<=?ugWI+9zMnI(9RWHNKG@E{F^yVBlc#45DGQ)?D>VDoJp1E_=$pO zAoeT@rH_E%P@UeuppRTDuz)gS)3#|FIf&_rD49ehS&hTw8qpxv@GiSPz3kwITbmjL zKc0|zy$I(e>Ki9ktEc}BRvW+eDaN*j01~9%>QoIW%3=4$k@C^auOx?ERPvP0pY`-R zRZqgopp20Ur|P^8H8oT_YD>VnKGR&+mRq!c4+ZyspUyYw{1ZB_(D|ouQW+voZMpH{ z>qvJWizc9`zc=4M(BG^7PiXt)`Is_#pAj34(;B$$;uwB0ZxGJHjB8keatcR8n|RIx0b4u@XB6i*0Du@Q!IPrIn8lG^hI|hHINO1CR@{vnV@?VU ziy(WMeC(fRQbcz&{^}!1{a>OV+nj!!+Tt2mAMLJU_LBJ0l2^iIV%56;IR+4qGoTdu zvwZI}?`3k{$LM1-(Oa)%AH1_V%U)goD~v|z#DuKDh2`Ln369|xb4eb+NUh(lK*cNW zw%Kl9_1!l2+E+PZB3-xoPV2wQiD8T>XNBIsHvNLS=`7OuGM$bZS-y=Na9mt^W)-W2 zt3UDoE%MXH3=isqh9|2F^{LBYvH*z~}0oQP@m(v6BxB zf3$oxcU}!uYx?Z-ecmV(Cw+0dK9CmbTb%F;KdYc(xqcO@FO(@x@V zv!;)$9wczAOPwoczA;VC}?k5Lj$OS1~d$Y&g|6d2P`ydJc2nra2Z!5XAWZK zhmC3#eE`5T?g9YQxC;Obe+YvAMtOigkk}V(cDVi=NbFk{i4A6u*jHL0F;R!5ZLfjG z(oy5OAeh#;?d>>Cz$;Dbs%Q(v8WB8&hX#BxraRsY6w+f_)i#5jP5tfaBj^#UlH z=qjkm(cOsWrivO0o%%N7q9#a{Elz73B^n1%lvYMKL`wewLuewmY-5_oP&r|joMb7p z5$+4)*I1+mq?OI!y5LpzgVvbJ@EEXMrtn;tYXWQy4E8X0w96C@5I&CMya?k>1|~>t zJel3ms{u7;(N}xR(m%Jz7z_O_m7+zv&W`}60pYPk*A43&_REx|ZFeSJ1Z8k(*2h{r{()Yhghd{~y1f4I?`As?w^!e5F_BO(`0fB3>#(NEl*LdHB*wf7zA`^G?@c)$YIQc`%mdI#_ zs^~k$;P1k@CaS8fVyq?(YM)uF_RAJSiE)dGTeH>-3W-->tBtDm?N-?8uhE-;C6J1l zLndW}z6P#p(2U=*a&bWR3^L0!O%^D;W#zgq7?x?hYa=q@&a0V2*V`OxnyA<$cS7R>8USTs5@L%gHg(3x_7K*OVi*i-h((Tq1Xw|Hgm>8 zBQehHUjvB=);h{~snLy4Kzq(_mgAJwqcO+*$Z4C;Ygq-)R;mN{mn=qT4D%A8dc53h3I9qdcM zA_ANEpr`Y9T2Nwdj4+TTV|_}mju=MEG=r6K$W-2OHLj-uR-zVOeHz!W)>ICZYB3wa z>w#S)b1N4&`|1SUXb{Y-mOv6 zg7Q8wYq(%7evOdE<|?cMHH{OI5Hgv&Pt4GkshNpo_4lid)Xk)Br%3CT3NtyfV(d0a z7lsnteWn|yIqQibGKxS9|o z#eGQfDA}GxQ-gYn!^{a#5g>{PiQJ25 zTR5xpT(`Oa)3NkcQ0UzGaoftS{6KzJZc~0Eegk>w zlP-j^Z^rP855P%KcUe?UdW0+&3PsD|HOF>AW9pZ5nfc=&Ge~pXJp@cFf~$J|LIi1Gw^d19V z9q+F3W?q-7rO-n`&R1cJvcgRpV?``P@tgwr)GXb)uNwA7!8m-U7BSuDLd1R!a z7zI-%Z9+EF*C>L{EYk{lYCR+K7E2fqU=KzcSRF-}+ibEq(ho5lXQCVpwdF3`0Q6|bPVlTwgS)Vk zQgNXiWKzOyWVNVRnqF>X91qJ(WtM~NM9{*lHB31ts(G7C z+F@E05cHTnp}=e1AMA}fa>vSayJRdgf1%G@kYU6F+i3(tnV_MNh=VlX?Tj+QQ2g4b zg36)F+(R@*y2ifz4^uLD%wuyej_ll++l@7u>t&yb`&R~-)|ZZ@276)zk#4>WlK%p) zY0>4WEvW2!NcS<8Syr^?cm~7ZE&bj7TlzN?DQY?RxdCtoUi1Knp^gI}0GW(|cy2f+ zK!o(Ii0wZFM+2+ve5HI&tI>`cB*c8pm^xK7zr?y}m}RhLAL3J1i=6&TJZS8Mx)4o4 z7Wb<9I3F;A!*!$moqD*>dLhQPJ0RnMwgAa=I9`EXfK4Hxp?pFNURHUaC&7v*6|0@@mXx%rnh5W~uYZdm}@ zjV@rlCV-kzn3D9t9y|)bAL5{>RRc!iF32Sf&PV|ok_kBbK3{)8N5)t7pGIhGw{(1U zBY!ZH(KQsby~pL$|ltf>bSU8)gM;5T&|K+ z&q#fl)r8lb4#`UldM9!XWv}`CJakffXH7% zKL+?hLnIRYx|V(<%pv^Rat39>)(ELdb4g2Bu-)J>TXQ;dE=ckz^)hAo*L)Sm zky6PM3)v1q<#W36dn^aFOO@Z;(}iJSeZD&gaCO^!N`1y&T97o*uW)w-WcJJaV z^)^}HuOXg)H$5MxGezfpbk5QF0G(&)6zF`2PC$p`uJ>_F^9g8O`87K8bU^mXU!b!{ zXPM52={!&8qjWw_=M!{(kq%f+`Jblq89Kj8ha$xPbvmD?^BZ(tqVtdFyiDh}=zM|B zZ`1h_oiEe*9Xem7^EEnOr}J$(e?;ex>HGQ({|b(#B|Nl| z&-#$sk-2KPP>vqI56>}++uPNf@4_yW^*JGJxP0D~8|a3in*Vp>|DN8if$al3yYSxM zHMnPMVQai~TYtCv4Q<_nx7+c%55Hab?cBNpZuYkm?;d{H@9gtN=ljTx-CIWx>v8;! L;1~Ay^l$qA5B#ww literal 0 HcmV?d00001 diff --git a/gam/atom/auth.py b/gam/atom/auth.py new file mode 100755 index 00000000000..1d841754803 --- /dev/null +++ b/gam/atom/auth.py @@ -0,0 +1,43 @@ +#!/usr/bin/env python +# +# Copyright (C) 2009 Google Inc. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +# This module is used for version 2 of the Google Data APIs. + + +__author__ = 'j.s@google.com (Jeff Scudder)' + + +import base64 + + +class BasicAuth(object): + """Sets the Authorization header as defined in RFC1945""" + + def __init__(self, user_id, password): + self.basic_cookie = base64.encodestring( + '%s:%s' % (user_id, password)).strip() + + def modify_request(self, http_request): + http_request.headers['Authorization'] = 'Basic %s' % self.basic_cookie + + ModifyRequest = modify_request + + +class NoAuth(object): + + def modify_request(self, http_request): + pass diff --git a/gam/atom/client.py b/gam/atom/client.py new file mode 100755 index 00000000000..388511f4e7b --- /dev/null +++ b/gam/atom/client.py @@ -0,0 +1,221 @@ +#!/usr/bin/env python +# +# Copyright (C) 2009 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +"""AtomPubClient provides CRUD ops. in line with the Atom Publishing Protocol. + +""" + +__author__ = 'j.s@google.com (Jeff Scudder)' + + +import atom.http_core + + +class Error(Exception): + pass + + +class MissingHost(Error): + pass + + +class AtomPubClient(object): + host = None + auth_token = None + ssl = False # Whether to force all requests over https + xoauth_requestor_id = None + + def __init__(self, http_client=None, host=None, auth_token=None, source=None, + xoauth_requestor_id=None, **kwargs): + """Creates a new AtomPubClient instance. + + Args: + source: The name of your application. + http_client: An object capable of performing HTTP requests through a + request method. This object is used to perform the request + when the AtomPubClient's request method is called. Used to + allow HTTP requests to be directed to a mock server, or use + an alternate library instead of the default of httplib to + make HTTP requests. + host: str The default host name to use if a host is not specified in the + requested URI. + auth_token: An object which sets the HTTP Authorization header when its + modify_request method is called. + """ + self.http_client = http_client or atom.http_core.ProxiedHttpClient() + if host is not None: + self.host = host + if auth_token is not None: + self.auth_token = auth_token + self.xoauth_requestor_id = xoauth_requestor_id + self.source = source + + def request(self, method=None, uri=None, auth_token=None, + http_request=None, **kwargs): + """Performs an HTTP request to the server indicated. + + Uses the http_client instance to make the request. 
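+
+    Illustrative usage (the URI is a placeholder); the get/post/put/delete
+    wrappers below are usually more convenient:
+      response = client.request(method='GET', uri='http://example.com/feed')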
+ + Args: + method: The HTTP method as a string, usually one of 'GET', 'POST', + 'PUT', or 'DELETE' + uri: The URI desired as a string or atom.http_core.Uri. + http_request: + auth_token: An authorization token object whose modify_request method + sets the HTTP Authorization header. + + Returns: + The results of calling self.http_client.request. With the default + http_client, this is an HTTP response object. + """ + # Modify the request based on the AtomPubClient settings and parameters + # passed in to the request. + http_request = self.modify_request(http_request) + if isinstance(uri, (str, unicode)): + uri = atom.http_core.Uri.parse_uri(uri) + if uri is not None: + uri.modify_request(http_request) + if isinstance(method, (str, unicode)): + http_request.method = method + # Any unrecognized arguments are assumed to be capable of modifying the + # HTTP request. + for name, value in kwargs.iteritems(): + if value is not None: + value.modify_request(http_request) + # Default to an http request if the protocol scheme is not set. + if http_request.uri.scheme is None: + http_request.uri.scheme = 'http' + # Override scheme. Force requests over https. + if self.ssl: + http_request.uri.scheme = 'https' + if http_request.uri.path is None: + http_request.uri.path = '/' + # Add the Authorization header at the very end. The Authorization header + # value may need to be calculated using information in the request. + if auth_token: + auth_token.modify_request(http_request) + elif self.auth_token: + self.auth_token.modify_request(http_request) + # Check to make sure there is a host in the http_request. + if http_request.uri.host is None: + raise MissingHost('No host provided in request %s %s' % ( + http_request.method, str(http_request.uri))) + # Perform the fully specified request using the http_client instance. + # Sends the request to the server and returns the server's response. + return self.http_client.request(http_request) + + Request = request + + def get(self, uri=None, auth_token=None, http_request=None, **kwargs): + """Performs a request using the GET method, returns an HTTP response.""" + return self.request(method='GET', uri=uri, auth_token=auth_token, + http_request=http_request, **kwargs) + + Get = get + + def post(self, uri=None, data=None, auth_token=None, http_request=None, + **kwargs): + """Sends data using the POST method, returns an HTTP response.""" + return self.request(method='POST', uri=uri, auth_token=auth_token, + http_request=http_request, data=data, **kwargs) + + Post = post + + def put(self, uri=None, data=None, auth_token=None, http_request=None, + **kwargs): + """Sends data using the PUT method, returns an HTTP response.""" + return self.request(method='PUT', uri=uri, auth_token=auth_token, + http_request=http_request, data=data, **kwargs) + + Put = put + + def delete(self, uri=None, auth_token=None, http_request=None, **kwargs): + """Performs a request using the DELETE method, returns an HTTP response.""" + return self.request(method='DELETE', uri=uri, auth_token=auth_token, + http_request=http_request, **kwargs) + + Delete = delete + + def modify_request(self, http_request): + """Changes the HTTP request before sending it to the server. + + Sets the User-Agent HTTP header and fills in the HTTP host portion + of the URL if one was not included in the request (for this it uses + the self.host member if one is set). This method is called in + self.request. 
+ + Args: + http_request: An atom.http_core.HttpRequest() (optional) If one is + not provided, a new HttpRequest is instantiated. + + Returns: + An atom.http_core.HttpRequest() with the User-Agent header set and + if this client has a value in its host member, the host in the request + URL is set. + """ + if http_request is None: + http_request = atom.http_core.HttpRequest() + + if self.host is not None and http_request.uri.host is None: + http_request.uri.host = self.host + + if self.xoauth_requestor_id is not None: + http_request.uri.query['xoauth_requestor_id'] = self.xoauth_requestor_id + + # Set the user agent header for logging purposes. + if self.source: + http_request.headers['User-Agent'] = '%s gdata-py/2.0.14' % self.source + else: + http_request.headers['User-Agent'] = 'gdata-py/2.0.14' + + return http_request + + ModifyRequest = modify_request + + +class CustomHeaders(object): + """Add custom headers to an http_request. + + Usage: + >>> custom_headers = atom.client.CustomHeaders(header1='value1', + header2='value2') + >>> client.get(uri, custom_headers=custom_headers) + """ + + def __init__(self, **kwargs): + """Creates a CustomHeaders instance. + + Initialize the headers dictionary with the arguments list. + """ + self.headers = kwargs + + def modify_request(self, http_request): + """Changes the HTTP request before sending it to the server. + + Adds the custom headers to the HTTP request. + + Args: + http_request: An atom.http_core.HttpRequest(). + + Returns: + An atom.http_core.HttpRequest() with the added custom headers. + """ + + for name, value in self.headers.iteritems(): + if value is not None: + http_request.headers[name] = value + return http_request diff --git a/gam/atom/core.py b/gam/atom/core.py new file mode 100755 index 00000000000..a19a70d4103 --- /dev/null +++ b/gam/atom/core.py @@ -0,0 +1,550 @@ +#!/usr/bin/env python +# +# Copyright (C) 2008 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +# This module is used for version 2 of the Google Data APIs. + + +__author__ = 'j.s@google.com (Jeff Scudder)' + + +import inspect +try: + from xml.etree import cElementTree as ElementTree +except ImportError: + try: + import cElementTree as ElementTree + except ImportError: + try: + from xml.etree import ElementTree + except ImportError: + from elementtree import ElementTree + + +try: + from xml.dom.minidom import parseString as xmlString +except ImportError: + xmlString = None + +STRING_ENCODING = 'utf-8' + + +class XmlElement(object): + """Represents an element node in an XML document. + + The text member is a UTF-8 encoded str or unicode. + """ + _qname = None + _other_elements = None + _other_attributes = None + # The rule set contains mappings for XML qnames to child members and the + # appropriate member classes. 
+ _rule_set = None + _members = None + text = None + + def __init__(self, text=None, *args, **kwargs): + if ('_members' not in self.__class__.__dict__ + or self.__class__._members is None): + self.__class__._members = tuple(self.__class__._list_xml_members()) + for member_name, member_type in self.__class__._members: + if member_name in kwargs: + setattr(self, member_name, kwargs[member_name]) + else: + if isinstance(member_type, list): + setattr(self, member_name, []) + else: + setattr(self, member_name, None) + self._other_elements = [] + self._other_attributes = {} + if text is not None: + self.text = text + + def _list_xml_members(cls): + """Generator listing all members which are XML elements or attributes. + + The following members would be considered XML members: + foo = 'abc' - indicates an XML attribute with the qname abc + foo = SomeElement - indicates an XML child element + foo = [AnElement] - indicates a repeating XML child element, each instance + will be stored in a list in this member + foo = ('att1', '{http://example.com/namespace}att2') - indicates an XML + attribute which has different parsing rules in different versions of + the protocol. Version 1 of the XML parsing rules will look for an + attribute with the qname 'att1' but verion 2 of the parsing rules will + look for a namespaced attribute with the local name of 'att2' and an + XML namespace of 'http://example.com/namespace'. + """ + members = [] + for pair in inspect.getmembers(cls): + if not pair[0].startswith('_') and pair[0] != 'text': + member_type = pair[1] + if (isinstance(member_type, tuple) or isinstance(member_type, list) + or isinstance(member_type, (str, unicode)) + or (inspect.isclass(member_type) + and issubclass(member_type, XmlElement))): + members.append(pair) + return members + + _list_xml_members = classmethod(_list_xml_members) + + def _get_rules(cls, version): + """Initializes the _rule_set for the class which is used when parsing XML. + + This method is used internally for parsing and generating XML for an + XmlElement. It is not recommended that you call this method directly. + + Returns: + A tuple containing the XML parsing rules for the appropriate version. + + The tuple looks like: + (qname, {sub_element_qname: (member_name, member_class, repeating), ..}, + {attribute_qname: member_name}) + + To give a couple of concrete example, the atom.data.Control _get_rules + with version of 2 will return: + ('{http://www.w3.org/2007/app}control', + {'{http://www.w3.org/2007/app}draft': ('draft', + , + False)}, + {}) + Calling _get_rules with version 1 on gdata.data.FeedLink will produce: + ('{http://schemas.google.com/g/2005}feedLink', + {'{http://www.w3.org/2005/Atom}feed': ('feed', + , + False)}, + {'href': 'href', 'readOnly': 'read_only', 'countHint': 'count_hint', + 'rel': 'rel'}) + """ + # Initialize the _rule_set to make sure there is a slot available to store + # the parsing rules for this version of the XML schema. + # Look for rule set in the class __dict__ proxy so that only the + # _rule_set for this class will be found. By using the dict proxy + # we avoid finding rule_sets defined in superclasses. + # The four lines below provide support for any number of versions, but it + # runs a bit slower then hard coding slots for two versions, so I'm using + # the below two lines. 
+ #if '_rule_set' not in cls.__dict__ or cls._rule_set is None: + # cls._rule_set = [] + #while len(cls.__dict__['_rule_set']) < version: + # cls._rule_set.append(None) + # If there is no rule set cache in the class, provide slots for two XML + # versions. If and when there is a version 3, this list will need to be + # expanded. + if '_rule_set' not in cls.__dict__ or cls._rule_set is None: + cls._rule_set = [None, None] + # If a version higher than 2 is requested, fall back to version 2 because + # 2 is currently the highest supported version. + if version > 2: + return cls._get_rules(2) + # Check the dict proxy for the rule set to avoid finding any rule sets + # which belong to the superclass. We only want rule sets for this class. + if cls._rule_set[version-1] is None: + # The rule set for each version consists of the qname for this element + # ('{namespace}tag'), a dictionary (elements) for looking up the + # corresponding class member when given a child element's qname, and a + # dictionary (attributes) for looking up the corresponding class member + # when given an XML attribute's qname. + elements = {} + attributes = {} + if ('_members' not in cls.__dict__ or cls._members is None): + cls._members = tuple(cls._list_xml_members()) + for member_name, target in cls._members: + if isinstance(target, list): + # This member points to a repeating element. + elements[_get_qname(target[0], version)] = (member_name, target[0], + True) + elif isinstance(target, tuple): + # This member points to a versioned XML attribute. + if version <= len(target): + attributes[target[version-1]] = member_name + else: + attributes[target[-1]] = member_name + elif isinstance(target, (str, unicode)): + # This member points to an XML attribute. + attributes[target] = member_name + elif issubclass(target, XmlElement): + # This member points to a single occurance element. + elements[_get_qname(target, version)] = (member_name, target, False) + version_rules = (_get_qname(cls, version), elements, attributes) + cls._rule_set[version-1] = version_rules + return version_rules + else: + return cls._rule_set[version-1] + + _get_rules = classmethod(_get_rules) + + def get_elements(self, tag=None, namespace=None, version=1): + """Find all sub elements which match the tag and namespace. + + To find all elements in this object, call get_elements with the tag and + namespace both set to None (the default). This method searches through + the object's members and the elements stored in _other_elements which + did not match any of the XML parsing rules for this class. + + Args: + tag: str + namespace: str + version: int Specifies the version of the XML rules to be used when + searching for matching elements. + + Returns: + A list of the matching XmlElements. + """ + matches = [] + ignored1, elements, ignored2 = self.__class__._get_rules(version) + if elements: + for qname, element_def in elements.iteritems(): + member = getattr(self, element_def[0]) + if member: + if _qname_matches(tag, namespace, qname): + if element_def[2]: + # If this is a repeating element, copy all instances into the + # result list. + matches.extend(member) + else: + matches.append(member) + for element in self._other_elements: + if _qname_matches(tag, namespace, element._qname): + matches.append(element) + return matches + + GetElements = get_elements + # FindExtensions and FindChildren are provided for backwards compatibility + # to the atom.AtomBase class. 
+ # However, FindExtensions may return more results than the v1 atom.AtomBase + # method does, because get_elements searches both the expected children + # and the unexpected "other elements". The old AtomBase.FindExtensions + # method searched only "other elements" AKA extension_elements. + FindExtensions = get_elements + FindChildren = get_elements + + def get_attributes(self, tag=None, namespace=None, version=1): + """Find all attributes which match the tag and namespace. + + To find all attributes in this object, call get_attributes with the tag + and namespace both set to None (the default). This method searches + through the object's members and the attributes stored in + _other_attributes which did not fit any of the XML parsing rules for this + class. + + Args: + tag: str + namespace: str + version: int Specifies the version of the XML rules to be used when + searching for matching attributes. + + Returns: + A list of XmlAttribute objects for the matching attributes. + """ + matches = [] + ignored1, ignored2, attributes = self.__class__._get_rules(version) + if attributes: + for qname, attribute_def in attributes.iteritems(): + if isinstance(attribute_def, (list, tuple)): + attribute_def = attribute_def[0] + member = getattr(self, attribute_def) + # TODO: ensure this hasn't broken existing behavior. + #member = getattr(self, attribute_def[0]) + if member: + if _qname_matches(tag, namespace, qname): + matches.append(XmlAttribute(qname, member)) + for qname, value in self._other_attributes.iteritems(): + if _qname_matches(tag, namespace, qname): + matches.append(XmlAttribute(qname, value)) + return matches + + GetAttributes = get_attributes + + def _harvest_tree(self, tree, version=1): + """Populates object members from the data in the tree Element.""" + qname, elements, attributes = self.__class__._get_rules(version) + for element in tree: + if elements and element.tag in elements: + definition = elements[element.tag] + # If this is a repeating element, make sure the member is set to a + # list. + if definition[2]: + if getattr(self, definition[0]) is None: + setattr(self, definition[0], []) + getattr(self, definition[0]).append(_xml_element_from_tree(element, + definition[1], version)) + else: + setattr(self, definition[0], _xml_element_from_tree(element, + definition[1], version)) + else: + self._other_elements.append(_xml_element_from_tree(element, XmlElement, + version)) + for attrib, value in tree.attrib.iteritems(): + if attributes and attrib in attributes: + setattr(self, attributes[attrib], value) + else: + self._other_attributes[attrib] = value + if tree.text: + self.text = tree.text + + def _to_tree(self, version=1, encoding=None): + new_tree = ElementTree.Element(_get_qname(self, version)) + self._attach_members(new_tree, version, encoding) + return new_tree + + def _attach_members(self, tree, version=1, encoding=None): + """Convert members to XML elements/attributes and add them to the tree. + + Args: + tree: An ElementTree.Element which will be modified. The members of + this object will be added as child elements or attributes + according to the rules described in _expected_elements and + _expected_attributes. The elements and attributes stored in + other_attributes and other_elements are also added a children + of this tree. + version: int Ingnored in this method but used by VersionedElement. 
+ encoding: str (optional) + """ + qname, elements, attributes = self.__class__._get_rules(version) + encoding = encoding or STRING_ENCODING + # Add the expected elements and attributes to the tree. + if elements: + for tag, element_def in elements.iteritems(): + member = getattr(self, element_def[0]) + # If this is a repeating element and there are members in the list. + if member and element_def[2]: + for instance in member: + instance._become_child(tree, version) + elif member: + member._become_child(tree, version) + if attributes: + for attribute_tag, member_name in attributes.iteritems(): + value = getattr(self, member_name) + if value: + tree.attrib[attribute_tag] = value + # Add the unexpected (other) elements and attributes to the tree. + for element in self._other_elements: + element._become_child(tree, version) + for key, value in self._other_attributes.iteritems(): + # I'm not sure if unicode can be used in the attribute name, so for now + # we assume the encoding is correct for the attribute name. + if not isinstance(value, unicode): + value = value.decode(encoding) + tree.attrib[key] = value + if self.text: + if isinstance(self.text, unicode): + tree.text = self.text + else: + tree.text = self.text.decode(encoding) + + def to_string(self, version=1, encoding=None, pretty_print=None): + """Converts this object to XML.""" + + tree_string = ElementTree.tostring(self._to_tree(version, encoding)) + + if pretty_print and xmlString is not None: + return xmlString(tree_string).toprettyxml() + + return tree_string + + ToString = to_string + + def __str__(self): + return self.to_string() + + def _become_child(self, tree, version=1): + """Adds a child element to tree with the XML data in self.""" + new_child = ElementTree.Element('') + tree.append(new_child) + new_child.tag = _get_qname(self, version) + self._attach_members(new_child, version) + + def __get_extension_elements(self): + return self._other_elements + + def __set_extension_elements(self, elements): + self._other_elements = elements + + extension_elements = property(__get_extension_elements, + __set_extension_elements, + """Provides backwards compatibility for v1 atom.AtomBase classes.""") + + def __get_extension_attributes(self): + return self._other_attributes + + def __set_extension_attributes(self, attributes): + self._other_attributes = attributes + + extension_attributes = property(__get_extension_attributes, + __set_extension_attributes, + """Provides backwards compatibility for v1 atom.AtomBase classes.""") + + def _get_tag(self, version=1): + qname = _get_qname(self, version) + if qname: + return qname[qname.find('}')+1:] + return None + + def _get_namespace(self, version=1): + qname = _get_qname(self, version) + if qname.startswith('{'): + return qname[1:qname.find('}')] + else: + return None + + def _set_tag(self, tag): + if isinstance(self._qname, tuple): + self._qname = self._qname.copy() + if self._qname[0].startswith('{'): + self._qname[0] = '{%s}%s' % (self._get_namespace(1), tag) + else: + self._qname[0] = tag + else: + if self._qname is not None and self._qname.startswith('{'): + self._qname = '{%s}%s' % (self._get_namespace(), tag) + else: + self._qname = tag + + def _set_namespace(self, namespace): + tag = self._get_tag(1) + if tag is None: + tag = '' + if isinstance(self._qname, tuple): + self._qname = self._qname.copy() + if namespace: + self._qname[0] = '{%s}%s' % (namespace, tag) + else: + self._qname[0] = tag + else: + if namespace: + self._qname = '{%s}%s' % (namespace, tag) + else: + self._qname = 
tag + + tag = property(_get_tag, _set_tag, + """Provides backwards compatibility for v1 atom.AtomBase classes.""") + + namespace = property(_get_namespace, _set_namespace, + """Provides backwards compatibility for v1 atom.AtomBase classes.""") + + # Provided for backwards compatibility to atom.ExtensionElement + children = extension_elements + attributes = extension_attributes + + +def _get_qname(element, version): + if isinstance(element._qname, tuple): + if version <= len(element._qname): + return element._qname[version-1] + else: + return element._qname[-1] + else: + return element._qname + + +def _qname_matches(tag, namespace, qname): + """Logic determines if a QName matches the desired local tag and namespace. + + This is used in XmlElement.get_elements and XmlElement.get_attributes to + find matches in the element's members (among all expected-and-unexpected + elements-and-attributes). + + Args: + expected_tag: string + expected_namespace: string + qname: string in the form '{xml_namespace}localtag' or 'tag' if there is + no namespace. + + Returns: + boolean True if the member's tag and namespace fit the expected tag and + namespace. + """ + # If there is no expected namespace or tag, then everything will match. + if qname is None: + member_tag = None + member_namespace = None + else: + if qname.startswith('{'): + member_namespace = qname[1:qname.index('}')] + member_tag = qname[qname.index('}') + 1:] + else: + member_namespace = None + member_tag = qname + return ((tag is None and namespace is None) + # If there is a tag, but no namespace, see if the local tag matches. + or (namespace is None and member_tag == tag) + # There was no tag, but there was a namespace so see if the namespaces + # match. + or (tag is None and member_namespace == namespace) + # There was no tag, and the desired elements have no namespace, so check + # to see that the member's namespace is None. + or (tag is None and namespace == '' + and member_namespace is None) + # The tag and the namespace both match. + or (tag == member_tag + and namespace == member_namespace) + # The tag matches, and the expected namespace is the empty namespace, + # check to make sure the member's namespace is None. + or (tag == member_tag and namespace == '' + and member_namespace is None)) + + +def parse(xml_string, target_class=None, version=1, encoding=None): + """Parses the XML string according to the rules for the target_class. + + Args: + xml_string: str or unicode + target_class: XmlElement or a subclass. If None is specified, the + XmlElement class is used. + version: int (optional) The version of the schema which should be used when + converting the XML into an object. The default is 1. + encoding: str (optional) The character encoding of the bytes in the + xml_string. Default is 'UTF-8'. 
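As a hedged illustration of these parsing rules (the element names, namespace, and classes below are invented for the example and are not part of the library):

import atom.core

class Item(atom.core.XmlElement):
  _qname = '{http://example.com/ns}item'

class Box(atom.core.XmlElement):
  _qname = '{http://example.com/ns}box'
  item = [Item]    # repeating child element
  label = 'label'  # XML attribute

xml = ('<box xmlns="http://example.com/ns" label="misc">'
       '<item>first</item><item>second</item></box>')
box = atom.core.parse(xml, Box)
print box.label                   # 'misc'
print [i.text for i in box.item]  # ['first', 'second']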
+ """ + if target_class is None: + target_class = XmlElement + if isinstance(xml_string, unicode): + if encoding is None: + xml_string = xml_string.encode(STRING_ENCODING) + else: + xml_string = xml_string.encode(encoding) + tree = ElementTree.fromstring(xml_string) + return _xml_element_from_tree(tree, target_class, version) + + +Parse = parse +xml_element_from_string = parse +XmlElementFromString = xml_element_from_string + + +def _xml_element_from_tree(tree, target_class, version=1): + if target_class._qname is None: + instance = target_class() + instance._qname = tree.tag + instance._harvest_tree(tree, version) + return instance + # TODO handle the namespace-only case + # Namespace only will be used with Google Spreadsheets rows and + # Google Base item attributes. + elif tree.tag == _get_qname(target_class, version): + instance = target_class() + instance._harvest_tree(tree, version) + return instance + return None + + +class XmlAttribute(object): + + def __init__(self, qname, value): + self._qname = qname + self.value = value + diff --git a/gam/atom/data.py b/gam/atom/data.py new file mode 100755 index 00000000000..38017ceacdb --- /dev/null +++ b/gam/atom/data.py @@ -0,0 +1,338 @@ +#!/usr/bin/env python +# +# Copyright (C) 2009 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +# This module is used for version 2 of the Google Data APIs. + + +__author__ = 'j.s@google.com (Jeff Scudder)' + + +import atom.core + + +XML_TEMPLATE = '{http://www.w3.org/XML/1998/namespace}%s' +ATOM_TEMPLATE = '{http://www.w3.org/2005/Atom}%s' +APP_TEMPLATE_V1 = '{http://purl.org/atom/app#}%s' +APP_TEMPLATE_V2 = '{http://www.w3.org/2007/app}%s' + + +class Name(atom.core.XmlElement): + """The atom:name element.""" + _qname = ATOM_TEMPLATE % 'name' + + +class Email(atom.core.XmlElement): + """The atom:email element.""" + _qname = ATOM_TEMPLATE % 'email' + + +class Uri(atom.core.XmlElement): + """The atom:uri element.""" + _qname = ATOM_TEMPLATE % 'uri' + + +class Person(atom.core.XmlElement): + """A foundation class which atom:author and atom:contributor extend. + + A person contains information like name, email address, and web page URI for + an author or contributor to an Atom feed. + """ + name = Name + email = Email + uri = Uri + + +class Author(Person): + """The atom:author element. + + An author is a required element in Feed unless each Entry contains an Author. + """ + _qname = ATOM_TEMPLATE % 'author' + + +class Contributor(Person): + """The atom:contributor element.""" + _qname = ATOM_TEMPLATE % 'contributor' + + +class Link(atom.core.XmlElement): + """The atom:link element.""" + _qname = ATOM_TEMPLATE % 'link' + href = 'href' + rel = 'rel' + type = 'type' + hreflang = 'hreflang' + title = 'title' + length = 'length' + + +class Generator(atom.core.XmlElement): + """The atom:generator element.""" + _qname = ATOM_TEMPLATE % 'generator' + uri = 'uri' + version = 'version' + + +class Text(atom.core.XmlElement): + """A foundation class from which atom:title, summary, etc. extend. 
+ + This class should never be instantiated. + """ + type = 'type' + + +class Title(Text): + """The atom:title element.""" + _qname = ATOM_TEMPLATE % 'title' + + +class Subtitle(Text): + """The atom:subtitle element.""" + _qname = ATOM_TEMPLATE % 'subtitle' + + +class Rights(Text): + """The atom:rights element.""" + _qname = ATOM_TEMPLATE % 'rights' + + +class Summary(Text): + """The atom:summary element.""" + _qname = ATOM_TEMPLATE % 'summary' + + +class Content(Text): + """The atom:content element.""" + _qname = ATOM_TEMPLATE % 'content' + src = 'src' + + +class Category(atom.core.XmlElement): + """The atom:category element.""" + _qname = ATOM_TEMPLATE % 'category' + term = 'term' + scheme = 'scheme' + label = 'label' + + +class Id(atom.core.XmlElement): + """The atom:id element.""" + _qname = ATOM_TEMPLATE % 'id' + + +class Icon(atom.core.XmlElement): + """The atom:icon element.""" + _qname = ATOM_TEMPLATE % 'icon' + + +class Logo(atom.core.XmlElement): + """The atom:logo element.""" + _qname = ATOM_TEMPLATE % 'logo' + + +class Draft(atom.core.XmlElement): + """The app:draft element which indicates if this entry should be public.""" + _qname = (APP_TEMPLATE_V1 % 'draft', APP_TEMPLATE_V2 % 'draft') + + +class Control(atom.core.XmlElement): + """The app:control element indicating restrictions on publication. + + The APP control element may contain a draft element indicating whether or + not this entry should be publicly available. + """ + _qname = (APP_TEMPLATE_V1 % 'control', APP_TEMPLATE_V2 % 'control') + draft = Draft + + +class Date(atom.core.XmlElement): + """A parent class for atom:updated, published, etc.""" + + +class Updated(Date): + """The atom:updated element.""" + _qname = ATOM_TEMPLATE % 'updated' + + +class Published(Date): + """The atom:published element.""" + _qname = ATOM_TEMPLATE % 'published' + + +class LinkFinder(object): + """An "interface" providing methods to find link elements + + Entry elements often contain multiple links which differ in the rel + attribute or content type. Often, developers are interested in a specific + type of link so this class provides methods to find specific classes of + links. + + This class is used as a mixin in Atom entries and feeds. + """ + + def find_url(self, rel): + """Returns the URL (as a string) in a link with the desired rel value.""" + for link in self.link: + if link.rel == rel and link.href: + return link.href + return None + + FindUrl = find_url + + def get_link(self, rel): + """Returns a link object which has the desired rel value. + + If you are interested in the URL instead of the link object, + consider using find_url instead. + """ + for link in self.link: + if link.rel == rel and link.href: + return link + return None + + GetLink = get_link + + def find_self_link(self): + """Find the first link with rel set to 'self' + + Returns: + A str containing the link's href or None if none of the links had rel + equal to 'self' + """ + return self.find_url('self') + + FindSelfLink = find_self_link + + def get_self_link(self): + return self.get_link('self') + + GetSelfLink = get_self_link + + def find_edit_link(self): + return self.find_url('edit') + + FindEditLink = find_edit_link + + def get_edit_link(self): + return self.get_link('edit') + + GetEditLink = get_edit_link + + def find_edit_media_link(self): + link = self.find_url('edit-media') + # Search for media-edit as well since Picasa API used media-edit instead. 
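A brief, hedged sketch of the LinkFinder methods in use; the rel values are standard Atom rels, but the hrefs are invented:

import atom.data

entry = atom.data.Entry()
entry.link.append(atom.data.Link(rel='self',
                                 href='http://example.com/feed/1'))
entry.link.append(atom.data.Link(rel='edit',
                                 href='http://example.com/feed/1/edit'))
print entry.find_self_link()      # 'http://example.com/feed/1'
print entry.get_edit_link().href  # 'http://example.com/feed/1/edit'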
+ if link is None: + return self.find_url('media-edit') + return link + + FindEditMediaLink = find_edit_media_link + + def get_edit_media_link(self): + link = self.get_link('edit-media') + if link is None: + return self.get_link('media-edit') + return link + + GetEditMediaLink = get_edit_media_link + + def find_next_link(self): + return self.find_url('next') + + FindNextLink = find_next_link + + def get_next_link(self): + return self.get_link('next') + + GetNextLink = get_next_link + + def find_license_link(self): + return self.find_url('license') + + FindLicenseLink = find_license_link + + def get_license_link(self): + return self.get_link('license') + + GetLicenseLink = get_license_link + + def find_alternate_link(self): + return self.find_url('alternate') + + FindAlternateLink = find_alternate_link + + def get_alternate_link(self): + return self.get_link('alternate') + + GetAlternateLink = get_alternate_link + + +class FeedEntryParent(atom.core.XmlElement, LinkFinder): + """A super class for atom:feed and entry, contains shared attributes""" + author = [Author] + category = [Category] + contributor = [Contributor] + id = Id + link = [Link] + rights = Rights + title = Title + updated = Updated + + def __init__(self, atom_id=None, text=None, *args, **kwargs): + if atom_id is not None: + self.id = atom_id + atom.core.XmlElement.__init__(self, text=text, *args, **kwargs) + + +class Source(FeedEntryParent): + """The atom:source element.""" + _qname = ATOM_TEMPLATE % 'source' + generator = Generator + icon = Icon + logo = Logo + subtitle = Subtitle + + +class Entry(FeedEntryParent): + """The atom:entry element.""" + _qname = ATOM_TEMPLATE % 'entry' + content = Content + published = Published + source = Source + summary = Summary + control = Control + + +class Feed(Source): + """The atom:feed element which contains entries.""" + _qname = ATOM_TEMPLATE % 'feed' + entry = [Entry] + + +class ExtensionElement(atom.core.XmlElement): + """Provided for backwards compatibility to the v1 atom.ExtensionElement.""" + + def __init__(self, tag=None, namespace=None, attributes=None, + children=None, text=None, *args, **kwargs): + if namespace: + self._qname = '{%s}%s' % (namespace, tag) + else: + self._qname = tag + self.children = children or [] + self.attributes = attributes or {} + self.text = text + + _BecomeChildElement = atom.core.XmlElement._become_child diff --git a/gam/atom/http.py b/gam/atom/http.py new file mode 100755 index 00000000000..94258e5bb3a --- /dev/null +++ b/gam/atom/http.py @@ -0,0 +1,364 @@ +#!/usr/bin/python +# +# Copyright (C) 2008 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +"""HttpClients in this module use httplib to make HTTP requests. + +This module make HTTP requests based on httplib, but there are environments +in which an httplib based approach will not work (if running in Google App +Engine for example). 
In those cases, higher level classes (like AtomService +and GDataService) can swap out the HttpClient to transparently use a +different mechanism for making HTTP requests. + + HttpClient: Contains a request method which performs an HTTP call to the + server. + + ProxiedHttpClient: Contains a request method which connects to a proxy using + settings stored in operating system environment variables then + performs an HTTP call to the endpoint server. +""" + + +__author__ = 'api.jscudder (Jeff Scudder)' + + +import types +import os +import httplib +import atom.url +import atom.http_interface +import socket +import base64 +import atom.http_core +ssl_imported = False +ssl = None +try: + import ssl + ssl_imported = True +except ImportError: + pass + + + +class ProxyError(atom.http_interface.Error): + pass + + +class TestConfigurationError(Exception): + pass + + +DEFAULT_CONTENT_TYPE = 'application/atom+xml' + + +class HttpClient(atom.http_interface.GenericHttpClient): + # Added to allow old v1 HttpClient objects to use the new + # http_code.HttpClient. Used in unit tests to inject a mock client. + v2_http_client = None + + def __init__(self, headers=None): + self.debug = False + self.headers = headers or {} + + def request(self, operation, url, data=None, headers=None): + """Performs an HTTP call to the server, supports GET, POST, PUT, and + DELETE. + + Usage example, perform and HTTP GET on http://www.google.com/: + import atom.http + client = atom.http.HttpClient() + http_response = client.request('GET', 'http://www.google.com/') + + Args: + operation: str The HTTP operation to be performed. This is usually one + of 'GET', 'POST', 'PUT', or 'DELETE' + data: filestream, list of parts, or other object which can be converted + to a string. Should be set to None when performing a GET or DELETE. + If data is a file-like object which can be read, this method will + read a chunk of 100K bytes at a time and send them. + If the data is a list of parts to be sent, each part will be + evaluated and sent. + url: The full URL to which the request should be sent. Can be a string + or atom.url.Url. + headers: dict of strings. HTTP headers which should be sent + in the request. + """ + all_headers = self.headers.copy() + if headers: + all_headers.update(headers) + + # If the list of headers does not include a Content-Length, attempt to + # calculate it based on the data object. + if data and 'Content-Length' not in all_headers: + if isinstance(data, types.StringTypes): + all_headers['Content-Length'] = str(len(data)) + else: + raise atom.http_interface.ContentLengthRequired('Unable to calculate ' + 'the length of the data parameter. Specify a value for ' + 'Content-Length') + + # Set the content type to the default value if none was set. 
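A short sketch of a POST through this client; the URL and payload are placeholders:

import atom.http

client = atom.http.HttpClient()
body = '<entry xmlns="http://www.w3.org/2005/Atom"><title>hi</title></entry>'
# Content-Length is computed from the string body; Content-Type would fall
# back to DEFAULT_CONTENT_TYPE if it were omitted here.
response = client.request('POST', 'http://example.com/feed', data=body,
                          headers={'Content-Type': 'application/atom+xml'})
print response.status, response.reason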
+ if 'Content-Type' not in all_headers: + all_headers['Content-Type'] = DEFAULT_CONTENT_TYPE + + if self.v2_http_client is not None: + http_request = atom.http_core.HttpRequest(method=operation) + atom.http_core.Uri.parse_uri(str(url)).modify_request(http_request) + http_request.headers = all_headers + if data: + http_request._body_parts.append(data) + return self.v2_http_client.request(http_request=http_request) + + if not isinstance(url, atom.url.Url): + if isinstance(url, types.StringTypes): + url = atom.url.parse_url(url) + else: + raise atom.http_interface.UnparsableUrlObject('Unable to parse url ' + 'parameter because it was not a string or atom.url.Url') + + connection = self._prepare_connection(url, all_headers) + + if self.debug: + connection.debuglevel = 1 + + connection.putrequest(operation, self._get_access_url(url), + skip_host=True) + if url.port is not None: + connection.putheader('Host', '%s:%s' % (url.host, url.port)) + else: + connection.putheader('Host', url.host) + + # Overcome a bug in Python 2.4 and 2.5 + # httplib.HTTPConnection.putrequest adding + # HTTP request header 'Host: www.google.com:443' instead of + # 'Host: www.google.com', and thus resulting the error message + # 'Token invalid - AuthSub token has wrong scope' in the HTTP response. + if (url.protocol == 'https' and int(url.port or 443) == 443 and + hasattr(connection, '_buffer') and + isinstance(connection._buffer, list)): + header_line = 'Host: %s:443' % url.host + replacement_header_line = 'Host: %s' % url.host + try: + connection._buffer[connection._buffer.index(header_line)] = ( + replacement_header_line) + except ValueError: # header_line missing from connection._buffer + pass + + # Send the HTTP headers. + for header_name in all_headers: + connection.putheader(header_name, all_headers[header_name]) + connection.endheaders() + + # If there is data, send it in the request. + if data: + if isinstance(data, list): + for data_part in data: + _send_data_part(data_part, connection) + else: + _send_data_part(data, connection) + + # Return the HTTP Response from the server. + return connection.getresponse() + + def _prepare_connection(self, url, headers): + if not isinstance(url, atom.url.Url): + if isinstance(url, types.StringTypes): + url = atom.url.parse_url(url) + else: + raise atom.http_interface.UnparsableUrlObject('Unable to parse url ' + 'parameter because it was not a string or atom.url.Url') + if url.protocol == 'https': + if not url.port: + return httplib.HTTPSConnection(url.host) + return httplib.HTTPSConnection(url.host, int(url.port)) + else: + if not url.port: + return httplib.HTTPConnection(url.host) + return httplib.HTTPConnection(url.host, int(url.port)) + + def _get_access_url(self, url): + return url.to_string() + + +class ProxiedHttpClient(HttpClient): + """Performs an HTTP request through a proxy. + + The proxy settings are obtained from enviroment variables. The URL of the + proxy server is assumed to be stored in the environment variables + 'https_proxy' and 'http_proxy' respectively. If the proxy server requires + a Basic Auth authorization header, the username and password are expected to + be in the 'proxy-username' or 'proxy_username' variable and the + 'proxy-password' or 'proxy_password' variable, or in 'http_proxy' or + 'https_proxy' as "protocol://[username:password@]host:port". + + After connecting to the proxy server, the request is completed as in + HttpClient.request. 
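A hedged sketch of that environment-variable convention; the proxy address and credentials are placeholders:

import os
import atom.http

# Credentials may be embedded in the proxy URL (as below) or supplied
# separately through proxy_username / proxy_password.
os.environ['https_proxy'] = 'http://proxyuser:proxypass@proxy.example.com:3128'

client = atom.http.ProxiedHttpClient()
response = client.request('GET', 'https://www.google.com/')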
+ """ + def _prepare_connection(self, url, headers): + proxy_settings = os.environ.get('%s_proxy' % url.protocol) + if not proxy_settings: + # The request was HTTP or HTTPS, but there was no appropriate proxy set. + return HttpClient._prepare_connection(self, url, headers) + else: + #print '!!!!%s' % proxy_settings + proxy_auth = _get_proxy_auth(proxy_settings) + proxy_netloc = _get_proxy_net_location(proxy_settings) + #print '!!!!%s' % proxy_auth + #print '!!!!%s' % proxy_netloc + if url.protocol == 'https': + # Set any proxy auth headers + if proxy_auth: + proxy_auth = 'Proxy-authorization: %s' % proxy_auth + + # Construct the proxy connect command. + port = url.port + if not port: + port = '443' + proxy_connect = 'CONNECT %s:%s HTTP/1.0\r\n' % (url.host, port) + + # Set the user agent to send to the proxy + if headers and 'User-Agent' in headers: + user_agent = 'User-Agent: %s\r\n' % (headers['User-Agent']) + else: + user_agent = 'User-Agent: python\r\n' + + proxy_pieces = '%s%s%s\r\n' % (proxy_connect, proxy_auth, user_agent) + + # Find the proxy host and port. + proxy_url = atom.url.parse_url(proxy_netloc) + if not proxy_url.port: + proxy_url.port = '80' + + # Connect to the proxy server, very simple recv and error checking + p_sock = socket.socket(socket.AF_INET,socket.SOCK_STREAM) + p_sock.connect((proxy_url.host, int(proxy_url.port))) + p_sock.sendall(proxy_pieces) + response = '' + + # Wait for the full response. + while response.find("\r\n\r\n") == -1: + response += p_sock.recv(8192) + + p_status = response.split()[1] + if p_status != str(200): + raise ProxyError('Error status=%s' % str(p_status)) + + # Trivial setup for ssl socket. + sslobj = None + if ssl_imported: + sslobj = ssl.wrap_socket(p_sock, None, None) + else: + sock_ssl = socket.ssl(p_sock, None, None) + sslobj = httplib.FakeSocket(p_sock, sock_ssl) + + # Initalize httplib and replace with the proxy socket. + connection = httplib.HTTPConnection(proxy_url.host) + connection.sock = sslobj + return connection + else: + # If protocol was not https. + # Find the proxy host and port. + proxy_url = atom.url.parse_url(proxy_netloc) + if not proxy_url.port: + proxy_url.port = '80' + + if proxy_auth: + headers['Proxy-Authorization'] = proxy_auth.strip() + + return httplib.HTTPConnection(proxy_url.host, int(proxy_url.port)) + + def _get_access_url(self, url): + return url.to_string() + + +def _get_proxy_auth(proxy_settings): + """Returns proxy authentication string for header. + + Will check environment variables for proxy authentication info, starting with + proxy(_/-)username and proxy(_/-)password before checking the given + proxy_settings for a [protocol://]username:password@host[:port] string. + + Args: + proxy_settings: String from http_proxy or https_proxy environment variable. + + Returns: + Authentication string for proxy, or empty string if no proxy username was + found. 
+ """ + proxy_username = None + proxy_password = None + + proxy_username = os.environ.get('proxy-username') + if not proxy_username: + proxy_username = os.environ.get('proxy_username') + proxy_password = os.environ.get('proxy-password') + if not proxy_password: + proxy_password = os.environ.get('proxy_password') + + if not proxy_username: + if '@' in proxy_settings: + protocol_and_proxy_auth = proxy_settings.split('@')[0].split(':') + if len(protocol_and_proxy_auth) == 3: + # 3 elements means we have [, //, ] + proxy_username = protocol_and_proxy_auth[1].lstrip('/') + proxy_password = protocol_and_proxy_auth[2] + elif len(protocol_and_proxy_auth) == 2: + # 2 elements means we have [, ] + proxy_username = protocol_and_proxy_auth[0] + proxy_password = protocol_and_proxy_auth[1] + if proxy_username: + user_auth = base64.encodestring('%s:%s' % (proxy_username, + proxy_password)) + return 'Basic %s\r\n' % (user_auth.strip()) + else: + return '' + + +def _get_proxy_net_location(proxy_settings): + """Returns proxy host and port. + + Args: + proxy_settings: String from http_proxy or https_proxy environment variable. + Must be in the form of protocol://[username:password@]host:port + + Returns: + String in the form of protocol://host:port + """ + if '@' in proxy_settings: + protocol = proxy_settings.split(':')[0] + netloc = proxy_settings.split('@')[1] + return '%s://%s' % (protocol, netloc) + else: + return proxy_settings + + +def _send_data_part(data, connection): + if isinstance(data, types.StringTypes): + connection.send(data) + return + # Check to see if data is a file-like object that has a read method. + elif hasattr(data, 'read'): + # Read the file and send it a chunk at a time. + while 1: + binarydata = data.read(100000) + if binarydata == '': break + connection.send(binarydata) + return + else: + # The data object was not a file. + # Try to convert to a string and send the data. + connection.send(str(data)) + return diff --git a/gam/atom/http.pyc b/gam/atom/http.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1d2a07f9aa04bb9f9537a59e7f0bae9f9b72c140 GIT binary patch literal 10834 zcmcIqU2hymcCDTnlH!OIDN>>&n~rPc)pBI>L-yLqhLID?6!l@tl+r`4w6ujrbGnD@ z;jnwy)gzJt32Y)`_cbqhPyWCnZvm2*1PFov0Rm)^zmPw$iGd(_*qn2#duC{99V{%9 zoMLr-+`9Kx)xGE5p7MW=SFd*e=21iCKSTKc5kBS}N2x%mO_Yw>bW~7ML0Pp+Y7-S( zFRRURQLm`YN>LwDo5MwYWT1XVZJsIGN7d$NQLl1LcvfwWaf!`y>S(6uI>*Owf?bc} z^Qsa~s@9t-|0k{D9 z(PG{u8By(xxU7P+D!eSMW4+cZDj4U(=4&cACzaP#Fd>y$GF`7tQfdECQtDoobynI@ z7-vRDvCeiPqxX`a+YWWtgn9?v?PyD9iQe;fLw#?3{gFz z?&@09)M+=4qj;OV-%XNjuzjo3sjkM`Q5@=KlIrl#-|Mu)>x=q6#Y-Sm16-MTy%TMN z5#0`-g>Btv`v$GL){Y?Ntt{EAhv~DZ5mx;;(06b9nV&aY$NX5E1HYpaD~#?dk&4Pv zKQNwMTVPnUSBeN&$L1|?F>w47#G?(>C5@&uC8|@dm2NlQ? zyA zN4fPH>Z*pzp)xjmUhMCMo+mUs&)yEtv!i4v3-bpkmNvkaS=x=#t)r#e$wAys{J<=2 z`+E!TEq?I9Lgxs(7B85zp=t_qcPX7qa<7`#GHBk!C+F2%KpmH~9W{hY5$v7Ay|$>Vf5Lid=X_B+m1fl*z$U{! z!z?THC2Tk<_RmpY!i*2m#?qw0R+ptR!kwZ6Xi!3aNOc7tGm9cq>KJ?HsAETUrc`7f z;R+PY9~HA!zJJIaYMsI1Zhx4B*bS_>k`E>UpRSVPtd!<5(Zq(*F7nzeGIcg!b=JR zR!GY78i#=$P&LN`LFw50J2_c+>FiC+avaQW=|Us_?r`+? z=)Qw0hqkUtsa3P2X0V78PYXdAV*A<3+FNw&yV$Isma^~XvpPeiV2mn5nRj9PN9gP^ zjU%BHtoQ?!&E+j{|Ca8|3Bb%m?_pFOL5k6QxSiChspB&wfAjJFPfL~Tx`3BsNaf)9 z34}ic|3m8b)7R7qhL6fB~0P;S&sgrbodT8_V&|h zb#hi^c))=gA-B%{zmy*DW4sK=+j5Q1lU}`c9+$!VR~eT4n;f3?4D;G_JDntjx4OH! 
zKCd4=th3xeL60sT>-Oq{)%8`t7<$MJ<8K3zShz5+b1x)=#XEyafn=7KmJSXM7Pkd( zEH+@omvavo?Quy>!@fwvo;NoHVd)S19gBUdSi7E&;sh@ZO(%(gQa%JGAue0Y;Z|)9 z9L&w@ISzY~gSqQgj9clpDY(gZ(g?}laT_TQ3m*hDG8CF zPPSSA720AyVk>Y2qye;&g~7n$0y;npU}90%cam;9;80j@im?VxK?5)%-#KCqpL@@h zik+qq_UC?6lSRtv3!4jq{9eWk*bn9bQ82WCUBsaStZ@X$Gsz ztIC|CD+CgT9u^*i@piUjo}g(XCWNFsK)sD_ z8}iHSwhSQ;J>5cYKyn#iu&#H)M$|mQ-cV;YRQMWky5FK{{iqYddnsiPK*qlOy4qOi z{wYqd5NzJQ;;1%!hEx%@&NmXp6nf zGt1YE02VS~qQAogK`q`^mvM$6e;mE6G4%cSe<=92XjrQ)pj)tvkZmNur4+8)!IcOF zv_*jRCiCNl#YGw9H}L(i!g3O*4+D1t4zpDLL)}7*a%9{pQ4f$hOtlXx7txe!({7%N zC^ABnkP$rIUcGZ`%OGTZ@ zzb&$_r`=F0bTgKXE{->$sx-?{IfKav&a$MDSeCI17SsC1&$84dbs0`OBgZI;gYZz8 z_%W}8gcm?$1>7EY_ibLVn>;jQOq+-t>(oN7-VRSy^A<>XyH z%aVOf33=P^wGq%~moNtUY~w&NXOPdeeVm_=#nQ#{RB6;1D_wR*oC)Wx()7sN_&?>0 z7f z(&TjE%#Ave&MeN@q|+$kVR{icHw+@VCz5O|qTim6$r8_cX^D0FtiQrLGYOhyHYd1JT8ai*e%=b?P>+yewP5Si!W0;O-hh^`KlV-3(6`{T zw2vZ5qd$?1k+znbC|eTGDNVSn)!^^%fhA>jF~?qHHUr6+D<|XX7+l~5&m_m}B46?g2OuXvs%(Z`XylIMX*eYP z!0i>4{?f@_V{Qryzy{$DTZ686PGmtAYp{JnWwVmAf^=3)5J3x)SI~@zasrXz(I=Jn zCd>M7RN^0%n8+%W$X$NIy>4krgxw0JBriBfeo%{q3gEV0qYqw_nB-b+TEuA^y6Q)? zGz+J!1EUzh-i&1Eun{J#t*)#yxVdH|v|M_3@y2-7(4=p`2rS&%hIur6&DDP-8H}Ot zA8fQqd34YzT{HY0uV!?jKe~Y%g1b^dFk7s|N(1O~a)71;ozEd=3e;{7b*W)vVsz@ttC0!w6I+W?Bac4E`+ z0$waQ>0=&(90_Yo(%6Mh;j*{xc=y*<*9BhJAFh1r)z{tCTc1f^Zh znursml7@|E5;z!uq|9Y-CoeF>lS_1LOxv^BFobwz(lKpW@F4X&o@K_RLlw+_2QL@v z!k~MbZPWqB*fkJxb|m@N2PZa$ApEj=DBPt`8&DSmlAflZC=2k$d#$;+GOde);~@!% zXsJJ~6NQMtY^cSPDWMo6I4cBl$qY$s&Q2cCF6Y21-L#QWl6H#YK?&}Mm?20;if;{P zhzNg)k6A~doGZ?7X}Ub-jFl%!!_Ih_amc7MU7m8zm)J9fr?K*7=S@Tz!}x!xG|AWn zamTQ8nbC@js34+Yyn+~}^jqLbcNHvrw=nW`0I?iK%G;klij?E)c|p?fyzdS@-hqVw zh>zhJ7#@dm?>C4I=~#Jc#kszX78rO$T`cjQLcA>es|~!+4xeX!f1q9b3)<fZ zr}Kq?veh15HVwLO#Hdc-90E7UH(!}A5T%Rog*j<-^pD3y`d z9a9zL=PH$Hr}hRJ;c*Z~JTV7J3g9Awh-ngVdlPAP|jqS`Q z+zsO}jT-lQSqDk4xkO1WZIa8w=29np6-({g;$FejH|>|P{!oLyVOAFDJYwHdzeKiW Zv{Yg5DwWyFOl7z{`=>LXkG?ZH^?#$`TIT=& literal 0 HcmV?d00001 diff --git a/gam/atom/http_core.py b/gam/atom/http_core.py new file mode 100755 index 00000000000..0889f3ecc9a --- /dev/null +++ b/gam/atom/http_core.py @@ -0,0 +1,597 @@ +#!/usr/bin/env python +# +# Copyright (C) 2009 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +# This module is used for version 2 of the Google Data APIs. +# TODO: add proxy handling. + + +__author__ = 'j.s@google.com (Jeff Scudder)' + + +import os +import StringIO +import urlparse +import urllib +import httplib +ssl = None +try: + import ssl +except ImportError: + pass + + + +class Error(Exception): + pass + + +class UnknownSize(Error): + pass + + +class ProxyError(Error): + pass + + +MIME_BOUNDARY = 'END_OF_PART' + + +def get_headers(http_response): + """Retrieves all HTTP headers from an HTTP response from the server. + + This method is provided for backwards compatibility for Python2.2 and 2.3. + The httplib.HTTPResponse object in 2.2 and 2.3 does not have a getheaders + method so this function will use getheaders if available, but if not it + will retrieve a few using getheader. 
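A small usage sketch, assuming a plain httplib response object and an invented host:

import httplib
import atom.http_core

connection = httplib.HTTPConnection('www.example.com')
connection.request('GET', '/')
response = connection.getresponse()
# Works whether or not the response object provides getheaders().
for name, value in atom.http_core.get_headers(response):
  print '%s: %s' % (name, value)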
+ """ + if hasattr(http_response, 'getheaders'): + return http_response.getheaders() + else: + headers = [] + for header in ( + 'location', 'content-type', 'content-length', 'age', 'allow', + 'cache-control', 'content-location', 'content-encoding', 'date', + 'etag', 'expires', 'last-modified', 'pragma', 'server', + 'set-cookie', 'transfer-encoding', 'vary', 'via', 'warning', + 'www-authenticate', 'gdata-version'): + value = http_response.getheader(header, None) + if value is not None: + headers.append((header, value)) + return headers + + +class HttpRequest(object): + """Contains all of the parameters for an HTTP 1.1 request. + + The HTTP headers are represented by a dictionary, and it is the + responsibility of the user to ensure that duplicate field names are combined + into one header value according to the rules in section 4.2 of RFC 2616. + """ + method = None + uri = None + + def __init__(self, uri=None, method=None, headers=None): + """Construct an HTTP request. + + Args: + uri: The full path or partial path as a Uri object or a string. + method: The HTTP method for the request, examples include 'GET', 'POST', + etc. + headers: dict of strings The HTTP headers to include in the request. + """ + self.headers = headers or {} + self._body_parts = [] + if method is not None: + self.method = method + if isinstance(uri, (str, unicode)): + uri = Uri.parse_uri(uri) + self.uri = uri or Uri() + + + def add_body_part(self, data, mime_type, size=None): + """Adds data to the HTTP request body. + + If more than one part is added, this is assumed to be a mime-multipart + request. This method is designed to create MIME 1.0 requests as specified + in RFC 1341. + + Args: + data: str or a file-like object containing a part of the request body. + mime_type: str The MIME type describing the data + size: int Required if the data is a file like object. If the data is a + string, the size is calculated so this parameter is ignored. + """ + if isinstance(data, str): + size = len(data) + if size is None: + # TODO: support chunked transfer if some of the body is of unknown size. + raise UnknownSize('Each part of the body must have a known size.') + if 'Content-Length' in self.headers: + content_length = int(self.headers['Content-Length']) + else: + content_length = 0 + # If this is the first part added to the body, then this is not a multipart + # request. + if len(self._body_parts) == 0: + self.headers['Content-Type'] = mime_type + content_length = size + self._body_parts.append(data) + elif len(self._body_parts) == 1: + # This is the first member in a mime-multipart request, so change the + # _body_parts list to indicate a multipart payload. + self._body_parts.insert(0, 'Media multipart posting') + boundary_string = '\r\n--%s\r\n' % (MIME_BOUNDARY,) + content_length += len(boundary_string) + size + self._body_parts.insert(1, boundary_string) + content_length += len('Media multipart posting') + # Put the content type of the first part of the body into the multipart + # payload. + original_type_string = 'Content-Type: %s\r\n\r\n' % ( + self.headers['Content-Type'],) + self._body_parts.insert(2, original_type_string) + content_length += len(original_type_string) + boundary_string = '\r\n--%s\r\n' % (MIME_BOUNDARY,) + self._body_parts.append(boundary_string) + content_length += len(boundary_string) + # Change the headers to indicate this is now a mime multipart request. 
+ self.headers['Content-Type'] = 'multipart/related; boundary="%s"' % ( + MIME_BOUNDARY,) + self.headers['MIME-version'] = '1.0' + # Include the mime type of this part. + type_string = 'Content-Type: %s\r\n\r\n' % (mime_type) + self._body_parts.append(type_string) + content_length += len(type_string) + self._body_parts.append(data) + ending_boundary_string = '\r\n--%s--' % (MIME_BOUNDARY,) + self._body_parts.append(ending_boundary_string) + content_length += len(ending_boundary_string) + else: + # This is a mime multipart request. + boundary_string = '\r\n--%s\r\n' % (MIME_BOUNDARY,) + self._body_parts.insert(-1, boundary_string) + content_length += len(boundary_string) + size + # Include the mime type of this part. + type_string = 'Content-Type: %s\r\n\r\n' % (mime_type) + self._body_parts.insert(-1, type_string) + content_length += len(type_string) + self._body_parts.insert(-1, data) + self.headers['Content-Length'] = str(content_length) + # I could add an "append_to_body_part" method as well. + + AddBodyPart = add_body_part + + def add_form_inputs(self, form_data, + mime_type='application/x-www-form-urlencoded'): + """Form-encodes and adds data to the request body. + + Args: + form_data: dict or sequnce or two member tuples which contains the + form keys and values. + mime_type: str The MIME type of the form data being sent. Defaults + to 'application/x-www-form-urlencoded'. + """ + body = urllib.urlencode(form_data) + self.add_body_part(body, mime_type) + + AddFormInputs = add_form_inputs + + def _copy(self): + """Creates a deep copy of this request.""" + copied_uri = Uri(self.uri.scheme, self.uri.host, self.uri.port, + self.uri.path, self.uri.query.copy()) + new_request = HttpRequest(uri=copied_uri, method=self.method, + headers=self.headers.copy()) + new_request._body_parts = self._body_parts[:] + return new_request + + def _dump(self): + """Converts to a printable string for debugging purposes. + + In order to preserve the request, it does not read from file-like objects + in the body. + """ + output = 'HTTP Request\n method: %s\n url: %s\n headers:\n' % ( + self.method, str(self.uri)) + for header, value in self.headers.iteritems(): + output += ' %s: %s\n' % (header, value) + output += ' body sections:\n' + i = 0 + for part in self._body_parts: + if isinstance(part, (str, unicode)): + output += ' %s: %s\n' % (i, part) + else: + output += ' %s: \n' % i + i += 1 + return output + + +def _apply_defaults(http_request): + if http_request.uri.scheme is None: + if http_request.uri.port == 443: + http_request.uri.scheme = 'https' + else: + http_request.uri.scheme = 'http' + + +class Uri(object): + """A URI as used in HTTP 1.1""" + scheme = None + host = None + port = None + path = None + + def __init__(self, scheme=None, host=None, port=None, path=None, query=None): + """Constructor for a URI. + + Args: + scheme: str This is usually 'http' or 'https'. + host: str The host name or IP address of the desired server. + post: int The server's port number. + path: str The path of the resource following the host. This begins with + a /, example: '/calendar/feeds/default/allcalendars/full' + query: dict of strings The URL query parameters. The keys and values are + both escaped so this dict should contain the unescaped values. + For example {'my key': 'val', 'second': '!!!'} will become + '?my+key=val&second=%21%21%21' which is appended to the path. 
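A brief sketch of that escaping behaviour, reusing the example values from the docstring:

import atom.http_core

uri = atom.http_core.Uri(scheme='http', host='example.com', path='/search',
                         query={'my key': 'val', 'second': '!!!'})
print str(uri)
# http://example.com/search?my+key=val&second=%21%21%21  (parameter order may vary)

round_trip = atom.http_core.Uri.parse_uri(str(uri))
print round_trip.query  # {'my key': 'val', 'second': '!!!'}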
+ """ + self.query = query or {} + if scheme is not None: + self.scheme = scheme + if host is not None: + self.host = host + if port is not None: + self.port = port + if path: + self.path = path + + def _get_query_string(self): + param_pairs = [] + for key, value in self.query.iteritems(): + param_pairs.append('='.join((urllib.quote_plus(key), + urllib.quote_plus(str(value))))) + return '&'.join(param_pairs) + + def _get_relative_path(self): + """Returns the path with the query parameters escaped and appended.""" + param_string = self._get_query_string() + if self.path is None: + path = '/' + else: + path = self.path + if param_string: + return '?'.join([path, param_string]) + else: + return path + + def _to_string(self): + if self.scheme is None and self.port == 443: + scheme = 'https' + elif self.scheme is None: + scheme = 'http' + else: + scheme = self.scheme + if self.path is None: + path = '/' + else: + path = self.path + if self.port is None: + return '%s://%s%s' % (scheme, self.host, self._get_relative_path()) + else: + return '%s://%s:%s%s' % (scheme, self.host, str(self.port), + self._get_relative_path()) + + def __str__(self): + return self._to_string() + + def modify_request(self, http_request=None): + """Sets HTTP request components based on the URI.""" + if http_request is None: + http_request = HttpRequest() + if http_request.uri is None: + http_request.uri = Uri() + # Determine the correct scheme. + if self.scheme: + http_request.uri.scheme = self.scheme + if self.port: + http_request.uri.port = self.port + if self.host: + http_request.uri.host = self.host + # Set the relative uri path + if self.path: + http_request.uri.path = self.path + if self.query: + http_request.uri.query = self.query.copy() + return http_request + + ModifyRequest = modify_request + + def parse_uri(uri_string): + """Creates a Uri object which corresponds to the URI string. + + This method can accept partial URIs, but it will leave missing + members of the Uri unset. + """ + parts = urlparse.urlparse(uri_string) + uri = Uri() + if parts[0]: + uri.scheme = parts[0] + if parts[1]: + host_parts = parts[1].split(':') + if host_parts[0]: + uri.host = host_parts[0] + if len(host_parts) > 1: + uri.port = int(host_parts[1]) + if parts[2]: + uri.path = parts[2] + if parts[4]: + param_pairs = parts[4].split('&') + for pair in param_pairs: + pair_parts = pair.split('=') + if len(pair_parts) > 1: + uri.query[urllib.unquote_plus(pair_parts[0])] = ( + urllib.unquote_plus(pair_parts[1])) + elif len(pair_parts) == 1: + uri.query[urllib.unquote_plus(pair_parts[0])] = None + return uri + + parse_uri = staticmethod(parse_uri) + + ParseUri = parse_uri + + +parse_uri = Uri.parse_uri + + +ParseUri = Uri.parse_uri + + +class HttpResponse(object): + status = None + reason = None + _body = None + + def __init__(self, status=None, reason=None, headers=None, body=None): + self._headers = headers or {} + if status is not None: + self.status = status + if reason is not None: + self.reason = reason + if body is not None: + if hasattr(body, 'read'): + self._body = body + else: + self._body = StringIO.StringIO(body) + + def getheader(self, name, default=None): + if name in self._headers: + return self._headers[name] + else: + return default + + def getheaders(self): + return self._headers + + def read(self, amt=None): + if self._body is None: + return None + if not amt: + return self._body.read() + else: + return self._body.read(amt) + + +def _dump_response(http_response): + """Converts to a string for printing debug messages. 
+ + Does not read the body since that may consume the content. + """ + output = 'HttpResponse\n status: %s\n reason: %s\n headers:' % ( + http_response.status, http_response.reason) + headers = get_headers(http_response) + if isinstance(headers, dict): + for header, value in headers.iteritems(): + output += ' %s: %s\n' % (header, value) + else: + for pair in headers: + output += ' %s: %s\n' % (pair[0], pair[1]) + return output + + +class HttpClient(object): + """Performs HTTP requests using httplib.""" + debug = None + + def request(self, http_request): + return self._http_request(http_request.method, http_request.uri, + http_request.headers, http_request._body_parts) + + Request = request + + def _get_connection(self, uri, headers=None): + """Opens a socket connection to the server to set up an HTTP request. + + Args: + uri: The full URL for the request as a Uri object. + headers: A dict of string pairs containing the HTTP headers for the + request. + """ + connection = None + if uri.scheme == 'https': + if not uri.port: + connection = httplib.HTTPSConnection(uri.host) + else: + connection = httplib.HTTPSConnection(uri.host, int(uri.port)) + else: + if not uri.port: + connection = httplib.HTTPConnection(uri.host) + else: + connection = httplib.HTTPConnection(uri.host, int(uri.port)) + return connection + + def _http_request(self, method, uri, headers=None, body_parts=None): + """Makes an HTTP request using httplib. + + Args: + method: str example: 'GET', 'POST', 'PUT', 'DELETE', etc. + uri: str or atom.http_core.Uri + headers: dict of strings mapping to strings which will be sent as HTTP + headers in the request. + body_parts: list of strings, objects with a read method, or objects + which can be converted to strings using str. Each of these + will be sent in order as the body of the HTTP request. + """ + if isinstance(uri, (str, unicode)): + uri = Uri.parse_uri(uri) + + connection = self._get_connection(uri, headers=headers) + + if self.debug: + connection.debuglevel = 1 + + if connection.host != uri.host: + connection.putrequest(method, str(uri)) + else: + connection.putrequest(method, uri._get_relative_path()) + + # Overcome a bug in Python 2.4 and 2.5 + # httplib.HTTPConnection.putrequest adding + # HTTP request header 'Host: www.google.com:443' instead of + # 'Host: www.google.com', and thus resulting the error message + # 'Token invalid - AuthSub token has wrong scope' in the HTTP response. + if (uri.scheme == 'https' and int(uri.port or 443) == 443 and + hasattr(connection, '_buffer') and + isinstance(connection._buffer, list)): + header_line = 'Host: %s:443' % uri.host + replacement_header_line = 'Host: %s' % uri.host + try: + connection._buffer[connection._buffer.index(header_line)] = ( + replacement_header_line) + except ValueError: # header_line missing from connection._buffer + pass + + # Send the HTTP headers. + for header_name, value in headers.iteritems(): + connection.putheader(header_name, value) + connection.endheaders() + + # If there is data, send it in the request. + if body_parts and filter(lambda x: x != '', body_parts): + for part in body_parts: + _send_data_part(part, connection) + + # Return the HTTP Response from the server. + return connection.getresponse() + + +def _send_data_part(data, connection): + if isinstance(data, (str, unicode)): + # I might want to just allow str, not unicode. + connection.send(data) + return + # Check to see if data is a file-like object that has a read method. 
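+ # A part may be a str (sent as-is above), a file-like object (streamed
+ # below in 100K chunks), or anything str() can convert. As an illustrative
+ # sketch only (the URL and file name are made up):
+ #   request = HttpRequest(uri=Uri.parse_uri('http://example.com/upload'),
+ #                         method='POST')
+ #   request.add_body_part(open('photo.jpg', 'rb'), 'image/jpeg',
+ #                         size=os.path.getsize('photo.jpg'))
+ #   HttpClient().request(request)  # each body part passes through here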
+ elif hasattr(data, 'read'): + # Read the file and send it a chunk at a time. + while 1: + binarydata = data.read(100000) + if binarydata == '': break + connection.send(binarydata) + return + else: + # The data object was not a file. + # Try to convert to a string and send the data. + connection.send(str(data)) + return + + +class ProxiedHttpClient(HttpClient): + + def _get_connection(self, uri, headers=None): + # Check to see if there are proxy settings required for this request. + proxy = None + if uri.scheme == 'https': + proxy = os.environ.get('https_proxy') + elif uri.scheme == 'http': + proxy = os.environ.get('http_proxy') + if not proxy: + return HttpClient._get_connection(self, uri, headers=headers) + # Now we have the URL of the appropriate proxy server. + # Get a username and password for the proxy if required. + proxy_auth = _get_proxy_auth() + if uri.scheme == 'https': + import socket + if proxy_auth: + proxy_auth = 'Proxy-authorization: %s' % proxy_auth + # Construct the proxy connect command. + port = uri.port + if not port: + port = 443 + proxy_connect = 'CONNECT %s:%s HTTP/1.0\r\n' % (uri.host, port) + # Set the user agent to send to the proxy + user_agent = '' + if headers and 'User-Agent' in headers: + user_agent = 'User-Agent: %s\r\n' % (headers['User-Agent']) + proxy_pieces = '%s%s%s\r\n' % (proxy_connect, proxy_auth, user_agent) + # Find the proxy host and port. + proxy_uri = Uri.parse_uri(proxy) + if not proxy_uri.port: + proxy_uri.port = '80' + # Connect to the proxy server, very simple recv and error checking + p_sock = socket.socket(socket.AF_INET,socket.SOCK_STREAM) + p_sock.connect((proxy_uri.host, int(proxy_uri.port))) + p_sock.sendall(proxy_pieces) + response = '' + # Wait for the full response. + while response.find("\r\n\r\n") == -1: + response += p_sock.recv(8192) + p_status = response.split()[1] + if p_status != str(200): + raise ProxyError('Error status=%s' % str(p_status)) + # Trivial setup for ssl socket. + sslobj = None + if ssl is not None: + sslobj = ssl.wrap_socket(p_sock, None, None) + else: + sock_ssl = socket.ssl(p_sock, None, Nonesock_) + sslobj = httplib.FakeSocket(p_sock, sock_ssl) + # Initalize httplib and replace with the proxy socket. 
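+ # Illustrative environment only (the proxy host, port and credentials are
+ # made up): with
+ #   https_proxy=http://proxy.example.com:3128
+ #   proxy_username=jdoe
+ #   proxy_password=secret
+ # the CONNECT handshake above tunnels to the target host, and the
+ # SSL-wrapped socket is attached to the connection created below.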
+ connection = httplib.HTTPConnection(proxy_uri.host) + connection.sock = sslobj + return connection + elif uri.scheme == 'http': + proxy_uri = Uri.parse_uri(proxy) + if not proxy_uri.port: + proxy_uri.port = '80' + if proxy_auth: + headers['Proxy-Authorization'] = proxy_auth.strip() + return httplib.HTTPConnection(proxy_uri.host, int(proxy_uri.port)) + return None + + +def _get_proxy_auth(): + import base64 + proxy_username = os.environ.get('proxy-username') + if not proxy_username: + proxy_username = os.environ.get('proxy_username') + proxy_password = os.environ.get('proxy-password') + if not proxy_password: + proxy_password = os.environ.get('proxy_password') + if proxy_username: + user_auth = base64.b64encode('%s:%s' % (proxy_username, + proxy_password)) + return 'Basic %s\r\n' % (user_auth.strip()) + else: + return '' diff --git a/gam/atom/http_core.pyc b/gam/atom/http_core.pyc new file mode 100644 index 0000000000000000000000000000000000000000..dfd0cc7b54305ec1fbb429049f345a1571455340 GIT binary patch literal 19330 zcmcJX-*X(h(tZfE$U&+~$N!H(d|}?ZADm zS#|NaTciCE_l|Q9o%{UF3D?}?;t5L~?WaEGntRLCv3}~LYmU2{Q?5Coq?`L(^D!4s zTh+;aA;vLf*#}&6Uzt7K&pznlgKp!HyLqUL9Cpq9Ww`_Waz|YApfxk&niTcD9PVcy zbzc{4D2MpNnbQN&73tI=ORdVvHbW>Vb<^=83!c3PG;qTCH8*ifgre!d|vI z&0dRFUy3AWvFr_EOV_hF&6YNkbai{_Lg#L~)u}hLrS>Z=ZG#DwaveUOwt5{-EmavzL#$&2g7L=k5n? zV??QXN6(~?|z1HL~))b@trse>#bJ!=Cx~A!&`A3#?8XD6lSis zePSAC-A+4;eQJIy4q>-Dak@AWhWuW;m1N;o42v~GV%@ZJCuzpbaIKSutM$g_-Fn*0 zLUu>Do+qnGE6KMl@2YX-i;FMPQ8RpT@g=o)jn>7Qtz>mkBZ|ttJF6RUBM*}{CHpnP zW{1(YJ9&7kekTs=;X1?cQ=qM4Oj#$)HN>@EyOAfIc6c{ox;^GIC>m83ciNMf)iox9nT3e_98;sw=BJFRU0aJ91W;l#Mz=rkEyE;eh{ z^VlMBUSH4mk`mwRCakEn+^T2!0+JK8fFF3aMa{yUpVoE_jb zPwVY$ElwA9bakhmZrcp*Bz5bFg>N$s8}Z${cNgkC7N?yj5G1zl*BM=X0Zz&w(DJ;t zgh|BrL$gS-G8$La{|BwIl2#TGts<@0yNML3lT4I)ALNOqDIaN#GOfV-qszRQ!RUFb zp4Ia_jnsRt8TVPuR(QG7j&0%U-EQ1&&TC4!hHIj}+{C^mzelXJ-!H~;cj~QPJP_Tx zqFYRtFwk1DFF(S7vd0PB*mN*a84aq4DF1IFI2cR@k9+m=CPE;JZ};LXZ>Y3>G$T`+ zt3TJ`bmh89GRSoFMzwMBsD>5jX^R9X!t^^w3-R$yxC!=|8sI91n%N6}Y- zg8^YjsVr3B4Ke)2n9|W;Xh4b)?>3bF&^I_Pe7a(IDSOhdQI@AYbVRun!^;qcXVdlU zG=tVpFHKIHs#)uy3eZcpLY9yf&69fZUWahu^)v|!#iJz-=>QUG>iY5e#W!S2TqqnV zJ|m%LgOg!=ufEmw5~dnoqIY2y!3-wzc*#oaYV^va0HQJTq(R;ls)h2=8HZiVEEM-y0@wemz2OyrdE z5otn&LU<8t##yqCvQXVd8cUzNb@{D};i<(Bm#R(ehgmmn7;?1M5IfQhr(SybR58?H ziL0rdmiUsO1wbdQc%hYS#-(@_x<$iE0o6?qOWF;Iv4R9%MrtlKVQAmtHPn?2P)Qnn zBSl$Cm64&23jv#9%AS_~3nL^mAQ*eacVdJW(QIJh(7+Z&@Uk7F>ket{<*}%aQAG19 z(Wti?y%tn!hNbBN$vsw3KD1>4n_e(y3kal(0Q?=3)_R9qy{vE7m?>^OESiK9Adw;Q zX|o&9wMaDq)-wGXlt^GJ;0wW9aWjDf%Eb@6oh*kwhP9_A78XuqQxlogz;N}`p^~R2 z64~=vNYHO!DUEG@pI}vb?Ix(@)u&HnPiIvM!6Y^kEl3INhdNqV@J^9=*~QUQ3O-0+ zx&tG~lu@L1yslD=1&f|gitGbZROqHS%?;bCo7%Z6*OxDxjXpmwDIYznVp2a^RZ~%Y zez9b&Q$A#T)e7TFAibPV%`a>&qIA+^J!#il#&KnahA(z2-)DFSbVF5CtEWkkwc8p` zh@QPZ(BNAA>>7canhvIe$%?|6%4qdSFcpjjhk_&3L&Odf+h45)2lSWs{lWf<-li%O ziWg~zs#D}2CQYRd1=(?R*Y&s(7`SwAL8crKXKSICLOJ1qW<%>FlI!nzp6nR{R4i_c zzFL*&Grt6K-c(l~zMXh*Oe%3`isxXvDPPr-jRpojdpB0=`7hGBY68lV}+ zUD%?#?KKbryVR49LRksiNc+PwY$Y;0H7HUwdkZ0Jk!}ZBr4-p2W@ls7;wn*+`iVZE zCi)23WZk^T;}=fAonkm>3}j32wBx(AVo43R{+=MLq-qK+FcDeX)${rUlxSQC1KY@#fnzDoQ`I-? 
z9vA5_f;kyt)c5_Bup1syd08KYLS8HsMw|zJk`37sV>D1G@Pb!_PCaCkqUMF2U&KnQ zaUv75sPc)-vdnoG8k8!{dQ4)E9>j!Y(K8y;FbaBY!oY+%L_DWTY5VSm)IVCMuko&kc2gB`fRg#4mo-iU>$u_ ziE4<%OQ?^_CjX*0NQo{I4CZICUGcG*1bK_b4aAL(jxw*&%E5z^mqx3TlcSaKaTB>B zw7}eXmApjYcP%2f>CDzWSEgWI;;F&re5sVENCh@0=MY}5+AB^ODtaNhocsmNn{<=A zF|+kl%&Q7trc>UOPJsNH`c$q=c-IdCwT zu9ysOkZM2T(Q`c6uMos`HpeXGWE?XmBO$gsAyz&i-bR$q@Govh&QVFh=SW4mt}cboo83L$5YVASD|~6YFsVvK`Ke zJ?4brES44UCk&-v%PM#?c{7KnN?yJy_^4Bk(u|Z%D}&E_?MndZ=%{sjll)2W?iH)>_tU--u(OcH5F**alndG;2K)!mo)b|aZ z4(FCI95Es4>C##pb9`Iy>k?EbbFw8lEa!?=3`V@SZ}58b>Cm!eRu*n*jV68+&B{Bx zV^C-ZyrJ08_3n@XYlu3#)#>3?E+Dhbvezz4?gYouB>W%%OXp|!mAS2L^*+Z`DJ=IV zuH{a}i%H1pjUC7e6wO`)LL-RBWC?Uz z!ir(gWmC_uTKJrSB~Qb%jKdq`8s0`HX-6*+G3W8^UMG)h-BvF%+kVIh5gsvy#Pyt{ znOXfTtKooMCT)IzzG^ynvVBk(<9kp#cB1J}k%9o>z#8cb%w=jK&;Yd16-q0Pz{G)z zKloXZ75v)9FKX$sD*~5ZlDrr3>~-D9x*{8cdWD_w2JLd@-b)b%C3l)su(wCIYkv=# zF>FVAIE`!}mMnaI*;95!&5D0LV_zXQWIjm2dXjsWhUat=ck%v6F77p2n`0=s!|Jp1 zCPnOh>Z&q{OJck-4$%))8dCfswsz!sp6rJNp5RRnM9@mm)+-}W$ed`)LbQ+XEcUt< zMd^Mm`iq6O^6h-dEQtPMQJB^7E&Bq$X4ZjUVsB*PzQ$0lx|tOev)7H7F4{6ykIkZp zFEyf4LO|M{9sxy{iAJAPAeT%gg$?>%S~`)P$jqK9-cBpQZ>XKP$S&ifTZ#v2C+r@NU_{ z=yT-w*t3f9?B>8l{F;Vct9?kL7Q}TyLx0N0#`iZQu%H4m?Bip0_G2s~AY*QaBI^~o zBjExP3s}UgKssLF!X{R@KtRR}X{+FhBFmtVXu%QDFIblG5*%F{HQYqjUYCB)vN-YZ z4el+o7~Wr7)|gIU{76)H%nI}o)*=&HiLqFBoE7Mz4ad7}(pT#Ohn(F?+6Z|S$AG2) zW$<|m?zm#cgr*9O^-P|eQO2tTcFJldT=Xfu^6cgcN=Dc%lCJk3=`{J1d~;W2$EG7jwreQYvr4Am21OK_Pz>sB(MV?Qa&V2Kt{(|Im6Mrp$6Z zF$ZST&j%V6G4*YSY;t}4lF;>qw|`cC4?w)~?40SrsEjS0%-PV@`!fL^z>Ntd4hFFC zn68QxsZ5#TXKw#!XI&)hk7S83r2nBfHEw8^$Yo|G$akUnz-2acOkR3bQ-5m#7j6N? zWUhD(RN(*NL#GC3S=>N~ySebH9l}#SD=uQ?9vHS_-SOH=GF`kf%<3JweT=VR^f(&G zJ>*LmqcgvSsms%Lkdcg#7%eN1!R&2*yQqc#gub)bd-Sv&%$m(`edm$wiRV;nPC*|s zde9>s+iMTO9@{wDWhUSa6mc+Kh*oP>)6BfzP;yh&LUA1b8D0x-NWwuKZ^BAGbC)+> z^faxX(^h+)h#S?Zxef#5{MX-u_({t5DK_E3%JZZ>i>E{if8Z&JF6q^ziir`U)__Ll zEHRW~3QgJ|`n-ah1pX99SJ1W#$Ci!C90S}y;I>$CEn$qpSG7Pa^MDQ(+Z9)Vt@}UG ziOC&vp~#JbBjwn!@x9}d;|Ip8lZSDpJULmh(YF7m2KDS`58*kU>?%RwJ>Z5l z+t%N+CbW}(UW;Gn-XW1aOA4+7yQRebeFu*L&H)mu#YH8|f3@CFphuj1o+tYm0_4!6 z20jRfE1~Z;@O}Ct20s1x^sk8dN*8Od=aJ!?)%9HHExrd>9+m1onl zy;iKe_GR2bb%x8La&j4O80=JFMru~$5ZbQf(ZkjF84g&enMKx(ZqycN&FvW(ER?TjDT2lkcTWojOeVr~aY`u+E z2)xP>9LD3-hDNC6(PIqqn?#Tap3Vx*cc3F4e^m*h1F;|{AOW_P?1a)`_-kjW(HY`=V~IKq)I%m96l9N~Dh zVSYzD&5>u$PH;Fk$uh24+vyEJ;zD_e+zw@gmF@loFaxiiU98-yZ_Dka8#RWd(_Bz1B4`#sTrgm;}}^zUFO!RGf(A|MHwFg8E7ug-L?6%)-~(spH*%C%d2 zl)U|`I}+vhAlEwyt*Q2zvU+MIz4cmFZ*!B9Hp|3!S7*z6$71%+jP42-)ph2cbsC#- zt}WIsZW|X?gLlcxh5*)wz3%(I+bc)7P9C(Y3##eVWcX@t>EbVsO~bQ~T(`w0!C6`Q zy-NrGj_bEY7iN{1=ez98H*Q{`Wq*7+fXUKsruYjMwZkekyK=ry2yc1F=&=2#+(G8R zvpRGXJ3-k2A$MB{*m;_~s)H{6pf3f+Mv(yH!I8>wEc7GX8!)UYr>~4gs^opgl9lAQh|lAx;9_;55q@9i#p6LIAv1%9QK=kz@|F35zVdCW@J^QB%@) zKJD`TMT)$J)E(pY1M-WFP`vO(<&%y(q8jUT5pEnPEci1wZod$W;6~Nwm2-Su8Swr& zW+r9GembOqUU>Tn^YD+k2TYA-4!irz8V3Y-wJX*b+nHy1=tzx zw}N|5N#S02`;dFUecYq&KHdVGN^twDfm!%8aZF7VcK(l9uD9x&`ih|cF&K_&j@ot# zSAFTQ^NM|WHvuhN9szOAv*X@7_usGEHTVk`KYj7qMNWid*SZIzoV39-b29arf0eV)HjVyqGA%!fyhV860~{S5f>q zRylr8aNZU}yD#dmm}QTglv#T_Cdn-Py%c8K$2P_NG?QV4SS%!!fP^_sOlJ90eW{$< zYHy8CzRZRFrX8E&$bR|dm+U0HOfc98%b0~Ei{H*#*t1eSlKd6Ymf18VI#F5k&fME= zK{ZCpXIk~G)n@&*f1#VqIyG=)H&g5aOGu*k_XrC_UnVd^)L^1P7>l*|NU~*;*!yF8 zc|o(t92(~HkcjqvA@hxVMNLT)i$$!>q}`0~S^dxIGb{UM)u5c++v7@%XFDswF>t|z zW+!tUhr2cYE(46)v}=yWgLeG{aamUSz zphdS;Z*YmNU4F9V^M=aGVD&Kb0u^?7;UaOzUuwZVOvFv{1xMuw>SVHlH>xtT_jy#z zL9VY(1=Y%vgnO&ATy34G9wUCZy6mq2i>>TC$LuFBCiY|2jZ9tMk>>lDl9VT zMsQ$`j2sH=AXuM?bA$5DcN-*Hh;ux4bt?7-iP9d8mw`3mVZfRb93r`6Cybtr{?cwR~6!R^VBbG6w=$ 
zOB(V>{%QrZ!5~^d0*a#af6pKX=aLjHR({+8L^u=3{#iZqCK}modaVTS)boWlW`;%_tpuW%| z6%q)V{eD2AO5-8|i|NazNq2i2R7BasqF(cWn|(m%d@giE{}}{j1Q!3%aMB?ql11VS zPw}Z2B;O~T1dZiOov088AJGjd{655)vCp;VY(~H-OV4L59ZdMR+xUPI?=w&)OH-sg zQB0Oz9xz`>wBOZ$hWl}1`Nj2I@@JLzChNn>4zjZ7->$Iq8&7GD+WLpx8J20dt6{n_ z5bqq**A&a9qXYh*P(-oLkP$JeGX40HK3!v*^PEbNOC?Wb+~pqsBFzHA@3%fn%C z!FJ>ME6d9l&tKEc(-R(LE%9Z@)PxBWLk0RjD;CbKqi5^`@xiMa;(L@a#-}rKy|Aa8 z``Cxg*0pa^Tv(u#Fn_~8Mt5^FUSChvTJWXH?p1oANynOMR(Rby_`A^kW zu0v+2 z;Cih^cT`m3_Y&}Z#q8_op3>BYiWn#nxa`hLuz4r@Gm05rkf5{KS|fvbN57*YX1Y$5 zMM`7PigF<_t>Im3=XOfsYjS0&?f-zje>sk=JPKxjJ=RPI45AXAZJUr$?
    L z!BKy7M*d`QtReuTQ|l4xTS;mk;_gv#dk6JZKo#Rj4Ph`Xi|vlC{00q=X>Qs|qm>4s zfe*36d7kWB1n3o{8cyg3FbcxRR9T_N%s-a#Icz`O%X|AMc97^58pe6WWotu)y@$vf z_FR|xbptIZNpuq}SIi46k!T0pvU#cCbiZb-kXPcJW&3!{?i3jtEr{9d{A3@w1!omW zL#4WW40#tL$XqA|Ll#My_}qk)38i!OENPhh8K55HQ(dKa<>lx%C>_Za9{m=9@$>2{ zFZ<7yqVJRBT{3^75FJzgEJ&O9@IVCJ*qhiBgSZ_-n>8vpHCkG5C@}J%W)PJiIS5%dl5%U%Rq+Nc$}p&PL0Y3L8#;)%Vm%x^0R}3eVMB-3#E;2 zFD*&=nK2Vz=}DGqwJm)$$(2^A(b9~R)GB|W@)eCw>^ z-X@QZgXeKNob!fm%WlyxmCFXO3Z^+RL9U!6=o`UHa>r!jE}R@=je<0ij~)ajC_CUW z%s@`D>qRb{tx8+oKXKB+QIHNv6M9Qd?uG0JIacF7JZ7g~?L)jwp)yrT!Nz@wFK&3J-(v*fG?wp`USvG|9oykOYNI#5xLqXU_ z(d7)ZlhL5rN2SN|WS|TpWoQ5KYFjSp?_m!Flem4P!y<}cW$Tw}FpypQv>N{tzdd~$ zqi$*I*Y*7qGlF_s?alU`ySKMC@7&q^>E_n$u+UhB9=4G-3mD(Y^DLj@gNL5`xb$V* z6!5_lez%3TXkh~iL*2bz8jMx1XO{TEZpdEGjzX8924*^+f)O zFxlLi%!Vz9 z(>jlDiqnReyc98}!{^|v8W@N4(T{P*l4+9{p#cCetAGtE4slq64a)m=?p6MulM4es zOCnIPsMeMTXp`i5v zYt@-!t)*|k>O~TP#&zZ-5!?1b91<%6=v`w9VMkRK={N@OALqeD#ztylkYGw^B%o?L zh!az5l?+xTe`kHkW)BL;u@kl3{MrPj(A#!BD6_B~P&9&8SOLxi+OlJbVq0!)Zm!{W zXHDMSyjx1aoH|S2lrmOY2Seo;evl?PkvacJT!4RZ2oYJ!-li(JR-=SC7kQ{OZVs?1boDs4z}>6ZPf{HPp2k>qf0 z-2$C896M16^7megD8yXCZ4~Ihn4DRtM{CaP(&g2C{FJ}g%Ti^q4+X-|H1XH^oSW8@ zmXVwZEdmq%kXDr&B-bY(^RLo7p4WP22L9(qRgNFwVAJdUg!KJ3ZsK~=yXGy|ZqydN zW$!xfi(W`p(N)8>i%Wlmn}xX}A)Yry{#}A3w9)xN7dY091h9$7Yme{#J7_Xi`Pa~5 zbBUSR$Vd%>A~6?YTdh0NgOd+J*-U zSPRKt!T3Uq;;FuXd}mA3yIR|8(fqS?!KZ}BzfL#ebE_R1l|177 zLdmCC{sb2*)bwu1<))!h=#3OVL?{2qJ@;|x&v8?nyeSlPAYoK!;AHSH3OKa!AGC~D zwryz{+=s_H54+6m2Dibj=wIOW5>Yqpp=tQioVzsV_i?k~+e4!!Cf7?+MFr_0aA{Tb z9}&KcD}1XG1<{www)ItG)#&}m1s`f z1)Wh00>rB|OB-;@w55i)G_i!Z0(7L{r_D#P#^Ln>$89O4KRunnH1)$ zicQo-*Vm6W+F=hmPdW#k|5w0~29ZjXQNKsatFkyR=-Ve})oW$7=`KbCrg8<{vE$ zrv^VpLvd;V8Fbcb>vo$Cph`zBfc|NTu0I71RL)63vpWR}&piGOV;%?*(-19EX@-+2 zt{y)&3Dq;G7gZ8z*^1&}Y=Q&=s60glhDw7=ad#ev z(v6Q)aoQ;R0oA5B^P(~a$3v70gB2woOxj`xavX|tT97LcK8^%2-Wr1P#g#JCb5EML zJZ^N~LJFN!Z zHAjfH-->uNzePOe@gLCR{{Z`lB3(|^bDsMXy6i@)Pb&ptnqBHAti-vGt#R~U@pPb) z#8ttdQ5<{5bjDmRR^Xxu5fOwT3g|LDqq7X0t;s|GiA{2_5QsqJRCcYA{?IA8Tk|MW zI$bfeoK(bt^uK1Xg|YI=2V zxzXY}d>!5H=-4hG)S^6!`wqJuzqj+Kvv**quk$ig6Uwdl4A>_W^EpZ=qHVg(b&^k@ j$dg`}NnF~0Y7e`AMw>@d*4nH$fxFH6%?AH#-@NrN6u-{@ literal 0 HcmV?d00001 diff --git a/gam/atom/mock_http.py b/gam/atom/mock_http.py new file mode 100755 index 00000000000..c420f37b72f --- /dev/null +++ b/gam/atom/mock_http.py @@ -0,0 +1,132 @@ +#!/usr/bin/python +# +# Copyright (C) 2008 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +__author__ = 'api.jscudder (Jeff Scudder)' + + +import atom.http_interface +import atom.url + + +class Error(Exception): + pass + + +class NoRecordingFound(Error): + pass + + +class MockRequest(object): + """Holds parameters of an HTTP request for matching against future requests. 
+ """ + def __init__(self, operation, url, data=None, headers=None): + self.operation = operation + if isinstance(url, (str, unicode)): + url = atom.url.parse_url(url) + self.url = url + self.data = data + self.headers = headers + + +class MockResponse(atom.http_interface.HttpResponse): + """Simulates an httplib.HTTPResponse object.""" + def __init__(self, body=None, status=None, reason=None, headers=None): + if body and hasattr(body, 'read'): + self.body = body.read() + else: + self.body = body + if status is not None: + self.status = int(status) + else: + self.status = None + self.reason = reason + self._headers = headers or {} + + def read(self): + return self.body + + +class MockHttpClient(atom.http_interface.GenericHttpClient): + def __init__(self, headers=None, recordings=None, real_client=None): + """An HttpClient which responds to request with stored data. + + The request-response pairs are stored as tuples in a member list named + recordings. + + The MockHttpClient can be switched from replay mode to record mode by + setting the real_client member to an instance of an HttpClient which will + make real HTTP requests and store the server's response in list of + recordings. + + Args: + headers: dict containing HTTP headers which should be included in all + HTTP requests. + recordings: The initial recordings to be used for responses. This list + contains tuples in the form: (MockRequest, MockResponse) + real_client: An HttpClient which will make a real HTTP request. The + response will be converted into a MockResponse and stored in + recordings. + """ + self.recordings = recordings or [] + self.real_client = real_client + self.headers = headers or {} + + def add_response(self, response, operation, url, data=None, headers=None): + """Adds a request-response pair to the recordings list. + + After the recording is added, future matching requests will receive the + response. + + Args: + response: MockResponse + operation: str + url: str + data: str, Currently the data is ignored when looking for matching + requests. + headers: dict of strings: Currently the headers are ignored when + looking for matching requests. + """ + request = MockRequest(operation, url, data=data, headers=headers) + self.recordings.append((request, response)) + + def request(self, operation, url, data=None, headers=None): + """Returns a matching MockResponse from the recordings. + + If the real_client is set, the request will be passed along and the + server's response will be added to the recordings and also returned. + + If there is no match, a NoRecordingFound error will be raised. + """ + if self.real_client is None: + if isinstance(url, (str, unicode)): + url = atom.url.parse_url(url) + for recording in self.recordings: + if recording[0].operation == operation and recording[0].url == url: + return recording[1] + raise NoRecordingFound('No recodings found for %s %s' % ( + operation, url)) + else: + # There is a real HTTP client, so make the request, and record the + # response. 
+ response = self.real_client.request(operation, url, data=data, + headers=headers) + # TODO: copy the headers + stored_response = MockResponse(body=response, status=response.status, + reason=response.reason) + self.add_response(stored_response, operation, url, data=data, + headers=headers) + return stored_response diff --git a/gam/atom/mock_http_core.py b/gam/atom/mock_http_core.py new file mode 100755 index 00000000000..f55cdc58122 --- /dev/null +++ b/gam/atom/mock_http_core.py @@ -0,0 +1,323 @@ +#!/usr/bin/env python +# +# Copyright (C) 2009 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +# This module is used for version 2 of the Google Data APIs. + + +__author__ = 'j.s@google.com (Jeff Scudder)' + + +import StringIO +import pickle +import os.path +import tempfile +import atom.http_core + + +class Error(Exception): + pass + + +class NoRecordingFound(Error): + pass + + +class MockHttpClient(object): + debug = None + real_client = None + last_request_was_live = False + + # The following members are used to construct the session cache temp file + # name. + # These are combined to form the file name + # /tmp/cache_prefix.cache_case_name.cache_test_name + cache_name_prefix = 'gdata_live_test' + cache_case_name = '' + cache_test_name = '' + + def __init__(self, recordings=None, real_client=None): + self._recordings = recordings or [] + if real_client is not None: + self.real_client = real_client + + def add_response(self, http_request, status, reason, headers=None, + body=None): + response = MockHttpResponse(status, reason, headers, body) + # TODO Scrub the request and the response. + self._recordings.append((http_request._copy(), response)) + + AddResponse = add_response + + def request(self, http_request): + """Provide a recorded response, or record a response for replay. + + If the real_client is set, the request will be made using the + real_client, and the response from the server will be recorded. + If the real_client is None (the default), this method will examine + the recordings and find the first which matches. + """ + request = http_request._copy() + _scrub_request(request) + if self.real_client is None: + self.last_request_was_live = False + for recording in self._recordings: + if _match_request(recording[0], request): + return recording[1] + else: + # Pass along the debug settings to the real client. + self.real_client.debug = self.debug + # Make an actual request since we can use the real HTTP client. + self.last_request_was_live = True + response = self.real_client.request(http_request) + scrubbed_response = _scrub_response(response) + self.add_response(request, scrubbed_response.status, + scrubbed_response.reason, + dict(atom.http_core.get_headers(scrubbed_response)), + scrubbed_response.read()) + # Return the recording which we just added. 
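+ # A test can also seed a canned response up front with add_response
+ # instead of recording one here. Illustrative sketch only; the status,
+ # body and URL are made up:
+ #   client = MockHttpClient()
+ #   req = atom.http_core.HttpRequest(
+ #       uri=atom.http_core.Uri.parse_uri('http://example.com/feed'),
+ #       method='GET')
+ #   client.add_response(req, 200, 'OK', body='<feed/>')
+ #   client.request(req)  # returns the canned 200 response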
+ return self._recordings[-1][1] + raise NoRecordingFound('No recoding was found for request: %s %s' % ( + request.method, str(request.uri))) + + Request = request + + def _save_recordings(self, filename): + recording_file = open(os.path.join(tempfile.gettempdir(), filename), + 'wb') + pickle.dump(self._recordings, recording_file) + recording_file.close() + + def _load_recordings(self, filename): + recording_file = open(os.path.join(tempfile.gettempdir(), filename), + 'rb') + self._recordings = pickle.load(recording_file) + recording_file.close() + + def _delete_recordings(self, filename): + full_path = os.path.join(tempfile.gettempdir(), filename) + if os.path.exists(full_path): + os.remove(full_path) + + def _load_or_use_client(self, filename, http_client): + if os.path.exists(os.path.join(tempfile.gettempdir(), filename)): + self._load_recordings(filename) + else: + self.real_client = http_client + + def use_cached_session(self, name=None, real_http_client=None): + """Attempts to load recordings from a previous live request. + + If a temp file with the recordings exists, then it is used to fulfill + requests. If the file does not exist, then a real client is used to + actually make the desired HTTP requests. Requests and responses are + recorded and will be written to the desired temprary cache file when + close_session is called. + + Args: + name: str (optional) The file name of session file to be used. The file + is loaded from the temporary directory of this machine. If no name + is passed in, a default name will be constructed using the + cache_name_prefix, cache_case_name, and cache_test_name of this + object. + real_http_client: atom.http_core.HttpClient the real client to be used + if the cached recordings are not found. If the default + value is used, this will be an + atom.http_core.HttpClient. + """ + if real_http_client is None: + real_http_client = atom.http_core.HttpClient() + if name is None: + self._recordings_cache_name = self.get_cache_file_name() + else: + self._recordings_cache_name = name + self._load_or_use_client(self._recordings_cache_name, real_http_client) + + def close_session(self): + """Saves recordings in the temporary file named in use_cached_session.""" + if self.real_client is not None: + self._save_recordings(self._recordings_cache_name) + + def delete_session(self, name=None): + """Removes recordings from a previous live request.""" + if name is None: + self._delete_recordings(self._recordings_cache_name) + else: + self._delete_recordings(name) + + def get_cache_file_name(self): + return '%s.%s.%s' % (self.cache_name_prefix, self.cache_case_name, + self.cache_test_name) + + def _dump(self): + """Provides debug information in a string.""" + output = 'MockHttpClient\n real_client: %s\n cache file name: %s\n' % ( + self.real_client, self.get_cache_file_name()) + output += ' recordings:\n' + i = 0 + for recording in self._recordings: + output += ' recording %i is for: %s %s\n' % ( + i, recording[0].method, str(recording[0].uri)) + i += 1 + return output + + +def _match_request(http_request, stored_request): + """Determines whether a request is similar enough to a stored request + to cause the stored response to be returned.""" + # Check to see if the host names match. 
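+ # Matching is deliberately loose. For example (illustrative URLs), a stored
+ # GET for http://example.com/feed?start-index=1 also satisfies a new GET
+ # for http://example.com/feed?start-index=5: ordinary query parameters, the
+ # request body, the port and the headers are ignored, so only the host,
+ # path, method and any gsessionid parameter have to agree, as checked below.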
+ if (http_request.uri.host is not None + and http_request.uri.host != stored_request.uri.host): + return False + # Check the request path in the URL (/feeds/private/full/x) + elif http_request.uri.path != stored_request.uri.path: + return False + # Check the method used in the request (GET, POST, etc.) + elif http_request.method != stored_request.method: + return False + # If there is a gsession ID in either request, make sure that it is matched + # exactly. + elif ('gsessionid' in http_request.uri.query + or 'gsessionid' in stored_request.uri.query): + if 'gsessionid' not in stored_request.uri.query: + return False + elif 'gsessionid' not in http_request.uri.query: + return False + elif (http_request.uri.query['gsessionid'] + != stored_request.uri.query['gsessionid']): + return False + # Ignores differences in the query params (?start-index=5&max-results=20), + # the body of the request, the port number, HTTP headers, just to name a + # few. + return True + + +def _scrub_request(http_request): + """ Removes email address and password from a client login request. + + Since the mock server saves the request and response in plantext, sensitive + information like the password should be removed before saving the + recordings. At the moment only requests sent to a ClientLogin url are + scrubbed. + """ + if (http_request and http_request.uri and http_request.uri.path and + http_request.uri.path.endswith('ClientLogin')): + # Remove the email and password from a ClientLogin request. + http_request._body_parts = [] + http_request.add_form_inputs( + {'form_data': 'client login request has been scrubbed'}) + else: + # We can remove the body of the post from the recorded request, since + # the request body is not used when finding a matching recording. + http_request._body_parts = [] + return http_request + + +def _scrub_response(http_response): + return http_response + + +class EchoHttpClient(object): + """Sends the request data back in the response. + + Used to check the formatting of the request as it was sent. Always responds + with a 200 OK, and some information from the HTTP request is returned in + special Echo-X headers in the response. The following headers are added + in the response: + 'Echo-Host': The host name and port number to which the HTTP connection is + made. If no port was passed in, the header will contain + host:None. + 'Echo-Uri': The path portion of the URL being requested. /example?x=1&y=2 + 'Echo-Scheme': The beginning of the URL, usually 'http' or 'https' + 'Echo-Method': The HTTP method being used, 'GET', 'POST', 'PUT', etc. + """ + + def request(self, http_request): + return self._http_request(http_request.uri, http_request.method, + http_request.headers, http_request._body_parts) + + def _http_request(self, uri, method, headers=None, body_parts=None): + body = StringIO.StringIO() + response = atom.http_core.HttpResponse(status=200, reason='OK', body=body) + if headers is None: + response._headers = {} + else: + # Copy headers from the request to the response but convert values to + # strings. Server response headers always come in as strings, so an int + # should be converted to a corresponding string when echoing. 
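+ # Echo sketch (the URL is illustrative): a GET for
+ # http://example.com:8080/feed?x=1 comes back as a 200 response carrying
+ # Echo-Host: example.com:8080, Echo-Uri: /feed?x=1, Echo-Scheme: http and
+ # Echo-Method: GET, which are set just after the header copy below.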
+ for header, value in headers.iteritems(): + response._headers[header] = str(value) + response._headers['Echo-Host'] = '%s:%s' % (uri.host, str(uri.port)) + response._headers['Echo-Uri'] = uri._get_relative_path() + response._headers['Echo-Scheme'] = uri.scheme + response._headers['Echo-Method'] = method + for part in body_parts: + if isinstance(part, str): + body.write(part) + elif hasattr(part, 'read'): + body.write(part.read()) + body.seek(0) + return response + + +class SettableHttpClient(object): + """An HTTP Client which responds with the data given in set_response.""" + + def __init__(self, status, reason, body, headers): + """Configures the response for the server. + + See set_response for details on the arguments to the constructor. + """ + self.set_response(status, reason, body, headers) + self.last_request = None + + def set_response(self, status, reason, body, headers): + """Determines the response which will be sent for each request. + + Args: + status: An int for the HTTP status code, example: 200, 404, etc. + reason: String for the HTTP reason, example: OK, NOT FOUND, etc. + body: The body of the HTTP response as a string or a file-like + object (something with a read method). + headers: dict of strings containing the HTTP headers in the response. + """ + self.response = atom.http_core.HttpResponse(status=status, reason=reason, + body=body) + self.response._headers = headers.copy() + + def request(self, http_request): + self.last_request = http_request + return self.response + + +class MockHttpResponse(atom.http_core.HttpResponse): + + def __init__(self, status=None, reason=None, headers=None, body=None): + self._headers = headers or {} + if status is not None: + self.status = status + if reason is not None: + self.reason = reason + if body is not None: + # Instead of using a file-like object for the body, store as a string + # so that reads can be repeated. + if hasattr(body, 'read'): + self._body = body.read() + else: + self._body = body + + def read(self): + return self._body diff --git a/gam/atom/mock_service.py b/gam/atom/mock_service.py new file mode 100755 index 00000000000..601b68ae8fa --- /dev/null +++ b/gam/atom/mock_service.py @@ -0,0 +1,243 @@ +#!/usr/bin/python +# +# Copyright (C) 2008 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +"""MockService provides CRUD ops. for mocking calls to AtomPub services. + + MockService: Exposes the publicly used methods of AtomService to provide + a mock interface which can be used in unit tests. +""" + +import atom.service +import pickle + + +__author__ = 'api.jscudder (Jeffrey Scudder)' + + +# Recordings contains pairings of HTTP MockRequest objects with MockHttpResponse objects. +recordings = [] +# If set, the mock service HttpRequest are actually made through this object. 
+real_request_handler = None + +def ConcealValueWithSha(source): + import sha + return sha.new(source[:-5]).hexdigest() + +def DumpRecordings(conceal_func=ConcealValueWithSha): + if conceal_func: + for recording_pair in recordings: + recording_pair[0].ConcealSecrets(conceal_func) + return pickle.dumps(recordings) + +def LoadRecordings(recordings_file_or_string): + if isinstance(recordings_file_or_string, str): + atom.mock_service.recordings = pickle.loads(recordings_file_or_string) + elif hasattr(recordings_file_or_string, 'read'): + atom.mock_service.recordings = pickle.loads( + recordings_file_or_string.read()) + +def HttpRequest(service, operation, data, uri, extra_headers=None, + url_params=None, escape_params=True, content_type='application/atom+xml'): + """Simulates an HTTP call to the server, makes an actual HTTP request if + real_request_handler is set. + + This function operates in two different modes depending on if + real_request_handler is set or not. If real_request_handler is not set, + HttpRequest will look in this module's recordings list to find a response + which matches the parameters in the function call. If real_request_handler + is set, this function will call real_request_handler.HttpRequest, add the + response to the recordings list, and respond with the actual response. + + Args: + service: atom.AtomService object which contains some of the parameters + needed to make the request. The following members are used to + construct the HTTP call: server (str), additional_headers (dict), + port (int), and ssl (bool). + operation: str The HTTP operation to be performed. This is usually one of + 'GET', 'POST', 'PUT', or 'DELETE' + data: ElementTree, filestream, list of parts, or other object which can be + converted to a string. + Should be set to None when performing a GET or PUT. + If data is a file-like object which can be read, this method will read + a chunk of 100K bytes at a time and send them. + If the data is a list of parts to be sent, each part will be evaluated + and sent. + uri: The beginning of the URL to which the request should be sent. + Examples: '/', '/base/feeds/snippets', + '/m8/feeds/contacts/default/base' + extra_headers: dict of strings. HTTP headers which should be sent + in the request. These headers are in addition to those stored in + service.additional_headers. + url_params: dict of strings. Key value pairs to be added to the URL as + URL parameters. For example {'foo':'bar', 'test':'param'} will + become ?foo=bar&test=param. + escape_params: bool default True. If true, the keys and values in + url_params will be URL escaped when the form is constructed + (Special characters converted to %XX form.) + content_type: str The MIME type for the data being sent. Defaults to + 'application/atom+xml', this is only used if data is set. + """ + full_uri = atom.service.BuildUri(uri, url_params, escape_params) + (server, port, ssl, uri) = atom.service.ProcessUrl(service, uri) + current_request = MockRequest(operation, full_uri, host=server, ssl=ssl, + data=data, extra_headers=extra_headers, url_params=url_params, + escape_params=escape_params, content_type=content_type) + # If the request handler is set, we should actually make the request using + # the request handler and record the response to replay later. 
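+ # Record/replay sketch (the wiring shown is illustrative): a test whose
+ # HTTP calls are routed through this module can record live traffic with
+ #   atom.mock_service.real_request_handler = atom.service
+ #   ... exercise the code under test ...
+ #   saved = atom.mock_service.DumpRecordings()
+ # and later call LoadRecordings(saved) with real_request_handler reset to
+ # None, so the else branch below answers from the recordings instead of
+ # touching the network.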
+ if real_request_handler: + response = real_request_handler.HttpRequest(service, operation, data, uri, + extra_headers=extra_headers, url_params=url_params, + escape_params=escape_params, content_type=content_type) + # TODO: need to copy the HTTP headers from the real response into the + # recorded_response. + recorded_response = MockHttpResponse(body=response.read(), + status=response.status, reason=response.reason) + # Insert a tuple which maps the request to the response object returned + # when making an HTTP call using the real_request_handler. + recordings.append((current_request, recorded_response)) + return recorded_response + else: + # Look through available recordings to see if one matches the current + # request. + for request_response_pair in recordings: + if request_response_pair[0].IsMatch(current_request): + return request_response_pair[1] + return None + + +class MockRequest(object): + """Represents a request made to an AtomPub server. + + These objects are used to determine if a client request matches a recorded + HTTP request to determine what the mock server's response will be. + """ + + def __init__(self, operation, uri, host=None, ssl=False, port=None, + data=None, extra_headers=None, url_params=None, escape_params=True, + content_type='application/atom+xml'): + """Constructor for a MockRequest + + Args: + operation: str One of 'GET', 'POST', 'PUT', or 'DELETE' this is the + HTTP operation requested on the resource. + uri: str The URL describing the resource to be modified or feed to be + retrieved. This should include the protocol (http/https) and the host + (aka domain). For example, these are some valud full_uris: + 'http://example.com', 'https://www.google.com/accounts/ClientLogin' + host: str (optional) The server name which will be placed at the + beginning of the URL if the uri parameter does not begin with 'http'. + Examples include 'example.com', 'www.google.com', 'www.blogger.com'. + ssl: boolean (optional) If true, the request URL will begin with https + instead of http. + data: ElementTree, filestream, list of parts, or other object which can be + converted to a string. (optional) + Should be set to None when performing a GET or PUT. + If data is a file-like object which can be read, the constructor + will read the entire file into memory. If the data is a list of + parts to be sent, each part will be evaluated and stored. + extra_headers: dict (optional) HTTP headers included in the request. + url_params: dict (optional) Key value pairs which should be added to + the URL as URL parameters in the request. For example uri='/', + url_parameters={'foo':'1','bar':'2'} could become '/?foo=1&bar=2'. + escape_params: boolean (optional) Perform URL escaping on the keys and + values specified in url_params. Defaults to True. + content_type: str (optional) Provides the MIME type of the data being + sent. + """ + self.operation = operation + self.uri = _ConstructFullUrlBase(uri, host=host, ssl=ssl) + self.data = data + self.extra_headers = extra_headers + self.url_params = url_params or {} + self.escape_params = escape_params + self.content_type = content_type + + def ConcealSecrets(self, conceal_func): + """Conceal secret data in this request.""" + if self.extra_headers.has_key('Authorization'): + self.extra_headers['Authorization'] = conceal_func( + self.extra_headers['Authorization']) + + def IsMatch(self, other_request): + """Check to see if the other_request is equivalent to this request. 
+ + Used to determine if a recording matches an incoming request so that a + recorded response should be sent to the client. + + The matching is not exact, only the operation and URL are examined + currently. + + Args: + other_request: MockRequest The request which we want to check this + (self) MockRequest against to see if they are equivalent. + """ + # More accurate matching logic will likely be required. + return (self.operation == other_request.operation and self.uri == + other_request.uri) + + +def _ConstructFullUrlBase(uri, host=None, ssl=False): + """Puts URL components into the form http(s)://full.host.strinf/uri/path + + Used to construct a roughly canonical URL so that URLs which begin with + 'http://example.com/' can be compared to a uri of '/' when the host is + set to 'example.com' + + If the uri contains 'http://host' already, the host and ssl parameters + are ignored. + + Args: + uri: str The path component of the URL, examples include '/' + host: str (optional) The host name which should prepend the URL. Example: + 'example.com' + ssl: boolean (optional) If true, the returned URL will begin with https + instead of http. + + Returns: + String which has the form http(s)://example.com/uri/string/contents + """ + if uri.startswith('http'): + return uri + if ssl: + return 'https://%s%s' % (host, uri) + else: + return 'http://%s%s' % (host, uri) + + +class MockHttpResponse(object): + """Returned from MockService crud methods as the server's response.""" + + def __init__(self, body=None, status=None, reason=None, headers=None): + """Construct a mock HTTPResponse and set members. + + Args: + body: str (optional) The HTTP body of the server's response. + status: int (optional) + reason: str (optional) + headers: dict (optional) + """ + self.body = body + self.status = status + self.reason = reason + self.headers = headers or {} + + def read(self): + return self.body + + def getheader(self, header_name): + return self.headers[header_name] + diff --git a/gam/atom/service.py b/gam/atom/service.py new file mode 100755 index 00000000000..00f66379ad3 --- /dev/null +++ b/gam/atom/service.py @@ -0,0 +1,740 @@ +#!/usr/bin/python +# +# Copyright (C) 2006, 2007, 2008 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +"""AtomService provides CRUD ops. in line with the Atom Publishing Protocol. + + AtomService: Encapsulates the ability to perform insert, update and delete + operations with the Atom Publishing Protocol on which GData is + based. An instance can perform query, insertion, deletion, and + update. + + HttpRequest: Function that performs a GET, POST, PUT, or DELETE HTTP request + to the specified end point. An AtomService object or a subclass can be + used to specify information about the request. 
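+
+ Sample usage, as an illustrative sketch only (the host and query below are
+ examples, not defaults of this module):
+
+ client = AtomService(server='base.google.com')
+ response = client.Get('/base/feeds/snippets?bq=digital+camera')
+ print response.status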
+""" + +__author__ = 'api.jscudder (Jeff Scudder)' + + +import atom.http_interface +import atom.url +import atom.http +import atom.token_store + +import os +import httplib +import urllib +import re +import base64 +import socket +import warnings +try: + from xml.etree import cElementTree as ElementTree +except ImportError: + try: + import cElementTree as ElementTree + except ImportError: + try: + from xml.etree import ElementTree + except ImportError: + from elementtree import ElementTree +import atom + + +class AtomService(object): + """Performs Atom Publishing Protocol CRUD operations. + + The AtomService contains methods to perform HTTP CRUD operations. + """ + + # Default values for members + port = 80 + ssl = False + # Set the current_token to force the AtomService to use this token + # instead of searching for an appropriate token in the token_store. + current_token = None + auto_store_tokens = True + auto_set_current_token = True + + def _get_override_token(self): + return self.current_token + + def _set_override_token(self, token): + self.current_token = token + + override_token = property(_get_override_token, _set_override_token) + + #@atom.v1_deprecated('Please use atom.client.AtomPubClient instead.') + def __init__(self, server=None, additional_headers=None, + application_name='', http_client=None, token_store=None): + """Creates a new AtomService client. + + Args: + server: string (optional) The start of a URL for the server + to which all operations should be directed. Example: + 'www.google.com' + additional_headers: dict (optional) Any additional HTTP headers which + should be included with CRUD operations. + http_client: An object responsible for making HTTP requests using a + request method. If none is provided, a new instance of + atom.http.ProxiedHttpClient will be used. + token_store: Keeps a collection of authorization tokens which can be + applied to requests for a specific URLs. Critical methods are + find_token based on a URL (atom.url.Url or a string), add_token, + and remove_token. + """ + self.http_client = http_client or atom.http.ProxiedHttpClient() + self.token_store = token_store or atom.token_store.TokenStore() + self.server = server + self.additional_headers = additional_headers or {} + self.additional_headers['User-Agent'] = atom.http_interface.USER_AGENT % ( + application_name,) + # If debug is True, the HTTPConnection will display debug information + self._set_debug(False) + + __init__ = atom.v1_deprecated( + 'Please use atom.client.AtomPubClient instead.')( + __init__) + + def _get_debug(self): + return self.http_client.debug + + def _set_debug(self, value): + self.http_client.debug = value + + debug = property(_get_debug, _set_debug, + doc='If True, HTTP debug information is printed.') + + def use_basic_auth(self, username, password, scopes=None): + if username is not None and password is not None: + if scopes is None: + scopes = [atom.token_store.SCOPE_ALL] + base_64_string = base64.encodestring('%s:%s' % (username, password)) + token = BasicAuthToken('Basic %s' % base_64_string.strip(), + scopes=[atom.token_store.SCOPE_ALL]) + if self.auto_set_current_token: + self.current_token = token + if self.auto_store_tokens: + return self.token_store.add_token(token) + return True + return False + + def UseBasicAuth(self, username, password, for_proxy=False): + """Sets an Authenticaiton: Basic HTTP header containing plaintext. + + Deprecated, use use_basic_auth instead. 
+ + The username and password are base64 encoded and added to an HTTP header + which will be included in each request. Note that your username and + password are sent in plaintext. + + Args: + username: str + password: str + """ + self.use_basic_auth(username, password) + + #@atom.v1_deprecated('Please use atom.client.AtomPubClient for requests.') + def request(self, operation, url, data=None, headers=None, + url_params=None): + if isinstance(url, (str, unicode)): + if url.startswith('http:') and self.ssl: + # Force all requests to be https if self.ssl is True. + url = atom.url.parse_url('https:' + url[5:]) + elif not url.startswith('http') and self.ssl: + url = atom.url.parse_url('https://%s%s' % (self.server, url)) + elif not url.startswith('http'): + url = atom.url.parse_url('http://%s%s' % (self.server, url)) + else: + url = atom.url.parse_url(url) + + if url_params: + for name, value in url_params.iteritems(): + url.params[name] = value + + all_headers = self.additional_headers.copy() + if headers: + all_headers.update(headers) + + # If the list of headers does not include a Content-Length, attempt to + # calculate it based on the data object. + if data and 'Content-Length' not in all_headers: + content_length = CalculateDataLength(data) + if content_length: + all_headers['Content-Length'] = str(content_length) + + # Find an Authorization token for this URL if one is available. + if self.override_token: + auth_token = self.override_token + else: + auth_token = self.token_store.find_token(url) + return auth_token.perform_request(self.http_client, operation, url, + data=data, headers=all_headers) + + request = atom.v1_deprecated( + 'Please use atom.client.AtomPubClient for requests.')( + request) + + # CRUD operations + def Get(self, uri, extra_headers=None, url_params=None, escape_params=True): + """Query the APP server with the given URI + + The uri is the portion of the URI after the server value + (server example: 'www.google.com'). + + Example use: + To perform a query against Google Base, set the server to + 'base.google.com' and set the uri to '/base/feeds/...', where ... is + your query. For example, to find snippets for all digital cameras uri + should be set to: '/base/feeds/snippets?bq=digital+camera' + + Args: + uri: string The query in the form of a URI. Example: + '/base/feeds/snippets?bq=digital+camera'. + extra_headers: dicty (optional) Extra HTTP headers to be included + in the GET request. These headers are in addition to + those stored in the client's additional_headers property. + The client automatically sets the Content-Type and + Authorization headers. + url_params: dict (optional) Additional URL parameters to be included + in the query. These are translated into query arguments + in the form '&dict_key=value&...'. + Example: {'max-results': '250'} becomes &max-results=250 + escape_params: boolean (optional) If false, the calling code has already + ensured that the query will form a valid URL (all + reserved characters have been escaped). If true, this + method will escape the query and any URL parameters + provided. + + Returns: + httplib.HTTPResponse The server's response to the GET request. + """ + return self.request('GET', uri, data=None, headers=extra_headers, + url_params=url_params) + + def Post(self, data, uri, extra_headers=None, url_params=None, + escape_params=True, content_type='application/atom+xml'): + """Insert data into an APP server at the given URI. 
+ + Args: + data: string, ElementTree._Element, or something with a __str__ method + The XML to be sent to the uri. + uri: string The location (feed) to which the data should be inserted. + Example: '/base/feeds/items'. + extra_headers: dict (optional) HTTP headers which are to be included. + The client automatically sets the Content-Type, + Authorization, and Content-Length headers. + url_params: dict (optional) Additional URL parameters to be included + in the URI. These are translated into query arguments + in the form '&dict_key=value&...'. + Example: {'max-results': '250'} becomes &max-results=250 + escape_params: boolean (optional) If false, the calling code has already + ensured that the query will form a valid URL (all + reserved characters have been escaped). If true, this + method will escape the query and any URL parameters + provided. + + Returns: + httplib.HTTPResponse Server's response to the POST request. + """ + if extra_headers is None: + extra_headers = {} + if content_type: + extra_headers['Content-Type'] = content_type + return self.request('POST', uri, data=data, headers=extra_headers, + url_params=url_params) + + def Put(self, data, uri, extra_headers=None, url_params=None, + escape_params=True, content_type='application/atom+xml'): + """Updates an entry at the given URI. + + Args: + data: string, ElementTree._Element, or xml_wrapper.ElementWrapper The + XML containing the updated data. + uri: string A URI indicating entry to which the update will be applied. + Example: '/base/feeds/items/ITEM-ID' + extra_headers: dict (optional) HTTP headers which are to be included. + The client automatically sets the Content-Type, + Authorization, and Content-Length headers. + url_params: dict (optional) Additional URL parameters to be included + in the URI. These are translated into query arguments + in the form '&dict_key=value&...'. + Example: {'max-results': '250'} becomes &max-results=250 + escape_params: boolean (optional) If false, the calling code has already + ensured that the query will form a valid URL (all + reserved characters have been escaped). If true, this + method will escape the query and any URL parameters + provided. + + Returns: + httplib.HTTPResponse Server's response to the PUT request. + """ + if extra_headers is None: + extra_headers = {} + if content_type: + extra_headers['Content-Type'] = content_type + return self.request('PUT', uri, data=data, headers=extra_headers, + url_params=url_params) + + def Delete(self, uri, extra_headers=None, url_params=None, + escape_params=True): + """Deletes the entry at the given URI. + + Args: + uri: string The URI of the entry to be deleted. Example: + '/base/feeds/items/ITEM-ID' + extra_headers: dict (optional) HTTP headers which are to be included. + The client automatically sets the Content-Type and + Authorization headers. + url_params: dict (optional) Additional URL parameters to be included + in the URI. These are translated into query arguments + in the form '&dict_key=value&...'. + Example: {'max-results': '250'} becomes &max-results=250 + escape_params: boolean (optional) If false, the calling code has already + ensured that the query will form a valid URL (all + reserved characters have been escaped). If true, this + method will escape the query and any URL parameters + provided. + + Returns: + httplib.HTTPResponse Server's response to the DELETE request. 
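+
+ Example (an illustrative sketch; the item id is made up):
+ response = client.Delete('/base/feeds/items/ITEM-ID')
+ print response.status, response.reason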
+ """ + return self.request('DELETE', uri, data=None, headers=extra_headers, + url_params=url_params) + + +class BasicAuthToken(atom.http_interface.GenericToken): + def __init__(self, auth_header, scopes=None): + """Creates a token used to add Basic Auth headers to HTTP requests. + + Args: + auth_header: str The value for the Authorization header. + scopes: list of str or atom.url.Url specifying the beginnings of URLs + for which this token can be used. For example, if scopes contains + 'http://example.com/foo', then this token can be used for a request to + 'http://example.com/foo/bar' but it cannot be used for a request to + 'http://example.com/baz' + """ + self.auth_header = auth_header + self.scopes = scopes or [] + + def perform_request(self, http_client, operation, url, data=None, + headers=None): + """Sets the Authorization header to the basic auth string.""" + if headers is None: + headers = {'Authorization':self.auth_header} + else: + headers['Authorization'] = self.auth_header + return http_client.request(operation, url, data=data, headers=headers) + + def __str__(self): + return self.auth_header + + def valid_for_scope(self, url): + """Tells the caller if the token authorizes access to the desired URL. + """ + if isinstance(url, (str, unicode)): + url = atom.url.parse_url(url) + for scope in self.scopes: + if scope == atom.token_store.SCOPE_ALL: + return True + if isinstance(scope, (str, unicode)): + scope = atom.url.parse_url(scope) + if scope == url: + return True + # Check the host and the path, but ignore the port and protocol. + elif scope.host == url.host and not scope.path: + return True + elif scope.host == url.host and scope.path and not url.path: + continue + elif scope.host == url.host and url.path.startswith(scope.path): + return True + return False + + +def PrepareConnection(service, full_uri): + """Opens a connection to the server based on the full URI. + + This method is deprecated, instead use atom.http.HttpClient.request. + + Examines the target URI and the proxy settings, which are set as + environment variables, to open a connection with the server. This + connection is used to make an HTTP request. + + Args: + service: atom.AtomService or a subclass. It must have a server string which + represents the server host to which the request should be made. It may also + have a dictionary of additional_headers to send in the HTTP request. + full_uri: str Which is the target relative (lacks protocol and host) or + absolute URL to be opened. Example: + 'https://www.google.com/accounts/ClientLogin' or + 'base/feeds/snippets' where the server is set to www.google.com. + + Returns: + A tuple containing the httplib.HTTPConnection and the full_uri for the + request. 
+ """ + deprecation('calling deprecated function PrepareConnection') + (server, port, ssl, partial_uri) = ProcessUrl(service, full_uri) + if ssl: + # destination is https + proxy = os.environ.get('https_proxy') + if proxy: + (p_server, p_port, p_ssl, p_uri) = ProcessUrl(service, proxy, True) + proxy_username = os.environ.get('proxy-username') + if not proxy_username: + proxy_username = os.environ.get('proxy_username') + proxy_password = os.environ.get('proxy-password') + if not proxy_password: + proxy_password = os.environ.get('proxy_password') + if proxy_username: + user_auth = base64.encodestring('%s:%s' % (proxy_username, + proxy_password)) + proxy_authorization = ('Proxy-authorization: Basic %s\r\n' % ( + user_auth.strip())) + else: + proxy_authorization = '' + proxy_connect = 'CONNECT %s:%s HTTP/1.0\r\n' % (server, port) + user_agent = 'User-Agent: %s\r\n' % ( + service.additional_headers['User-Agent']) + proxy_pieces = (proxy_connect + proxy_authorization + user_agent + + '\r\n') + + #now connect, very simple recv and error checking + p_sock = socket.socket(socket.AF_INET,socket.SOCK_STREAM) + p_sock.connect((p_server,p_port)) + p_sock.sendall(proxy_pieces) + response = '' + + # Wait for the full response. + while response.find("\r\n\r\n") == -1: + response += p_sock.recv(8192) + + p_status=response.split()[1] + if p_status!=str(200): + raise atom.http.ProxyError('Error status=%s' % p_status) + + # Trivial setup for ssl socket. + ssl = socket.ssl(p_sock, None, None) + fake_sock = httplib.FakeSocket(p_sock, ssl) + + # Initalize httplib and replace with the proxy socket. + connection = httplib.HTTPConnection(server) + connection.sock=fake_sock + full_uri = partial_uri + + else: + connection = httplib.HTTPSConnection(server, port) + full_uri = partial_uri + + else: + # destination is http + proxy = os.environ.get('http_proxy') + if proxy: + (p_server, p_port, p_ssl, p_uri) = ProcessUrl(service.server, proxy, True) + proxy_username = os.environ.get('proxy-username') + if not proxy_username: + proxy_username = os.environ.get('proxy_username') + proxy_password = os.environ.get('proxy-password') + if not proxy_password: + proxy_password = os.environ.get('proxy_password') + if proxy_username: + UseBasicAuth(service, proxy_username, proxy_password, True) + connection = httplib.HTTPConnection(p_server, p_port) + if not full_uri.startswith("http://"): + if full_uri.startswith("/"): + full_uri = "http://%s%s" % (service.server, full_uri) + else: + full_uri = "http://%s/%s" % (service.server, full_uri) + else: + connection = httplib.HTTPConnection(server, port) + full_uri = partial_uri + + return (connection, full_uri) + + +def UseBasicAuth(service, username, password, for_proxy=False): + """Sets an Authenticaiton: Basic HTTP header containing plaintext. + + Deprecated, use AtomService.use_basic_auth insread. + + The username and password are base64 encoded and added to an HTTP header + which will be included in each request. Note that your username and + password are sent in plaintext. The auth header is added to the + additional_headers dictionary in the service object. + + Args: + service: atom.AtomService or a subclass which has an + additional_headers dict as a member. 
+ username: str
+ password: str
+ """
+ deprecation('calling deprecated function UseBasicAuth')
+ base_64_string = base64.encodestring('%s:%s' % (username, password))
+ base_64_string = base_64_string.strip()
+ if for_proxy:
+ header_name = 'Proxy-Authorization'
+ else:
+ header_name = 'Authorization'
+ service.additional_headers[header_name] = 'Basic %s' % (base_64_string,)
+
+
+def ProcessUrl(service, url, for_proxy=False):
+ """Processes a passed URL. If the URL does not begin with https?, then
+ the default value for server is used.
+
+ This method is deprecated, use atom.url.parse_url instead.
+ """
+ if not isinstance(url, atom.url.Url):
+ url = atom.url.parse_url(url)
+
+ server = url.host
+ ssl = False
+ port = 80
+
+ if not server:
+ if hasattr(service, 'server'):
+ server = service.server
+ else:
+ server = service
+ if not url.protocol and hasattr(service, 'ssl'):
+ ssl = service.ssl
+ if hasattr(service, 'port'):
+ port = service.port
+ else:
+ if url.protocol == 'https':
+ ssl = True
+ elif url.protocol == 'http':
+ ssl = False
+ if url.port:
+ port = int(url.port)
+ elif port == 80 and ssl:
+ port = 443
+
+ return (server, port, ssl, url.get_request_uri())
+
+def DictionaryToParamList(url_parameters, escape_params=True):
+ """Convert a dictionary of URL arguments into a URL parameter string.
+
+ This function is deprecated, use atom.url.Url instead.
+
+ Args:
+ url_parameters: The dictionary of key-value pairs which will be converted
+ into URL parameters. For example,
+ {'dry-run': 'true', 'foo': 'bar'}
+ will become ['dry-run=true', 'foo=bar'].
+
+ Returns:
+ A list which contains a string for each key-value pair. The strings are
+ ready to be incorporated into a URL by using '&'.join([] + parameter_list)
+ """
+ # Choose which function to use when modifying the query and parameters.
+ # Use quote_plus when escape_params is true.
+ transform_op = [str, urllib.quote_plus][bool(escape_params)]
+ # Create a list of tuples containing the escaped version of the
+ # parameter-value pairs.
+ parameter_tuples = [(transform_op(param), transform_op(value))
+ for param, value in (url_parameters or {}).items()]
+ # Turn parameter-value tuples into a list of strings in the form
+ # 'PARAMETER=VALUE'.
+ return ['='.join(x) for x in parameter_tuples]
+
+
+def BuildUri(uri, url_params=None, escape_params=True):
+ """Converts a uri string and a collection of parameters into a URI.
+
+ This function is deprecated, use atom.url.Url instead.
+
+ Args:
+ uri: string
+ url_params: dict (optional)
+ escape_params: boolean (optional)
+ uri: string The start of the desired URI. This string can already contain
+ URL parameters. Examples: '/base/feeds/snippets',
+ '/base/feeds/snippets?bq=digital+camera'
+ url_parameters: dict (optional) Additional URL parameters to be included
+ in the query. These are translated into query arguments
+ in the form '&dict_key=value&...'.
+ Example: {'max-results': '250'} becomes &max-results=250
+ escape_params: boolean (optional) If false, the calling code has already
+ ensured that the query will form a valid URL (all
+ reserved characters have been escaped). If true, this
+ method will escape the query and any URL parameters
+ provided.
+
+ Returns:
+ string The URI consisting of the escaped URL parameters appended to the
+ initial uri string.
+ """
+ # Prepare URL parameters for inclusion into the GET request.
+ parameter_list = DictionaryToParamList(url_params, escape_params)
+
+ # Append the URL parameters to the URL.
+ if parameter_list:
+ if uri.find('?') != -1:
+ # If there are already URL parameters in the uri string, add the
+ # parameters after a new & character.
+ full_uri = '&'.join([uri] + parameter_list)
+ else:
+ # The uri string did not have any URL parameters (no ? character)
+ # so put a ? between the uri and URL parameters.
+ full_uri = '%s%s' % (uri, '?%s' % ('&'.join([] + parameter_list)))
+ else:
+ full_uri = uri
+
+ return full_uri
+
+
+def HttpRequest(service, operation, data, uri, extra_headers=None,
+ url_params=None, escape_params=True, content_type='application/atom+xml'):
+ """Performs an HTTP call to the server, supports GET, POST, PUT, and DELETE.
+
+ This method is deprecated, use atom.http.HttpClient.request instead.
+
+ Usage example, perform an HTTP GET on http://www.google.com/:
+ import atom.service
+ client = atom.service.AtomService()
+ http_response = client.Get('http://www.google.com/')
+ or you could set the client.server to 'www.google.com' and use the
+ following:
+ client.server = 'www.google.com'
+ http_response = client.Get('/')
+
+ Args:
+ service: atom.AtomService object which contains some of the parameters
+ needed to make the request. The following members are used to
+ construct the HTTP call: server (str), additional_headers (dict),
+ port (int), and ssl (bool).
+ operation: str The HTTP operation to be performed. This is usually one of
+ 'GET', 'POST', 'PUT', or 'DELETE'
+ data: ElementTree, filestream, list of parts, or other object which can be
+ converted to a string.
+ Should be set to None when performing a GET or PUT.
+ If data is a file-like object which can be read, this method will read
+ a chunk of 100K bytes at a time and send them.
+ If the data is a list of parts to be sent, each part will be evaluated
+ and sent.
+ uri: The beginning of the URL to which the request should be sent.
+ Examples: '/', '/base/feeds/snippets',
+ '/m8/feeds/contacts/default/base'
+ extra_headers: dict of strings. HTTP headers which should be sent
+ in the request. These headers are in addition to those stored in
+ service.additional_headers.
+ url_params: dict of strings. Key value pairs to be added to the URL as
+ URL parameters. For example {'foo':'bar', 'test':'param'} will
+ become ?foo=bar&test=param.
+ escape_params: bool default True. If true, the keys and values in
+ url_params will be URL escaped when the form is constructed
+ (Special characters converted to %XX form.)
+ content_type: str The MIME type for the data being sent. Defaults to
+ 'application/atom+xml', this is only used if data is set.
+ """
+ deprecation('call to deprecated function HttpRequest')
+ full_uri = BuildUri(uri, url_params, escape_params)
+ (connection, full_uri) = PrepareConnection(service, full_uri)
+
+ if extra_headers is None:
+ extra_headers = {}
+
+ # Turn on debug mode if the debug member is set.
+ if service.debug:
+ connection.debuglevel = 1
+
+ connection.putrequest(operation, full_uri)
+
+ # If the list of headers does not include a Content-Length, attempt to
+ # calculate it based on the data object.
+ if (data and not service.additional_headers.has_key('Content-Length') and
+ not extra_headers.has_key('Content-Length')):
+ content_length = CalculateDataLength(data)
+ if content_length:
+ extra_headers['Content-Length'] = str(content_length)
+
+ if content_type:
+ extra_headers['Content-Type'] = content_type
+
+ # Send the HTTP headers.
+ if isinstance(service.additional_headers, dict): + for header in service.additional_headers: + connection.putheader(header, service.additional_headers[header]) + if isinstance(extra_headers, dict): + for header in extra_headers: + connection.putheader(header, extra_headers[header]) + connection.endheaders() + + # If there is data, send it in the request. + if data: + if isinstance(data, list): + for data_part in data: + __SendDataPart(data_part, connection) + else: + __SendDataPart(data, connection) + + # Return the HTTP Response from the server. + return connection.getresponse() + + +def __SendDataPart(data, connection): + """This method is deprecated, use atom.http._send_data_part""" + deprecated('call to deprecated function __SendDataPart') + if isinstance(data, str): + #TODO add handling for unicode. + connection.send(data) + return + elif ElementTree.iselement(data): + connection.send(ElementTree.tostring(data)) + return + # Check to see if data is a file-like object that has a read method. + elif hasattr(data, 'read'): + # Read the file and send it a chunk at a time. + while 1: + binarydata = data.read(100000) + if binarydata == '': break + connection.send(binarydata) + return + else: + # The data object was not a file. + # Try to convert to a string and send the data. + connection.send(str(data)) + return + + +def CalculateDataLength(data): + """Attempts to determine the length of the data to send. + + This method will respond with a length only if the data is a string or + and ElementTree element. + + Args: + data: object If this is not a string or ElementTree element this funtion + will return None. + """ + if isinstance(data, str): + return len(data) + elif isinstance(data, list): + return None + elif ElementTree.iselement(data): + return len(ElementTree.tostring(data)) + elif hasattr(data, 'read'): + # If this is a file-like object, don't try to guess the length. + return None + else: + return len(str(data)) + + +def deprecation(message): + warnings.warn(message, DeprecationWarning, stacklevel=2) diff --git a/gam/atom/service.pyc b/gam/atom/service.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fa69d205416493b52641aaa10110cdf78fbaa93e GIT binary patch literal 26803 zcmeHwTWlOxn%=2yiV`VOq$r6JEqh${jKr2rHSJlCcf(lYktxd7Sf;r}D)MT^+bMPz zsivCUt*UBCoDqQ_@=lUnU=ajCvdBZQ2#^6XdwWT0qz_IS9t0G8E9Ri{p!I+y=`IWGT?$?;cv|7@*pvOmZ0?|1M^{<&vN z!ACIWnEIH``gkR8bCPX1S2UP0yS72wGoQF$whm>@anwX-%oY~sxcSVOXU6=>J#>G< zL}jyc&g{(U*;xMUylIT{=H3Mp&6&nY^BI=*1zk_*`l5+mG>u7Jzht7X>GqWQ46FXK zZf(t!X-u2Xu>8JhoK}agn8u7QUNwy~y7)S`YrS%od--z7nCoe0cRh+9HS3YzjXRH; zjVSS#gF83;PB*Fg&9>iawj=*>GrjMp_amR}{k7g^tC`$ywzvJYxRZA3omO>x+}Fdb zvn79}T@SlSuN9`~k*|iE%~mtr^V5#sjpD6Nyo>IWC{7prUblf}e%NmKji?o+5jq?A z)4>~I+U&HG&mW54Y5R}wH|zKP_iu!0=r@yty={g`)TsK`+Z;d|wlVj5*v?1wuouO9 z3)$fC%>_FsU15j^d$WVr0(_9B-5|pINxJ0U?6vD0GA19UuJgnX{r6Wk7W}o3*IC{{ z(TV*VE4NoRR{Re(HrD*ucHlmP<>Klk-KgH&YDNt|!YXw;&33986gRN5xf9h>_80m| zZ?oPClSDJy%ontWnR5qhcYBy5H$qE`4eIn#eJmS7bv(HUKnc6e>P}McH5yUuSH2r< zZTai=?n?8|@n^LHqF~Yq6!n!>v>Ub4jW~*I?Mc)YDpTGU#+*}_QPjVOUs6S95s(4s zgAynQWGI^mV13Lq&XR6KpcrE+MIVScAP!~0_k@WiO?1*k6XqGv;}l=v-w}6l%zOr- zG-aY`vvb-sM$I$NL{ltLX%Y6`43U-Ho>6ZzMCS(R(m49`h|2hDvGg1V*05MQ>pp>} zz*Uff^X@CB^p*4a3b0*0U>1!jHM>x#0ht2oVzM9vc{5Z3jZyhRp%VCvPh2#?dgUC~ zZ6C#&#jE7GXzEbgVN(@QhQAvbW;;qy@3hkpxZ&?c>HSUv_}j->!DvPy@^08ka}6V? 
z6BOKUnd3T+;;At^24sg1GH98^WxW^20G3+Xc@VWNxQy_XsI^tWzeYv!ZCowh0ij41 zA2j35y~P`y$L&@pY$S`@;qKLMR=@SFtKB_}vwby*>we|UD~lnh)FR#>S*zmN8FW$G zj?!A^Q546ZarTq9P@RAXV#Ia+<9-@92l&+E&}(9O#nA_4Jgl&JzyYUZk9HEcIO1U^ zhlYI{-^hlY@_TjSj{9ls2`ZkBnC^^0gB{Q8l*}{jLal6`9y8t7R6k;Nj=A~~H`h_~ zv}C&PnWk-jTrE$SoiX#YOujYCy#(*}%sadqH^68h51He7h0BtBjB+`SME66#9X&4c z1;5^EV#^74f;=#dcsp6rt?zToqIk(q(wNMp(&>_bhOH}tP>|{v)NRWT{X4;JAB4+N zM!l5v;J5|=*7AU`)dDMXfa)jrJH1xJ-;Df5GX}9FA6R)3?si+z5`nn*Xa4cy$JOml zXS)?u>z&>C>~Yv=H0`iz_oEOrFMVSk@wkMab3f17atY(%O;m- zWkSbW>jWzZJKDd2|15U6)oeE`A@LQTB`-PPB<4c zbHTgdjd@e}oh~I;QM1;HAS4nleeGpNwytBjA^m31OTY&Os2*l|TvE&QKz$wO5WmpQ zkzZ(K#PqZtM>|b^Ukp!Qd5F8`@;R=)<#RiDM^e|J0SD*N=m5Wa6t;TNk&Zebn!)M0KHp&BU zKl8l(V*&wm{I|$-DM*a5OUpLnf250&**VFl=yV+t^m;l8aN+n@6OR5;vUDj)2(<5o zNwe->N-TQT!P1fnVOPk%3d$Ue;Z7Kn@@jqgVX*)vX0a;D*kEo4(PP$>~RicmQG)|-Az6I1c``DjyNZ1Zu$tuP{W6s^%{A~5AcoT0ty3qFyYO3 zbKV)0W4Ipkro9nQpvCPWXbQAAFN^6GLZEec0GD-POG3)99;M)5ZJ$dF;tkHyOgrr* zU+Z2J4Z!3ic9SP}Tadrg=m{hxv56Z|7vc(JDsWvArNMEF1(On~tqO|tZh@s(y9M^a z&BU+XehaE3VWFYt*fYxS$mP304CmKQz~(MChg!%a+y?U~1d|XKYp`%U**jVw zB0ay8MA}U5@ZZF}rC*bh%c<}3Ct&$8zySmfgani(6*&RC#Q%ylpgMpX(1G~70;myQ z02_BkC2xb)fM}J?%jZddO6Fny(gOmRDg9pcX4O~SfKMXwtBpfxM$M_^l=+Vc~ z|EGErgP$^S>~R)TW`EqYXEY;e``OqrpmDQf>(69_9ST1_03{#YJu8>iE99V-22;{m zN=SZql`J($m;*jfxcRnnaq&_DO3j^C`|O?+dKp*^NWXeJYHz3aMG;}`3R5b^nNA>k zoP?$(Byww#O3Qc-+NXBCc9WQ@CQ^eEssy%4(h}*{4Ma`>dpC?h*l;H?OI1yXYWVG1 z4uS^ZZeryx(laoWJ#h+a&638v9JcDR-_eYgjh+)4RH7}*;d1ol;grH%X5_17)Ikzm zg$rbLW-_TZ+c_~{A1LcErXAgX1sG= z@=d()g(OVsxK3HA)(Lq0ap^=)Ud4rFK2}1Z6&lhClzd!3r3cEC%Ijjg$zKKp|1vF` zQkt)?t!3JF-=MnPd=#~z1KqMLM;s(>QsV>Tg2^#<>KAX(%n!E!Qw1H|mrQ8Y=!#V# z{KzTS2es)d8E4D1Z!!yO2H5D^nnGW;SU=pRH7fPrmoiSy2|*C*Me*Cv_{AUQH}SNn zwu`&D#yw03E$0{6YH=%yU`47{tMdy`A0seETyw(eNjyvgsQNdt5!qB0*a2bPPuk6H zm)xD(L_kg`B?2GiwqPW?&O~ynZ`#N>r?l6Bx&M?eJAE8NNE|w3v!;%{qOD_v&x<01%!N?u zot#kocwc|c1`K1kgYYV?Jdkaip@f18S~&%j%Ram-7z6N{ECvm}!G*92+dL$%L;5QX z7$8pRo||ERP`cux+Sh4l=+HV2Yd~v{5NC10k@StdZY1y;J}$}weKSQiE%(trc^hIr z%EwHT1Oy%sYj{QCFR-%N^fR!v3(AGW9<>uXW4J9Bh^!|;)cj>y zxM~lgy=%hp%fuNslj8E_wzMD4?}ksV!V1-ErO7zvORrI?%)0MQDQ_xJ63>u#nq?eWAEa=IGxC#)U0d9vgeZ_aka2y_iBS z9Ws2~E`sD*O*13~Yc{m#6?8qk8%&BO$A(|OkBxy15&LpKd<3aF0{yg8Xk3xWES1Wd z-aqVvvWsTz>(&frUo4D^?ywMs#6b`kJ|=)PeRt3R^gM{tUfj;@suW3D&CM$1`oJ0n zBWqHX?G#v&nF|3Y*4^u1bjN_I(o<0RYi-%MU`@A@V z%bXKu+gT+4eAUta1;5~0tJbm`c@z_%8Q>1MA>(oA*QiL>YMD`Ukdl((ee%(5Za(O? z^s)>Nz`?4nEw0^%1w5^eHQ4zT^3f}N+bW7gEzPjc_Q1YDacONY!f#G326&RB)cL&S z0PGy5l?7eZ*?>jwG!@5!^3IC13fG+Y7bo>YEw=Dr6v;dcyg*yWAgt8vIGQ7vf3c(= zl9}VAUlD%0-(L~_BH>?u9?@q^LN1I6Po?2HV5YPqRgCS1^8vDefXqM?&zfPR)vfTs zO%~)D!TTs|z(A(ArRe);832j?S|>^WGyc^y%p9$Fqu!)<(fdC`@P7|uxTDY(82}+? 
zLHjyP;sv8x?k_>-!M$paV=%QSu4eTAuDum;D^gpLcq+?8BDx!yFppF-#tZ*8o6boJH^=UzP$az8L_O9N1T zMeT>I%U5Fl|CeHZ7CrGr1pVjJ`#YaY?kPqiNbTpR_5mg8qowc~TQ5BZqxxY2Pd$Xc zT%!M3Y-J}vxNk%ZM6~+;^C68twY>&Ij~LXd-g& z2bXyJbrvL@0f}kAm#w=#Goc1ctY)NubwbhD^XN^4{Wy1ZKzF(#I}}7BA8qhD>$Gkk z@ajVrAF=otMSr1mbp-LB0V8iX(BwNPMn^`=qn>`=XlblGR-VHB%nPXV#&i!op)_%7 zVtUk@__}J#Gv65Xre3zcz(*%_{Pd@c@;&^LZ=r~6+%iIfje_-IBEmKPdJ5LZMag`I zHuBN%VwB;#T-iGNald8u$Bkr20d@-)h{>EW+p;w=M#pmj2LA=S-w(D_R2MxdigJVH zI}0)(F?KYVW}1O0z#%rKkghXpua?C}lCyR0(A7b9HjKVIkv~y*$`Vos;IjeygW*X* za0~>5?L$FvSpd4zLpG!BW}8|`!WS5aS70mrrn4;38^u_)EUsOXJ>Z^c;)@!A3#cge zHJ`a>GiMG1I~TV)9XNn6E4mJbeISby%;*9JP#D~EI)sBMp7%EqP~S{B)^;bQZB2W_ z`Gii<{`}rH!yj1X$QoO%|FgeCikMO8t<|^x3INB};lg6mQQ#19{CZAnPL6qb*fEKM zK$ggiPyhft!>L|2O@S|Mqn>#i;ZSWj`DH6*BL)yF+zPNwTPjN6hvJi;mEeFURwTcO zV%>RNhJhTRncxU_9>Svtw^{ITRR-pPSNi08)h-uNEYNas`2FZ48+>?;PY36Y|KxFKL4}qd5jCZ#D;%n9=^rD2fIRzS|D(R zj}-DAzolN$`mUn(xN!+lU?Xa^tlmKP6g=c0NQ6;Kw+N$j3PQCA>-7j3IvEHfC=Vg& zaz0r?s`$Qu;RV4Wi#J$MpRrK+7OxmBrwEMTC0<=ZA+8ROXlmJenBEWGKuja7ZUq0HLukZE=|x;$0{fl!PC$eq zcm-=Lewl@^R1yXeCisNK_fhnRtgEBINFUvPhXZB&2)O^LvNv&JtgPe^hQN|0iPD8Y z0{|+B7@%9RNWuaHbA5k=pbcLUVni7MiU6lKO9=Hsco#4NRF3EX?-ATcvn2*G;l)vI z#Iv&AJU^iF{jPk{#o<%tU(Mr zlBdw-0fNf@wTHl_owLlsL_Q>*&f>ve7jfbUXW3~3nOAe%4F>sMFgtUslpoLRpHs~X zTm-zhkM`%x(^*ryNVW$e_wa6+ue_M862O6N^r(H=>+k1Ty>6l4-Li#(otJ(_K=}Iw zu=-p;_!ou72LRzemeQ{Y3}F;Rji!}lqZ*&u(4hSd*H*!ccxt_PwY zTCkA`Yw0=?k&sO$(>T3-{HQ`p86iR|dBSiaW}wmm$B3VhB52fp)QmfA##=%a+ZE9eV)R;I$O07AHLFbcjj7|&S8UfLYe>e!DLHvP%H`a?Gk6zS zQ}#n4{2{HxiPPC98EV3`FE~q>>yyXAxELgMkMHK8K1FTDa!x`3q93gr!@sMopj=NT zY_~3s5DSBNH^0&f>kpLEq&#MMZ#jo6SQkw++)O&H9^#rAhJi0bi05|EmF>J5Y9~D) zSz25i%o192J>;@E&Bv)%#Q>1yJ`y?Lb?YTP2jA6%ayUX>9)mJntW6(ltQUD?o=)<0hUum&gZ%Jq-*EF(L*Ch9@1m2}B4NL?T~;gOXl z5onZJc~zn+=kEslWKXNpH#Dg~P9DKYYIGBXY~2ot&_T>Dz!Vps1D1TrXY2ikHlIv} zB}%&$ifh?$MdoC+a&T@NucL{^qA3G@(`G2Lp@|6QtgQ<`8oss%Q?MM))!rM3_}0Hjh-ZM|DwE*5d27BbCG4szR|+l;W&*#NWzglZSG z)5o{4jy1Joms$4!8%fG4jzC<_(hUz_{zHIu66338b{wAeQA9kCdS8RMKMG;r zmXNIBg)e*4h?1Vhzo$ww>OG~2`J0>CDaoJ)OqS1$+-PcZdtosW&zhe-|L z`vt)u^I8)zCBx|C3$eqWS>(w?6-AG>`{8NN`94HBNNT&$?j`~*#3$X#%TlTuH+xr; z5aWIt`@B;~AS!UTGk9%H+>KC`M-m5j3ECt8?`27Wt2XR%6IHvVp6ITXIQ_{SZG{om+87t!%qTeMy@Hmi6#HFfG zz>!~I=RyySBOmI~B1Row`R_yw#>bgan8HeoP8$VcOd(H*`9l-rZpb0BndxKTcHo4i zI%Pdho2avlU-AeA(8l7-wydgv7C_e=Oi|l_A*{$HhA_hd^^8e+-%ygCOdQspgSNbo z83ERqUq#N}0s)XOAHxghXtM%{0^eY*LTk48)H$IpVMLI`C?ffx&cf)cZ-hUnTsF%9 zzyKX46j0_#4bEVngIBIq0PZOABJ#|oy($;qxsY1w3IyM?G7%4f`+p; zpeEUw7fPqd^j<_$cVdXY{VWo@m79B@4^%bhFV9zZa5Pord!PER_m`-~;atHSrSe>} zs78~v1X^sE6aOFfkU(7PwqSpt2*>mf(FdEi$SUA7;*TPlkWa+Enj zKg5mYgTP8KA2KYMFSo^=GHA+YqEcpL51GElvq#YKfq1U zV)1uTSS@FTxBV;v>J>Q|5~m3$4;?Krr%?G`ui0wciJMnRzxh>jk%2?!%5x*%D2k-s{pXm>fFXlU&nA59Y=iaFyW>sa%7iJ~YG;1p=PwQb3_Q35Fp z1(|>35Mc%<4kTk&hn*5O)4sXOF! 
z_Cwlb#YII8`YRUa`6BG&NUaB1;Yob*YhWSmH>;V(iPb%LUVvuC72>j^mn{e)oyQvOwweG5Ta)$FRq)gvGk4&WLccrnnYTTNiNN0(_+@l#$(qJgg9loWg&Kw>wx0lmpuWY=y{^ z&lSsN)jEzj%*47vTIhOyFl6Rw0q7WFV@sz zLjkG_XKkvR{$3OXWI{plFpgnT^8DiNFJ(?rY-5U)`(!aQR_Rp>HNz6eu!L5aftD`N zb!SW1(e_=^9AQ55OcX9+C+LoQl|Fs6aQ0!&{onxHQtABCya*M}We^zT z!?ve1`0v6*K^wzmHom5&78r)e+PU>`1Llo^vkxK~TmZiSCs|#nD)wQazXCZZau!a) z6ao^csLj(5nUg;(LO$NgI#0*u!7Kd}83qvk(kGv&hpJ^On9@%N6kWn4`N|nL)3?CX0W_;%~5ck%hdX zbP4JJ#a8qvYRT`~?SZyJ^-EIg8m6cUhX@b+2>udZq_svCSs7-?NaSb4dlsJ63xxj+ zy48y0M`oH@Z5@wpAjNbIBnL7YOcfD6Xa$D~W#d5J35DF!9y%<6LK$R|?Smh&$#oPm zAY~(tA}b;y*`R*a@6UZjxU@omm)%N%mX`2?ILZX?kQ14ow~Q1Ox6Q9GBB@ zeNHLmcpSO{ug;dHZ9rie@AJ2eU-E}2VDTxa1h}{Ei`QiOhlYUvl|XjTGM1N?SXIR9 z*>hIPo`Y6-L+b~Dgb#W6x6G48$>fmvZNL~rU!535&%G>5tl-Y|BHyVaHiwf+Uo2h?~#}lfQxI!SA53x~JffkfyFTn+!^BY>lkku&|^@EmT4HnbF?#I9o-=Q8aP8>LWc zJg~dJpuL1OGzXG+m2FugD6w0+jx|AV1Vr=}+LzD@DxkoP{c7hqgk&0PdOgLda$QD& zb-)C08V=*+B%IpA*qofewPPFZg`^hHzarFRf)I&9cH(vDyBBIG zMD9`$82xO64|s$*ge51JNnAol_)Fhd=uCa!nL~QAS4r8VB z2>p1Y!=zn^1J>}aZJ;U3i-%*OsL~$o%r`Np|HPSdbjGWE17#4P(CBLl&_RfLO+h;H zE>U5|0nvbC9&By1+D_mF=Xw$a$y?gPtC`rO=ze=_d4NSSL6^nf=BN1WII^DkZPtp~ zlWO7ghAKH>#330=p~!4*Ni$cpl-Q-CS7<4gXH^U3wFRfqUZFzoBPua&);E|Q75MB= zjQ=dJUT0BfafyZOYA^Ci0>V#tMM`9uJr}LN4Z1plPym6&J2nsT+x)hYCE)Kw=8OIV u8!i4<_RiWUA(gy-(VHmG{<%5+;+td9#0wLzPF$S2TJ~l>8JinB|GxngKM(Z) literal 0 HcmV?d00001 diff --git a/gam/atom/token_store.py b/gam/atom/token_store.py new file mode 100755 index 00000000000..d618965cd4e --- /dev/null +++ b/gam/atom/token_store.py @@ -0,0 +1,117 @@ +#!/usr/bin/python +# +# Copyright (C) 2008 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""This module provides a TokenStore class which is designed to manage +auth tokens required for different services. + +Each token is valid for a set of scopes which is the start of a URL. An HTTP +client will use a token store to find a valid Authorization header to send +in requests to the specified URL. If the HTTP client determines that a token +has expired or been revoked, it can remove the token from the store so that +it will not be used in future requests. +""" + + +__author__ = 'api.jscudder (Jeff Scudder)' + + +import atom.http_interface +import atom.url + + +SCOPE_ALL = 'http' + + +class TokenStore(object): + """Manages Authorization tokens which will be sent in HTTP headers.""" + def __init__(self, scoped_tokens=None): + self._tokens = scoped_tokens or {} + + def add_token(self, token): + """Adds a new token to the store (replaces tokens with the same scope). + + Args: + token: A subclass of http_interface.GenericToken. The token object is + responsible for adding the Authorization header to the HTTP request. + The scopes defined in the token are used to determine if the token + is valid for a requested scope when find_token is called. + + Returns: + True if the token was added, False if the token was not added becase + no scopes were provided. 
+ """ + if not hasattr(token, 'scopes') or not token.scopes: + return False + + for scope in token.scopes: + self._tokens[str(scope)] = token + return True + + def find_token(self, url): + """Selects an Authorization header token which can be used for the URL. + + Args: + url: str or atom.url.Url or a list containing the same. + The URL which is going to be requested. All + tokens are examined to see if any scopes begin match the beginning + of the URL. The first match found is returned. + + Returns: + The token object which should execute the HTTP request. If there was + no token for the url (the url did not begin with any of the token + scopes available), then the atom.http_interface.GenericToken will be + returned because the GenericToken calls through to the http client + without adding an Authorization header. + """ + if url is None: + return None + if isinstance(url, (str, unicode)): + url = atom.url.parse_url(url) + if url in self._tokens: + token = self._tokens[url] + if token.valid_for_scope(url): + return token + else: + del self._tokens[url] + for scope, token in self._tokens.iteritems(): + if token.valid_for_scope(url): + return token + return atom.http_interface.GenericToken() + + def remove_token(self, token): + """Removes the token from the token_store. + + This method is used when a token is determined to be invalid. If the + token was found by find_token, but resulted in a 401 or 403 error stating + that the token was invlid, then the token should be removed to prevent + future use. + + Returns: + True if a token was found and then removed from the token + store. False if the token was not in the TokenStore. + """ + token_found = False + scopes_to_delete = [] + for scope, stored_token in self._tokens.iteritems(): + if stored_token == token: + scopes_to_delete.append(scope) + token_found = True + for scope in scopes_to_delete: + del self._tokens[scope] + return token_found + + def remove_all_tokens(self): + self._tokens = {} diff --git a/gam/atom/token_store.pyc b/gam/atom/token_store.pyc new file mode 100644 index 0000000000000000000000000000000000000000..dd948385e96fd1c5aaa6bd9469f56d2fb24ec8e0 GIT binary patch literal 4398 zcmcInU2_~q747}dTG~j7NC?EiBn?GT>x#W{AbD}+OIQIy8R6QM%Y_SPsx{NAZFzQP zHQg)8l=S3Oehbe$z>nbP@Wd0Gb9;8Ra$-M;m8ac3)6;$L?Q_n()B5*D_vf>JK1fyl zYvKPdaQPiHnNp9@Hq_&W%9<)`sl2Hkx90t}^wF-UY(?3Q8ev*TeWTQKrT+Z*8ebUp zu5q=0HI&*LJFh2YHp{J^R^_qFtk*{Gl}~Lk3}t0?nj7!+$=Ic1jk$R44hoy;Q0j>( z%z^EiSr}u8H@vRw(acqNJSr=lxzWg0wg}qW>e!{$_q*LM4HlBwT<+N9E>1R>6m&V# zJ}sv(VzFZwTkV6X&JkG2Z&%c~5qo+SH94ypc`68p&O5jHt-QnGLp@xB~Wu zu?h42yJO?EJ)KG<5Qu$ix%x3;l-<%U=+tmLDUYoz8I2xQ-5pe}Y3bQ`bNA=`5pby+7E|NDt%aru#2`yP=I);}E8yi_YHS zoKuYC*F^gpT>djO7GMK@&;n^4qV5|2uT8K4QN&nFI(XEU4)C=iomDlcVfF>q`J*7k z>&qt~ZqH9ze1v~Ej6nY+%<+tTKc!W?TDb1v@^7IzYpCg(a_A_P2s4H~U|Q`;8wm0w z?$P7B;<~r_C~ROPFw5r8aYOakc>2Ee5PqGD+eJ7m|bvds?6x;s~AS;1Wl^LMMnXi0<>I zTKDhLvBQR1uijIksfMS&sBi2%xvtKe9Khkj79Bcks^<;$$x%-|3M(Ksck%y05Vo_7 zL@w+}O^G?C1zWw!PICi7#)miqM-G!|W`c$kya}>u{I;tDe^+`s&c3U+wV&-rVE|!? 
zCU^=ajIdv}ORURgga3-9)!Ka>oUWv)07OD<#O zf~~Ytz)?fLzqA~8tMwzZ;C`eE--tFCFh&T3Xe_v^T_mbMOo2wE&cw^z;VrJeK%L<0U&5`JrPsMoR`@|2X986bXYyseEZAX&9Y zLSE;(jeQx>KxD{)bHrW{lkfK4lz@HIF z(Nxu466A129d`H#G&ygmLf#~OuBoRTRUHeJu5rS3Qmd&BSL@l8<8T<~;`yg|_w-IZ z|8R}@bmz(Y>Mu=o-V$0azx%Dcd-N@62^+fV@HNh_`T2LmeQ0yA0RSjo5XA-15%xL> zGF!}*pCloknUTKB&smkb;~SI;MDd0d8K9Eo#R+ z@(8K!(Li=GR4*|x(=Sm`dRx1&tSB{O&9QO0f!y7^MK23+RgUTvG8zj296)W;+-W90L ziqsBBW6_#f;nEV~O1jc95g5cOaK_YB-X<7D?Rf*Yq&7zvMU=7}6?=?1@boE+AWxA03&oM%_#&2`I(Qdqrzc(5ixZ2H)#tr`LmfcWiEvEdlHifK#;9`NJ z8~pd^^c_Cjd9ta_Q4m1dAcbx9>Cr>=hyo_XkpCNMf`L?=gvC4`E=o>8YF)iVS&3f; z7WfdpN!FBjApvWsl7(iR)1sKSxX#IU-8hG(zN z*mH+3TrkieA#beV@c}b0-7hhorB?q{xgC;0SN0ZFcw}0Sd3UhNh8cSBHX%b0nduat zDg&ZWa(e_l@gAh%I$#ncGHl{zlpG}1Bty0dR!AZAkutSTxmV8;axX?ECiSmr3#qBr zJB<#Qu@08-@AXE?dYu;?6@IWbcP}aFB^-ih@%-v7{{c3yEk`LAt@mK-ynNs7iOlh6 zlB7WDOcK$-B#Gb97|K^il9g$a3@Ba$IZS#rc!$kT*}RA5!g}dl(TB%?!TcpI&r@Ky z*IR2pTzh}*M?G3S_!*koWPZ0pb(6qvsqaZa14ctWWaQyo!iV?%^x(^6`~H1_WX!L! c>;K0gG2oLJ-p{VXA}J3$txjtlu60`f1Na218~^|S literal 0 HcmV?d00001 diff --git a/gam/atom/url.py b/gam/atom/url.py new file mode 100755 index 00000000000..0effa1033ce --- /dev/null +++ b/gam/atom/url.py @@ -0,0 +1,139 @@ +#!/usr/bin/python +# +# Copyright (C) 2008 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +__author__ = 'api.jscudder (Jeff Scudder)' + + +import urlparse +import urllib + + +DEFAULT_PROTOCOL = 'http' +DEFAULT_PORT = 80 + + +def parse_url(url_string): + """Creates a Url object which corresponds to the URL string. + + This method can accept partial URLs, but it will leave missing + members of the Url unset. + """ + parts = urlparse.urlparse(url_string) + url = Url() + if parts[0]: + url.protocol = parts[0] + if parts[1]: + host_parts = parts[1].split(':') + if host_parts[0]: + url.host = host_parts[0] + if len(host_parts) > 1: + url.port = host_parts[1] + if parts[2]: + url.path = parts[2] + if parts[4]: + param_pairs = parts[4].split('&') + for pair in param_pairs: + pair_parts = pair.split('=') + if len(pair_parts) > 1: + url.params[urllib.unquote_plus(pair_parts[0])] = ( + urllib.unquote_plus(pair_parts[1])) + elif len(pair_parts) == 1: + url.params[urllib.unquote_plus(pair_parts[0])] = None + return url + +class Url(object): + """Represents a URL and implements comparison logic. + + URL strings which are not identical can still be equivalent, so this object + provides a better interface for comparing and manipulating URLs than + strings. URL parameters are represented as a dictionary of strings, and + defaults are used for the protocol (http) and port (80) if not provided. 
+ """ + def __init__(self, protocol=None, host=None, port=None, path=None, + params=None): + self.protocol = protocol + self.host = host + self.port = port + self.path = path + self.params = params or {} + + def to_string(self): + url_parts = ['', '', '', '', '', ''] + if self.protocol: + url_parts[0] = self.protocol + if self.host: + if self.port: + url_parts[1] = ':'.join((self.host, str(self.port))) + else: + url_parts[1] = self.host + if self.path: + url_parts[2] = self.path + if self.params: + url_parts[4] = self.get_param_string() + return urlparse.urlunparse(url_parts) + + def get_param_string(self): + param_pairs = [] + for key, value in self.params.iteritems(): + param_pairs.append('='.join((urllib.quote_plus(key), + urllib.quote_plus(str(value))))) + return '&'.join(param_pairs) + + def get_request_uri(self): + """Returns the path with the parameters escaped and appended.""" + param_string = self.get_param_string() + if param_string: + return '?'.join([self.path, param_string]) + else: + return self.path + + def __cmp__(self, other): + if not isinstance(other, Url): + return cmp(self.to_string(), str(other)) + difference = 0 + # Compare the protocol + if self.protocol and other.protocol: + difference = cmp(self.protocol, other.protocol) + elif self.protocol and not other.protocol: + difference = cmp(self.protocol, DEFAULT_PROTOCOL) + elif not self.protocol and other.protocol: + difference = cmp(DEFAULT_PROTOCOL, other.protocol) + if difference != 0: + return difference + # Compare the host + difference = cmp(self.host, other.host) + if difference != 0: + return difference + # Compare the port + if self.port and other.port: + difference = cmp(self.port, other.port) + elif self.port and not other.port: + difference = cmp(self.port, DEFAULT_PORT) + elif not self.port and other.port: + difference = cmp(DEFAULT_PORT, other.port) + if difference != 0: + return difference + # Compare the path + difference = cmp(self.path, other.path) + if difference != 0: + return difference + # Compare the parameters + return cmp(self.params, other.params) + + def __str__(self): + return self.to_string() + diff --git a/gam/atom/url.pyc b/gam/atom/url.pyc new file mode 100644 index 0000000000000000000000000000000000000000..346791387267835be50789a213ee84877e685286 GIT binary patch literal 4199 zcmcIn-Hseb6+Yebx3k`0Yn_Cwh@}RhXdLX_BoKij0|6T@A~SY+Y%N(4b$YvNW_q@# zySJ;xiN#v7l-wXtuDC?vHkaJN4R4TF;3;?j_`cIU94Ptv}a zzD&V3;L4{W9~=#Melpsk*l%Hqt89N^lU16UIvBlYrqf{3d|l7}g`ctOf@|(vmE8xC zc$fwleT>@}_7E#hWFc|}6`nlt#Cg>9#FsNQa<6`CO`Z7?`BH#aZQrXuUsDTM-_i3O zIRovv0KMaEX(y@HlvV*&|0^PCsAE?&A0tDjO6x2kV_4b|xP6gQC{H zYVFD=zn8PF-1+dSJb{*;aBrY{ebuXct=?;%+mgjl&ibky20_PSTQ;k(AFH=czdV1| zm-2G-SCG27n`0{lLhXEsvULiffm}Ir;64;CSVN3{tZ^4{{Sf zGQl#l7D6CkX_m*Pwm~^<6u^j8VU45w4E9OJyg=MV-pPD^Gi&twHJyJv9_?_2dAF)_ zXvXOdK5!phtm@L0Ntq+UxO0_fu1UPoI_1kvp$k=6H#=4A=8jniad~#^D1KF(u1aU3 zDqmUMgPwS4_29TH%!rxmwwpsy<04f^PgvChY05B%;;31^1b!8S@2qa5ky1Vq4c`cC zqLY0G)WJgwj~$$3_3`6_JLShkUdE|Cn8nMRZ|%SR_RZ=s!cp9`brOu;ynYb7a(Mvd z`&eh#q0pKL3)k>rU&kc<%ihr2_O4)D)xW##Uw*&fUG{eU*S%fuyIx2iCOpC)`sn8v zc7n+O7?Apyff>Mp=YKuTI-ma$0jhR ztL#zCjJOe4raZE{sR^Vb66q1n(S&2;98fJQFi+#e1kJxAg9R=yRX%x;WrQiv9PUPU(}>QOigK)xU4MV`Y}tSJ$i#0ASpG|c$L#BIJ&Yp zLpxZg>!c4x1la3p5feNZ{pgMBK{izV0t%SkkVCme(us_5EFa!2R3K}Hv2@nMV z1nw$<&z@8_1+~g`^augGuw2I@CQC?}A&WC)b%ad$@Ee#$+|c^5CZB#a8@Mhe_h~~E zWku$qNVAsqh(AQ?^It-n2{5!v9q@M$?1blkXMHvz+;t&Rzu6K^)44@-#fB&KZPn3qym{xtcjY%eUl~sjH;>^$4j_i`%L0MWxHU9dLGx;O7Ech~>C1A6cFw4`fuJA9 zA?J{O(ZFaGsB}T$qpCWk_aG)aV}u-F;POA7RN7a>U<((_Z!y2A+3i4_1f$8RdxLgr8vlr@q*c 
zI5-I|#6uKYzzR(tY)z1WeDz`vknn46G(>wq;Q=dP1zT}{&H1$D+^L_gW%mSG*7A*7 zzMbYpOOaoYk7=zMT0R)5HE`0A`fvJxZ1oP54;Z9+H%$LfCYrk?^?+~UhG>uBm8vV} z%mwE`%ee#U87oTUoVi=FQy;EH9+D3n=j1(a?#zQ#lUCTa@@@Zzxqo<`>5?*CBGW{( zXRP4ayfRq?8J@1z#YJNDIi$sxEUPfUo>r$eGeycfzxd_P9^QQr-4E|QxOe;B-AyCZ zSLL(qd*Oo(#x`lJ(8&&3q5_&1VVX^+rZ!M~aj8o_q>14Hr#CQd3|8uYe3NSq@mX)# zTKj`OoV+s_ULFpHTf=Ybf2Uh>;}zcZxSO_HZ4Ke~XwwZ&-^8Ttqj|2X)QdG#J$S3} W*-v(%&Js}j9hlqcd;QMv+P?vr(P*Ur literal 0 HcmV?d00001 diff --git a/gam/gam.py.symlink b/gam/gam.py.symlink new file mode 100755 index 00000000000..152a875122f --- /dev/null +++ b/gam/gam.py.symlink @@ -0,0 +1,6748 @@ +#!/usr/bin/env python +# +# Google Apps Manager +# +# Copyright 2012 Dito, LLC All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Google Apps Manager (GAM) is a command line tool which allows Administrators to control their Google Apps domain and accounts. + +With GAM you can programatically create users, turn on/off services for users like POP and Forwarding and much more. +For more information, see http://code.google.com/p/google-apps-manager + +""" + +__author__ = 'jay@ditoweb.com (Jay Lee)' +__version__ = '2.55' +__license__ = 'Apache License 2.0 (http://www.apache.org/licenses/LICENSE-2.0)' + +import sys, os, time, datetime, random, cgi, socket, urllib, urllib2, csv, getpass, platform, re, webbrowser, pickle, calendar, struct +import xml.dom.minidom +from sys import exit +import gdata.apps.service +import gdata.apps.emailsettings.service +import gdata.apps.adminsettings.service +import gdata.apps.groups.service +import gdata.apps.audit.service +try: + import gdata.apps.adminaudit.service +except ImportError: + pass +import gdata.apps.multidomain.service +import gdata.apps.orgs.service +import gdata.apps.res_cal.service +import gdata.calendar +import gdata.calendar.service +import gdata.apps.groupsettings.service +import gdata.apps.reporting.service +import gdata.auth +import atom +import gdata.contacts +import gdata.contacts.service +from hashlib import sha1 + +def showUsage(): + doGAMVersion() + print ''' +Usage: gam [OPTIONS]... + +Google Apps Manager. Retrieve or set Google Apps domain, +user, group and alias settings. Exhaustive list of commands +can be found at: http://code.google.com/p/google-apps-manager/wiki + +Examples: +gam info domain +gam create user jsmith firstname John lastname Smith password secretpass +gam update user jsmith suspended on +gam.exe update group announcements add member jsmith +... 
+
+'''
+
+def getGamPath():
+ if os.name == 'windows' or os.name == 'nt':
+ divider = '\\'
+ else:
+ divider = '/'
+ return os.path.dirname(os.path.realpath(sys.argv[0]))+divider
+
+def doGAMVersion():
+ print 'Google Apps Manager %s\r\n%s\r\nPython %s.%s.%s %s-bit %s\r\n%s %s' % (__version__, __author__,
+ sys.version_info[0], sys.version_info[1], sys.version_info[2], struct.calcsize('P')*8,
+ sys.version_info[3], platform.platform(), platform.machine())
+
+def doGAMCheckForUpdates():
+ if os.path.isfile(getGamPath()+'noupdatecheck.txt'): return
+ if os.path.isfile(getGamPath()+'lastupdatecheck.txt'):
+ f = open(getGamPath()+'lastupdatecheck.txt', 'r')
+ last_check_time = int(f.readline())
+ f.close()
+ else:
+ last_check_time = 0
+ now_time = calendar.timegm(time.gmtime())
+ one_week_ago_time = now_time - 604800
+ if last_check_time > one_week_ago_time: return
+ try:
+ c = urllib2.urlopen('https://gam-update.appspot.com/latest-version.txt?v=%s' % __version__)
+ try:
+ latest_version = float(c.read())
+ except ValueError:
+ return
+ current_version = float(__version__)
+ if latest_version <= current_version:
+ f = open(getGamPath()+'lastupdatecheck.txt', 'w')
+ f.write(str(now_time))
+ f.close()
+ return
+ a = urllib2.urlopen('https://gam-update.appspot.com/latest-version-announcement.txt')
+ announcement = a.read()
+ sys.stderr.write(announcement)
+ visit_gam = raw_input("\n\nHit Y to visit the GAM website and download the latest release. Hit Enter to just continue with this boring old version. GAM won't bother you with this announcement for 1 week or you can create a file named noupdatecheck.txt in the same location as gam.py or gam.exe and GAM won't ever check for updates: ")
+ if visit_gam.lower() == 'y':
+ webbrowser.open('http://google-apps-manager.googlecode.com')
+ print 'GAM is now exiting so that you can overwrite this old version with the latest release'
+ sys.exit(0)
+ f = open(getGamPath()+'lastupdatecheck.txt', 'w')
+ f.write(str(now_time))
+ f.close()
+ except urllib2.HTTPError:
+ return
+ except urllib2.URLError:
+ return
+
+def commonAppsObjInit(appsObj):
+ #Identify GAM to Google's Servers
+ appsObj.source = 'Google Apps Manager %s / %s / Python %s.%s.%s %s / %s %s /' % (__version__, __author__,
+ sys.version_info[0], sys.version_info[1], sys.version_info[2],
+ sys.version_info[3], platform.platform(), platform.machine())
+ #Show debugging output if debug.gam exists
+ if os.path.isfile(getGamPath()+'debug.gam'):
+ appsObj.debug = True
+ return appsObj
+
+def checkErrorCode(e):
+ if e[0]['body'][:34] == 'Required field must not be blank: ' or e[0]['body'][:34] == 'These characters are not allowed: ':
+ return e[0]['body']
+ if e.error_code == 600 and e[0]['body'] == 'Quota exceeded for the current request' or e[0]['reason'] == 'Bad Gateway':
+ return False
+ if e.error_code == 600 and e[0]['reason'] == 'Token invalid - Invalid token: Token disabled, revoked, or expired.':
+ return '403 - Token disabled, revoked, or expired. 
Please delete and re-create oauth.txt' + if e.error_code == 1000: # UnknownError + return False + elif e.error_code == 1001: # ServerBusy + return False + elif e.error_code == 1002: + return '1002 - Unauthorized and forbidden' + elif e.error_code == 1100: + return '1100 - User deleted recently' + elif e.error_code == 1200: + return '1200 - Domain user limit exceeded' + elif e.error_code == 1201: + return '1201 - Domain alias limit exceeded' + elif e.error_code == 1202: + return '1202 - Domain suspended' + elif e.error_code == 1203: + return '1203 - Domain feature unavailable' + elif e.error_code == 1300: + if e.invalidInput != '': + return '1300 - Entity %s exists' % e.invalidInput + else: + return '1300 - Entity exists' + elif e.error_code == 1301: + if e.invalidInput != '': + return '1301 - Entity %s Does Not Exist' % e.invalidInput + else: + return '1301 - Entity Does Not Exist' + elif e.error_code == 1302: + return '1302 - Entity Name Is Reserved' + elif e.error_code == 1303: + if e.invalidInput != '': + return '1303 - Entity %s name not valid' % e.invalidInput + else: + return '1303 - Entity name not valid' + elif e.error_code == 1306: + if e.invalidInput != '': + return '1306 - %s has members. Cannot delete.' % e.invalidInput + else: + return '1306 - Entity has members. Cannot delete.' + elif e.error_code == 1400: + return '1400 - Invalid Given Name' + elif e.error_code == 1401: + return '1401 - Invalid Family Name' + elif e.error_code == 1402: + return '1402 - Invalid Password' + elif e.error_code == 1403: + return '1403 - Invalid Username' + elif e.error_code == 1404: + return '1404 - Invalid Hash Function Name' + elif e.error_code == 1405: + return '1405 - Invalid Hash Digest Length' + elif e.error_code == 1406: + return '1406 - Invalid Email Address' + elif e.error_code == 1407: + return '1407 - Invalid Query Parameter Value' + elif e.error_code == 1408: + return '1408 - Invalid SSO Signing Key' + elif e.error_code == 1500: + return '1500 - Too Many Recipients On Email List' + elif e.error_code == 1501: + return '1501 - Too Many Nicknames For User' + elif e.error_code == 1502: + return '1502 - Too Many Delegates For User' + elif e.error_code == 1601: + return '1601 - Duplicate Destinations' + elif e.error_code == 1602: + return '1602 - Too Many Destinations' + elif e.error_code == 1603: + return '1603 - Invalid Route Address' + elif e.error_code == 1700: + return '1700 - Group Cannot Contain Cycle' + elif e.error_code == 1800: + return '1800 - Invalid Domain Edition' + elif e.error_code == 1801: + if e.invalidInput != '': + return '1801 - Invalid value %s' % e.invalidInput + else: + return '1801 - Invalid Value' + else: + return '%s: Unknown Error: %s' % (e.error_code, str(e)) + +def tryOAuth(gdataObject): + global domain + oauth_filename = 'oauth.txt' + try: + oauth_filename = os.environ['OAUTHFILE'] + except KeyError: + pass + if os.path.isfile(getGamPath()+oauth_filename): + oauthfile = open(getGamPath()+oauth_filename, 'rb') + domain = oauthfile.readline()[0:-1] + try: + token = pickle.load(oauthfile) + oauthfile.close() + except ImportError: # Deals with tokens created by windows on old GAM versions. 
Rewrites them with binary mode set + oauthfile = open(getGamPath()+oauth_filename, 'r') + domain = oauthfile.readline()[0:-1] + try: + token = pickle.load(oauthfile) + oauthfile.close() + f = open(getGamPath()+oauth_filename, 'wb') + f.write('%s\n' % (domain,)) + pickle.dump(token, f) + f.close() + except ImportError: # Deals with stupid issue where gdata and atom were copied inside existing folder for awhile and pickle got confused on token creation. Rewrites token. + oauthfile = open(getGamPath()+oauth_filename, 'r') + domain = oauthfile.readline()[0:-1] + token_string = oauthfile.read() + oauthfile.close() + token_string = token_string.replace('gdata.gdata', 'gdata') + token = pickle.loads(token_string) + f = open(getGamPath()+oauth_filename, 'wb') + f.write('%s\n' % (domain,)) + pickle.dump(token, f) + f.close() + gdataObject.domain = domain + gdataObject.SetOAuthInputParameters(gdata.auth.OAuthSignatureMethod.HMAC_SHA1, consumer_key=token.oauth_input_params._consumer.key, consumer_secret=token.oauth_input_params._consumer.secret) + token.oauth_input_params = gdataObject._oauth_input_params + gdataObject.SetOAuthToken(token) + return True + else: + return False + +def getAppsObject(): + apps = gdata.apps.service.AppsService() + if not tryOAuth(apps): + doRequestOAuth() + tryOAuth(apps) + apps = commonAppsObjInit(apps) + return apps + +def getProfilesObject(): + profiles = gdata.contacts.service.ContactsService(contact_list='domain') + profiles.ssl = True + if not tryOAuth(profiles): + doRequestOAuth() + tryOAuth(profiles) + profiles = commonAppsObjInit(profiles) + return profiles + +def getCalendarObject(): + calendars = gdata.calendar.service.CalendarService() + calendars.ssl = True + if not tryOAuth(calendars): + doRequestOAuth() + tryOAuth(calendars) + calendars = commonAppsObjInit(calendars) + return calendars + +def getGroupSettingsObject(): + groupsettings = gdata.apps.groupsettings.service.GroupSettingsService() + if not tryOAuth(groupsettings): + doRequestOAuth() + tryOAuth(groupsettings) + groupsettings = commonAppsObjInit(groupsettings) + return groupsettings + +def getEmailSettingsObject(): + emailsettings = gdata.apps.emailsettings.service.EmailSettingsService() + if not tryOAuth(emailsettings): + doRequestOAuth() + tryOAuth(emailsettings) + emailsettings = emailsettings = commonAppsObjInit(emailsettings) + return emailsettings + +def getAdminSettingsObject(): + global domain + adminsettings = gdata.apps.adminsettings.service.AdminSettingsService() + if not tryOAuth(adminsettings): + doRequestOAuth() + tryOAuth(adminsettings) + adminsettings = commonAppsObjInit(adminsettings) + return adminsettings + +def getGroupsObject(): + global domain + groupsObj = gdata.apps.groups.service.GroupsService() + if not tryOAuth(groupsObj): + doRequestOAuth() + tryOAuth(groupsObj) + groupsObj = commonAppsObjInit(groupsObj) + return groupsObj + +def getAuditObject(): + auditObj = gdata.apps.audit.service.AuditService() + if not tryOAuth(auditObj): + doRequestOAuth() + tryOAuth(auditObj) + auditObj = commonAppsObjInit(auditObj) + return auditObj + +def getAdminAuditObject(): + try: + adminAuditObj = gdata.apps.adminaudit.service.AdminAuditService() + except AttributeError: + print "gam audit admin commands require Python 2.6 or 2.7" + sys.exit(3) + if not tryOAuth(adminAuditObj): + doRequestOAuth() + tryOAuth(adminAuditObj) + adminAuditObj = commonAppsObjInit(adminAuditObj) + return adminAuditObj + +def getMultiDomainObject(): + multidomainObj = 
gdata.apps.multidomain.service.MultiDomainService() + if not tryOAuth(multidomainObj): + doRequestOAuth() + tryOAuth(multidomainObj) + multidomainObj = commonAppsObjInit(multidomainObj) + return multidomainObj + +def getOrgObject(): + orgObj = gdata.apps.orgs.service.OrganizationService() + if not tryOAuth(orgObj): + doRequestOAuth() + tryOAuth(orgObj) + orgObj = commonAppsObjInit(orgObj) + return orgObj + +def getResCalObject(): + resCalObj = gdata.apps.res_cal.service.ResCalService() + if not tryOAuth(resCalObj): + doRequestOAuth() + tryOAuth(resCalObj) + resCalObj = commonAppsObjInit(resCalObj) + return resCalObj + +def getRepObject(): + repObj = gdata.apps.reporting.service.ReportService() + if not tryOAuth(repObj): + doRequestOAuth() + tryOAuth(repObj) + repObj = commonAppsObjInit(repObj) + return repObj + +def _reporthook(numblocks, blocksize, filesize, url=None): + base = os.path.basename(url) + #XXX Should handle possible filesize=-1. + try: + percent = min((numblocks*blocksize*100)/filesize, 100) + except: + percent = 100 + if numblocks != 0: + sys.stdout.write("\b"*70) + sys.stdout.write(str(percent)+'% ') + #print str(percent)+"%\b\b" + +def geturl(url, dst): + if sys.stdout.isatty(): + urllib.urlretrieve(url, dst, + lambda nb, bs, fs, url=url: _reporthook(nb,bs,fs,url)) + sys.stdout.write('\n') + else: + urllib.urlretrieve(url, dst) + +def showReport(): + report = sys.argv[2].lower() + date = page = None + if len(sys.argv) > 3: + date = sys.argv[3] + rep = getRepObject() + try_count = 0 + wait_on_fail = .5 + while try_count < 10: + try: + report_data = rep.retrieve_report(report=report, date=date) + break + except gdata.apps.service.AppsForYourDomainException, e: + terminating_error = checkErrorCode(e) + if not terminating_error: + try_count = try_count + 1 + if try_count > 5: sys.stderr.write('Temporary error %s. 
Retry %s in %s seconds\n' % (str(e.error_code), try_count, str(wait_on_fail))) + time.sleep(wait_on_fail) + wait_on_fail = wait_on_fail * 2 if wait_on_fail < 32 else 60 + continue + elif e.error_code == 600 and e[0]['reason'] == 'Bad Request': + sys.stderr.write('Error: Bad request - No report by that name\n') + sys.exit(e.error_code) + else: + sys.stderr.write('Error: %s\n' % terminating_error) + sys.exit(e.error_code) + if try_count == 10: + sys.stderr.write('Giving up\n') + sys.exit(e.error_code) + sys.stdout.write(report_data) + +def doDelegates(users): + emailsettings = getEmailSettingsObject() + if sys.argv[4].lower() == 'to': + delegate = sys.argv[5].lower() + #delegate needs to be a full email address, tack + #on domain of 1st user if there isn't one + if not delegate.find('@') > 0: + delegate_domain = domain.lower() + delegate_email = '%s@%s' % (delegate, delegate_domain) + else: + delegate_domain = delegate[delegate.find('@')+1:].lower() + delegate_email = delegate + else: + showUsage() + exit(6) + count = len(users) + i = 1 + for delegator in users: + if delegator.find('@') > 0: + delegator_domain = delegator[delegator.find('@')+1:].lower() + delegator_email = delegator + delegator = delegator[:delegator.find('@')] + else: + delegator_domain = domain.lower() + delegator_email = '%s@%s' % (delegator, delegator_domain) + emailsettings.domain = delegator_domain + print "Giving %s delegate access to %s (%s of %s)" % (delegate_email, delegator_email, i, count) + i = i + 1 + delete_alias = False + if delegate_domain == delegator_domain: + use_delegate_address = delegate_email + else: + # Need to use an alias in delegator domain, first check to see if delegate already has one... + multi = getMultiDomainObject() + try_count = 0 + wait_on_fail = .5 + hard_fail = False + while try_count < 10: + try: + aliases = multi.GetUserAliases(delegate_email) + break + except gdata.apps.service.AppsForYourDomainException, e: + terminating_error = checkErrorCode(e) + if not terminating_error: + try_count = try_count + 1 + if try_count > 5: sys.stderr.write('Temporary error %s. Retry %s in %s seconds\n' % (str(e.error_code), try_count, str(wait_on_fail))) + time.sleep(wait_on_fail) + wait_on_fail = wait_on_fail * 2 if wait_on_fail < 32 else 60 + continue + else: + sys.stderr.write('Error: %s\n' % terminating_error) + hard_fail = True + break + if try_count == 10 or hard_fail: + sys.stderr.write('Giving up\n') + continue + found_alias_in_delegator_domain = False + for alias in aliases: + alias_domain = alias['aliasEmail'][alias['aliasEmail'].find('@')+1:].lower() + if alias_domain == delegator_domain: + use_delegate_address = alias['aliasEmail'] + print ' Using existing alias %s for delegation' % use_delegate_address + found_alias_in_delegator_domain = True + break + if not found_alias_in_delegator_domain: + delete_alias = True + use_delegate_address = '%s@%s' % (''.join(random.sample('abcdefghijklmnopqrstuvwxyz0123456789', 10)), delegator_domain) + print ' Giving %s temporary alias %s for delegation' % (delegate_email, use_delegate_address) + try_count = 0 + wait_on_fail = .5 + hard_fail = False + while try_count < 10: + try: + multi.CreateAlias(user_email=delegate_email, alias_email=use_delegate_address) + break + except gdata.apps.service.AppsForYourDomainException, e: + terminating_error = checkErrorCode(e) + if not terminating_error: + try_count = try_count + 1 + if try_count > 5: sys.stderr.write('Temporary error %s. 
Retry %s in %s seconds\n' % (str(e.error_code), try_count, str(wait_on_fail))) + time.sleep(wait_on_fail) + wait_on_fail = wait_on_fail * 2 if wait_on_fail < 32 else 60 + continue + else: + sys.stderr.write('Error: %s\n' % terminating_error) + hard_fail = True + break + if try_count == 10 or hard_fail: + sys.stderr.write('Giving up\n') + continue + time.sleep(5) + try_count = 0 + wait_on_fail = .5 + hard_fail = False + while try_count < 10: + try: + emailsettings.CreateDelegate(delegate=use_delegate_address, delegator=delegator) + break + except gdata.apps.service.AppsForYourDomainException, e: + terminating_error = checkErrorCode(e) + if not terminating_error: + if try_count == 0: + get_try_count = 0 + get_wait_on_fail = .5 + get_hard_fail = False + while get_try_count < 10: + try: + get_delegates = emailsettings.GetDelegates(delegator=delegator) + break + except gdata.apps.service.AppsForYourDomainException, get_e: + get_terminating_error = checkErrorCode(get_e) + if not get_terminating_error: + get_try_count = get_try_count + 1 + if get_try_count > 5: sys.stderr.write('Temporary error %s. Retry %s in %s seconds\n' % (str(get_e.error_code), get_try_count, str(get_wait_on_fail))) + time.sleep(get_wait_on_fail) + get_wait_on_fail = get_wait_on_fail * 2 if get_wait_on_fail < 32 else 60 + continue + else: + sys.stderr.write('Error: %s\n' % get_terminating_error) + get_hard_fail = True + break + if get_try_count == 10 or get_hard_fail: + sys.stderr.write('Giving up\n') + continue + for get_delegate in get_delegates: + if get_delegate['address'].lower() == delegate_email: # Delegation is already in place + if delete_alias: + print ' Deleting temporary alias...' + doDeleteNickName(alias_email=use_delegate_address) + sys.exit(0) # Emulate functionality of duplicate delegation between users in same domain, returning clean + + #Check if either user account is suspended or requires password change + multi = getMultiDomainObject() + prov_try_count = 0 + prov_wait_on_fail = .5 + while prov_try_count < 10: + try: + delegate_user_details = multi.RetrieveUser(delegate_email) + delegator_user_details = multi.RetrieveUser(delegator_email) + break + except gdata.apps.service.AppsForYourDomainException, prov_e: + prov_terminating_error = checkErrorCode(prov_e) + if not prov_terminating_error: + prov_try_count = prov_try_count + 1 + if prov_try_count > 5: sys.stderr.write('Temporary error %s. Retry %s in %s seconds\n' % (str(prov_e.error_code), prov_try_count, str(prov_wait_on_fail))) + time.sleep(prov_wait_on_fail) + prov_wait_on_fail = prov_wait_on_fail * 2 if prov_wait_on_fail < 32 else 60 + continue + else: + sys.stderr.write('Error: %s\n' % prov_terminating_error) + sys.exit(prov_e.error_code) + if prov_try_count == 10: + sys.stderr.write('Giving up\n') + sys.exit(prov_e.error_code) + if delegate_user_details['isSuspended'] == 'true': + sys.stderr.write('Error: User %s is suspended. You must unsuspend for delegation.\n' % delegate_email) + sys.exit(5) + if delegator_user_details['isSuspended'] == 'true': + sys.stderr.write('Error: User %s is suspended. You must unsuspend for delegation.\n' % delegator_email) + sys.exit(5) + if delegate_user_details['isChangePasswordAtNextLogin'] == 'true': + sys.stderr.write('Error: User %s is required to change password at next login. 
You must change password or clear changepassword flag for delegation.\n' % delegate_email) + sys.exit(5) + if delegator_user_details['isChangePasswordAtNextLogin'] == 'true': + sys.stderr.write('Error: User %s is required to change password at next login. You must change password or clear changepassword flag for delegation.\n' % delegator_email) + sys.exit(5) + try_count = try_count + 1 + if try_count > 5: sys.stderr.write('Temporary error %s. Retry %s in %s seconds\n' % (str(e.error_code), try_count, str(wait_on_fail))) + time.sleep(wait_on_fail) + wait_on_fail = wait_on_fail * 2 if wait_on_fail < 32 else 60 + continue + else: + sys.stderr.write('Error: %s\n' % terminating_error) + hard_fail = True + break + if try_count == 10 or hard_fail: + sys.stderr.write('Giving up\n') + continue + time.sleep(10) + if delete_alias: + print ' Deleting temporary alias...' + doDeleteNickName(alias_email=use_delegate_address) + +def getDelegates(users): + emailsettings = getEmailSettingsObject() + csv_format = False + try: + if sys.argv[5].lower() == 'csv': + csv_format = True + except IndexError: + pass + for user in users: + if user.find('@') > 0: + emailsettings.domain = user[user.find('@')+1:] + user = user[:user.find('@')] + else: + emailsettings.domain = domain + sys.stderr.write("Getting delegates for %s...\n" % (user + '@' + emailsettings.domain)) + try_count = 0 + wait_on_fail = .5 + hard_fail = False + while try_count < 10: + try: + delegates = emailsettings.GetDelegates(delegator=user) + break + except gdata.apps.service.AppsForYourDomainException, e: + terminating_error = checkErrorCode(e) + if not terminating_error: + try_count = try_count + 1 + if try_count > 5: sys.stderr.write('Temporary error %s. Retry %s in %s seconds\n' % (str(e.error_code), try_count, str(wait_on_fail))) + time.sleep(wait_on_fail) + wait_on_fail = wait_on_fail * 2 if wait_on_fail < 32 else 60 + continue + else: + sys.stderr.write('Error: %s\n' % terminating_error) + hard_fail = True + break + if try_count == 10 or hard_fail: + sys.stderr.write('Giving up\n') + continue + for delegate in delegates: + if csv_format: + print '%s,%s,%s' % (user + '@' + emailsettings.domain, delegate['address'], delegate['status']) + else: + print "Delegator: %s\n Delegate: %s\n Status: %s\n Delegate Email: %s\n Delegate ID: %s\n" % (user, delegate['delegate'], delegate['status'], delegate['address'], delegate['delegationId']) + +def deleteDelegate(users): + emailsettings = getEmailSettingsObject() + delegate = sys.argv[5] + if not delegate.find('@') > 0: + if users[0].find('@') > 0: + delegatedomain = users[0][users[0].find('@')+1:] + else: + delegatedomain = domain + delegate = delegate+'@'+delegatedomain + count = len(users) + i = 1 + for user in users: + if user.find('@') > 0: + emailsettings.domain = user[user.find('@')+1:] + user = user[:user.find('@')] + else: + emailsettings.domain = domain #make sure it's back at default domain + print "Deleting %s delegate access to %s (%s of %s)" % (delegate, user+'@'+emailsettings.domain, i, count) + i = i + 1 + try_count = 0 + wait_on_fail = .5 + hard_fail = False + while try_count < 10: + try: + emailsettings.DeleteDelegate(delegate=delegate, delegator=user) + break + except gdata.apps.service.AppsForYourDomainException, e: + terminating_error = checkErrorCode(e) + if not terminating_error: + try_count = try_count + 1 + if try_count > 5: sys.stderr.write('Temporary error %s. 
Retry %s in %s seconds\n' % (str(e.error_code), try_count, str(wait_on_fail)))
+          time.sleep(wait_on_fail)
+          wait_on_fail = wait_on_fail * 2 if wait_on_fail < 32 else 60
+          continue
+        else:
+          sys.stderr.write('Error: %s\n' % terminating_error)
+          hard_fail = True
+          break
+    if try_count == 10 or hard_fail:
+      sys.stderr.write('Giving up\n')
+      continue
+
+def deleteCalendar(users):
+  del_cal = sys.argv[5]
+  cal = getCalendarObject()
+  count = len(users)
+  i = 1
+  for user in users:
+    if user.find('@') > 0:
+      user_domain = user[user.find('@')+1:]
+      user = user[:user.find('@')]
+    else:
+      user_domain = domain
+    uri = 'https://www.google.com/calendar/feeds/%s/allcalendars/full/%s' % (user+'@'+user_domain, del_cal)
+    try:
+      calendar_entry = cal.GetCalendarListEntry(uri)
+    except gdata.service.RequestError, e:
+      print 'Error: %s - %s' % (e[0]['reason'], e[0]['body'])
+      continue
+    try:
+      edit_uri = calendar_entry.GetEditLink().href
+      print "Removing user %s's subscription to %s calendar (%s of %s)" % (user+'@'+user_domain, del_cal, i, count)
+      cal.DeleteCalendarEntry(edit_uri)
+    except gdata.service.RequestError, e:
+      print 'Error: %s - %s' % (e[0]['reason'], e[0]['body'])
+    i = i + 1
+
+def addCalendar(users):
+  add_cal = sys.argv[5]
+  cal = getCalendarObject()
+  selected = 'true'
+  hidden = 'false'
+  color = None
+  i = 6
+  while i < len(sys.argv):
+    if sys.argv[i].lower() == 'selected':
+      if sys.argv[i+1].lower() == 'true':
+        selected = 'true'
+      elif sys.argv[i+1].lower() == 'false':
+        selected = 'false'
+      else:
+        showUsage()
+        print 'Value for selected must be true or false, not %s' % sys.argv[i+1]
+        exit(4)
+      i = i + 2
+    elif sys.argv[i].lower() == 'hidden':
+      if sys.argv[i+1].lower() == 'true':
+        hidden = 'true'
+      elif sys.argv[i+1].lower() == 'false':
+        hidden = 'false'
+      else:
+        showUsage()
+        print 'Value for hidden must be true or false, not %s' % sys.argv[i+1]
+        exit(4)
+      i = i + 2
+    elif sys.argv[i].lower() == 'color':
+      color = sys.argv[i+1]
+      i = i + 2
+    else:
+      showUsage()
+      print '%s is not a valid argument for "gam add calendar"' % sys.argv[i]
+      sys.exit(2)
+  i = 1
+  count = len(users)
+  for user in users:
+    if user.find('@') > 0:
+      user_domain = user[user.find('@')+1:]
+      user = user[:user.find('@')]
+    else:
+      user_domain = domain
+    calendar_entry = gdata.calendar.CalendarListEntry()
+    try:
+      insert_uri = 'https://www.google.com/calendar/feeds/%s/allcalendars/full' % (user+'@'+user_domain)
+      calendar_entry.id = atom.Id(text=add_cal)
+      calendar_entry.hidden = gdata.calendar.Hidden(value=hidden)
+      calendar_entry.selected = gdata.calendar.Selected(value=selected)
+      if color != None:
+        calendar_entry.color = gdata.calendar.Color(value=color)
+      print "Subscribing %s to %s calendar (%s of %s)" % (user+'@'+user_domain, add_cal, i, count)
+      cal.InsertCalendarSubscription(insert_uri=insert_uri, calendar=calendar_entry)
+    except gdata.service.RequestError, e:
+      print 'Error: %s - %s' % (e[0]['reason'], e[0]['body'])
+    i = i + 1
+
+def updateCalendar(users):
+  update_cal = sys.argv[5]
+  cal = getCalendarObject()
+  selected = hidden = color = None
+  i = 6
+  while i < len(sys.argv):
+    if sys.argv[i].lower() == 'selected':
+      if sys.argv[i+1].lower() == 'true':
+        selected = 'true'
+      elif sys.argv[i+1].lower() == 'false':
+        selected = 'false'
+      else:
+        showUsage()
+        print 'Value for selected must be true or false, not %s' % sys.argv[i+1]
+        exit(4)
+      i = i + 2
+    elif sys.argv[i].lower() == 'hidden':
+      if sys.argv[i+1].lower() == 'true':
+        hidden = 'true'
+      elif sys.argv[i+1].lower() == 'false':
+        hidden = 'false'
+      else:
+        showUsage()
+        print 'Value for hidden must be true or false, not %s' % sys.argv[i+1]
+        exit(4)
+      i = i + 2
+    elif sys.argv[i].lower() == 'color':
+      color = sys.argv[i+1]
+      i = i + 2
+    else:
+      showUsage()
+      print '%s is not a valid argument for "gam update calendar"' % sys.argv[i]
+      sys.exit(2)
+  i = 1
+  count = len(users)
+  for user in users:
+    if user.find('@') > 0:
+      user_domain = user[user.find('@')+1:]
+      user = user[:user.find('@')]
+    else:
+      user_domain = domain
+    uri = 'https://www.google.com/calendar/feeds/%s/allcalendars/full/%s' % (user+'@'+user_domain, update_cal)
+    try:
+      calendar_entry = cal.GetCalendarListEntry(uri)
+    except gdata.service.RequestError, e:
+      print 'Error: %s - %s' % (e[0]['reason'], e[0]['body'])
+      continue
+    if selected != None:
+      calendar_entry.selected = gdata.calendar.Selected(value=selected)
+    if hidden != None:
+      calendar_entry.hidden = gdata.calendar.Hidden(value=hidden)
+    if color != None:
+      calendar_entry.color = gdata.calendar.Color(value=color)
+    try:
+      edit_uri = calendar_entry.GetEditLink().href
+      print "Updating %s's subscription to calendar %s (%s of %s)" % (user+'@'+user_domain, update_cal, i, count)
+      cal.UpdateCalendar(calendar_entry)
+    except gdata.service.RequestError, e:
+      print 'Error: %s - %s' % (e[0]['reason'], e[0]['body'])
+      continue
+    i = i + 1
+
+def doCalendarShowACL():
+  show_cal = sys.argv[2]
+  cal = getCalendarObject()
+  if show_cal.find('@') == -1:
+    show_cal = show_cal+'@'+cal.domain
+  uri = 'https://www.google.com/calendar/feeds/%s/acl/full' % (show_cal)
+  try:
+    feed = cal.GetCalendarAclFeed(uri=uri)
+  except gdata.service.RequestError, e:
+    print 'Error: %s - %s' % (e[0]['reason'], e[0]['body'])
+    sys.exit(e[0]['status'])
+  print feed.title.text
+  for i, a_rule in enumerate(feed.entry):
+    print ' Scope %s - %s' % (a_rule.scope.type, a_rule.scope.value)
+    print ' Role: %s' % (a_rule.title.text)
+    print ''
+
+def doCalendarAddACL():
+  use_cal = sys.argv[2]
+  role = sys.argv[4].lower()
+  if role != 'freebusy' and role != 'read' and role != 'editor' and role != 'owner':
+    print 'Error: Role must be freebusy, read, editor or owner. Not %s' % role
+    sys.exit(33)
+  cal = getCalendarObject()
+  user_to_add = sys.argv[5].lower()
+  if user_to_add == 'domain' or user_to_add == 'default':
+    print 'Error: The special users domain and default can\'t be added, please use update instead of add.'
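+    # the 'domain' and 'default' ACL scopes already exist on every calendar, so they can only be changed (see doCalendarUpdateACL below), never added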
+ sys.exit(34) + if user_to_add.find('@') == -1: + user_to_add = user_to_add+'@'+cal.domain + if use_cal.find('@') == -1: + use_cal = use_cal+'@'+cal.domain + rule = gdata.calendar.CalendarAclEntry() + rule.scope = gdata.calendar.Scope(value=user_to_add) + rule.scope.type = 'user' + roleValue = 'http://schemas.google.com/gCal/2005#%s' % (role) + rule.role = gdata.calendar.Role(value=roleValue) + aclUrl = '/calendar/feeds/%s/acl/full' % use_cal + try: + print "Giving %s %s access to calendar %s" % (user_to_add, role, use_cal) + returned_rule = cal.InsertAclEntry(rule, aclUrl) + except gdata.service.RequestError, e: + print 'Error: %s - %s' % (e[0]['reason'], e[0]['body']) + sys.exit(e[0]['status']) + +def doCalendarWipeData(): + use_cal = sys.argv[2] + cal = getCalendarObject() + try: + response = cal.Post('calendarId=%s' % use_cal, 'https://www.googleapis.com/calendar/v3/calendars/%s/clear' % use_cal) + except gdata.service.RequestError, e: + if e[0]['status'] == 204: + print 'All data for Calendar %s has been wiped' % use_cal + else: + print 'Error: %s - %s' % (e[0]['status'], e[0]['reason']) + +def doCalendarUpdateACL(): + use_cal = sys.argv[2] + role = sys.argv[4].lower() + if role != 'freebusy' and role != 'read' and role != 'editor' and role != 'owner': + print 'Error: Role must be freebusy, read, editor or owner. Not %s' % role + exit (33) + user_to_add = sys.argv[5].lower() + cal = getCalendarObject() + if use_cal.find('@') == -1: + use_cal = use_cal+'@'+cal.domain + if user_to_add.find('@') == -1 and user_to_add != 'domain' and user_to_add != 'default': + user_to_add = user_to_add+'@'+cal.domain + rule = gdata.calendar.CalendarAclEntry() + if user_to_add == 'domain': + rule_value = cal.domain + rule_type = 'domain' + elif user_to_add == 'default': + rule_value = None + rule_type = 'default' + else: + rule_value = user_to_add + rule_type = 'user' + rule.scope = gdata.calendar.Scope(value=rule_value) + rule.scope.type = rule_type + roleValue = 'http://schemas.google.com/gCal/2005#%s' % (role) + rule.role = gdata.calendar.Role(value=roleValue) + if rule_type != 'default': + aclUrl = '/calendar/feeds/%s/acl/full/%s%%3A%s' % (use_cal, rule_type, rule_value) + else: + aclUrl = '/calendar/feeds/%s/acl/full/default' % (use_cal) + try: + returned_rule = cal.UpdateAclEntry(edit_uri=aclUrl, updated_rule=rule) + except gdata.service.RequestError, e: + print 'Error: %s - %s' % (e[0]['reason'], e[0]['body']) + sys.exit(e[0]['status']) + +def doCalendarDelACL(): + use_cal = sys.argv[2] + if sys.argv[4].lower() != 'user': + print 'invalid syntax' + exit(9) + user_to_del = sys.argv[5].lower() + cal = getCalendarObject() + if use_cal.find('@') == -1: + use_cal = use_cal+'@'+cal.domain + if user_to_del.find('@') == -1 and user_to_del != 'domain' and user_to_del != 'default': + user_to_del = user_to_del+'@'+cal.domain + uri = 'https://www.google.com/calendar/feeds/%s/acl/full' % (use_cal) + try: + feed = cal.GetCalendarAclFeed(uri=uri) + except gdata.service.RequestError, e: + print 'Error: %s - %s' % (e[0]['reason'], e[0]['body']) + sys.exit(e[0]['status']) + found_rule = False + print "Removing %s's access to calendar %s" % (user_to_del, use_cal) + for i, a_rule in enumerate(feed.entry): + try: + if (user_to_del == 'default' and a_rule.scope.value == None) or (a_rule.scope.type.lower() == 'domain' and user_to_del == 'domain') or a_rule.scope.value.lower() == user_to_del: + found_rule = True + try: + result = cal.DeleteAclEntry(a_rule.GetEditLink().href) + except gdata.service.RequestError, e: + 
print 'Error: %s - %s' % (e[0]['reason'], e[0]['body']) + sys.exit(e[0]['status']) + break + except AttributeError: + continue + if not found_rule: + print 'Error: that object does not seem to have access to that calendar' + exit(34) + +def doProfile(users): + if sys.argv[4].lower() == 'share' or sys.argv[4].lower() == 'shared': + indexed = 'true' + elif sys.argv[4].lower() == 'unshare' or sys.argv[4].lower() == 'unshared': + indexed = 'false' + profiles = getProfilesObject() + count = len(users) + i = 1 + for user in users: + if user.find('@') > 0: + user_domain = user[user.find('@')+1:] + user = user[:user.find('@')] + else: + user_domain = domain + print 'Setting Profile Sharing to %s for %s@%s (%s of %s)' % (indexed, user, user_domain, i, count) + uri = '/m8/feeds/profiles/domain/%s/full/%s?v=3.0' % (user_domain, user) + try: + user_profile = profiles.GetProfile(uri) + user_profile.extension_elements[2].attributes['indexed'] = indexed + profiles.UpdateProfile(user_profile.GetEditLink().href, user_profile) + except gdata.service.RequestError, e: + print 'Error: %s@%s %s - %s' % (user, user_domain, e[0]['body'], e[0]['reason']) + i += 1 + +def showProfile(users): + profiles = getProfilesObject() + for user in users: + if user.find('@') > 0: + user_domain = user[user.find('@')+1:] + user = user[:user.find('@')] + else: + user_domain = domain + uri = '/m8/feeds/profiles/domain/%s/full/%s?v=3.0' % (user_domain, user) + try: + user_profile = profiles.GetProfile(uri) + except gdata.service.RequestError, e: + print 'Error: %s@%s %s - %s' % (user, user_domain, e[0]['body'], e[0]['reason']) + continue + indexed = user_profile.extension_elements[2].attributes['indexed'] + print '''User: %s@%s + Profile Shared: %s''' % (user, user_domain, indexed) + +def doPhoto(users): + filename = sys.argv[5] + profiles = getProfilesObject() + i = 1 + count = len(users) + for user in users: + if user.find('@') > 0: + user_domain = user[user.find('@')+1:] + user = user[:user.find('@')] + else: + user_domain = domain + uri = '/m8/feeds/profiles/domain/%s/full/%s?v=3' % (user_domain, user) + try: + user_profile = profiles.GetProfile(uri) + photo_uri = user_profile.link[0].href + try: + if sys.argv[6].lower() == 'nooverwrite': + etag = user_profile.link[0].extension_attributes['{http://schemas.google.com/g/2005}etag'] + print 'Not overwriting existing photo for %s@%s' % (user, user_domain) + continue + except IndexError: + pass + except KeyError: + pass + print "Updating photo for %s (%s of %s)" % (user+'@'+user_domain, i, count) + results = profiles.ChangePhoto(media=filename, content_type='image/jpeg', contact_entry_or_url=photo_uri) + except gdata.service.RequestError, e: + print 'Error: %s@%s %s - %s' % (user, user_domain, e[0]['body'], e[0]['reason']) + i = i + 1 + +def getPhoto(users): + profiles = getProfilesObject() + i = 1 + count = len(users) + for user in users: + if user.find('@') > 0: + user_domain = user[user.find('@')+1:] + user = user[:user.find('@')] + else: + user_domain = domain + uri = '/m8/feeds/profiles/domain/%s/full/%s?v=3' % (user_domain, user) + try: + user_profile = profiles.GetProfile(uri) + try: + etag = user_profile.link[0].extension_attributes['{http://schemas.google.com/g/2005}etag'] + except KeyError: + print ' No photo for %s@%s' % (user, user_domain) + i = i + 1 + continue + photo_uri = user_profile.link[0].href + filename = '%s-%s.jpg' % (user, user_domain) + print "Saving photo for %s to %s (%s of %s)" % (user+'@'+user_domain, filename, i, count) + photo = 
profiles.GetPhoto(contact_entry_or_url=photo_uri) + except gdata.service.RequestError, e: + print 'Error: %s@%s %s - %s' % (user, user_domain, e[0]['body'], e[0]['reason']) + i = i + 1 + continue + photo_file = open(filename, 'wb') + photo_file.write(photo) + photo_file.close() + i = i + 1 + +def deletePhoto(users): + profiles = getProfilesObject() + i = 1 + count = len(users) + for user in users: + if user.find('@') > 0: + user_domain = user[user.find('@')+1:] + user = user[:user.find('@')] + else: + user_domain = domain + uri = '/m8/feeds/profiles/domain/%s/full/%s?v=3' % (user_domain, user) + try: + user_profile = profiles.GetProfile(uri) + photo_uri = user_profile.link[0].href + print "Deleting photo for %s (%s of %s)" % (user+'@'+user_domain, i, count) + results = profiles.DeletePhoto(photo_uri) + except gdata.service.RequestError, e: + print 'Error: %s@%s %s - %s' % (user, user_domain, e[0]['body'], e[0]['reason']) + i = i + 1 + +def showCalendars(users): + cal = getCalendarObject() + for user in users: + if user.find('@') > 0: + user_domain = user[user.find('@')+1:] + user = user[:user.find('@')] + else: + user_domain = domain + uri = '/calendar/feeds/%s/allcalendars/full' % (user+'@'+user_domain,) + try: + feed = cal.GetAllCalendarsFeed(uri) + except gdata.service.RequestError, e: + print 'Error: %s@%s %s - %s' % (user, user_domain, e[0]['body'], e[0]['reason']) + continue + print '%s' % feed.title.text + for i, a_calendar in enumerate(feed.entry): + print ' Name: %s' % str(a_calendar.title.text) + print ' ID: %s' % urllib.unquote(str(a_calendar.id.text).rpartition('/')[2]) + print ' Access Level: %s' % str(a_calendar.access_level.value) + print ' Timezone: %s' % str(a_calendar.timezone.value) + print ' Hidden: %s' % str(a_calendar.hidden.value) + print ' Selected: %s' % str(a_calendar.selected.value) + print ' Color: %s' % str(a_calendar.color.value) + print '' + +def showCalSettings(users): + cal = getCalendarObject() + for user in users: + if user.find('@') > 0: + user_domain = user[user.find('@')+1:] + user = user[:user.find('@')] + else: + user_domain = domain + uri = 'https://www.googleapis.com/calendar/v3/users/%s/settings' % ('me') + try: + feed = cal.Get(uri, converter=str) + except gdata.service.RequestError, e: + print 'Error: %s - %s' % (e[0]['reason'], e[0]['body']) + sys.exit(e[0]['status']) + print feed + +def doImap(users): + checkTOS = False + if sys.argv[4].lower() == 'on': + enable = True + elif sys.argv[4].lower() == 'off': + enable = False + if len(sys.argv) > 5 and sys.argv[5] == 'confirm': + checkTOS = True + emailsettings = getEmailSettingsObject() + count = len(users) + i = 1 + for user in users: + if user.find('@') > 0: + emailsettings.domain = user[user.find('@')+1:] + user = user[:user.find('@')] + else: + emailsettings.domain = domain #make sure it's back at default domain + print "Setting IMAP Access to %s for %s (%s of %s)" % (str(enable), user+'@'+emailsettings.domain, i, count) + i = i + 1 + if checkTOS: + if not hasAgreed2TOS(user+'@'+emailsettings.domain): + print ' Warning: IMAP has been enabled but '+user+'@'+emailsettings.domain+' has not logged into GMail to agree to the terms of service (captcha). IMAP will not work until they do.' 
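+    # retry wrapper used for the settings calls below: up to 10 attempts per call, exponential backoff starting at 0.5s (doubling, then a 60s ceiling); errors that checkErrorCode() reports as permanent abort the loop immediately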
+ try_count = 0 + wait_on_fail = .5 + hard_fail = False + while try_count < 10: + try: + emailsettings.UpdateImap(username=user, enable=enable) + break + except gdata.apps.service.AppsForYourDomainException, e: + terminating_error = checkErrorCode(e) + if not terminating_error: + try_count = try_count + 1 + if try_count > 5: sys.stderr.write('Temporary error %s. Retry %s in %s seconds\n' % (str(e.error_code), try_count, str(wait_on_fail))) + time.sleep(wait_on_fail) + wait_on_fail = wait_on_fail * 2 if wait_on_fail < 32 else 60 + continue + else: + sys.stderr.write('Error: %s\n' % terminating_error) + hard_fail = True + break + if try_count == 10 or hard_fail: + sys.stderr.write('Giving up\n') + continue + +def getImap(users): + emailsettings = getEmailSettingsObject() + i = 1 + for user in users: + if user.find('@') > 0: + emailsettings.domain = user[user.find('@')+1:] + user = user[:user.find('@')] + else: + emailsettings.domain = domain + count = len(users) + try_count = 0 + wait_on_fail = .5 + hard_fail = False + while try_count < 10: + try: + imapsettings = emailsettings.GetImap(username=user) + break + except gdata.apps.service.AppsForYourDomainException, e: + terminating_error = checkErrorCode(e) + if not terminating_error: + try_count = try_count + 1 + if try_count > 5: sys.stderr.write('Temporary error %s. Retry %s in %s seconds\n' % (str(e.error_code), try_count, str(wait_on_fail))) + time.sleep(wait_on_fail) + wait_on_fail = wait_on_fail * 2 if wait_on_fail < 32 else 60 + continue + else: + sys.stderr.write('Error: %s\n' % terminating_error) + hard_fail = True + break + if try_count == 10 or hard_fail: + sys.stderr.write('Giving up\n') + i = i + 1 + continue + print 'User %s IMAP Enabled:%s (%s of %s)' % (user+'@'+emailsettings.domain, imapsettings['enable'], i, count) + i = i + 1 + +def doPop(users): + checkTOS = False + if sys.argv[4].lower() == 'on': + enable = True + elif sys.argv[4].lower() == 'off': + enable = False + enable_for = 'ALL_MAIL' + action = 'KEEP' + i = 5 + while i < len(sys.argv): + if sys.argv[i].lower() == 'for': + if sys.argv[i+1].lower() == 'allmail': + enable_for = 'ALL_MAIL' + i = i + 2 + elif sys.argv[i+1].lower() == 'newmail': + enable_for = 'MAIL_FROM_NOW_ON' + i = i + 2 + elif sys.argv[i].lower() == 'action': + if sys.argv[i+1].lower() == 'keep': + action = 'KEEP' + i = i + 2 + elif sys.argv[i+1].lower() == 'archive': + action = 'ARCHIVE' + i = i + 2 + elif sys.argv[i+1].lower() == 'delete': + action = 'DELETE' + i = i + 2 + elif sys.argv[i].lower() == 'confirm': + checkTOS = True + i = i + 1 + else: + showUsage() + sys.exit(2) + emailsettings = getEmailSettingsObject() + count = len(users) + i = 1 + for user in users: + if user.find('@') > 0: + emailsettings.domain = user[user.find('@')+1:] + user = user[:user.find('@')] + else: + emailsettings.domain = domain #make sure it's back at default domain + print "Setting POP Access to %s for %s (%s of %s)" % (str(enable), user+'@'+emailsettings.domain, i, count) + i = i + 1 + if checkTOS: + if not hasAgreed2TOS(user): + print ' Warning: POP has been enabled but '+user+'@'+emailsettings.domain+' has not logged into GMail to agree to the terms of service (captcha). POP will not work until they do.' 
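+    # same backoff-and-retry wrapper as doImap above; POP settings go through the same emailsettings service object, one user per iteration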
+ try_count = 0 + wait_on_fail = .5 + hard_fail = False + while try_count < 10: + try: + emailsettings.UpdatePop(username=user, enable=enable, enable_for=enable_for, action=action) + break + except gdata.apps.service.AppsForYourDomainException, e: + terminating_error = checkErrorCode(e) + if not terminating_error: + try_count = try_count + 1 + if try_count > 5: sys.stderr.write('Temporary error %s. Retry %s in %s seconds\n' % (str(e.error_code), try_count, str(wait_on_fail))) + time.sleep(wait_on_fail) + wait_on_fail = wait_on_fail * 2 if wait_on_fail < 32 else 60 + continue + else: + sys.stderr.write('Error: %s\n' % terminating_error) + hard_fail = True + break + if try_count == 10 or hard_fail: + sys.stderr.write('Giving up\n') + continue + +def getPop(users): + emailsettings = getEmailSettingsObject() + for user in users: + if user.find('@') > 0: + emailsettings.domain = user[user.find('@')+1:] + user = user[:user.find('@')] + else: + emailsettings.domain = domain + try_count = 0 + wait_on_fail = .5 + hard_fail = False + while try_count < 10: + try: + popsettings = emailsettings.GetPop(username=user) + break + except gdata.apps.service.AppsForYourDomainException, e: + terminating_error = checkErrorCode(e) + if not terminating_error: + try_count = try_count + 1 + if try_count > 5: sys.stderr.write('Temporary error %s. Retry %s in %s seconds\n' % (str(e.error_code), try_count, str(wait_on_fail))) + time.sleep(wait_on_fail) + wait_on_fail = wait_on_fail * 2 if wait_on_fail < 32 else 60 + continue + else: + sys.stderr.write('Error: %s\n' % terminating_error) + hard_fail = True + break + if try_count == 10 or hard_fail: + sys.stderr.write('Giving up\n') + continue + print 'User %s POP Enabled:%s Action:%s' % (user+'@'+emailsettings.domain, popsettings['enable'], popsettings['action']) + +def doSendAs(users): + sendas = sys.argv[4] + sendasName = sys.argv[5] + make_default = reply_to = None + i = 6 + while i < len(sys.argv): + if sys.argv[i].lower() == 'default': + make_default = True + i = i + 1 + elif sys.argv[i].lower() == 'replyto': + reply_to = sys.argv[i+1] + i = i + 2 + else: + showUsage() + sys.exit(2) + emailsettings = getEmailSettingsObject() + if sendas.find('@') < 0: + sendas = sendas+'@'+domain + count = len(users) + i = 1 + for user in users: + if user.find('@') > 0: + emailsettings.domain = user[user.find('@')+1:] + user = user[:user.find('@')] + else: + emailsettings.domain = domain #make sure it's back at default domain + print "Allowing %s to send as %s (%s of %s)" % (user+'@'+emailsettings.domain, sendas, i, count) + i = i + 1 + try_count = 0 + wait_on_fail = .5 + hard_fail = False + while try_count < 10: + try: + emailsettings.CreateSendAsAlias(username=user, name=sendasName, address=sendas, make_default=make_default, reply_to=reply_to) + break + except gdata.apps.service.AppsForYourDomainException, e: + terminating_error = checkErrorCode(e) + if not terminating_error: + try_count = try_count + 1 + if try_count > 5: sys.stderr.write('Temporary error %s. 
Retry %s in %s seconds\n' % (str(e.error_code), try_count, str(wait_on_fail))) + time.sleep(wait_on_fail) + wait_on_fail = wait_on_fail * 2 if wait_on_fail < 32 else 60 + continue + else: + sys.stderr.write('Error: %s\n' % terminating_error) + hard_fail = True + break + if try_count == 10 or hard_fail: + sys.stderr.write('Giving up\n') + continue + +def showSendAs(users): + emailsettings = getEmailSettingsObject() + for user in users: + if user.find('@') > 0: + emailsettings.domain = user[user.find('@')+1:] + user = user[:user.find('@')] + else: + emailsettings.domain = domain + print '%s has the following send as aliases:' % (user+'@'+emailsettings.domain) + try_count = 0 + wait_on_fail = .5 + hard_fail = False + while try_count < 10: + try: + sendases = emailsettings.GetSendAsAlias(username=user) + break + except gdata.apps.service.AppsForYourDomainException, e: + terminating_error = checkErrorCode(e) + if not terminating_error: + try_count = try_count + 1 + if try_count > 5: sys.stderr.write('Temporary error %s. Retry %s in %s seconds\n' % (str(e.error_code), try_count, str(wait_on_fail))) + time.sleep(wait_on_fail) + wait_on_fail = wait_on_fail * 2 if wait_on_fail < 32 else 60 + continue + else: + sys.stderr.write('Error: %s\n' % terminating_error) + hard_fail = True + break + if try_count == 10 or hard_fail: + sys.stderr.write('Giving up\n') + continue + for sendas in sendases: + if sendas['isDefault'] == 'true': + default = 'yes' + else: + default = 'no' + if sendas['replyTo']: + replyto = ' Reply To:<'+sendas['replyTo']+'>' + else: + replyto = '' + if sendas['verified'] == 'true': + verified = 'yes' + else: + verified = 'no' + print ' "%s" <%s>%s Default:%s Verified:%s' % (sendas['name'], sendas['address'], replyto, default, verified) + print '' + +def doLanguage(users): + language = sys.argv[4].lower() + emailsettings = getEmailSettingsObject() + count = len(users) + i = 1 + for user in users: + if user.find('@') > 0: + emailsettings.domain = user[user.find('@')+1:] + user = user[:user.find('@')] + else: + emailsettings.domain = domain #make sure it's back at default domain + print "Setting the language for %s to %s (%s of %s)" % (user+'@'+emailsettings.domain, language, i, count) + i = i + 1 + try_count = 0 + wait_on_fail = .5 + hard_fail = False + while try_count < 10: + try: + emailsettings.UpdateLanguage(username=user, language=language) + break + except gdata.apps.service.AppsForYourDomainException, e: + terminating_error = checkErrorCode(e) + if not terminating_error: + try_count = try_count + 1 + if try_count > 5: sys.stderr.write('Temporary error %s. 
Retry %s in %s seconds\n' % (str(e.error_code), try_count, str(wait_on_fail))) + time.sleep(wait_on_fail) + wait_on_fail = wait_on_fail * 2 if wait_on_fail < 32 else 60 + continue + else: + sys.stderr.write('Error: %s\n' % terminating_error) + hard_fail = True + break + if try_count == 10 or hard_fail: + sys.stderr.write('Giving up\n') + continue + +def doUTF(users): + if sys.argv[4].lower() == 'on': + SetUTF = True + elif sys.argv[4].lower() == 'off': + SetUTF = False + emailsettings = getEmailSettingsObject() + count = len(users) + i = 1 + for user in users: + if user.find('@') > 0: + emailsettings.domain = user[user.find('@')+1:] + user = user[:user.find('@')] + else: + emailsettings.domain = domain #make sure it's back at default domain + print "Setting UTF-8 to %s for %s (%s of %s)" % (str(SetUTF), user+'@'+emailsettings.domain, i, count) + i = i + 1 + try_count = 0 + wait_on_fail = .5 + hard_fail = False + while try_count < 10: + try: + emailsettings.UpdateGeneral(username=user, unicode=SetUTF) + break + except gdata.apps.service.AppsForYourDomainException, e: + terminating_error = checkErrorCode(e) + if not terminating_error: + try_count = try_count + 1 + if try_count > 5: sys.stderr.write('Temporary error %s. Retry %s in %s seconds\n' % (str(e.error_code), try_count, str(wait_on_fail))) + time.sleep(wait_on_fail) + wait_on_fail = wait_on_fail * 2 if wait_on_fail < 32 else 60 + continue + else: + sys.stderr.write('Error: %s\n' % terminating_error) + hard_fail = True + break + if try_count == 10 or hard_fail: + sys.stderr.write('Giving up\n') + continue + +def doPageSize(users): + if sys.argv[4] == '25' or sys.argv[4] == '50' or sys.argv[4] == '100': + PageSize = sys.argv[4] + else: + showUsage() + sys.exit(2) + emailsettings = getEmailSettingsObject() + count = len(users) + i = 1 + for user in users: + if user.find('@') > 0: + emailsettings.domain = user[user.find('@')+1:] + user = user[:user.find('@')] + else: + emailsettings.domain = domain #make sure it's back at default domain + print "Setting Page Size to %s for %s (%s of %s)" % (PageSize, user+'@'+emailsettings.domain, i, count) + i = i + 1 + try_count = 0 + wait_on_fail = .5 + hard_fail = False + while try_count < 10: + try: + emailsettings.UpdateGeneral(username=user, page_size=PageSize) + break + except gdata.apps.service.AppsForYourDomainException, e: + terminating_error = checkErrorCode(e) + if not terminating_error: + try_count = try_count + 1 + if try_count > 5: sys.stderr.write('Temporary error %s. 
Retry %s in %s seconds\n' % (str(e.error_code), try_count, str(wait_on_fail))) + time.sleep(wait_on_fail) + wait_on_fail = wait_on_fail * 2 if wait_on_fail < 32 else 60 + continue + else: + sys.stderr.write('Error: %s\n' % terminating_error) + hard_fail = True + break + if try_count == 10 or hard_fail: + sys.stderr.write('Giving up\n') + continue + +def doShortCuts(users): + if sys.argv[4].lower() == 'on': + SetShortCuts = True + elif sys.argv[4].lower() == 'off': + SetShortCuts = False + emailsettings = getEmailSettingsObject() + count = len(users) + i = 1 + for user in users: + if user.find('@') > 0: + emailsettings.domain = user[user.find('@')+1:] + user = user[:user.find('@')] + else: + emailsettings.domain = domain #make sure it's back at default domain + print "Setting Keyboard Short Cuts to %s for %s (%s of %s)" % (str(SetShortCuts), user+'@'+emailsettings.domain, i, count) + i = i + 1 + try_count = 0 + wait_on_fail = .5 + hard_fail = False + while try_count < 10: + try: + emailsettings.UpdateGeneral(username=user, shortcuts=SetShortCuts) + break + except gdata.apps.service.AppsForYourDomainException, e: + terminating_error = checkErrorCode(e) + if not terminating_error: + try_count = try_count + 1 + if try_count > 5: sys.stderr.write('Temporary error %s. Retry %s in %s seconds\n' % (str(e.error_code), try_count, str(wait_on_fail))) + time.sleep(wait_on_fail) + wait_on_fail = wait_on_fail * 2 if wait_on_fail < 32 else 60 + continue + else: + sys.stderr.write('Error: %s\n' % terminating_error) + hard_fail = True + break + if try_count == 10 or hard_fail: + sys.stderr.write('Giving up\n') + continue + +def doAdminAudit(): + i = 3 + admin = event = start_date = end_date = max_results = None + while i < len(sys.argv): + if sys.argv[i].lower() == 'admin': + admin = sys.argv[i+1] + i = i + 2 + elif sys.argv[i].lower() == 'event': + event = sys.argv[i+1] + i = i + 2 + elif sys.argv[i].lower() == 'start_date': + start_date = sys.argv[i+1] + i = i + 2 + elif sys.argv[i].lower() == 'end_date': + end_date = sys.argv[i+1] + i = i + 2 + elif sys.argv[i].lower() == 'max_results': + max_results = sys.argv[i+1] + i = i + 2 + else: + showUsage() + sys.exit(2) + orgs = getOrgObject() + try_count = 0 + wait_on_fail = .5 + hard_fail = False + while try_count < 10: + try: + customer_id = orgs.RetrieveCustomerId()['customerId'] + break + except gdata.apps.service.AppsForYourDomainException, e: + terminating_error = checkErrorCode(e) + if not terminating_error: + try_count = try_count + 1 + if try_count > 5: sys.stderr.write('Temporary error %s. Retry %s in %s seconds\n' % (str(e.error_code), try_count, str(wait_on_fail))) + time.sleep(wait_on_fail) + wait_on_fail = wait_on_fail * 2 if wait_on_fail < 32 else 60 + continue + else: + sys.stderr.write('Error: %s\n' % terminating_error) + sys.exit(e.error_code) + if try_count == 10: + sys.stderr.write('Giving up\n') + sys.exit(e.error_code) + aa = getAdminAuditObject() + try_count = 0 + wait_on_fail = .5 + hard_fail = False + while try_count < 10: + try: + results = aa.retrieve_audit(customer_id=customer_id, admin=admin, event=event, start_date=start_date, end_date=end_date, max_results=max_results) + break + except gdata.apps.service.AppsForYourDomainException, e: + terminating_error = checkErrorCode(e) + if not terminating_error: + try_count = try_count + 1 + if try_count > 5: sys.stderr.write('Temporary error %s. 
Retry %s in %s seconds\n' % (str(e.error_code), try_count, str(wait_on_fail))) + time.sleep(wait_on_fail) + wait_on_fail = wait_on_fail * 2 if wait_on_fail < 32 else 60 + continue + else: + sys.stderr.write('Error: %s\n' % terminating_error) + hard_fail = True + break + if try_count == 10 or hard_fail: + sys.stderr.write('Giving up\n') + sys.exit(e.error_code) + print results + +def doArrows(users): + if sys.argv[4].lower() == 'on': + SetArrows = True + elif sys.argv[4].lower() == 'off': + SetArrows = False + emailsettings = getEmailSettingsObject() + count = len(users) + i = 1 + for user in users: + if user.find('@') > 0: + emailsettings.domain = user[user.find('@')+1:] + user = user[:user.find('@')] + else: + emailsettings.domain = domain #make sure it's back at default domain + print "Setting Personal Indicator Arrows to %s for %s (%s of %s)" % (str(SetArrows), user+'@'+emailsettings.domain, i, count) + i = i + 1 + try_count = 0 + wait_on_fail = .5 + hard_fail = False + while try_count < 10: + try: + emailsettings.UpdateGeneral(username=user, arrows=SetArrows) + break + except gdata.apps.service.AppsForYourDomainException, e: + terminating_error = checkErrorCode(e) + if not terminating_error: + try_count = try_count + 1 + if try_count > 5: sys.stderr.write('Temporary error %s. Retry %s in %s seconds\n' % (str(e.error_code), try_count, str(wait_on_fail))) + time.sleep(wait_on_fail) + wait_on_fail = wait_on_fail * 2 if wait_on_fail < 32 else 60 + continue + else: + sys.stderr.write('Error: %s\n' % terminating_error) + hard_fail = True + break + if try_count == 10 or hard_fail: + sys.stderr.write('Giving up\n') + continue + +def doSnippets(users): + if sys.argv[4].lower() == 'on': + SetSnippets = True + elif sys.argv[4].lower() == 'off': + SetSnippets = False + emailsettings = getEmailSettingsObject() + count = len(users) + i = 1 + for user in users: + if user.find('@') > 0: + emailsettings.domain = user[user.find('@')+1:] + user = user[:user.find('@')] + else: + emailsettings.domain = domain #make sure it's back at default domain + print "Setting Preview Snippets to %s for %s (%s of %s)" % (str(SetSnippets), user+'@'+emailsettings.domain, i, count) + i = i + 1 + try_count = 0 + wait_on_fail = .5 + hard_fail = False + while try_count < 10: + try: + emailsettings.UpdateGeneral(username=user, snippets=SetSnippets) + break + except gdata.apps.service.AppsForYourDomainException, e: + terminating_error = checkErrorCode(e) + if not terminating_error: + try_count = try_count + 1 + if try_count > 5: sys.stderr.write('Temporary error %s. 
Retry %s in %s seconds\n' % (str(e.error_code), try_count, str(wait_on_fail))) + time.sleep(wait_on_fail) + wait_on_fail = wait_on_fail * 2 if wait_on_fail < 32 else 60 + continue + else: + sys.stderr.write('Error: %s\n' % terminating_error) + hard_fail = True + break + if try_count == 10 or hard_fail: + sys.stderr.write('Giving up\n') + continue + +def doLabel(users): + label = sys.argv[4] + emailsettings = getEmailSettingsObject() + count = len(users) + i = 1 + for user in users: + if user.find('@') > 0: + emailsettings.domain = user[user.find('@')+1:] + user = user[:user.find('@')] + else: + emailsettings.domain = domain #make sure it's back at default domain + print "Creating label %s for %s (%s of %s)" % (label, user+'@'+emailsettings.domain, i, count) + i = i + 1 + try_count = 0 + wait_on_fail = .5 + hard_fail = False + while try_count < 10: + try: + emailsettings.CreateLabel(username=user, label=label) + break + except gdata.apps.service.AppsForYourDomainException, e: + terminating_error = checkErrorCode(e) + if not terminating_error: + try_count = try_count + 1 + if try_count > 5: sys.stderr.write('Temporary error %s. Retry %s in %s seconds\n' % (str(e.error_code), try_count, str(wait_on_fail))) + time.sleep(wait_on_fail) + wait_on_fail = wait_on_fail * 2 if wait_on_fail < 32 else 60 + continue + else: + sys.stderr.write('Error: %s\n' % terminating_error) + hard_fail = True + break + if try_count == 10 or hard_fail: + sys.stderr.write('Giving up\n') + continue + +def doDeleteLabel(users): + label = sys.argv[5] + emailsettings = getEmailSettingsObject() + count = len(users) + i = 1 + for user in users: + if user.find('@') > 0: + emailsettings.domain = user[user.find('@')+1:] + user = user[:user.find('@')] + else: + emailsettings.domain = domain + print "Deleting label %s for %s (%s of %s)" % (label, user+'@'+emailsettings.domain, i, count) + i = i + 1 + try_count = 0 + wait_on_fail = .5 + hard_fail = False + while try_count < 10: + try: + results = emailsettings.DeleteLabel(username=user, label=label) + break + except gdata.apps.service.AppsForYourDomainException, e: + terminating_error = checkErrorCode(e) + if not terminating_error: + try_count = try_count + 1 + if try_count > 5: sys.stderr.write('Temporary error %s. Retry %s in %s seconds\n' % (str(e.error_code), try_count, str(wait_on_fail))) + time.sleep(wait_on_fail) + wait_on_fail = wait_on_fail * 2 if wait_on_fail < 32 else 60 + continue + else: + sys.stderr.write('Error: %s\n' % terminating_error) + hard_fail = True + break + if try_count == 10 or hard_fail: + sys.stderr.write('Giving up\n') + continue + +def showLabels(users): + emailsettings = getEmailSettingsObject() + for user in users: + if user.find('@') > 0: + emailsettings.domain = user[user.find('@')+1:] + user = user[:user.find('@')] + else: + emailsettings.domain = domain + print '%s has the following labels:' % (user+'@'+emailsettings.domain) + try_count = 0 + wait_on_fail = .5 + hard_fail = False + while try_count < 10: + try: + labels = emailsettings.GetLabels(username=user) + break + except gdata.apps.service.AppsForYourDomainException, e: + terminating_error = checkErrorCode(e) + if not terminating_error: + try_count = try_count + 1 + if try_count > 5: sys.stderr.write('Temporary error %s. 
Retry %s in %s seconds\n' % (str(e.error_code), try_count, str(wait_on_fail))) + time.sleep(wait_on_fail) + wait_on_fail = wait_on_fail * 2 if wait_on_fail < 32 else 60 + continue + else: + sys.stderr.write('Error: %s\n' % terminating_error) + hard_fail = True + break + if try_count == 10 or hard_fail: + sys.stderr.write('Giving up\n') + continue + for label in labels: + print ' %s Unread:%s Visibility:%s' % (label['label'], label['unreadCount'], label['visibility']) + print '' + +def doFilter(users): + i = 4 # filter arguments start here + from_ = to = subject = has_the_word = does_not_have_the_word = has_attachment = label = should_mark_as_read = should_archive = should_star = forward_to = should_trash = should_not_spam = None + haveCondition = False + while sys.argv[i].lower() == 'from' or sys.argv[i].lower() == 'to' or sys.argv[i].lower() == 'subject' or sys.argv[i].lower() == 'haswords' or sys.argv[i].lower() == 'nowords' or sys.argv[i].lower() == 'musthaveattachment': + if sys.argv[i].lower() == 'from': + from_ = sys.argv[i+1] + i = i + 2 + haveCondition = True + elif sys.argv[i].lower() == 'to': + to = sys.argv[i+1] + i = i + 2 + haveCondition = True + elif sys.argv[i].lower() == 'subject': + subject = sys.argv[i+1] + i = i + 2 + haveCondition = True + elif sys.argv[i].lower() == 'haswords': + has_the_word = sys.argv[i+1] + i = i + 2 + haveCondition = True + elif sys.argv[i].lower() == 'nowords': + does_not_have_the_word = sys.argv[i+1] + i = i + 2 + haveCondition = True + elif sys.argv[i].lower() == 'musthaveattachment': + has_attachment = True + i = i + 1 + haveCondition = True + if not haveCondition: + showUsage() + sys.exit(2) + haveAction = False + while i < len(sys.argv): + if sys.argv[i].lower() == 'label': + label = sys.argv[i+1] + i = i + 2 + haveAction = True + elif sys.argv[i].lower() == 'markread': + should_mark_as_read = True + i = i + 1 + haveAction = True + elif sys.argv[i].lower() == 'archive': + should_archive = True + i = i + 1 + haveAction = True + elif sys.argv[i].lower() == 'star': + should_star = True + i = i + 1 + haveAction = True + elif sys.argv[i].lower() == 'forward': + forward_to = sys.argv[i+1] + i = i + 2 + haveAction = True + elif sys.argv[i].lower() == 'trash': + should_trash = True + i = i + 1 + haveAction = True + elif sys.argv[i].lower() == 'neverspam': + should_not_spam = True + i = i + 1 + haveAction = True + else: + showUsage() + sys.exit(2) + if not haveAction: + showUsage() + sys.exit(2) + emailsettings = getEmailSettingsObject() + count = len(users) + i = 1 + for user in users: + if user.find('@') > 0: + emailsettings.domain = user[user.find('@')+1:] + user = user[:user.find('@')] + else: + emailsettings.domain = domain #make sure it's back at default domain + print "Creating filter for %s (%s of %s)" % (user+'@'+emailsettings.domain, i, count) + i = i + 1 + try_count = 0 + wait_on_fail = .5 + hard_fail = False + while try_count < 10: + try: + emailsettings.CreateFilter(username=user, from_=from_, to=to, subject=subject, has_the_word=has_the_word, does_not_have_the_word=does_not_have_the_word, has_attachment=has_attachment, label=label, should_mark_as_read=should_mark_as_read, should_archive=should_archive, should_star=should_star, forward_to=forward_to, should_trash=should_trash, should_not_spam=should_not_spam) + break + except gdata.apps.service.AppsForYourDomainException, e: + terminating_error = checkErrorCode(e) + if not terminating_error: + try_count = try_count + 1 + if try_count > 5: sys.stderr.write('Temporary error %s. 
Retry %s in %s seconds\n' % (str(e.error_code), try_count, str(wait_on_fail))) + time.sleep(wait_on_fail) + wait_on_fail = wait_on_fail * 2 if wait_on_fail < 32 else 60 + continue + else: + sys.stderr.write('Error: %s\n' % terminating_error) + hard_fail = True + break + if try_count == 10 or hard_fail: + sys.stderr.write('Giving up\n') + continue + +def doForward(users): + checkTOS = False + action = forward_to = None + gotAction = gotForward = False + if sys.argv[4] == 'on': + enable = True + elif sys.argv[4] == 'off': + enable = False + else: + showUsage() + sys.exit(2) + i = 5 + while i < len(sys.argv): + if sys.argv[i].lower() == 'keep' or sys.argv[i].lower() == 'archive' or sys.argv[i].lower() == 'delete': + action = sys.argv[i].upper() + i = i + 1 + gotAction = True + elif sys.argv[i].lower() == 'confirm': + checkTOS = True + i = i + 1 + elif sys.argv[i].find('@') != -1: + forward_to = sys.argv[i] + gotForward = True + i = i + 1 + else: + showUsage() + sys.exit(2) + if enable and (not gotAction or not gotForward): + showUsage() + sys.exit() + emailsettings = getEmailSettingsObject() + count = len(users) + i = 1 + for user in users: + if user.find('@') > 0: + emailsettings.domain = user[user.find('@')+1:] + user = user[:user.find('@')] + else: + emailsettings.domain = domain #make sure it's back at default domain + print "Turning forward %s for %s, emails will be %s (%s of %s)" % (sys.argv[4], user+'@'+emailsettings.domain, action, i, count) + i = i + 1 + if checkTOS: + if not hasAgreed2TOS(user): + print ' Warning: Forwarding has been enabled but '+user+'@'+emailsettings.domain+' has not logged into GMail to agree to the terms of service (captcha). Forwarding will not work until they do.' + try_count = 0 + wait_on_fail = .5 + hard_fail = False + while try_count < 10: + try: + emailsettings.UpdateForwarding(username=user, enable=enable, action=action, forward_to=forward_to) + break + except gdata.apps.service.AppsForYourDomainException, e: + terminating_error = checkErrorCode(e) + if not terminating_error: + try_count = try_count + 1 + if try_count > 5: sys.stderr.write('Temporary error %s. Retry %s in %s seconds\n' % (str(e.error_code), try_count, str(wait_on_fail))) + time.sleep(wait_on_fail) + wait_on_fail = wait_on_fail * 2 if wait_on_fail < 32 else 60 + continue + else: + sys.stderr.write('Error: %s\n' % terminating_error) + hard_fail = True + break + if try_count == 10 or hard_fail: + sys.stderr.write('Giving up\n') + continue + +def getForward(users): + emailsettings = getEmailSettingsObject() + for user in users: + if user.find('@') > 0: + emailsettings.domain = user[user.find('@')+1:] + user = user[:user.find('@')] + else: + emailsettings.domain = domain + try_count = 0 + wait_on_fail = .5 + hard_fail = False + while try_count < 10: + try: + forward = emailsettings.GetForward(username=user) + break + except gdata.apps.service.AppsForYourDomainException, e: + terminating_error = checkErrorCode(e) + if not terminating_error: + try_count = try_count + 1 + if try_count > 5: sys.stderr.write('Temporary error %s. 
Retry %s in %s seconds\n' % (str(e.error_code), try_count, str(wait_on_fail))) + time.sleep(wait_on_fail) + wait_on_fail = wait_on_fail * 2 if wait_on_fail < 32 else 60 + continue + else: + sys.stderr.write('Error: %s\n' % terminating_error) + hard_fail = True + break + if try_count == 10 or hard_fail: + sys.stderr.write('Giving up\n') + continue + print "User %s: Forward To:%s Enabled:%s Action:%s" % (user+'@'+emailsettings.domain, forward['forwardTo'], forward['enable'], forward['action']) + +def doSignature(users): + if sys.argv[4].lower() == 'file': + fp = open(sys.argv[5], 'rb') + signature = cgi.escape(fp.read().replace('\\n', ' ').replace('"', "'")) + fp.close() + else: + signature = cgi.escape(sys.argv[4]).replace('\\n', ' ').replace('"', "'") + xmlsig = ''' + + +''' % signature + emailsettings = getEmailSettingsObject() + count = len(users) + i = 1 + for user in users: + if user.find('@') > 0: + emailsettings.domain = user[user.find('@')+1:] + user = user[:user.find('@')] + else: + emailsettings.domain = domain #make sure it's back at default domain + print "Setting Signature for %s (%s of %s)" % (user+'@'+emailsettings.domain, i, count) + uri = 'https://apps-apis.google.com/a/feeds/emailsettings/2.0/%s/%s/signature' % (emailsettings.domain, user) + i = i + 1 + try_count = 0 + wait_on_fail = .5 + hard_fail = False + while try_count < 10: + try: + emailsettings.Put(xmlsig, uri) + break + except gdata.apps.service.AppsForYourDomainException, e: + terminating_error = checkErrorCode(e) + if not terminating_error: + try_count = try_count + 1 + if try_count > 5: sys.stderr.write('Temporary error %s. Retry %s in %s seconds\n' % (str(e.error_code), try_count, str(wait_on_fail))) + time.sleep(wait_on_fail) + wait_on_fail = wait_on_fail * 2 if wait_on_fail < 32 else 60 + continue + else: + sys.stderr.write('Error: %s\n' % terminating_error) + hard_fail = True + break + if try_count == 10 or hard_fail: + sys.stderr.write('Giving up\n') + continue + +def getSignature(users): + emailsettings = getEmailSettingsObject() + for user in users: + if user.find('@') > 0: + emailsettings.domain = user[user.find('@')+1:] + user = user[:user.find('@')] + else: + emailsettings.domain = domain + try_count = 0 + wait_on_fail = .5 + hard_fail = False + while try_count < 10: + try: + signature = emailsettings.GetSignature(username=user) + break + except gdata.apps.service.AppsForYourDomainException, e: + terminating_error = checkErrorCode(e) + if not terminating_error: + try_count = try_count + 1 + if try_count > 5: sys.stderr.write('Temporary error %s. 
Retry %s in %s seconds\n' % (str(e.error_code), try_count, str(wait_on_fail))) + time.sleep(wait_on_fail) + wait_on_fail = wait_on_fail * 2 if wait_on_fail < 32 else 60 + continue + else: + sys.stderr.write('Error: %s\n' % terminating_error) + hard_fail = True + break + if try_count == 10 or hard_fail: + sys.stderr.write('Giving up\n') + continue + sys.stderr.write("User %s signature:\n " % (user+'@'+emailsettings.domain)) + print "%s" % signature['signature'] + +def doWebClips(users): + if sys.argv[4].lower() == 'on': + enable = True + elif sys.argv[4].lower() == 'off': + enable = False + else: + showUsage() + sys.exit(2) + emailsettings = getEmailSettingsObject() + count = len(users) + i = 1 + for user in users: + if user.find('@') > 0: + emailsettings.domain = user[user.find('@')+1:] + user = user[:user.find('@')] + else: + emailsettings.domain = domain #make sure it's back at default domain + print "Turning Web Clips %s for %s (%s of %s)" % (sys.argv[4], user+'@'+emailsettings.domain, i, count) + i = i + 1 + try_count = 0 + wait_on_fail = .5 + hard_fail = False + while try_count < 10: + try: + emailsettings.UpdateWebClipSettings(username=user, enable=enable) + break + except gdata.apps.service.AppsForYourDomainException, e: + terminating_error = checkErrorCode(e) + if not terminating_error: + try_count = try_count + 1 + if try_count > 5: sys.stderr.write('Temporary error %s. Retry %s in %s seconds\n' % (str(e.error_code), try_count, str(wait_on_fail))) + time.sleep(wait_on_fail) + wait_on_fail = wait_on_fail * 2 if wait_on_fail < 32 else 60 + continue + else: + sys.stderr.write('Error: %s\n' % terminating_error) + hard_fail = True + break + if try_count == 10 or hard_fail: + sys.stderr.write('Giving up\n') + continue + +def doVacation(users): + subject = message = '' + if sys.argv[4] == 'on': + enable = 'true' + elif sys.argv[4] == 'off': + enable = 'false' + else: + showUsage() + sys.exit(2) + contacts_only = domain_only = 'false' + start_date = end_date = None + i = 5 + while i < len(sys.argv): + if sys.argv[i].lower() == 'subject': + subject = sys.argv[i+1] + i = i + 2 + elif sys.argv[i].lower() == 'message': + message = sys.argv[i+1] + i = i + 2 + elif sys.argv[i].lower() == 'contactsonly': + contacts_only = 'true' + i = i + 1 + elif sys.argv[i].lower() == 'domainonly': + domain_only = 'true' + i = i + 1 + elif sys.argv[i].lower() == 'startdate': + start_date = sys.argv[i+1] + i = i + 2 + elif sys.argv[i].lower() == 'enddate': + end_date = sys.argv[i+1] + i = i + 2 + elif sys.argv[i].lower() == 'file': + fp = open(sys.argv[i+1], 'rb') + message = fp.read() + fp.close() + i = i + 2 + else: + showUsage() + sys.exit(2) + i = 1 + count = len(users) + emailsettings = getEmailSettingsObject() + message = cgi.escape(message).replace('\\n', ' ').replace('"', "'") + vacxml = ''' + + ''' % enable + vacxml += ''' + + + ''' % (subject, message, contacts_only, domain_only) + if start_date != None: + vacxml += '''''' % start_date + if end_date != None: + vacxml += '''''' % end_date + vacxml += '' + for user in users: + if user.find('@') > 0: + emailsettings.domain = user[user.find('@')+1:] + user = user[:user.find('@')] + else: + emailsettings.domain = domain #make sure it's back at default domain + print "Setting Vacation for %s (%s of %s)" % (user+'@'+emailsettings.domain, i, count) + uri = 'https://apps-apis.google.com/a/feeds/emailsettings/2.0/%s/%s/vacation' % (emailsettings.domain, user) + i = i + 1 + try_count = 0 + wait_on_fail = .5 + hard_fail = False + while try_count < 10: + try: + 
emailsettings.Put(vacxml, uri) + break + except gdata.apps.service.AppsForYourDomainException, e: + terminating_error = checkErrorCode(e) + if not terminating_error: + try_count = try_count + 1 + if try_count > 5: sys.stderr.write('Temporary error %s. Retry %s in %s seconds\n' % (str(e.error_code), try_count, str(wait_on_fail))) + time.sleep(wait_on_fail) + wait_on_fail = wait_on_fail * 2 if wait_on_fail < 32 else 60 + continue + else: + sys.stderr.write('Error: %s\n' % terminating_error) + hard_fail = True + break + if try_count == 10 or hard_fail: + sys.stderr.write('Giving up\n') + continue + +def getVacation(users): + emailsettings = getEmailSettingsObject() + for user in users: + if user.find('@') > 0: + emailsettings.domain = user[user.find('@')+1:] + user = user[:user.find('@')] + else: + emailsettings.domain = domain + try_count = 0 + wait_on_fail = .5 + hard_fail = False + while try_count < 10: + try: + vacationsettings = emailsettings.GetVacation(username=user) + break + except gdata.apps.service.AppsForYourDomainException, e: + terminating_error = checkErrorCode(e) + if not terminating_error: + try_count = try_count + 1 + if try_count > 5: sys.stderr.write('Temporary error %s. Retry %s in %s seconds\n' % (str(e.error_code), try_count, str(wait_on_fail))) + time.sleep(wait_on_fail) + wait_on_fail = wait_on_fail * 2 if wait_on_fail < 32 else 60 + continue + else: + sys.stderr.write('Error: %s\n' % terminating_error) + hard_fail = True + break + if try_count == 10 or hard_fail: + sys.stderr.write('Giving up\n') + continue + print '''User %s + Enabled: %s + Contacts Only: %s + Domain Only: %s + Subject: %s + Message: %s + Start Date: %s + End Date: %s +''' % (user+'@'+emailsettings.domain, vacationsettings['enable'], vacationsettings['contactsOnly'], vacationsettings['domainOnly'], vacationsettings['subject'], vacationsettings['message'], vacationsettings['startDate'], vacationsettings['endDate']) + +def doCreateUser(): + gotFirstName = gotLastName = gotPassword = doOrg = False + suspended = hash_function = change_password = ip_whitelisted = quota_in_gb = is_admin = agreed_to_terms = nohash = customer_id = None + user_email = sys.argv[3] + i = 4 + while i < len(sys.argv): + if sys.argv[i].lower() == 'firstname': + first_name = sys.argv[i+1] + gotFirstName = True + i = i + 2 + elif sys.argv[i].lower() == 'lastname': + last_name = sys.argv[i+1] + gotLastName = True + i = i + 2 + elif sys.argv[i].lower() == 'password': + password = sys.argv[i+1] + gotPassword = True + i = i + 2 + elif sys.argv[i].lower() == 'suspended': + if sys.argv[i+1].lower() == 'on': + suspended = True + elif sys.argv[i+1].lower() == 'off': + suspended = False + else: + print 'Error: suspended should be on or off, not %s' % sys.argv[i+1] + sys.exit(5) + i = i + 2 + elif sys.argv[i].lower() == 'sha' or sys.argv[i].lower() == 'sha1' or sys.argv[i].lower() == 'sha-1': + hash_function = 'SHA-1' + i = i + 1 + elif sys.argv[i].lower() == 'md5': + hash_function = 'MD5' + i = i + 1 + elif sys.argv[i].lower() == 'nohash': + nohash = True + i = i + 1 + elif sys.argv[i].lower() == 'quota': + quota_in_gb = sys.argv[i+1] + i = i + 2 + elif sys.argv[i].lower() == 'changepassword': + if sys.argv[i+1] == 'on': + change_password = True + elif sys.argv[i+1] == 'off': + change_password = False + else: + print 'Error: changepassword should be on or off, not %s' % sys.argv[i+1] + sys.exit(5) + i = i + 2 + elif sys.argv[i].lower() == 'ipwhitelisted': + if sys.argv[i+1] == 'on': + ip_whitelisted = True + elif sys.argv[i+1] == 'off': + 
ip_whitelisted = False + else: + print 'Error: ipwhitelisted should be on or off, not %s' % sys.argv[i+1] + i = i + 2 + elif sys.argv[i].lower() == 'admin': + if sys.argv[i+1] == 'on': + is_admin = True + elif sys.argv[i+1] == 'off': + is_admin = False + else: + print 'Error: admin should be on or off, not %s' % sys.argv[i+1] + sys.exit(5) + i = i + 2 + elif sys.argv[i].lower() == 'agreedtoterms': + if sys.argv[i+1] == 'on': + agreed_to_terms = True + elif sys.argv[i+1] == 'off': + agreed_to_terms = False + else: + print 'Error: agreedtoterms should be on or off, not %s' % sys.argv[i+1] + sys.exit(5) + i = i + 2 + elif sys.argv[i].lower() == 'org' or sys.argv[i].lower() == 'ou': + org = sys.argv[i+1] + doOrg = True + i = i + 2 + elif sys.argv[i].lower() == 'customerid': + customer_id = sys.argv[i+1] + i = i + 2 + else: + showUsage() + sys.exit(2) + if not gotFirstName: + first_name = 'Unknown' + if not gotLastName: + last_name = 'Unknown' + if not gotPassword: + password = ''.join(random.sample('abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789~`!@#$%^&*()-=_+:;"\'{}[]\\|', 25)) + if hash_function == None and not nohash: + newhash = sha1() + newhash.update(password) + password = newhash.hexdigest() + hash_function = 'SHA-1' + multi = getMultiDomainObject() + if user_email.find('@') == -1: + user_email = '%s@%s' % (user_email, domain) + print "Creating account for %s" % user_email + try_count = 0 + wait_on_fail = .5 + while try_count < 10: + try: + multi.CreateUser(user_email=user_email, last_name=last_name, first_name=first_name, password=password, suspended=suspended, quota_in_gb=quota_in_gb, hash_function=hash_function, change_password=change_password, is_admin=is_admin, agreed_to_terms=agreed_to_terms, ip_whitelisted=ip_whitelisted) + break + except gdata.apps.service.AppsForYourDomainException, e: + terminating_error = checkErrorCode(e) + if not terminating_error: + try_count = try_count + 1 + if try_count > 5: sys.stderr.write('Temporary error %s. Retry %s in %s seconds\n' % (str(e.error_code), try_count, str(wait_on_fail))) + time.sleep(wait_on_fail) + wait_on_fail = wait_on_fail * 2 if wait_on_fail < 32 else 60 + continue + else: + sys.stderr.write('Error: %s\n' % terminating_error) + sys.exit(e.error_code) + if try_count == 10: + sys.stderr.write('Giving up\n') + sys.exit(e.error_code) + if doOrg: + orgObj = getOrgObject() + print "Moving %s to org %s" % (user_email, org) + try_count = 0 + wait_on_fail = .5 + while try_count < 10: + try: + orgObj.UpdateUserOrganization(user=user_email, new_name=org, customer_id=customer_id) + break + except gdata.apps.service.AppsForYourDomainException, e: + terminating_error = checkErrorCode(e) + if not terminating_error: + try_count = try_count + 1 + if try_count > 5: sys.stderr.write('Temporary error %s. 
Retry %s in %s seconds\n' % (str(e.error_code), try_count, str(wait_on_fail))) + time.sleep(wait_on_fail) + wait_on_fail = wait_on_fail * 2 if wait_on_fail < 32 else 60 + continue + else: + sys.stderr.write('Error: %s\n' % terminating_error) + sys.exit(e.error_code) + if try_count == 10: + sys.stderr.write('Giving up\n') + sys.exit(e.error_code) + +def doCreateGroup(): + group = sys.argv[3] + got_name = False + group_description = None + group_permission = None + i = 4 + while i < len(sys.argv): + if sys.argv[i].lower() == 'name': + group_name = sys.argv[i+1] + got_name = True + i = i + 2 + elif sys.argv[i].lower() == 'description': + group_description = sys.argv[i+1] + i = i + 2 + elif sys.argv[i].lower() == 'permission': + group_permission = sys.argv[i+1] + if group_permission.lower() == 'owner': + group_permission = 'Owner' + elif group_permission.lower() == 'member': + group_permission = 'Member' + elif group_permission.lower() == 'domain': + group_permission = 'Domain' + elif group_permission.lower() == 'anyone': + group_permission = 'Anyone' + else: + showUsage() + sys.exit(2) + got_permission = True + i = i + 2 + groupObj = getGroupsObject() + if group.find('@') == -1: + group = '%s@%s' % (group, groupObj.domain) + if not got_name: + group_name = group + try_count = 0 + wait_on_fail = .5 + hard_fail = False + while try_count < 10: + try: + result = groupObj.CreateGroup(group, group_name, group_description, group_permission) + break + except gdata.apps.service.AppsForYourDomainException, e: + terminating_error = checkErrorCode(e) + if not terminating_error: + try_count = try_count + 1 + if try_count > 5: sys.stderr.write('Temporary error %s. Retry %s in %s seconds\n' % (str(e.error_code), try_count, str(wait_on_fail))) + time.sleep(wait_on_fail) + wait_on_fail = wait_on_fail * 2 if wait_on_fail < 32 else 60 + continue + else: + sys.stderr.write('Error: %s\n' % terminating_error) + hard_fail = True + break + if try_count == 10 or hard_fail: + sys.stderr.write('Giving up\n') + sys.exit(e.error_code) + +def doCreateNickName(): + alias_email = sys.argv[3] + if sys.argv[4].lower() != 'user': + showUsage() + sys.exit(2) + user_email = sys.argv[5] + multi = getMultiDomainObject() + if alias_email.find('@') == -1: + alias_email = '%s@%s' % (alias_email, domain) + if user_email.find('@') == -1: + user_email = '%s@%s' % (user_email, domain) + print 'Creating alias %s for user %s' % (alias_email, user_email) + try_count = 0 + wait_on_fail = .5 + hard_fail = False + while try_count < 10: + try: + multi.CreateAlias(user_email=user_email, alias_email=alias_email) + break + except gdata.apps.service.AppsForYourDomainException, e: + terminating_error = checkErrorCode(e) + if not terminating_error: + try_count = try_count + 1 + if try_count > 5: sys.stderr.write('Temporary error %s. 
Retry %s in %s seconds\n' % (str(e.error_code), try_count, str(wait_on_fail))) + time.sleep(wait_on_fail) + wait_on_fail = wait_on_fail * 2 if wait_on_fail < 32 else 60 + continue + else: + sys.stderr.write('Error: %s\n' % terminating_error) + hard_fail = True + break + if try_count == 10 or hard_fail: + sys.stderr.write('Giving up\n') + sys.exit(e.error_code) + +def doCreateOrg(): + name = sys.argv[3] + description = '' + parent_org_unit_path = '/' + block_inheritance = False + i = 4 + while i < len(sys.argv): + if sys.argv[i].lower() == 'description': + description = sys.argv[i+1] + i = i + 2 + elif sys.argv[i].lower() == 'parent': + parent_org_unit_path = sys.argv[i+1] + i = i + 2 + elif sys.argv[i].lower() == 'noinherit': + block_inheritance = True + i = i + 1 + org = getOrgObject() + try_count = 0 + wait_on_fail = .5 + hard_fail = False + while try_count < 10: + try: + org.CreateOrganizationUnit(name=name, description=description, parent_org_unit_path=parent_org_unit_path, block_inheritance=block_inheritance) + break + except gdata.apps.service.AppsForYourDomainException, e: + terminating_error = checkErrorCode(e) + if not terminating_error: + try_count = try_count + 1 + if try_count > 5: sys.stderr.write('Temporary error %s. Retry %s in %s seconds\n' % (str(e.error_code), try_count, str(wait_on_fail))) + time.sleep(wait_on_fail) + wait_on_fail = wait_on_fail * 2 if wait_on_fail < 32 else 60 + continue + else: + sys.stderr.write('Error: %s\n' % terminating_error) + hard_fail = True + break + if try_count == 10 or hard_fail: + sys.stderr.write('Giving up\n') + sys.exit(e.error_code) + +def doCreateResource(): + id = sys.argv[3] + common_name = sys.argv[4] + description = None + type = None + i = 5 + while i < len(sys.argv): + if sys.argv[i].lower() == 'description': + description = sys.argv[i+1] + i = i + 2 + elif sys.argv[i].lower() == 'type': + type = sys.argv[i+1] + i = i + 2 + rescal = getResCalObject() + try_count = 0 + wait_on_fail = .5 + hard_fail = False + while try_count < 10: + try: + rescal.CreateResourceCalendar(id=id, common_name=common_name, description=description, type=type) + break + except gdata.apps.service.AppsForYourDomainException, e: + terminating_error = checkErrorCode(e) + if not terminating_error: + try_count = try_count + 1 + if try_count > 5: sys.stderr.write('Temporary error %s. 
Retry %s in %s seconds\n' % (str(e.error_code), try_count, str(wait_on_fail))) + time.sleep(wait_on_fail) + wait_on_fail = wait_on_fail * 2 if wait_on_fail < 32 else 60 + continue + else: + sys.stderr.write('Error: %s\n' % terminating_error) + hard_fail = True + break + if try_count == 10 or hard_fail: + sys.stderr.write('Giving up\n') + sys.exit(e.error_code) + +def doUpdateUser(): + gotPassword = isMD5 = isSHA1 = doOrg = False + first_name = last_name = password = suspended = hash_function = change_password = ip_whitelisted = quota_in_gb = is_admin = agreed_to_terms = nohash = customer_id = None + user_email = sys.argv[3] + i = 4 + do_update_user = False + do_rename_user = False + while i < len(sys.argv): + if sys.argv[i].lower() == 'firstname': + do_update_user = True + first_name = sys.argv[i+1] + i = i + 2 + elif sys.argv[i].lower() == 'lastname': + do_update_user = True + last_name = sys.argv[i+1] + i = i + 2 + elif sys.argv[i].lower() == 'username': + new_email = sys.argv[i+1] + do_rename_user = True + i = i + 2 + elif sys.argv[i].lower() == 'password': + do_update_user = True + password = sys.argv[i+1] + i = i + 2 + gotPassword = True + elif sys.argv[i].lower() == 'admin': + do_update_user = True + if sys.argv[i+1].lower() == 'on': + is_admin = True + elif sys.argv[i+1].lower() == 'off': + is_admin = False + i = i + 2 + elif sys.argv[i].lower() == 'suspended': + do_update_user = True + if sys.argv[i+1].lower() == 'on': + suspended = True + elif sys.argv[i+1].lower() == 'off': + suspended = False + i = i + 2 + elif sys.argv[i].lower() == 'ipwhitelisted': + do_update_user = True + if sys.argv[i+1].lower() == 'on': + ip_whitelisted = True + elif sys.argv[i+1].lower() == 'off': + ip_whitelisted = False + i = i + 2 + elif sys.argv[i].lower() == 'sha1' or sys.argv[i].lower() == 'sha1' or sys.argv[i].lower() == 'sha-1': + do_update_user = True + hash_function = 'SHA-1' + i = i + 1 + isSHA1 = True + elif sys.argv[i].lower() == 'md5': + do_update_user = True + hash_function = 'MD5' + i = i + 1 + isMD5 = True + elif sys.argv[i].lower() == 'nohash': + nohash = True + i = i + 1 + elif sys.argv[i].lower() == 'changepassword': + do_update_user = True + if sys.argv[i+1].lower() == 'on': + change_password = True + elif sys.argv[i+1].lower() == 'off': + change_password = False + i = i + 2 + elif sys.argv[i].lower() == 'org' or sys.argv[i].lower() == 'ou': + doOrg = True + org = sys.argv[i+1] + i = i + 2 + elif sys.argv[i].lower() == 'customerid': + customer_id = sys.argv[i+1] + i = i + 2 + elif sys.argv[i].lower() == 'agreedtoterms': + do_update_user = True + if sys.argv[i+1].lower() == 'on': + agreed_to_terms = True + elif sys.argv[i+1].lower() == 'off': + agreed_to_terms = False + i = i + 2 + else: + showUsage() + sys.exit(2) + if gotPassword and not (isSHA1 or isMD5 or nohash): + newhash = sha1() + newhash.update(password) + password = newhash.hexdigest() + hash_function = 'SHA-1' + multi = getMultiDomainObject() + if user_email.find('@') == -1: + user_email = '%s@%s' % (user_email, domain) + if do_update_user: + try_count = 0 + wait_on_fail = .5 + while try_count < 10: + try: + result = multi.UpdateUser(user_email=user_email, password=password, first_name=first_name, last_name=last_name, is_admin=is_admin, hash_function=hash_function, + change_password=change_password, agreed_to_terms=agreed_to_terms, + suspended=suspended, ip_whitelisted=ip_whitelisted, quota_in_gb=quota_in_gb) + break + except gdata.apps.service.AppsForYourDomainException, e: + terminating_error = checkErrorCode(e) + if not 
terminating_error: + try_count = try_count + 1 + if try_count > 5: sys.stderr.write('Temporary error %s. Retry %s in %s seconds\n' % (str(e.error_code), try_count, str(wait_on_fail))) + time.sleep(wait_on_fail) + wait_on_fail = wait_on_fail * 2 if wait_on_fail < 32 else 60 + continue + else: + sys.stderr.write('Error: %s\n' % terminating_error) + sys.exit(e.error_code) + if try_count == 10: + sys.stderr.write('Giving up\n') + sys.exit(e.error_code) + if do_rename_user: + try_count = 0 + wait_on_fail = .5 + while try_count < 10: + try: + multi.RenameUser(old_email=user_email, new_email=new_email) + break + except gdata.apps.service.AppsForYourDomainException, e: + terminating_error = checkErrorCode(e) + if not terminating_error: + try_count = try_count + 1 + if try_count > 5: sys.stderr.write('Temporary error %s. Retry %s in %s seconds\n' % (str(e.error_code), try_count, str(wait_on_fail))) + time.sleep(wait_on_fail) + wait_on_fail = wait_on_fail * 2 if wait_on_fail < 32 else 60 + continue + else: + sys.stderr.write('Error: %s\n' % terminating_error) + sys.exit(e.error_code) + if try_count == 10: + sys.stderr.write('Giving up\n') + sys.exit(e.error_code) + if doOrg: + orgObj = getOrgObject() + print "Moving %s to org %s" % (user_email, org) + try_count = 0 + wait_on_fail = .5 + while try_count < 10: + try: + orgObj.UpdateUserOrganization(user=user_email, new_name=org, customer_id=customer_id) + break + except gdata.apps.service.AppsForYourDomainException, e: + terminating_error = checkErrorCode(e) + if not terminating_error: + try_count = try_count + 1 + if try_count > 5: sys.stderr.write('Temporary error %s. Retry %s in %s seconds\n' % (str(e.error_code), try_count, str(wait_on_fail))) + time.sleep(wait_on_fail) + wait_on_fail = wait_on_fail * 2 if wait_on_fail < 32 else 60 + continue + else: + sys.stderr.write('Error: %s\n' % terminating_error) + sys.exit(e.error_code) + if try_count == 10: + sys.stderr.write('Giving up\n') + sys.exit(e.error_code) + +def doUpdateGroup(): + groupObj = getGroupsObject() + group = sys.argv[3] + if group.find('@') == -1: + group = group+'@'+domain + if sys.argv[4].lower() == 'add': + if sys.argv[5].lower() == 'owner': + userType = 'Owner' + elif sys.argv[5].lower() == 'member': + userType = 'Member' + user = sys.argv[6] + if user != '*' and user.find('@') == -1: + email = user+'@'+domain + else: + email = user + if userType == 'Member': + try_count = 0 + wait_on_fail = .5 + while try_count < 10: + try: + result = groupObj.AddMemberToGroup(email, group) + break + except gdata.apps.service.AppsForYourDomainException, e: + terminating_error = checkErrorCode(e) + if not terminating_error: + try_count = try_count + 1 + if try_count > 5: sys.stderr.write('Temporary error %s. Retry %s in %s seconds\n' % (str(e.error_code), try_count, str(wait_on_fail))) + time.sleep(wait_on_fail) + wait_on_fail = wait_on_fail * 2 if wait_on_fail < 32 else 60 + continue + else: + sys.stderr.write('Error: %s\n' % terminating_error) + sys.exit(e.error_code) + if try_count == 10: + sys.stderr.write('Giving up\n') + sys.exit(e.error_code) + elif userType == 'Owner': + try_count = 0 + wait_on_fail = .5 + while try_count < 10: + try: + result2 = groupObj.AddOwnerToGroup(email, group) + break + except gdata.apps.service.AppsForYourDomainException, e: + terminating_error = checkErrorCode(e) + if not terminating_error: + try_count = try_count + 1 + if try_count > 5: sys.stderr.write('Temporary error %s. 
Retry %s in %s seconds\n' % (str(e.error_code), try_count, str(wait_on_fail))) + time.sleep(wait_on_fail) + wait_on_fail = wait_on_fail * 2 if wait_on_fail < 32 else 60 + continue + else: + sys.stderr.write('Error: %s\n' % terminating_error) + sys.exit(e.error_code) + if try_count == 10: + sys.stderr.write('Giving up\n') + sys.exit(e.error_code) + elif sys.argv[4].lower() == 'remove': + if sys.argv[5].lower() == 'owner': + userType = 'Owner' + elif sys.argv[5].lower() == 'member': + userType = 'Member' + user = sys.argv[6] + if user != '*' and user.find('@') == -1: + email = user+'@'+domain + else: + email = user + if userType == 'Member': + try_count = 0 + wait_on_fail = .5 + while try_count < 10: + try: + result = groupObj.RemoveMemberFromGroup(email, group) + break + except gdata.apps.service.AppsForYourDomainException, e: + terminating_error = checkErrorCode(e) + if not terminating_error: + try_count = try_count + 1 + if try_count > 5: sys.stderr.write('Temporary error %s. Retry %s in %s seconds\n' % (str(e.error_code), try_count, str(wait_on_fail))) + time.sleep(wait_on_fail) + wait_on_fail = wait_on_fail * 2 if wait_on_fail < 32 else 60 + continue + else: + sys.stderr.write('Error: %s\n' % terminating_error) + sys.exit(e.error_code) + if try_count == 10: + sys.stderr.write('Giving up\n') + sys.exit(e.error_code) + elif userType == 'Owner': + try_count = 0 + wait_on_fail = .5 + while try_count < 10: + try: + result = groupObj.RemoveOwnerFromGroup(email, group) + break + except gdata.apps.service.AppsForYourDomainException, e: + terminating_error = checkErrorCode(e) + if not terminating_error: + try_count = try_count + 1 + if try_count > 5: sys.stderr.write('Temporary error %s. Retry %s in %s seconds\n' % (str(e.error_code), try_count, str(wait_on_fail))) + time.sleep(wait_on_fail) + wait_on_fail = wait_on_fail * 2 if wait_on_fail < 32 else 60 + continue + else: + sys.stderr.write('Error: %s\n' % terminating_error) + sys.exit(e.error_code) + if try_count == 10: + sys.stderr.write('Giving up\n') + sys.exit(e.error_code) + + else: + i = 4 + use_prov_api = True + if not sys.argv[i].lower() == 'settings': + groupInfo = groupObj.RetrieveGroup(group) + while i < len(sys.argv): + if sys.argv[i].lower() == 'name': + groupInfo['groupName'] = sys.argv[i+1] + i = i + 2 + elif sys.argv[i].lower() == 'description': + groupInfo['description'] = sys.argv[i+1] + i = i + 2 + elif sys.argv[i].lower() == 'permission': + if sys.argv[i+1].lower() == 'owner': + groupInfo['emailPermission'] = 'Owner' + elif sys.argv[i+1].lower() == 'member': + groupInfo['emailPermission'] = 'Member' + elif sys.argv[i+1].lower() == 'domain': + groupInfo['emailPermission'] = 'Domain' + elif sys.argv[i+1].lower() == 'anyone': + groupInfo['emailPermission'] = 'Anyone' + i = i + 2 + else: + use_prov_api = False + i = i + 1 + if use_prov_api: + try_count = 0 + wait_on_fail = .5 + while try_count < 10: + try: + result = groupObj.UpdateGroup(group, groupInfo['groupName'], groupInfo['description'], groupInfo['emailPermission']) + break + except gdata.apps.service.AppsForYourDomainException, e: + terminating_error = checkErrorCode(e) + if not terminating_error: + try_count = try_count + 1 + if try_count > 5: sys.stderr.write('Temporary error %s. 
Retry %s in %s seconds\n' % (str(e.error_code), try_count, str(wait_on_fail))) + time.sleep(wait_on_fail) + wait_on_fail = wait_on_fail * 2 if wait_on_fail < 32 else 60 + continue + else: + sys.stderr.write('Error: %s\n' % terminating_error) + sys.exit(e.error_code) + if try_count == 10: + sys.stderr.write('Giving up\n') + sys.exit(e.error_code) + else: + allow_external_members = allow_google_communication = allow_web_posting = archive_only = custom_reply_to = default_message_deny_notification_text = description = is_archived = max_message_bytes = members_can_post_as_the_group = message_display_font = message_moderation_level = name = primary_language = reply_to = send_message_deny_notification = show_in_group_directory = who_can_invite = who_can_join = who_can_post_message = who_can_view_group = who_can_view_membership = include_in_global_address_list = spam_moderation_level = None + while i < len(sys.argv): + if sys.argv[i].lower() == 'allow_external_members': + allow_external_members = sys.argv[i+1].lower() + if allow_external_members != 'true' and allow_external_members != 'false': + print 'Error: Value for allow_external_members must be true or false. Got %s' % allow_external_members + sys.exit(9) + i = i + 2 + elif sys.argv[i].lower() == 'include_in_global_address_list': + include_in_global_address_list = sys.argv[i+1].lower() + if include_in_global_address_list != 'true' and include_in_global_address_list != 'false': + print 'Error: Value for include_in_global_address_list must be true or false. Got %s' % include_in_global_address_list + sys.exit(9) + i = i + 2 + elif sys.argv[i].lower() == 'spam_moderation_level': + spam_moderation_level = sys.argv[i+1].upper() + if spam_moderation_level != 'ALLOW' and spam_moderation_level != 'MODERATE' and spam_moderation_level != 'SILENTLY_MODERATE' and spam_moderation_level != 'REJECT': + print 'Error: Value for spam_moderation_level must be allow, moderate, silently_moderate or reject. Got %s' % spam_moderation_level + sys.exit(9) + i = i + 2 + elif sys.argv[i].lower() == 'message_moderation_level': + message_moderation_level = sys.argv[i+1].upper() + if message_moderation_level != 'MODERATE_ALL_MESSAGES' and message_moderation_level != 'MODERATE_NEW_MEMBERS' and message_moderation_level != 'MODERATE_NONE' and message_moderation_level != 'MODERATE_NON_MEMBERS': + print 'Error: Value for message_moderation_level must be moderate_all_messages, moderate_new_members, moderate_none or moderate_non_members. Got %s' % message_moderation_level + sys.exit(9) + i = i + 2 + elif sys.argv[i].lower() == 'name': + name = sys.argv[i+1] + i = i + 2 + elif sys.argv[i].lower() == 'primary_language': + primary_language = sys.argv[i+1] + i = i + 2 + elif sys.argv[i].lower() == 'reply_to': + reply_to = sys.argv[i+1].upper() + if reply_to != 'REPLY_TO_CUSTOM' and reply_to != 'REPLY_TO_IGNORE' and reply_to != 'REPLY_TO_LIST' and reply_to != 'REPLY_TO_MANAGERS' and reply_to != 'REPLY_TO_OWNER' and reply_to != 'REPLY_TO_SENDER': + print 'Error: Value for reply_to must be reply_to_custom, reply_to_ignore, reply_to_list, reply_to_managers, reply_to_owner or reply_to_sender. Got %s' % reply_to + sys.exit(9) + i = i + 2 + elif sys.argv[i].lower() == 'send_message_deny_notification': + send_message_deny_notification = sys.argv[i+1].lower() + if send_message_deny_notification != 'true' and send_message_deny_notification != 'false': + print 'Error: Value for send_message_deny_notification must be true or false. 
Got %s' % send_message_deny_notification + sys.exit(9) + i = i + 2 + elif sys.argv[i].lower() == 'show_in_groups_directory' or sys.argv[i].lower() == 'show_in_group_directory': + show_in_group_directory = sys.argv[i+1].lower() + if show_in_group_directory != 'true' and show_in_group_directory != 'false': + print 'Error: Value for show_in_group_directory must be true or false. Got %s' % show_in_group_directory + sys.exit(9) + i = i + 2 + elif sys.argv[i].lower() == 'who_can_invite': + who_can_invite = sys.argv[i+1].upper() + if who_can_invite != 'ALL_MANAGERS_CAN_INVITE' and who_can_invite != 'ALL_MEMBERS_CAN_INVITE': + print 'Error: Value for who_can_invite must be all_managers_can_invite or all_members_can_invite. Got %s' % who_can_invite + sys.exit(9) + i = i + 2 + elif sys.argv[i].lower() == 'who_can_join': + who_can_join = sys.argv[i+1].upper() + if who_can_join != 'ALL_IN_DOMAIN_CAN_JOIN' and who_can_join != 'ANYONE_CAN_JOIN' and who_can_join != 'CAN_REQUEST_TO_JOIN' and who_can_join != 'INVITED_CAN_JOIN': + print 'Error: Value for who_can_join must be all_in_domain_can_join, anyone_can_join, can_request_to_join or invited_can_join. Got %s' % who_can_join + sys.exit(9) + i = i + 2 + elif sys.argv[i].lower() == 'who_can_post_message': + who_can_post_message = sys.argv[i+1].upper() + if who_can_post_message != 'ALL_IN_DOMAIN_CAN_POST' and who_can_post_message != 'ALL_MANAGERS_CAN_POST' and who_can_post_message != 'ALL_MEMBERS_CAN_POST' and who_can_post_message != 'ANYONE_CAN_POST' and who_can_post_message != 'NONE_CAN_POST': + print 'Error: Value for who_can_post_message must be all_in_domain_can_post, all_managers_can_post, all_members_can_post, anyone_can_post or none_can_post. Got %s' % who_can_post_message + sys.exit(9) + i = i + 2 + elif sys.argv[i].lower() == 'who_can_view_group': + who_can_view_group = sys.argv[i+1].upper() + if who_can_view_group != 'ALL_IN_DOMAIN_CAN_VIEW' and who_can_view_group != 'ALL_MANAGERS_CAN_VIEW' and who_can_view_group != 'ALL_MEMBERS_CAN_VIEW' and who_can_view_group != 'ANYONE_CAN_VIEW': + print 'Error: Value for who_can_view_group must be all_in_domain_can_view, all_managers_can_view, all_members_can_view or anyone_can_view. Got %s' % who_can_view_group + sys.exit(9) + i = i + 2 + elif sys.argv[i].lower() == 'who_can_view_membership': + who_can_view_membership = sys.argv[i+1].upper() + if who_can_view_membership != 'ALL_IN_DOMAIN_CAN_VIEW' and who_can_view_membership != 'ALL_MANAGERS_CAN_VIEW' and who_can_view_membership != 'ALL_MEMBERS_CAN_VIEW': + print 'Error: Value for who_can_view_membership must be all_in_domain_can_view, all_managers_can_view or all_members_can_view. Got %s' % who_can_view_membership + sys.exit(9) + i = i + 2 + elif sys.argv[i].lower() == 'allow_google_communication': + allow_google_communication = sys.argv[i+1].lower() + if allow_google_communication != 'true' and allow_google_communication != 'false': + print 'Error: Value for allow_google_communication must be true or false. Got %s' % allow_google_communication + sys.exit(9) + i = i + 2 + elif sys.argv[i].lower() == 'allow_web_posting': + allow_web_posting = sys.argv[i+1].lower() + if allow_web_posting != 'true' and allow_web_posting != 'false': + print 'Error: Value for allow_web_posting must be true or false. Got %s' % allow_web_posting + sys.exit(9) + i = i + 2 + elif sys.argv[i].lower() == 'archive_only': + archive_only = sys.argv[i+1].lower() + if archive_only != 'true' and archive_only != 'false': + print 'Error: Value for archive_only must be true or false. 
Got %s' % archive_only + sys.exit(9) + i = i + 2 + elif sys.argv[i].lower() == 'custom_reply_to': + custom_reply_to = sys.argv[i+1] + i = i + 2 + elif sys.argv[i].lower() == 'default_message_deny_notification_text': + default_message_deny_notification_text = sys.argv[i+1] + i = i + 2 + elif sys.argv[i].lower() == 'description': + description = sys.argv[i+1] + i = i + 2 + elif sys.argv[i].lower() == 'is_archived': + is_archived = sys.argv[i+1].lower() + if is_archived != 'true' and is_archived != 'false': + print 'Error: Value for is_archived must be true or false. Got %s' % is_archived + sys.exit(9) + i = i + 2 + elif sys.argv[i].lower() == 'max_message_bytes': + max_message_bytes = sys.argv[i+1] + try: + if max_message_bytes[-1:].upper() == 'M': + max_message_bytes = str(int(max_message_bytes[:-1]) * 1024 * 1024) + elif max_message_bytes[-1:].upper() == 'K': + max_message_bytes = str(int(max_message_bytes[:-1]) * 1024) + elif max_message_bytes[-1].upper() == 'B': + max_message_bytes = str(int(max_message_bytes[:-1])) + else: + max_message_bytes = str(int(max_message_bytes)) + except ValueError: + print 'Error: max_message_bytes must be a number ending with M (megabytes), K (kilobytes) or nothing (bytes). Got %s' % max_message_bytes + sys.exit(9) + i = i + 2 + elif sys.argv[i].lower() == 'members_can_post_as_the_group': + members_can_post_as_the_group = sys.argv[i+1].lower() + if members_can_post_as_the_group != 'true' and members_can_post_as_the_group != 'false': + print 'Error: Value for members_can_post_as_the_group must be true or false. Got %s' % members_can_post_as_the_group + sys.exit(9) + i = i + 2 + elif sys.argv[i].lower() == 'message_display_font': + message_display_font = sys.argv[i+1].upper() + if message_display_font != 'DEFAULT_FONT' and message_display_font != 'FIXED_WIDTH_FONT': + print 'Error: Value for message_display_font must be default_font or fixed_width_font. Got %s' % message_display_font + sys.exit(9) + i = i + 2 + else: + print 'Error: %s is not a valid setting for groups' % sys.argv[i] + sys.exit(10) + gs = getGroupSettingsObject() + try_count = 0 + wait_on_fail = .5 + while try_count < 10: + try: + results = gs.UpdateGroupSettings(group_email=group, allow_external_members=allow_external_members, allow_google_communication=allow_google_communication, allow_web_posting=allow_web_posting, archive_only=archive_only, custom_reply_to=custom_reply_to, default_message_deny_notification_text=default_message_deny_notification_text, description=description, is_archived=is_archived, max_message_bytes=max_message_bytes, members_can_post_as_the_group=members_can_post_as_the_group, message_display_font=message_display_font, message_moderation_level=message_moderation_level, name=name, primary_language=primary_language, reply_to=reply_to, send_message_deny_notification=send_message_deny_notification, show_in_group_directory=show_in_group_directory, who_can_invite=who_can_invite, who_can_join=who_can_join, who_can_post_message=who_can_post_message, who_can_view_group=who_can_view_group, who_can_view_membership=who_can_view_membership, include_in_global_address_list=include_in_global_address_list, spam_moderation_level=spam_moderation_level) + break + except gdata.service.RequestError, e: + if e[0]['status'] == 503 and e[0]['reason'] == 'Service Unavailable': + try_count = try_count + 1 + group_name = group[:group.find('@')] + group_domain = group[group.find('@')+1:] + print 'Working around Google backend error. 
Opening group and sleeping %s sec(s)...\n' % str(wait_on_fail) + try: + url = 'https://groups.google.com/a/%s/group/%s' % (group_domain, group_name) + b = urllib2.urlopen(url) + b.read(100) + b.close() + except urllib2.HTTPError: + pass + time.sleep(wait_on_fail) + wait_on_fail = wait_on_fail * 2 if wait_on_fail < 32 else 60 + continue + print 'Error: %s - %s' % (e[0]['reason'], e[0]['body']) + sys.exit(e[0]['status']) + except gdata.apps.service.AppsForYourDomainException, e: + terminating_error = checkErrorCode(e) + if not terminating_error: + try_count = try_count + 1 + if try_count > 5: sys.stderr.write('Temporary error %s. Retry %s in %s seconds\n' % (str(e.error_code), try_count, str(wait_on_fail))) + time.sleep(wait_on_fail) + wait_on_fail = wait_on_fail * 2 if wait_on_fail < 32 else 60 + continue + else: + sys.stderr.write('Error: %s\n' % terminating_error) + sys.exit(e.error_code) + if try_count == 10: + sys.stderr.write('Giving up\n') + sys.exit(e.error_code) + +def doUpdateNickName(): + alias_email = sys.argv[3] + if sys.argv[4].lower() != 'user': + showUsage() + sys.exit(2) + user_email = sys.argv[5] + multi = getMultiDomainObject() + try_count = 0 + wait_on_fail = .5 + while try_count < 10: + try: + multi.DeleteAlias(alias_email=alias_email) + break + except gdata.apps.service.AppsForYourDomainException, e: + terminating_error = checkErrorCode(e) + if not terminating_error: + try_count = try_count + 1 + if try_count > 5: sys.stderr.write('Temporary error %s. Retry %s in %s seconds\n' % (str(e.error_code), try_count, str(wait_on_fail))) + time.sleep(wait_on_fail) + wait_on_fail = wait_on_fail * 2 if wait_on_fail < 32 else 60 + continue + else: + sys.stderr.write('Error: %s\n' % terminating_error) + sys.exit(e.error_code) + if try_count == 10: + sys.stderr.write('Giving up\n') + sys.exit(e.error_code) + try_count = 0 + wait_on_fail = .5 + while try_count < 10: + try: + multi.CreateAlias(user_email=user_email, alias_email=alias_email) + break + except gdata.apps.service.AppsForYourDomainException, e: + terminating_error = checkErrorCode(e) + if not terminating_error: + try_count = try_count + 1 + if try_count > 5: sys.stderr.write('Temporary error %s. Retry %s in %s seconds\n' % (str(e.error_code), try_count, str(wait_on_fail))) + time.sleep(wait_on_fail) + wait_on_fail = wait_on_fail * 2 if wait_on_fail < 32 else 60 + continue + else: + sys.stderr.write('Error: %s\n' % terminating_error) + sys.exit(e.error_code) + if try_count == 10: + sys.stderr.write('Giving up\n') + sys.exit(e.error_code) + +def doUpdateResourceCalendar(): + id = sys.argv[3] + common_name = None + description = None + type = None + i = 4 + while i < len(sys.argv): + if sys.argv[i].lower() == 'name': + common_name = sys.argv[i+1] + i = i + 2 + elif sys.argv[i].lower() == 'description': + description = sys.argv[i+1] + i = i + 2 + elif sys.argv[i].lower() == 'type': + type = sys.argv[i+1] + i = i + 2 + rescal = getResCalObject() + try_count = 0 + wait_on_fail = .5 + while try_count < 10: + try: + rescal.UpdateResourceCalendar(id=id, common_name=common_name, description=description, type=type) + break + except gdata.apps.service.AppsForYourDomainException, e: + terminating_error = checkErrorCode(e) + if not terminating_error: + try_count = try_count + 1 + if try_count > 5: sys.stderr.write('Temporary error %s. 
Retry %s in %s seconds\n' % (str(e.error_code), try_count, str(wait_on_fail))) + time.sleep(wait_on_fail) + wait_on_fail = wait_on_fail * 2 if wait_on_fail < 32 else 60 + continue + else: + sys.stderr.write('Error: %s\n' % terminating_error) + sys.exit(e.error_code) + if try_count == 10: + sys.stderr.write('Giving up\n') + sys.exit(e.error_code) + +def doUpdateOrg(): + name = sys.argv[3] + new_name = None + description = None + parent_org_unit_path = None + block_inheritance = None + users_to_move = [] + org = getOrgObject() + users = [] + i = 4 + if sys.argv[4].lower() == 'add': + users = sys.argv[5].split(' ') + i = 6 + elif sys.argv[4].lower() == 'fileadd' or sys.argv[4].lower() == 'addfile': + users = [] + filename = sys.argv[5] + usernames = csv.reader(open(filename, 'rb')) + for row in usernames: + users.append(row.pop()) + i = 6 + elif sys.argv[4].lower() == 'groupadd'or sys.argv[4].lower() == 'addgroup': + groupsObj = getGroupsObject() + group = sys.argv[5] + members = groupsObj.RetrieveAllMembers(group) + for member in members: + users.append(member['memberId']) + i = 6 + elif sys.argv[4].lower() == 'addnotingroup': + print 'Retrieving all users in Google Apps Organization (may take some time)' + allorgusersresults = org.RetrieveAllOrganizationUsers() + print 'Retrieved %s users' % len(allorgusersresults) + for auser in allorgusersresults: + users.append(auser['orgUserEmail']) + group = sys.argv[5] + print 'Retrieving all members of %s group (may take some time)' % group + groupsObj = getGroupsObject() + members = groupsObj.RetrieveAllMembers(group) + for member in members: + try: + users.remove(member['memberId']) + except ValueError: + continue + i = 6 + totalusers = len(users) + currentrange = 1 + while len(users) > 25: + while len(users_to_move) < 25: + users_to_move.append(users.pop()) + print "Adding users %s to %s out of %s total to org %s" % (currentrange, currentrange+24, totalusers, name) + try_count = 0 + wait_on_fail = .5 + while try_count < 10: + try: + org.UpdateOrganizationUnit(old_name=name, users_to_move=users_to_move) + break + except gdata.apps.service.AppsForYourDomainException, e: + if e.error_code == 1301 and (e.invalidInput[-19:] == ',orgUnitUsersToMove' or e.invalidInput[:19] == 'orgUnitUsersToMove,'): + invalid_user = e.invalidInput[:e.invalidInput.find(',')] + if invalid_user == 'orgUnitUsersToMove': + invalid_user = e.invalidInput[e.invalidInput.find(',')+1:] + print 'Error: %s not valid, skipping' % invalid_user + users_to_move.remove(invalid_user) + continue + terminating_error = checkErrorCode(e) + if not terminating_error: + try_count = try_count + 1 + if try_count > 5: sys.stderr.write('Temporary error %s. 
Retry %s in %s seconds\n' % (str(e.error_code), try_count, str(wait_on_fail))) + time.sleep(wait_on_fail) + wait_on_fail = wait_on_fail * 2 if wait_on_fail < 32 else 60 + continue + else: + sys.stderr.write('Error: %s\n' % terminating_error) + sys.exit(e.error_code) + if try_count == 10: + sys.stderr.write('Giving up\n') + sys.exit(e.error_code) + currentrange = currentrange + 25 + users_to_move = [] + continue + while len(users) > 0: + users_to_move.append(users.pop()) + if len(users_to_move) < 1: + users_to_move = None + else: + print 'Adding users %s to %s and making other updates to org %s' % (currentrange, totalusers, name) + while i < len(sys.argv): + if sys.argv[i].lower() == 'name': + new_name = sys.argv[i+1] + i = i + 2 + elif sys.argv[i].lower() == 'description': + description = sys.argv[i+1] + i = i + 2 + elif sys.argv[i].lower() == 'parent': + parent_org_unit_path = sys.argv[i+1] + i = i + 2 + elif sys.argv[i].lower() == 'noinherit': + block_inheritance = True + i = i + 1 + elif sys.argv[i].lower() == 'inherit': + block_inheritance = False + i = i + 1 + try_count = 0 + wait_on_fail = .5 + while try_count < 10: + try: + org.UpdateOrganizationUnit(old_name=name, new_name=new_name, description=description, parent_org_unit_path=parent_org_unit_path, block_inheritance=block_inheritance, users_to_move=users_to_move) + break + except gdata.apps.service.AppsForYourDomainException, e: + if e.error_code == 1301 and (e.invalidInput[-19:] == ',orgUnitUsersToMove' or e.invalidInput[:19] == 'orgUnitUsersToMove,'): + invalid_user = e.invalidInput[:e.invalidInput.find(',')] + if invalid_user == 'orgUnitUsersToMove': + invalid_user = e.invalidInput[e.invalidInput.find(',')+1:] + print 'Error: %s not valid, skipping' % invalid_user + users_to_move.remove(invalid_user) + continue + terminating_error = checkErrorCode(e) + if not terminating_error: + try_count = try_count + 1 + if try_count > 5: sys.stderr.write('Temporary error %s. Retry %s in %s seconds\n' % (str(e.error_code), try_count, str(wait_on_fail))) + time.sleep(wait_on_fail) + wait_on_fail = wait_on_fail * 2 if wait_on_fail < 32 else 60 + continue + else: + sys.stderr.write('Error: %s\n' % terminating_error) + sys.exit(e.error_code) + if try_count == 10: + sys.stderr.write('Giving up\n') + sys.exit(e.error_code) + +def doWhatIs(): + email = sys.argv[2] + multi = getMultiDomainObject() + if email.find('@') == -1: + email = '%s@%s' % (email, multi.domain) + try_count = 0 + wait_on_fail = .5 + while try_count < 10: + try: + user = multi.RetrieveAlias(email) + sys.stderr.write('%s is an alias\n\n' % email) + doGetNickNameInfo(alias_email=email) + return + except gdata.apps.service.AppsForYourDomainException, e: + terminating_error = checkErrorCode(e) + if not terminating_error: + try_count = try_count + 1 + if try_count > 5: sys.stderr.write('Temporary error %s. 
Retry %s in %s seconds\n' % (str(e.error_code), try_count, str(wait_on_fail))) + time.sleep(wait_on_fail) + wait_on_fail = wait_on_fail * 2 if wait_on_fail < 32 else 60 + continue + elif e.error_code == 1301: + sys.stderr.write('%s is not an alias...\n' % email) + break + else: + sys.stderr.write('Error: %s\n' % terminating_error) + sys.exit(e.error_code) + if try_count == 10: + sys.stderr.write('Giving up\n') + sys.exit(e.error_code) + try_count = 0 + wait_on_fail = .5 + while try_count < 10: + try: + user = multi.RetrieveUser(email) + sys.stderr.write('%s is a user\n\n' % email) + doGetUserInfo(user_email=email) + return + except gdata.apps.service.AppsForYourDomainException, e: + terminating_error = checkErrorCode(e) + if not terminating_error: + try_count = try_count + 1 + if try_count > 5: sys.stderr.write('Temporary error %s. Retry %s in %s seconds\n' % (str(e.error_code), try_count, str(wait_on_fail))) + time.sleep(wait_on_fail) + wait_on_fail = wait_on_fail * 2 if wait_on_fail < 32 else 60 + continue + elif e.error_code == 1301: + sys.stderr.write('%s is not a user...\n' % email) + break + else: + sys.stderr.write('Error: %s\n' % terminating_error) + sys.exit(e.error_code) + if try_count == 10: + sys.stderr.write('Giving up\n') + sys.exit(e.error_code) + groupObj = getGroupsObject() + try_count = 0 + wait_on_fail = .5 + while try_count < 10: + try: + user = groupObj.RetrieveGroup(email) + sys.stderr.write('%s is a group\n\n' % email) + doGetGroupInfo(group_name=email) + return + except gdata.apps.service.AppsForYourDomainException, e: + terminating_error = checkErrorCode(e) + if not terminating_error: + try_count = try_count + 1 + if try_count > 5: sys.stderr.write('Temporary error %s. Retry %s in %s seconds\n' % (str(e.error_code), try_count, str(wait_on_fail))) + time.sleep(wait_on_fail) + wait_on_fail = wait_on_fail * 2 if wait_on_fail < 32 else 60 + continue + elif e.error_code == 1301: + sys.stderr.write('%s is not a group either. Email address doesn\'t seem to exist!\n' % email) + return + else: + sys.stderr.write('Error: %s\n' % terminating_error) + sys.exit(e.error_code) + if try_count == 10: + sys.stderr.write('Giving up\n') + sys.exit(e.error_code) + try_count = 0 + wait_on_fail = .5 + while try_count < 10: + try: + user = multi.RetrieveAlias(email) + sys.stderr.write('%s is an alias\n\n' % email) + doGetNickNameInfo(alias_email=email) + return + except gdata.apps.service.AppsForYourDomainException, e: + terminating_error = checkErrorCode(e) + if not terminating_error: + try_count = try_count + 1 + if try_count > 5: sys.stderr.write('Temporary error %s. 
Retry %s in %s seconds\n' % (str(e.error_code), try_count, str(wait_on_fail))) + time.sleep(wait_on_fail) + wait_on_fail = wait_on_fail * 2 if wait_on_fail < 32 else 60 + continue + elif e.error_code == 1301: + sys.stderr.write('Not an alias...\n') + break + else: + sys.stderr.write('Error: %s\n' % terminating_error) + sys.exit(e.error_code) + if try_count == 10: + sys.stderr.write('Giving up\n') + sys.exit(e.error_code) + +def doGetUserInfo(user_email=None): + if user_email == None: + user_email = sys.argv[3] + getAliases = getGroups = getOrg = True + i = 4 + while i < len(sys.argv): + if sys.argv[i].lower() == 'noaliases': + getAliases = False + i = i + 1 + elif sys.argv[i].lower() == 'nogroups': + getGroups = False + i = i + 1 + elif sys.argv[i].lower() == 'noorg': + getOrg = False + i = i + 1 + multi = getMultiDomainObject() + try_count = 0 + wait_on_fail = .5 + while try_count < 10: + try: + user = multi.RetrieveUser(user_email) + break + except gdata.apps.service.AppsForYourDomainException, e: + terminating_error = checkErrorCode(e) + if not terminating_error: + try_count = try_count + 1 + if try_count > 5: sys.stderr.write('Temporary error %s. Retry %s in %s seconds\n' % (str(e.error_code), try_count, str(wait_on_fail))) + time.sleep(wait_on_fail) + wait_on_fail = wait_on_fail * 2 if wait_on_fail < 32 else 60 + continue + else: + sys.stderr.write('Error: %s\n' % terminating_error) + sys.exit(e.error_code) + if try_count == 10: + sys.stderr.write('Giving up\n') + sys.exit(e.error_code) + print 'User: %s' % user['userEmail'] + print 'First Name: %s' % user['firstName'] + print 'Last Name: %s' % user['lastName'] + print 'Is an admin: %s' % user['isAdmin'] + print 'Has agreed to terms: %s' % user['agreedToTerms'] + print 'IP Whitelisted: %s' % user['ipWhitelisted'] + print 'Account Suspended: %s' % user['isSuspended'] + print 'Must Change Password: %s' % user['isChangePasswordAtNextLogin'] + if getOrg: + orgObj = getOrgObject() + try_count = 0 + wait_on_fail = .5 + while try_count < 10: + try: + user_org = orgObj.RetrieveUserOrganization(user_email) + break + except gdata.apps.service.AppsForYourDomainException, e: + terminating_error = checkErrorCode(e) + if not terminating_error: + try_count = try_count + 1 + if try_count > 5: sys.stderr.write('Temporary error %s. Retry %s in %s seconds\n' % (str(e.error_code), try_count, str(wait_on_fail))) + time.sleep(wait_on_fail) + wait_on_fail = wait_on_fail * 2 if wait_on_fail < 32 else 60 + continue + else: + sys.stderr.write('Error: %s\n' % terminating_error) + sys.exit(e.error_code) + if try_count == 10: + sys.stderr.write('Giving up\n') + sys.exit(e.error_code) + print 'Organization: %s' % user_org['orgUnitPath'] + if getAliases: + print 'Email Aliases (Nicknames):' + try_count = 0 + wait_on_fail = .5 + while try_count < 10: + try: + nicknames = multi.GetUserAliases(user_email) + break + except gdata.apps.service.AppsForYourDomainException, e: + terminating_error = checkErrorCode(e) + if not terminating_error: + try_count = try_count + 1 + if try_count > 5: sys.stderr.write('Temporary error %s. 
Retry %s in %s seconds\n' % (str(e.error_code), try_count, str(wait_on_fail))) + time.sleep(wait_on_fail) + wait_on_fail = wait_on_fail * 2 if wait_on_fail < 32 else 60 + continue + else: + sys.stderr.write('Error: %s\n' % terminating_error) + sys.exit(e.error_code) + if try_count == 10: + sys.stderr.write('Giving up\n') + sys.exit(e.error_code) + for nick in nicknames: + print ' ' + nick['aliasEmail'] + if getGroups: + groupObj = getGroupsObject() + groupObj.domain = multi.domain + try_count = 0 + wait_on_fail = .5 + while try_count < 10: + try: + groups = groupObj.RetrieveGroups(user_email) + break + except gdata.apps.service.AppsForYourDomainException, e: + terminating_error = checkErrorCode(e) + if not terminating_error: + try_count = try_count + 1 + if try_count > 5: sys.stderr.write('Temporary error %s. Retry %s in %s seconds\n' % (str(e.error_code), try_count, str(wait_on_fail))) + time.sleep(wait_on_fail) + wait_on_fail = wait_on_fail * 2 if wait_on_fail < 32 else 60 + continue + else: + sys.stderr.write('Error: %s\n' % terminating_error) + sys.exit(e.error_code) + if try_count == 10: + sys.stderr.write('Giving up\n') + sys.exit(e.error_code) + print 'Groups:' + for group in groups: + if group['directMember'] == 'true': + directIndirect = 'direct' + else: + directIndirect = 'indirect' + print ' ' + group['groupName'] + ' <' + group['groupId'] + '> (' + directIndirect + ' member)' + +def doGetGroupInfo(group_name=None): + if group_name == None: + group_name = sys.argv[3] + show_group_settings = False + try: + if sys.argv[4].lower() == 'settings': + show_group_settings = True + except IndexError: + pass + if not show_group_settings: + groupObj = getGroupsObject() + if group_name.find('@') == -1: + group_name = group_name+'@'+domain + try_count = 0 + wait_on_fail = .5 + while try_count < 10: + try: + group = groupObj.RetrieveGroup(group_name) + break + except gdata.apps.service.AppsForYourDomainException, e: + terminating_error = checkErrorCode(e) + if not terminating_error: + try_count = try_count + 1 + if try_count > 5: sys.stderr.write('Temporary error %s. Retry %s in %s seconds\n' % (str(e.error_code), try_count, str(wait_on_fail))) + time.sleep(wait_on_fail) + wait_on_fail = wait_on_fail * 2 if wait_on_fail < 32 else 60 + continue + else: + sys.stderr.write('Error: %s\n' % terminating_error) + sys.exit(e.error_code) + if try_count == 10: + sys.stderr.write('Giving up\n') + sys.exit(e.error_code) + print 'Group Name: ',group['groupName'] + try: + print 'Email Permission: ',group['emailPermission'] + except KeyError: + print 'Email Permission: Unknown' + print 'Group ID: ',group['groupId'] + print 'Description: ',group['description'] + try_count = 0 + wait_on_fail = .5 + while try_count < 10: + try: + owners = groupObj.RetrieveAllOwners(group_name, suspended_users=True) + break + except gdata.apps.service.AppsForYourDomainException, e: + terminating_error = checkErrorCode(e) + if not terminating_error: + try_count = try_count + 1 + if try_count > 5: sys.stderr.write('Temporary error %s. 
Retry %s in %s seconds\n' % (str(e.error_code), try_count, str(wait_on_fail))) + time.sleep(wait_on_fail) + wait_on_fail = wait_on_fail * 2 if wait_on_fail < 32 else 60 + continue + else: + sys.stderr.write('Error: %s\n' % terminating_error) + sys.exit(e.error_code) + if try_count == 10: + sys.stderr.write('Giving up\n') + sys.exit(e.error_code) + owner_list = [] + for owner in owners: + owner_list.append(owner['email']) + try_count = 0 + wait_on_fail = .5 + while try_count < 10: + try: + members = groupObj.RetrieveAllMembers(group_name, suspended_users=True) + break + except gdata.apps.service.AppsForYourDomainException, e: + terminating_error = checkErrorCode(e) + if not terminating_error: + try_count = try_count + 1 + if try_count > 5: sys.stderr.write('Temporary error %s. Retry %s in %s seconds\n' % (str(e.error_code), try_count, str(wait_on_fail))) + time.sleep(wait_on_fail) + wait_on_fail = wait_on_fail * 2 if wait_on_fail < 32 else 60 + continue + else: + sys.stderr.write('Error: %s\n' % terminating_error) + sys.exit(e.error_code) + if try_count == 10: + sys.stderr.write('Giving up\n') + sys.exit(e.error_code) + users = [] + for member in members: + users.append(member) + for user in users: + if user['memberId'] in owner_list: + print 'Owner: %s Type: %s Direct Member: %s' % (user['memberId'], user['memberType'], user['directMember']) + owner_list.remove(user['memberId']) + else: + print 'Member: %s Type: %s Direct Member: %s' % (user['memberId'], user['memberType'], user['directMember']) + # remaining owners are owners but not members + for non_member_owner in owner_list: + print 'Non-member owner: %s' % non_member_owner + else: # show group settings + gs = getGroupSettingsObject() + if group_name.find('@') == -1: + group_name = group_name+'@'+gs.domain + try_count = 0 + wait_on_fail = .5 + while try_count < 10: + try: + settings = gs.RetrieveGroupSettings(group_name) + break + except gdata.service.RequestError, e: + if e[0]['status'] == 503 and e[0]['reason'] == 'Service Unavailable': + try_count = try_count + 1 + group_username = group_name[:group_name.find('@')] + group_domain = group_name[group_name.find('@')+1:] + sys.stderr.write('Working around Google backend error. Opening group and sleeping %s sec(s)...\n' % str(wait_on_fail)) + try: + url = 'https://groups.google.com/a/%s/group/%s' % (group_domain, group_username) + b = urllib2.urlopen(url) + b.read(100) + b.close() + except urllib2.HTTPError: + pass + time.sleep(wait_on_fail) + wait_on_fail = wait_on_fail * 2 if wait_on_fail < 32 else 60 + continue + sys.stderr.write('Error: %s - %s' % (e[0]['reason'], e[0]['body'])) + sys.exit(e[0]['status']) + except gdata.apps.service.AppsForYourDomainException, e: + terminating_error = checkErrorCode(e) + if not terminating_error: + try_count = try_count + 1 + if try_count > 5: sys.stderr.write('Temporary error %s. 
Retry %s in %s seconds\n' % (str(e.error_code), try_count, str(wait_on_fail))) + time.sleep(wait_on_fail) + wait_on_fail = wait_on_fail * 2 if wait_on_fail < 32 else 60 + continue + else: + sys.stderr.write('Error: %s\n' % terminating_error) + sys.exit(e.error_code) + if try_count == 10: + sys.stderr.write('Giving up\n') + sys.exit(e.error_code) + print '' + print 'Group Settings:' + for setting in settings: + setting_key = re.sub(r'([A-Z])', r'_\1', setting.keys()[0]).lower() + setting_value = setting.values()[0] + if setting_value == None: + setting_value = '' + setting_value = setting_value + print ' %s: %s' % (setting_key, setting_value) + +def doGetNickNameInfo(alias_email=None): + if alias_email == None: + alias_email = sys.argv[3] + multi = getMultiDomainObject() + try_count = 0 + wait_on_fail = .5 + while try_count < 10: + try: + result = multi.RetrieveAlias(alias_email=alias_email) + break + except gdata.apps.service.AppsForYourDomainException, e: + terminating_error = checkErrorCode(e) + if not terminating_error: + try_count = try_count + 1 + if try_count > 5: sys.stderr.write('Temporary error %s. Retry %s in %s seconds\n' % (str(e.error_code), try_count, str(wait_on_fail))) + time.sleep(wait_on_fail) + wait_on_fail = wait_on_fail * 2 if wait_on_fail < 32 else 60 + continue + else: + sys.stderr.write('Error: %s\n' % terminating_error) + sys.exit(e.error_code) + if try_count == 10: + sys.stderr.write('Giving up\n') + sys.exit(e.error_code) + print ' Alias Email: '+result['aliasEmail'] + print ' User Email: '+result['userEmail'] + +def doGetResourceCalendarInfo(): + id = sys.argv[3] + rescal = getResCalObject() + try_count = 0 + wait_on_fail = .5 + while try_count < 10: + try: + result = rescal.RetrieveResourceCalendar(id) + break + except gdata.apps.service.AppsForYourDomainException, e: + terminating_error = checkErrorCode(e) + if not terminating_error: + try_count = try_count + 1 + if try_count > 5: sys.stderr.write('Temporary error %s. Retry %s in %s seconds\n' % (str(e.error_code), try_count, str(wait_on_fail))) + time.sleep(wait_on_fail) + wait_on_fail = wait_on_fail * 2 if wait_on_fail < 32 else 60 + continue + else: + sys.stderr.write('Error: %s\n' % terminating_error) + sys.exit(e.error_code) + if try_count == 10: + sys.stderr.write('Giving up\n') + sys.exit(e.error_code) + print ' Resource ID: '+result['resourceId'] + print ' Common Name: '+result['resourceCommonName'] + print ' Email: '+result['resourceEmail'] + try: + print ' Type: '+result['resourceType'] + except KeyError: + print ' Type: ' + try: + print ' Description: '+result['resourceDescription'] + except KeyError: + print ' Description: ' + +def doGetOrgInfo(): + name = sys.argv[3] + org = getOrgObject() + try_count = 0 + wait_on_fail = .5 + while try_count < 10: + try: + result = org.RetrieveOrganizationUnit(name) + break + except gdata.apps.service.AppsForYourDomainException, e: + terminating_error = checkErrorCode(e) + if not terminating_error: + try_count = try_count + 1 + if try_count > 5: sys.stderr.write('Temporary error %s. 
Retry %s in %s seconds\n' % (str(e.error_code), try_count, str(wait_on_fail))) + time.sleep(wait_on_fail) + wait_on_fail = wait_on_fail * 2 if wait_on_fail < 32 else 60 + continue + else: + sys.stderr.write('Error: %s\n' % terminating_error) + sys.exit(e.error_code) + if try_count == 10: + sys.stderr.write('Giving up\n') + sys.exit(e.error_code) + print 'Organization Unit: '+result['name'] + if result['description'] != None: + print 'Description: '+result['description'] + else: + print 'Description: ' + if result['parentOrgUnitPath'] != None: + print 'Parent Org: '+result['parentOrgUnitPath'] + else: + print 'Parent Org: /' + print 'Block Inheritance: '+result['blockInheritance'] + result2 = org.RetrieveAllOrganizationUnitUsers(name) + print 'Users: ' + for user in result2: + print ' '+user['orgUserEmail'] + +def doUpdateDomain(): + adminObj = getAdminSettingsObject() + command = sys.argv[3].lower() + if command == 'language': + language = sys.argv[4] + try_count = 0 + wait_on_fail = .5 + while try_count < 10: + try: + adminObj.UpdateDefaultLanguage(language) + break + except gdata.apps.service.AppsForYourDomainException, e: + terminating_error = checkErrorCode(e) + if not terminating_error: + try_count = try_count + 1 + if try_count > 5: sys.stderr.write('Temporary error %s. Retry %s in %s seconds\n' % (str(e.error_code), try_count, str(wait_on_fail))) + time.sleep(wait_on_fail) + wait_on_fail = wait_on_fail * 2 if wait_on_fail < 32 else 60 + continue + else: + sys.stderr.write('Error: %s\n' % terminating_error) + sys.exit(e.error_code) + if try_count == 10: + sys.stderr.write('Giving up\n') + sys.exit(e.error_code) + elif command == 'name': + name = sys.argv[4] + try_count = 0 + wait_on_fail = .5 + while try_count < 10: + try: + adminObj.UpdateOrganizationName(name) + break + except gdata.apps.service.AppsForYourDomainException, e: + terminating_error = checkErrorCode(e) + if not terminating_error: + try_count = try_count + 1 + if try_count > 5: sys.stderr.write('Temporary error %s. Retry %s in %s seconds\n' % (str(e.error_code), try_count, str(wait_on_fail))) + time.sleep(wait_on_fail) + wait_on_fail = wait_on_fail * 2 if wait_on_fail < 32 else 60 + continue + else: + sys.stderr.write('Error: %s\n' % terminating_error) + sys.exit(e.error_code) + if try_count == 10: + sys.stderr.write('Giving up\n') + sys.exit(e.error_code) + elif command == 'admin_secondary_email': + admin_secondary_email = sys.argv[4] + try_count = 0 + wait_on_fail = .5 + while try_count < 10: + try: + adminObj.UpdateAdminSecondaryEmail(admin_secondary_email) + break + except gdata.apps.service.AppsForYourDomainException, e: + terminating_error = checkErrorCode(e) + if not terminating_error: + try_count = try_count + 1 + if try_count > 5: sys.stderr.write('Temporary error %s. 
Retry %s in %s seconds\n' % (str(e.error_code), try_count, str(wait_on_fail))) + time.sleep(wait_on_fail) + wait_on_fail = wait_on_fail * 2 if wait_on_fail < 32 else 60 + continue + else: + sys.stderr.write('Error: %s\n' % terminating_error) + sys.exit(e.error_code) + if try_count == 10: + sys.stderr.write('Giving up\n') + sys.exit(e.error_code) + elif command == 'logo': + logo_file = sys.argv[4] + try: + fp = open(logo_file, 'rb') + logo_image = fp.read() + fp.close() + except IOError: + print 'Error: can\'t open file %s' % logo_file + sys.exit(11) + try_count = 0 + wait_on_fail = .5 + while try_count < 10: + try: + adminObj.UpdateDomainLogo(logo_image) + break + except gdata.apps.service.AppsForYourDomainException, e: + terminating_error = checkErrorCode(e) + if not terminating_error: + try_count = try_count + 1 + if try_count > 5: sys.stderr.write('Temporary error %s. Retry %s in %s seconds\n' % (str(e.error_code), try_count, str(wait_on_fail))) + time.sleep(wait_on_fail) + wait_on_fail = wait_on_fail * 2 if wait_on_fail < 32 else 60 + continue + else: + sys.stderr.write('Error: %s\n' % terminating_error) + sys.exit(e.error_code) + if try_count == 10: + sys.stderr.write('Giving up\n') + sys.exit(e.error_code) + elif command == 'cname_verify': + try_count = 0 + wait_on_fail = .5 + while try_count < 10: + try: + result = adminObj.UpdateCNAMEVerificationStatus() + break + except gdata.apps.service.AppsForYourDomainException, e: + terminating_error = checkErrorCode(e) + if not terminating_error: + try_count = try_count + 1 + if try_count > 5: sys.stderr.write('Temporary error %s. Retry %s in %s seconds\n' % (str(e.error_code), try_count, str(wait_on_fail))) + time.sleep(wait_on_fail) + wait_on_fail = wait_on_fail * 2 if wait_on_fail < 32 else 60 + continue + else: + if e[0]['status'] == 400: + print 'Record Name: Error - Google disabled this function.' + print 'Verification Method: Error - Google disabled this function.' + print 'Verified: Error - Google disabled this function.' + break + sys.stderr.write('Error: %s\n' % terminating_error) + sys.exit(e.error_code) + if try_count == 10: + sys.stderr.write('Giving up\n') + sys.exit(e.error_code) + try: + print 'Record Name: %s' % result['recordName'] + print 'Verification Method: %s' % result['verificationMethod'] + print 'Verified: %s' % result['verified'] + except UnboundLocalError: + pass + elif command == 'mx_verify': + try_count = 0 + wait_on_fail = .5 + while try_count < 10: + try: + result = adminObj.UpdateMXVerificationStatus() + break + except gdata.apps.service.AppsForYourDomainException, e: + terminating_error = checkErrorCode(e) + if not terminating_error: + try_count = try_count + 1 + if try_count > 5: sys.stderr.write('Temporary error %s. 
Retry %s in %s seconds\n' % (str(e.error_code), try_count, str(wait_on_fail))) + time.sleep(wait_on_fail) + wait_on_fail = wait_on_fail * 2 if wait_on_fail < 32 else 60 + continue + else: + sys.stderr.write('Error: %s\n' % terminating_error) + sys.exit(e.error_code) + if try_count == 10: + sys.stderr.write('Giving up\n') + sys.exit(e.error_code) + print 'Verification Method: %s' % result['verificationMethod'] + print 'Verified: %s' % result['verified'] + elif command == 'sso_settings': + enableSSO = samlSignonUri = samlLogoutUri = changePasswordUri = ssoWhitelist = useDomainSpecificIssuer = None + i = 4 + while i < len(sys.argv): + if sys.argv[i].lower() == 'enabled': + if sys.argv[i+1].lower() == 'true': + enableSSO = True + elif sys.argv[i+1].lower() == 'false': + enableSSO = False + else: + print 'Error: value for enabled must be true or false, got %s' % sys.argv[i+1] + exit(9) + i = i + 2 + elif sys.argv[i].lower() == 'sign_on_uri': + samlSignonUri = sys.argv[i+1] + i = i + 2 + elif sys.argv[i].lower() == 'sign_out_uri': + samlLogoutUri = sys.argv[i+1] + i = i + 2 + elif sys.argv[i].lower() == 'password_uri': + changePasswordUri = sys.argv[i+1] + i = i + 2 + elif sys.argv[i].lower() == 'whitelist': + ssoWhitelist = sys.argv[i+1] + i = i + 2 + elif sys.argv[i].lower() == 'use_domain_specific_issuer': + if sys.argv[i+1].lower() == 'true': + useDomainSpecificIssuer = True + elif sys.argv[i+1].lower() == 'false': + useDomainSpecificIssuer = False + else: + print 'Error: value for use_domain_specific_issuer must be true or false, got %s' % sys.argv[i+1] + sys.exit(9) + i = i + 2 + else: + print 'Error: unknown option for "gam update domain sso_settings...": %s' % sys.argv[i] + sys.exit(9) + try_count = 0 + wait_on_fail = .5 + while try_count < 10: + try: + adminObj.UpdateSSOSettings(enableSSO=enableSSO, samlSignonUri=samlSignonUri, samlLogoutUri=samlLogoutUri, changePasswordUri=changePasswordUri, ssoWhitelist=ssoWhitelist, useDomainSpecificIssuer=useDomainSpecificIssuer) + break + except gdata.apps.service.AppsForYourDomainException, e: + terminating_error = checkErrorCode(e) + if not terminating_error: + try_count = try_count + 1 + if try_count > 5: sys.stderr.write('Temporary error %s. Retry %s in %s seconds\n' % (str(e.error_code), try_count, str(wait_on_fail))) + time.sleep(wait_on_fail) + wait_on_fail = wait_on_fail * 2 if wait_on_fail < 32 else 60 + continue + else: + sys.stderr.write('Error: %s\n' % terminating_error) + sys.exit(e.error_code) + if try_count == 10: + sys.stderr.write('Giving up\n') + sys.exit(e.error_code) + elif command == 'sso_key': + key_file = sys.argv[4] + try: + fp = open(key_file, 'rb') + key_data = fp.read() + fp.close() + except IOError: + print 'Error: can\'t open file %s' % key_file + sys.exit(11) + try_count = 0 + wait_on_fail = .5 + while try_count < 10: + try: + adminObj.UpdateSSOKey(key_data) + break + except gdata.apps.service.AppsForYourDomainException, e: + terminating_error = checkErrorCode(e) + if not terminating_error: + try_count = try_count + 1 + if try_count > 5: sys.stderr.write('Temporary error %s. 
Retry %s in %s seconds\n' % (str(e.error_code), try_count, str(wait_on_fail))) + time.sleep(wait_on_fail) + wait_on_fail = wait_on_fail * 2 if wait_on_fail < 32 else 60 + continue + else: + sys.stderr.write('Error: %s\n' % terminating_error) + sys.exit(e.error_code) + if try_count == 10: + sys.stderr.write('Giving up\n') + sys.exit(e.error_code) + elif command == 'user_migrations': + value = sys.argv[4].lower() + if value != 'true' and value != 'false': + print 'Error: value for user_migrations must be true or false, got %s' % sys.argv[4] + sys.exit(9) + try_count = 0 + wait_on_fail = .5 + while try_count < 10: + try: + result = adminObj.UpdateUserMigrationStatus(value) + break + except gdata.apps.service.AppsForYourDomainException, e: + terminating_error = checkErrorCode(e) + if not terminating_error: + try_count = try_count + 1 + if try_count > 5: sys.stderr.write('Temporary error %s. Retry %s in %s seconds\n' % (str(e.error_code), try_count, str(wait_on_fail))) + time.sleep(wait_on_fail) + wait_on_fail = wait_on_fail * 2 if wait_on_fail < 32 else 60 + continue + else: + sys.stderr.write('Error: %s\n' % terminating_error) + sys.exit(e.error_code) + if try_count == 10: + sys.stderr.write('Giving up\n') + sys.exit(e.error_code) + elif command == 'outbound_gateway': + gateway = sys.argv[4] + mode = sys.argv[6].upper() + try_count = 0 + wait_on_fail = .5 + while try_count < 10: + try: + result = adminObj.UpdateOutboundGatewaySettings(gateway, mode) + break + except gdata.apps.service.AppsForYourDomainException, e: + terminating_error = checkErrorCode(e) + if not terminating_error: + try_count = try_count + 1 + if try_count > 5: sys.stderr.write('Temporary error %s. Retry %s in %s seconds\n' % (str(e.error_code), try_count, str(wait_on_fail))) + time.sleep(wait_on_fail) + wait_on_fail = wait_on_fail * 2 if wait_on_fail < 32 else 60 + continue + else: + sys.stderr.write('Error: %s\n' % terminating_error) + sys.exit(e.error_code) + except TypeError, e: + if gateway == "": + break + else: + print e + exit(3) + if try_count == 10: + sys.stderr.write('Giving up\n') + sys.exit(e.error_code) + elif command == 'email_route': + i = 4 + while i < len(sys.argv): + if sys.argv[i].lower() == 'destination': + destination = sys.argv[i+1] + i = i + 2 + elif sys.argv[i].lower() == 'rewrite_to': + rewrite_to = sys.argv[i+1].lower() + if rewrite_to == 'true': + rewrite_to = True + elif rewrite_to == 'false': + rewrite_to = False + else: + print 'Error: value for rewrite_to must be true or false, got %s' % sys.argv[i+1] + sys.exit(9) + i = i + 2 + elif sys.argv[i].lower() == 'enabled': + enabled = sys.argv[i+1].lower() + if enabled == 'true': + enabled = True + elif enabled == 'false': + enabled = False + else: + print 'Error: value for enabled must be true or false, got %s' % sys.argv[i+1] + sys.exit(9) + i = i + 2 + elif sys.argv[i].lower() == 'bounce_notifications': + bounce_notifications = sys.argv[i+1].lower() + if bounce_notifications == 'true': + bounce_notifications = True + elif bounce_notifications == 'false': + bounce_notifications = False + else: + print 'Error: value for bounce_notifications must be true or false, got %s' % sys.argv[i+1] + sys.exit(9) + i = i + 2 + elif sys.argv[i].lower() == 'account_handling': + account_handling = sys.argv[i+1].lower() + if account_handling == 'all_accounts': + account_handling = 'allAccounts' + elif account_handling == 'provisioned_accounts': + account_handling = 'provisionedAccounts' + elif account_handling == 'unknown_accounts': + account_handling = 
'unknownAccounts' + else: + print 'Error: value for account_handling must be all_accounts, provisioned_account or unknown_accounts. Got %s' % sys.argv[i+1] + sys.exit(9) + i = i + 2 + else: + print 'Error: invalid setting for "gam update domain email_route..."' + sys.exit(10) + try_count = 0 + wait_on_fail = .5 + while try_count < 10: + try: + response = adminObj.AddEmailRoute(routeDestination=destination, routeRewriteTo=rewrite_to, routeEnabled=enabled, bounceNotifications=bounce_notifications, accountHandling=account_handling) + break + except gdata.apps.service.AppsForYourDomainException, e: + terminating_error = checkErrorCode(e) + if not terminating_error: + try_count = try_count + 1 + if try_count > 5: sys.stderr.write('Temporary error %s. Retry %s in %s seconds\n' % (str(e.error_code), try_count, str(wait_on_fail))) + time.sleep(wait_on_fail) + wait_on_fail = wait_on_fail * 2 if wait_on_fail < 32 else 60 + continue + else: + sys.stderr.write('Error: %s\n' % terminating_error) + sys.exit(e.error_code) + if try_count == 10: + sys.stderr.write('Giving up\n') + sys.exit(e.error_code) + else: + print 'Error: that is not a valid "gam update domain" command' + +def doGetDomainInfo(): + adminObj = getAdminSettingsObject() + if len(sys.argv) > 4 and sys.argv[3].lower() == 'logo': + target_file = sys.argv[4] + logo_image = adminObj.GetDomainLogo() + try: + fp = open(target_file, 'wb') + fp.write(logo_image) + fp.close() + except IOError: + print 'Error: can\'t open file %s for writing' % target_file + sys.exit(11) + sys.exit(0) + print 'Google Apps Domain: ', adminObj.domain + try_count = 0 + wait_on_fail = .5 + while try_count < 10: + try: + default_language = adminObj.GetDefaultLanguage() + break + except gdata.apps.service.AppsForYourDomainException, e: + terminating_error = checkErrorCode(e) + if not terminating_error: + try_count = try_count + 1 + if try_count > 5: sys.stderr.write('Temporary error %s. Retry %s in %s seconds\n' % (str(e.error_code), try_count, str(wait_on_fail))) + time.sleep(wait_on_fail) + wait_on_fail = wait_on_fail * 2 if wait_on_fail < 32 else 60 + continue + else: + sys.stderr.write('Error: %s\n' % terminating_error) + sys.exit(e.error_code) + if try_count == 10: + sys.stderr.write('Giving up\n') + sys.exit(e.error_code) + print 'Default Language: %s' % default_language + try_count = 0 + wait_on_fail = .5 + while try_count < 10: + try: + org_name = adminObj.GetOrganizationName() + break + except gdata.apps.service.AppsForYourDomainException, e: + terminating_error = checkErrorCode(e) + if not terminating_error: + try_count = try_count + 1 + if try_count > 5: sys.stderr.write('Temporary error %s. Retry %s in %s seconds\n' % (str(e.error_code), try_count, str(wait_on_fail))) + time.sleep(wait_on_fail) + wait_on_fail = wait_on_fail * 2 if wait_on_fail < 32 else 60 + continue + else: + sys.stderr.write('Error: %s\n' % terminating_error) + sys.exit(e.error_code) + if try_count == 10: + sys.stderr.write('Giving up\n') + sys.exit(e.error_code) + print 'Organization Name: %s' % org_name + try_count = 0 + wait_on_fail = .5 + while try_count < 10: + try: + max_users = adminObj.GetMaximumNumberOfUsers() + break + except gdata.apps.service.AppsForYourDomainException, e: + terminating_error = checkErrorCode(e) + if not terminating_error: + try_count = try_count + 1 + if try_count > 5: sys.stderr.write('Temporary error %s. 
Retry %s in %s seconds\n' % (str(e.error_code), try_count, str(wait_on_fail))) + time.sleep(wait_on_fail) + wait_on_fail = wait_on_fail * 2 if wait_on_fail < 32 else 60 + continue + else: + sys.stderr.write('Error: %s\n' % terminating_error) + sys.exit(e.error_code) + if try_count == 10: + sys.stderr.write('Giving up\n') + sys.exit(e.error_code) + print 'Maximum Users: %s' % max_users + try_count = 0 + wait_on_fail = .5 + while try_count < 10: + try: + current_users = adminObj.GetCurrentNumberOfUsers() + break + except gdata.apps.service.AppsForYourDomainException, e: + terminating_error = checkErrorCode(e) + if not terminating_error: + try_count = try_count + 1 + if try_count > 5: sys.stderr.write('Temporary error %s. Retry %s in %s seconds\n' % (str(e.error_code), try_count, str(wait_on_fail))) + time.sleep(wait_on_fail) + wait_on_fail = wait_on_fail * 2 if wait_on_fail < 32 else 60 + continue + else: + sys.stderr.write('Error: %s\n' % terminating_error) + sys.exit(e.error_code) + if try_count == 10: + sys.stderr.write('Giving up\n') + sys.exit(e.error_code) + print 'Current Users: %s' % current_users + try_count = 0 + wait_on_fail = .5 + while try_count < 10: + try: + is_dom_verified = adminObj.IsDomainVerified() + break + except gdata.apps.service.AppsForYourDomainException, e: + terminating_error = checkErrorCode(e) + if not terminating_error: + try_count = try_count + 1 + if try_count > 5: sys.stderr.write('Temporary error %s. Retry %s in %s seconds\n' % (str(e.error_code), try_count, str(wait_on_fail))) + time.sleep(wait_on_fail) + wait_on_fail = wait_on_fail * 2 if wait_on_fail < 32 else 60 + continue + else: + sys.stderr.write('Error: %s\n' % terminating_error) + sys.exit(e.error_code) + if try_count == 10: + sys.stderr.write('Giving up\n') + sys.exit(e.error_code) + print 'Domain is Verified: %s' % is_dom_verified + try_count = 0 + wait_on_fail = .5 + while try_count < 10: + try: + support_pin = adminObj.GetSupportPIN() + break + except gdata.apps.service.AppsForYourDomainException, e: + terminating_error = checkErrorCode(e) + if not terminating_error: + try_count = try_count + 1 + if try_count > 5: sys.stderr.write('Temporary error %s. Retry %s in %s seconds\n' % (str(e.error_code), try_count, str(wait_on_fail))) + time.sleep(wait_on_fail) + wait_on_fail = wait_on_fail * 2 if wait_on_fail < 32 else 60 + continue + else: + sys.stderr.write('Error: %s\n' % terminating_error) + sys.exit(e.error_code) + if try_count == 10: + sys.stderr.write('Giving up\n') + sys.exit(e.error_code) + print 'Support PIN: %s' % support_pin + try_count = 0 + wait_on_fail = .5 + while try_count < 10: + try: + domain_edition = adminObj.GetEdition() + break + except gdata.apps.service.AppsForYourDomainException, e: + terminating_error = checkErrorCode(e) + if not terminating_error: + try_count = try_count + 1 + if try_count > 5: sys.stderr.write('Temporary error %s. 
Retry %s in %s seconds\n' % (str(e.error_code), try_count, str(wait_on_fail))) + time.sleep(wait_on_fail) + wait_on_fail = wait_on_fail * 2 if wait_on_fail < 32 else 60 + continue + else: + sys.stderr.write('Error: %s\n' % terminating_error) + sys.exit(e.error_code) + if try_count == 10: + sys.stderr.write('Giving up\n') + sys.exit(e.error_code) + print 'Domain Edition: %s' % domain_edition + try_count = 0 + wait_on_fail = .5 + while try_count < 10: + try: + customer_pin = adminObj.GetCustomerPIN() + break + except gdata.apps.service.AppsForYourDomainException, e: + terminating_error = checkErrorCode(e) + if not terminating_error: + try_count = try_count + 1 + if try_count > 5: sys.stderr.write('Temporary error %s. Retry %s in %s seconds\n' % (str(e.error_code), try_count, str(wait_on_fail))) + time.sleep(wait_on_fail) + wait_on_fail = wait_on_fail * 2 if wait_on_fail < 32 else 60 + continue + else: + sys.stderr.write('Error: %s\n' % terminating_error) + sys.exit(e.error_code) + if try_count == 10: + sys.stderr.write('Giving up\n') + sys.exit(e.error_code) + print 'Customer PIN: %s' % customer_pin + try_count = 0 + wait_on_fail = .5 + while try_count < 10: + try: + creation_time = adminObj.GetCreationTime() + break + except gdata.apps.service.AppsForYourDomainException, e: + terminating_error = checkErrorCode(e) + if not terminating_error: + try_count = try_count + 1 + if try_count > 5: sys.stderr.write('Temporary error %s. Retry %s in %s seconds\n' % (str(e.error_code), try_count, str(wait_on_fail))) + time.sleep(wait_on_fail) + wait_on_fail = wait_on_fail * 2 if wait_on_fail < 32 else 60 + continue + else: + sys.stderr.write('Error: %s\n' % terminating_error) + sys.exit(e.error_code) + if try_count == 10: + sys.stderr.write('Giving up\n') + sys.exit(e.error_code) + print 'Domain Creation Time: %s' % creation_time + org = getOrgObject() + try_count = 0 + wait_on_fail = .5 + while try_count < 10: + try: + customer_id = org.RetrieveCustomerId()['customerId'] + break + except gdata.apps.service.AppsForYourDomainException, e: + terminating_error = checkErrorCode(e) + if not terminating_error: + try_count = try_count + 1 + if try_count > 5: sys.stderr.write('Temporary error %s. Retry %s in %s seconds\n' % (str(e.error_code), try_count, str(wait_on_fail))) + time.sleep(wait_on_fail) + wait_on_fail = wait_on_fail * 2 if wait_on_fail < 32 else 60 + continue + else: + sys.stderr.write('Error: %s\n' % terminating_error) + sys.exit(e.error_code) + if try_count == 10: + sys.stderr.write('Giving up\n') + sys.exit(e.error_code) + print 'Customer ID: %s' % customer_id + try_count = 0 + wait_on_fail = .5 + while try_count < 10: + try: + country_code = adminObj.GetCountryCode() + break + except gdata.apps.service.AppsForYourDomainException, e: + terminating_error = checkErrorCode(e) + if not terminating_error: + try_count = try_count + 1 + if try_count > 5: sys.stderr.write('Temporary error %s. 
Retry %s in %s seconds\n' % (str(e.error_code), try_count, str(wait_on_fail))) + time.sleep(wait_on_fail) + wait_on_fail = wait_on_fail * 2 if wait_on_fail < 32 else 60 + continue + else: + sys.stderr.write('Error: %s\n' % terminating_error) + sys.exit(e.error_code) + if try_count == 10: + sys.stderr.write('Giving up\n') + sys.exit(e.error_code) + print 'Domain Country Code: %s' % country_code + try_count = 0 + wait_on_fail = .5 + while try_count < 10: + try: + admin_sec_email = adminObj.GetAdminSecondaryEmail() + break + except gdata.apps.service.AppsForYourDomainException, e: + terminating_error = checkErrorCode(e) + if not terminating_error: + try_count = try_count + 1 + if try_count > 5: sys.stderr.write('Temporary error %s. Retry %s in %s seconds\n' % (str(e.error_code), try_count, str(wait_on_fail))) + time.sleep(wait_on_fail) + wait_on_fail = wait_on_fail * 2 if wait_on_fail < 32 else 60 + continue + else: + sys.stderr.write('Error: %s\n' % terminating_error) + sys.exit(e.error_code) + if try_count == 10: + sys.stderr.write('Giving up\n') + sys.exit(e.error_code) + print 'Admin Secondary Email: %s' % admin_sec_email + try_count = 0 + wait_on_fail = .5 + while try_count < 10: + try: + cnameverificationstatus = adminObj.GetCNAMEVerificationStatus() + break + except gdata.apps.service.AppsForYourDomainException, e: + terminating_error = checkErrorCode(e) + if not terminating_error: + try_count = try_count + 1 + if try_count > 5: sys.stderr.write('Temporary error %s. Retry %s in %s seconds\n' % (str(e.error_code), try_count, str(wait_on_fail))) + time.sleep(wait_on_fail) + wait_on_fail = wait_on_fail * 2 if wait_on_fail < 32 else 60 + continue + else: + if e[0]['status'] == 400: + print 'CNAME Verification Record Name: Error - Google disabled this function.' + print 'CNAME Verification Verified: Error - Google disabled this function.' + print 'CNAME Verification Method: Error - Google disabled this function.' + break + sys.stderr.write('Error: %s\n' % terminating_error) + sys.exit(e.error_code) + if try_count == 10: + sys.stderr.write('Giving up\n') + sys.exit(e.error_code) + try: + print 'CNAME Verification Record Name: %s' % cnameverificationstatus['recordName'] + print 'CNAME Verification Verified: %s' % cnameverificationstatus['verified'] + print 'CNAME Verification Method: %s' % cnameverificationstatus['verificationMethod'] + except UnboundLocalError: + pass + try_count = 0 + wait_on_fail = .5 + while try_count < 10: + try: + mxverificationstatus = adminObj.GetMXVerificationStatus() + break + except gdata.apps.service.AppsForYourDomainException, e: + terminating_error = checkErrorCode(e) + if not terminating_error: + try_count = try_count + 1 + if try_count > 5: sys.stderr.write('Temporary error %s. 
Retry %s in %s seconds\n' % (str(e.error_code), try_count, str(wait_on_fail))) + time.sleep(wait_on_fail) + wait_on_fail = wait_on_fail * 2 if wait_on_fail < 32 else 60 + continue + else: + sys.stderr.write('Error: %s\n' % terminating_error) + sys.exit(e.error_code) + if try_count == 10: + sys.stderr.write('Giving up\n') + sys.exit(e.error_code) + print 'MX Verification Verified: ', mxverificationstatus['verified'] + print 'MX Verification Method: ', mxverificationstatus['verificationMethod'] + try_count = 0 + wait_on_fail = .5 + while try_count < 10: + try: + ssosettings = adminObj.GetSSOSettings() + break + except gdata.apps.service.AppsForYourDomainException, e: + terminating_error = checkErrorCode(e) + if not terminating_error: + try_count = try_count + 1 + if try_count > 5: sys.stderr.write('Temporary error %s. Retry %s in %s seconds\n' % (str(e.error_code), try_count, str(wait_on_fail))) + time.sleep(wait_on_fail) + wait_on_fail = wait_on_fail * 2 if wait_on_fail < 32 else 60 + continue + else: + sys.stderr.write('Error: %s\n' % terminating_error) + sys.exit(e.error_code) + if try_count == 10: + sys.stderr.write('Giving up\n') + sys.exit(e.error_code) + print 'SSO Enabled: ', ssosettings['enableSSO'] + print 'SSO Signon Page: ', ssosettings['samlSignonUri'] + print 'SSO Logout Page: ', ssosettings['samlLogoutUri'] + print 'SSO Password Page: ', ssosettings['changePasswordUri'] + print 'SSO Whitelist IPs: ', ssosettings['ssoWhitelist'] + print 'SSO Use Domain Specific Issuer: ', ssosettings['useDomainSpecificIssuer'] + try_count = 0 + wait_on_fail = .5 + while try_count < 10: + try: + ssokey = adminObj.GetSSOKey() + break + except gdata.apps.service.AppsForYourDomainException, e: + terminating_error = checkErrorCode(e) + if not terminating_error: + try_count = try_count + 1 + if try_count > 5: sys.stderr.write('Temporary error %s. Retry %s in %s seconds\n' % (str(e.error_code), try_count, str(wait_on_fail))) + time.sleep(wait_on_fail) + wait_on_fail = wait_on_fail * 2 if wait_on_fail < 32 else 60 + continue + elif terminating_error == '1408 - Invalid SSO Signing Key': # This always gets returned if SSO is disabled + ssokey = {} + break + else: + sys.stderr.write('Error: %s\n' % terminating_error) + sys.exit(e.error_code) + if try_count == 10: + sys.stderr.write('Giving up\n') + sys.exit(e.error_code) + try: + algorithm = str(ssokey['algorithm']) + print 'SSO Key Algorithm: ' + algorithm + except KeyError: + pass + try: + format = str(ssokey['format']) + print 'SSO Key Format: ' + format + except KeyError: + pass + try: + modulus = str(ssokey['modulus']) + print 'SSO Key Modulus: ' + modulus + except KeyError: + pass + try: + exponent = str(ssokey['exponent']) + print 'SSO Key Exponent: ' + exponent + except KeyError: + pass + try: + yValue = str(ssokey['yValue']) + print 'SSO Key yValue: ' + yValue + except KeyError: + pass + try: + signingKey = str(ssokey['signingKey']) + print 'Full SSO Key: ' + signingKey + except KeyError: + pass + try_count = 0 + wait_on_fail = .5 + while try_count < 10: + try: + migration_status = adminObj.IsUserMigrationEnabled() + break + except gdata.apps.service.AppsForYourDomainException, e: + terminating_error = checkErrorCode(e) + if not terminating_error: + try_count = try_count + 1 + if try_count > 5: sys.stderr.write('Temporary error %s. 
Retry %s in %s seconds\n' % (str(e.error_code), try_count, str(wait_on_fail))) + time.sleep(wait_on_fail) + wait_on_fail = wait_on_fail * 2 if wait_on_fail < 32 else 60 + continue + else: + sys.stderr.write('Error: %s\n' % terminating_error) + sys.exit(e.error_code) + if try_count == 10: + sys.stderr.write('Giving up\n') + sys.exit(e.error_code) + print 'User Migration Enabled: ', str(migration_status) + try_count = 0 + wait_on_fail = .5 + while try_count < 10: + try: + outbound_gateway_settings = {'smartHost': '', 'smtpMode': ''} # Initialize blank in case we get an 1801 Error + outbound_gateway_settings = adminObj.GetOutboundGatewaySettings() + break + except gdata.apps.service.AppsForYourDomainException, e: + terminating_error = checkErrorCode(e) + if not terminating_error: + try_count = try_count + 1 + if try_count > 5: sys.stderr.write('Temporary error %s. Retry %s in %s seconds\n' % (str(e.error_code), try_count, str(wait_on_fail))) + time.sleep(wait_on_fail) + wait_on_fail = wait_on_fail * 2 if wait_on_fail < 32 else 60 + continue + elif e.error_code == 1801: + break + else: + sys.stderr.write('Error: %s\n' % terminating_error) + sys.exit(e.error_code) + if try_count == 10: + sys.stderr.write('Giving up\n') + sys.exit(e.error_code) + try: + print 'Outbound Gateway Smart Host: ', outbound_gateway_settings['smartHost'] + except KeyError: + print '' + try: + print 'Outbound Gateway SMTP Mode: ', outbound_gateway_settings['smtpMode'] + except KeyError: + print '' + +def doDeleteUser(): + user_email = sys.argv[3] + multi = getMultiDomainObject() + if user_email.find('@') == -1: + user_email = '%s@%s' % (user_email, multi.domain) + do_rename = False + try: + if sys.argv[4].lower() == 'norename': + do_rename = False + elif sys.argv[4].lower() == 'dorename': + do_rename = True + except IndexError: + pass + print "Deleting account for %s" % (user_email) + #Rename the user to a random string, this allows the user to be recreated + #immediately instead of waiting the usual 5 days + user_to_delete = user_email + if do_rename: + timestamp = time.strftime("%Y%m%d%H%M%S") + user_name = user_email[:user_email.find('@')] + user_domain = user_email[user_email.find('@')+1:] + renameduser = user_name[:43]+'-'+timestamp+'-' # include max 43 chars of username so there's room for datestamp and some randomness + randomstring = ''.join(random.sample('abcdefghijklmnopqrstuvwxyz0123456789', 25)) + renameduser = renameduser+randomstring + renameduser = renameduser[:64] + renameduser_email = '%s@%s' % (renameduser, user_domain) + try_count = 0 + wait_on_fail = .5 + while try_count < 10: + try: + rename_result = multi.RenameUser(old_email=user_email, new_email=renameduser_email) + break + except gdata.apps.service.AppsForYourDomainException, e: + terminating_error = checkErrorCode(e) + if not terminating_error: + try_count = try_count + 1 + if try_count > 5: sys.stderr.write('Temporary error %s. 
Retry %s in %s seconds\n' % (str(e.error_code), try_count, str(wait_on_fail))) + time.sleep(wait_on_fail) + wait_on_fail = wait_on_fail * 2 if wait_on_fail < 32 else 60 + continue + else: + sys.stderr.write('Error: %s\n' % terminating_error) + sys.exit(e.error_code) + if try_count == 10: + sys.stderr.write('Giving up\n') + sys.exit(e.error_code) + print 'Renamed %s to %s' % (user_email, renameduser_email) + user_to_delete = renameduser_email + try_count = 0 + wait_on_fail = .5 + while try_count < 10: + try: + multi.DeleteUser(user_to_delete) + break + except gdata.apps.service.AppsForYourDomainException, e: + terminating_error = checkErrorCode(e) + if not terminating_error: + try_count = try_count + 1 + if try_count > 5: sys.stderr.write('Temporary error %s. Retry %s in %s seconds\n' % (str(e.error_code), try_count, str(wait_on_fail))) + time.sleep(wait_on_fail) + wait_on_fail = wait_on_fail * 2 if wait_on_fail < 32 else 60 + continue + else: + sys.stderr.write('Error: %s\n' % terminating_error) + sys.exit(e.error_code) + if try_count == 10: + sys.stderr.write('Giving up\n') + sys.exit(e.error_code) + print 'Deleted user %s' % (user_to_delete) + +def doDeleteGroup(): + group = sys.argv[3] + groupObj = getGroupsObject() + print "Deleting group %s" % group + try_count = 0 + wait_on_fail = .5 + while try_count < 10: + try: + groupObj.DeleteGroup(group) + break + except gdata.apps.service.AppsForYourDomainException, e: + terminating_error = checkErrorCode(e) + if not terminating_error: + try_count = try_count + 1 + if try_count > 5: sys.stderr.write('Temporary error %s. Retry %s in %s seconds\n' % (str(e.error_code), try_count, str(wait_on_fail))) + time.sleep(wait_on_fail) + wait_on_fail = wait_on_fail * 2 if wait_on_fail < 32 else 60 + continue + else: + sys.stderr.write('Error: %s\n' % terminating_error) + sys.exit(e.error_code) + if try_count == 10: + sys.stderr.write('Giving up\n') + sys.exit(e.error_code) + +def doDeleteNickName(alias_email=None): + if alias_email == None: + alias_email = sys.argv[3] + multi = getMultiDomainObject() + print "Deleting alias %s" % alias_email + try_count = 0 + wait_on_fail = .5 + while try_count < 10: + try: + multi.DeleteAlias(alias_email=alias_email) + break + except gdata.apps.service.AppsForYourDomainException, e: + terminating_error = checkErrorCode(e) + if not terminating_error: + try_count = try_count + 1 + if try_count > 5: sys.stderr.write('Temporary error %s. Retry %s in %s seconds\n' % (str(e.error_code), try_count, str(wait_on_fail))) + time.sleep(wait_on_fail) + wait_on_fail = wait_on_fail * 2 if wait_on_fail < 32 else 60 + continue + else: + sys.stderr.write('Error: %s\n' % terminating_error) + sys.exit(e.error_code) + if try_count == 10: + sys.stderr.write('Giving up\n') + sys.exit(e.error_code) + +def doDeleteResourceCalendar(): + res_id = sys.argv[3] + rescal = getResCalObject() + print "Deleting resource calendar %s" % res_id + try_count = 0 + wait_on_fail = .5 + while try_count < 10: + try: + rescal.DeleteResourceCalendar(res_id) + break + except gdata.apps.service.AppsForYourDomainException, e: + terminating_error = checkErrorCode(e) + if not terminating_error: + try_count = try_count + 1 + if try_count > 5: sys.stderr.write('Temporary error %s. 
Retry %s in %s seconds\n' % (str(e.error_code), try_count, str(wait_on_fail))) + time.sleep(wait_on_fail) + wait_on_fail = wait_on_fail * 2 if wait_on_fail < 32 else 60 + continue + else: + sys.stderr.write('Error: %s\n' % terminating_error) + sys.exit(e.error_code) + if try_count == 10: + sys.stderr.write('Giving up\n') + sys.exit(e.error_code) + +def doDeleteOrg(): + name = sys.argv[3] + org = getOrgObject() + print "Deleting organization %s" % name + try_count = 0 + wait_on_fail = .5 + while try_count < 10: + try: + org.DeleteOrganizationUnit(name) + break + except gdata.apps.service.AppsForYourDomainException, e: + terminating_error = checkErrorCode(e) + if not terminating_error: + try_count = try_count + 1 + if try_count > 5: sys.stderr.write('Temporary error %s. Retry %s in %s seconds\n' % (str(e.error_code), try_count, str(wait_on_fail))) + time.sleep(wait_on_fail) + wait_on_fail = wait_on_fail * 2 if wait_on_fail < 32 else 60 + continue + else: + sys.stderr.write('Error: %s\n' % terminating_error) + sys.exit(e.error_code) + if try_count == 10: + sys.stderr.write('Giving up\n') + sys.exit(e.error_code) + +def doPrintPostini(): + if os.path.isfile(getGamPath()+'postini-format.txt'): + postini_format_file = open(getGamPath()+'postini-format.txt', 'rb') + user_template = postini_format_file.readline()[0:-1] + alias_template = postini_format_file.readline()[0:-1] + group_template = postini_format_file.readline() + else: + user_template = 'adduser %user%' + alias_template = 'addalias %user%, %alias%' + group_template = 'addalias %list_owner%, %group%' + try: + list_owner = sys.argv[3] + except IndexError: + print 'You must include an email address that will own all group addresses' + sys.exit(3) + org = getOrgObject() + sys.stderr.write("Getting all users in the %s organization (may take some time on a large Google Apps account)..." % org.domain) + try_count = 0 + wait_on_fail = .5 + while try_count < 10: + try: + users = org.RetrieveAllOrganizationUsers() + break + except gdata.apps.service.AppsForYourDomainException, e: + terminating_error = checkErrorCode(e) + if not terminating_error: + try_count = try_count + 1 + if try_count > 5: sys.stderr.write('Temporary error %s. Retry %s in %s seconds\n' % (str(e.error_code), try_count, str(wait_on_fail))) + time.sleep(wait_on_fail) + wait_on_fail = wait_on_fail * 2 if wait_on_fail < 32 else 60 + continue + else: + sys.stderr.write('Error: %s\n' % terminating_error) + sys.exit(e.error_code) + if try_count == 10: + sys.stderr.write('Giving up\n') + sys.exit(e.error_code) + sys.stderr.write("done.\r\n") + multi = getMultiDomainObject() + sys.stderr.write("Getting all email aliases in the organization...") + try_count = 0 + wait_on_fail = .5 + while try_count < 10: + try: + aliases = multi.RetrieveAllAliases() + break + except gdata.apps.service.AppsForYourDomainException, e: + terminating_error = checkErrorCode(e) + if not terminating_error: + try_count = try_count + 1 + if try_count > 5: sys.stderr.write('Temporary error %s. 
Retry %s in %s seconds\n' % (str(e.error_code), try_count, str(wait_on_fail))) + time.sleep(wait_on_fail) + wait_on_fail = wait_on_fail * 2 if wait_on_fail < 32 else 60 + continue + else: + sys.stderr.write('Error: %s\n' % terminating_error) + sys.exit(e.error_code) + if try_count == 10: + sys.stderr.write('Giving up\n') + sys.exit(e.error_code) + sys.stderr.write("done.\r\n") + groupsObj = getGroupsObject() + sys.stderr.write("Getting all groups in the organization...") + try_count = 0 + wait_on_fail = .5 + while try_count < 10: + try: + groups = groupsObj.RetrieveAllGroups() + break + except gdata.apps.service.AppsForYourDomainException, e: + terminating_error = checkErrorCode(e) + if not terminating_error: + try_count = try_count + 1 + if try_count > 5: sys.stderr.write('Temporary error %s. Retry %s in %s seconds\n' % (str(e.error_code), try_count, str(wait_on_fail))) + time.sleep(wait_on_fail) + wait_on_fail = wait_on_fail * 2 if wait_on_fail < 32 else 60 + continue + else: + sys.stderr.write('Error: %s\n' % terminating_error) + sys.exit(e.error_code) + if try_count == 10: + sys.stderr.write('Giving up\n') + sys.exit(e.error_code) + sys.stderr.write("done.\r\n") + print "# Begin Users" + print "" + for user in users: + if user['orgUserEmail'][:2] == '.@' or user['orgUserEmail'][:11] == 'gcc_websvc@' or user['orgUserEmail'][-16:] == '@gtempaccount.com': # not real users, skip em + continue + if user['orgUnitPath'] is None: + user['orgUnitPath'] = '' + print user_template.replace('%user%', str(user['orgUserEmail'])).replace('%ou%', str(user['orgUnitPath'])) + print "" + print "# Begin Aliases" + print "" + for alias in aliases: + print alias_template.replace('%user%', str(alias['userEmail'])).replace('%alias%', str(alias['aliasEmail'])) + print "" + print "# Begin Groups" + print "" + for group in groups: + print group_template.replace('%group%', str(group['groupId'])).replace('%name%', str(group['groupName'])).replace('%description%', str(group['description'])).replace('%list_owner%', list_owner) + +def doPrintUsers(): + org = getOrgObject() + i = 3 + getUserFeed = getNickFeed = getGroupFeed = False + firstname = lastname = username = ou = suspended = changepassword = agreed2terms = admin = nicknames = groups = False + user_attributes = [] + # the titles list ensures the CSV output has its parameters in the specified order. + # Python's dicts can be listed in any order, and the order often changes between the + # header (user_attributes[0]) and the actual data rows. 
+ titles = ['Email'] + user_attributes.append({'Email': 'Email'}) + while i < len(sys.argv): + if sys.argv[i].lower() == 'firstname': + getUserFeed = True + firstname = True + user_attributes[0].update(Firstname='Firstname') + titles.append('Firstname') + i = i + 1 + elif sys.argv[i].lower() == 'lastname': + getUserFeed = True + lastname = True + user_attributes[0].update(Lastname='Lastname') + titles.append('Lastname') + i = i + 1 + elif sys.argv[i].lower() == 'username': + username = True + user_attributes[0].update(Username='Username') + titles.append('Username') + i = i + 1 + elif sys.argv[i].lower() == 'ou': + ou = True + user_attributes[0].update(OU='OU') + titles.append('OU') + i = i + 1 + elif sys.argv[i].lower() == 'suspended': + getUserFeed = True + suspended = True + user_attributes[0].update(Suspended='Suspended') + titles.append('Suspended') + i = i + 1 + elif sys.argv[i].lower() == 'changepassword': + getUserFeed = True + changepassword = True + user_attributes[0].update(ChangePassword='ChangePassword') + titles.append('ChangePassword') + i = i + 1 + elif sys.argv[i].lower() == 'agreed2terms': + getUserFeed = True + agreed2terms = True + user_attributes[0].update(AgreedToTerms='AgreedToTerms') + titles.append('AgreedToTerms') + i = i + 1 + elif sys.argv[i].lower() == 'admin': + getUserFeed = True + admin = True + user_attributes[0].update(Admin='Admin') + titles.append('Admin') + i = i + 1 + elif sys.argv[i].lower() == 'nicknames' or sys.argv[i].lower() == 'aliases': + getNickFeed = True + nicknames = True + user_attributes[0].update(Aliases='Aliases') + titles.append('Aliases') + i = i + 1 + elif sys.argv[i].lower() == 'groups': + getGroupFeed = True + groups = True + user_attributes[0].update(Groups='Groups') + titles.append('Groups') + i = i + 1 + else: + showUsage() + exit(5) + sys.stderr.write("Getting all users in the %s organization (may take some time on a large Google Apps account)... " % org.domain) + try_count = 0 + wait_on_fail = .5 + while try_count < 10: + try: + all_users = org.RetrieveAllOrganizationUsers() + break + except gdata.apps.service.AppsForYourDomainException, e: + terminating_error = checkErrorCode(e) + if not terminating_error: + try_count = try_count + 1 + if try_count > 5: sys.stderr.write('Temporary error %s. Retry %s in %s seconds\n' % (str(e.error_code), try_count, str(wait_on_fail))) + time.sleep(wait_on_fail) + wait_on_fail = wait_on_fail * 2 if wait_on_fail < 32 else 60 + continue + else: + sys.stderr.write('Error: %s\n' % terminating_error) + sys.exit(e.error_code) + if try_count == 10: + sys.stderr.write('Giving up\n') + sys.exit(e.error_code) + sys.stderr.write("done.\r\n") + for user in all_users: + email = user['orgUserEmail'].lower() + domain = email[email.find('@')+1:] + if domain == 'gtempaccount.com': + continue + if email[:2] == '.@' or email[:11] == 'gcc_websvc@' or email[:27] == 'secure-data-connector-user@': # not real users, skip em + continue + user_attributes.append({'Email': email}) + location = 0 + try: + location = user_attributes.index({'Email': email}) + if username: + user_attributes[location].update(Username=email[:email.find('@')]) + if ou: + user_ou = user['orgUnitPath'] + if user_ou == None: + user_ou = '' + user_attributes[location].update(OU=user_ou) + except ValueError: + raise + del(email, domain) + del(all_users) + total_users = len(user_attributes) - 1 + multi = getMultiDomainObject() + if getUserFeed: + sys.stderr.write("Getting details for all users... (may take some time on a large Google Apps account)... 
") + try_count = 0 + wait_on_fail = .5 + while try_count < 10: + try: + all_users = multi.RetrieveAllUsers() + break + except gdata.apps.service.AppsForYourDomainException, e: + terminating_error = checkErrorCode(e) + if not terminating_error: + try_count = try_count + 1 + if try_count > 5: sys.stderr.write('Temporary error %s. Retry %s in %s seconds\n' % (str(e.error_code), try_count, str(wait_on_fail))) + time.sleep(wait_on_fail) + wait_on_fail = wait_on_fail * 2 if wait_on_fail < 32 else 60 + continue + else: + sys.stderr.write('Error: %s\n' % terminating_error) + sys.exit(e.error_code) + except socket.error, e: + sys.stderr.write('Network Error, retrying\n') + continue + if try_count == 10: + sys.stderr.write('Giving up\n') + sys.exit(e.error_code) + sys.stderr.write("done.\n") + for user in all_users: + email = user['userEmail'].lower() + try: + location = 0 + gotLocation = False + while not gotLocation and location < len(user_attributes): + location = location + 1 + try: + if user_attributes[location]['Email'] == email: + gotLocation = True + except IndexError: + continue + if firstname: + userfirstname = user['firstName'] + if userfirstname == None: + userfirstname = '' + try: + user_attributes[location].update(Firstname=userfirstname) + except IndexError: + continue + if lastname: + userlastname = user['lastName'] + if userlastname == None: + userlastname = '' + try: + user_attributes[location].update(Lastname=userlastname) + except IndexError: + continue + if suspended: + try: + user_attributes[location].update(Suspended=user['isSuspended']) + except IndexError: + continue + if agreed2terms: + try: + user_attributes[location].update(AgreedToTerms=user['agreedToTerms']) + except IndexError: + continue + if changepassword: + try: + user_attributes[location].update(ChangePassword=user['isChangePasswordAtNextLogin']) + except IndexError: + continue + if admin: + try: + user_attributes[location].update(Admin=user['isAdmin']) + except IndexError: + continue + except ValueError: + pass + del (email) + try: + del(all_users) + except UnboundLocalError: + pass + if getNickFeed: + for user in user_attributes[1:]: + user['Aliases'] = '' + sys.stderr.write("Getting all email aliases in the organization... ") + try_count = 0 + wait_on_fail = .5 + while try_count < 10: + try: + aliases = multi.RetrieveAllAliases() + break + except gdata.apps.service.AppsForYourDomainException, e: + terminating_error = checkErrorCode(e) + if not terminating_error: + try_count = try_count + 1 + if try_count > 5: sys.stderr.write('Temporary error %s. 
Retry %s in %s seconds\n' % (str(e.error_code), try_count, str(wait_on_fail))) + time.sleep(wait_on_fail) + wait_on_fail = wait_on_fail * 2 if wait_on_fail < 32 else 60 + continue + else: + sys.stderr.write('Error: %s\n' % terminating_error) + sys.exit(e.error_code) + if try_count == 10: + sys.stderr.write('Giving up\n') + sys.exit(e.error_code) + sys.stderr.write("done.\r\n") + for alias in aliases: + user_email = alias['userEmail'].lower() + alias_email = alias['aliasEmail'].lower() + try: + location = 0 + gotLocation = False + while not gotLocation and location < len(user_attributes): + location = location + 1 + if user_attributes[location]['Email'] == user_email: + gotLocation = True + user_attributes[location]['Aliases'] += '%s ' % alias_email + except IndexError: + continue + for user in user_attributes[1:]: + if user['Aliases'][-1:] == ' ': + user['Aliases'] = user['Aliases'][:-1] + try: + del(aliases) + except UnboundLocalError: + pass + if getGroupFeed: + groupsObj = getGroupsObject() + user_count = 1 + for user in user_attributes[1:]: + sys.stderr.write("Getting Group Membership for %s (%s/%s)\r\n" % (user['Email'], user_count, total_users)) + username = user['Email'][:user['Email'].find('@')] + groups = [] + try_count = 0 + wait_on_fail = .5 + while try_count < 10: + try: + groups = groupsObj.RetrieveGroups(user['Email']) + break + except gdata.apps.service.AppsForYourDomainException, e: + terminating_error = checkErrorCode(e) + if not terminating_error: + try_count = try_count + 1 + if try_count > 5: sys.stderr.write('Temporary error %s. Retry %s in %s seconds\n' % (str(e.error_code), try_count, str(wait_on_fail))) + time.sleep(wait_on_fail) + wait_on_fail = wait_on_fail * 2 if wait_on_fail < 32 else 60 + continue + else: + sys.stderr.write('Error: %s\n' % terminating_error) + sys.exit(e.error_code) + if try_count == 10: + sys.stderr.write('Giving up\n') + sys.exit(e.error_code) + grouplist = '' + for groupname in groups: + grouplist += groupname['groupId']+' ' + if grouplist[-1:] == ' ': + grouplist = grouplist[:-1] + user.update(Groups=grouplist) + user_count = user_count + 1 + del (username, groups, grouplist) + csv.register_dialect('nixstdout', lineterminator='\n') + writer = csv.DictWriter(sys.stdout, fieldnames=titles, dialect='nixstdout', quoting=csv.QUOTE_MINIMAL) + writer.writerows(user_attributes) + +def doPrintGroups(): + i = 3 + printname = printdesc = printperm = usedomain = nousermanagedgroups = onlyusermanagedgroups = members = owners = settings = False + group_attributes = [] + group_attributes.append({'GroupID': 'GroupID'}) + titles = ['GroupID'] + while i < len(sys.argv): + if sys.argv[i].lower() == 'name': + printname = True + group_attributes[0].update(Name='Name') + titles.append('Name') + i = i + 1 + elif sys.argv[i].lower() == 'description': + group_attributes[0].update(Description='Description') + titles.append('Description') + printdesc = True + i = i + 1 + elif sys.argv[i].lower() == 'permission': + group_attributes[0].update(Permission='Permission') + titles.append('Permission') + printperm = True + i = i + 1 + elif sys.argv[i].lower() == 'nousermanagedgroups': + nousermanagedgroups = True + i = i + 1 + elif sys.argv[i].lower() == 'onlyusermanagedgroups': + onlyusermanagedgroups = True + i = i + 1 + elif sys.argv[i].lower() == 'members': + group_attributes[0].update(Members='Members') + titles.append('Members') + members = True + i = i + 1 + elif sys.argv[i].lower() == 'owners': + group_attributes[0].update(Owners='Owners') + titles.append('Owners') + 
owners = True + i = i + 1 + elif sys.argv[i].lower() == 'settings': + group_attributes[0].update(who_Can_Join='who_Can_Join') + titles.append('who_Can_Join') + group_attributes[0].update(who_Can_View_Membership='who_Can_View_Membership') + titles.append('who_Can_View_Membership') + group_attributes[0].update(who_Can_View_Group='who_Can_View_Group') + titles.append('who_Can_View_Group') + group_attributes[0].update(who_Can_Invite='who_Can_Invite') + titles.append('who_Can_Invite') + group_attributes[0].update(allow_External_Members='allow_External_Members') + titles.append('allow_External_Members') + group_attributes[0].update(who_Can_Post_Message='who_Can_Post_Message') + titles.append('who_Can_Post_Message') + group_attributes[0].update(allow_Web_Posting='allow_Web_Posting') + titles.append('allow_Web_Posting') + group_attributes[0].update(max_Message_Bytes='max_Message_Bytes') + titles.append('max_Message_Bytes') + group_attributes[0].update(is_Archived='is_Archived') + titles.append('is_Archived') + group_attributes[0].update(archive_Only='archive_Only') + titles.append('archive_Only') + group_attributes[0].update(message_Moderation_Level='message_Moderation_Level') + titles.append('message_Moderation_Level') + group_attributes[0].update(primary_Language='primary_Language') + titles.append('primary_Language') + group_attributes[0].update(reply_To='reply_To') + titles.append('reply_To') + group_attributes[0].update(custom_Reply_To='custom_Reply_To') + titles.append('custom_Reply_To') + group_attributes[0].update(send_Message_Deny_Notification='send_Message_Deny_Notification') + titles.append('send_Message_Deny_Notification') + group_attributes[0].update(default_Message_Deny_Notification_Text='default_Message_Deny_Notification_Text') + titles.append('default_Message_Deny_Notification_Text') + group_attributes[0].update(show_In_Group_Directory='show_In_Group_Directory') + titles.append('show_In_Group_Directory') + group_attributes[0].update(allow_Google_Communication='allow_Google_Communication') + titles.append('allow_Google_Communication') + group_attributes[0].update(members_Can_Post_As_The_Group='members_Can_Post_As_The_Group') + titles.append('members_Can_Post_As_The_Group') + group_attributes[0].update(message_Display_Font='message_Display_Font') + titles.append('message_Display_Font') + group_attributes[0].update(include_In_Global_Address_List='include_In_Global_Address_List') + titles.append('include_In_Global_Address_List') + group_attributes[0].update(spam_Moderation_Level='spam_Moderation_Level') + titles.append('spam_Moderation_Level') + settings = True + i = i + 1 + else: + showUsage() + exit(7) + groupsObj = getGroupsObject() + sys.stderr.write("Retrieving All Groups for domain %s (may take some time on large domain)..." % groupsObj.domain) + if not onlyusermanagedgroups: + try_count = 0 + wait_on_fail = .5 + while try_count < 10: + try: + all_groups = groupsObj.RetrieveAllGroups(nousermanagedgroups) + break + except gdata.apps.service.AppsForYourDomainException, e: + terminating_error = checkErrorCode(e) + if not terminating_error: + try_count = try_count + 1 + if try_count > 5: sys.stderr.write('Temporary error %s. 
Retry %s in %s seconds\n' % (str(e.error_code), try_count, str(wait_on_fail))) + time.sleep(wait_on_fail) + wait_on_fail = wait_on_fail * 2 if wait_on_fail < 32 else 60 + continue + else: + sys.stderr.write('Error: %s\n' % terminating_error) + sys.exit(e.error_code) + if try_count == 10: + sys.stderr.write('Giving up\n') + sys.exit(e.error_code) + else: + try_count = 0 + wait_on_fail = .5 + while try_count < 10: + try: + admin_and_user_groups = groupsObj.RetrieveAllGroups(False) + break + except gdata.apps.service.AppsForYourDomainException, e: + terminating_error = checkErrorCode(e) + if not terminating_error: + try_count = try_count + 1 + if try_count > 5: sys.stderr.write('Temporary error %s. Retry %s in %s seconds\n' % (str(e.error_code), try_count, str(wait_on_fail))) + time.sleep(wait_on_fail) + wait_on_fail = wait_on_fail * 2 if wait_on_fail < 32 else 60 + continue + else: + sys.stderr.write('Error: %s\n' % terminating_error) + sys.exit(e.error_code) + if try_count == 10: + sys.stderr.write('Giving up\n') + sys.exit(e.error_code) + try_count = 0 + wait_on_fail = .5 + while try_count < 10: + try: + admin_groups = groupsObj.RetrieveAllGroups(True) + break + except gdata.apps.service.AppsForYourDomainException, e: + terminating_error = checkErrorCode(e) + if not terminating_error: + try_count = try_count + 1 + if try_count > 5: sys.stderr.write('Temporary error %s. Retry %s in %s seconds\n' % (str(e.error_code), try_count, str(wait_on_fail))) + time.sleep(wait_on_fail) + wait_on_fail = wait_on_fail * 2 if wait_on_fail < 32 else 60 + continue + else: + sys.stderr.write('Error: %s\n' % terminating_error) + sys.exit(e.error_code) + if try_count == 10: + sys.stderr.write('Giving up\n') + sys.exit(e.error_code) + all_groups = [] + for this_group in admin_and_user_groups: + this_group_is_admin_created = False + for that_group in admin_groups: + if this_group['groupId'] == that_group['groupId']: + this_group_is_admin_created = True + break + if not this_group_is_admin_created: + all_groups.append(this_group) + total_groups = len(all_groups) + count = 0 + for group_vals in all_groups: + count = count + 1 + group = {} + group.update({'GroupID': group_vals['groupId']}) + if printname: + name = group_vals['groupName'] + if name == None: + name = '' + group.update({'Name': name}) + if printdesc: + description = group_vals['description'] + if description == None: + description = '' + group.update({'Description': description}) + if printperm: + try: + group.update({'Permission': group_vals['emailPermission']}) + except KeyError: + group.update({'Permission': 'Unknown'}) + if members: + all_members = '' + sys.stderr.write("Retrieving Membership for group %s (%s of %s)...\r\n" % (group_vals['groupId'], count, total_groups)) + try_count = 0 + wait_on_fail = .5 + while try_count < 10: + try: + group_members = groupsObj.RetrieveAllMembers(group_vals['groupId'], suspended_users=True) + break + except gdata.apps.service.AppsForYourDomainException, e: + terminating_error = checkErrorCode(e) + if not terminating_error: + try_count = try_count + 1 + if try_count > 5: sys.stderr.write('Temporary error %s. 
Retry %s in %s seconds\n' % (str(e.error_code), try_count, str(wait_on_fail))) + time.sleep(wait_on_fail) + wait_on_fail = wait_on_fail * 2 if wait_on_fail < 32 else 60 + continue + else: + sys.stderr.write('Error: %s\n' % terminating_error) + sys.exit(e.error_code) + if try_count == 10: + sys.stderr.write('Giving up\n') + sys.exit(e.error_code) + for member in group_members: + all_members += '%s ' % member['memberId'] + if all_members[-1:] == ' ': + all_members = all_members[:-1] + group.update({'Members': all_members}) + if owners: + all_owners = '' + sys.stderr.write("Retrieving Ownership for group %s (%s of %s)...\r\n" % (group_vals['groupId'], count, total_groups)) + try_count = 0 + wait_on_fail = .5 + while try_count < 10: + try: + group_owners = groupsObj.RetrieveAllOwners(group_vals['groupId'], suspended_users=True) + break + except gdata.apps.service.AppsForYourDomainException, e: + terminating_error = checkErrorCode(e) + if not terminating_error: + try_count = try_count + 1 + if try_count > 5: sys.stderr.write('Temporary error %s. Retry %s in %s seconds\n' % (str(e.error_code), try_count, str(wait_on_fail))) + time.sleep(wait_on_fail) + wait_on_fail = wait_on_fail * 2 if wait_on_fail < 32 else 60 + continue + else: + sys.stderr.write('Error: %s\n' % terminating_error) + sys.exit(e.error_code) + if try_count == 10: + sys.stderr.write('Giving up\n') + sys.exit(e.error_code) + for owner in group_owners: + all_owners += '%s ' % owner['email'] + if all_owners[-1:] == ' ': + all_owners = all_owners[:-1] + group.update({'Owners': all_owners}) + if settings: + sys.stderr.write("Retrieving Settings for group %s (%s of %s)...\r\n" % (group_vals['groupId'], count, total_groups)) + gs = getGroupSettingsObject() + try_count = 0 + wait_on_fail = .5 + while try_count < 10: + try: + settings = gs.RetrieveGroupSettings(group_vals['groupId']) + break + except gdata.service.RequestError, e: + if e[0]['status'] == 503 and e[0]['reason'] == 'Service Unavailable': + try_count = try_count + 1 + group_name = group_vals['groupId'][:group_vals['groupId'].find('@')] + group_domain = group_vals['groupId'][group_vals['groupId'].find('@')+1:] + sys.stderr.write('Working around Google backend error. Opening group and sleeping %s sec(s)...\n' % str(wait_on_fail)) + try: + url = 'https://groups.google.com/a/%s/group/%s' % (group_domain, group_name) + b = urllib2.urlopen(url) + b.read(100) + b.close() + except urllib2.HTTPError: + pass + time.sleep(wait_on_fail) + wait_on_fail = wait_on_fail * 2 if wait_on_fail < 32 else 60 + continue + print 'Error: %s - %s' % (e[0]['reason'], e[0]['body']) + sys.exit(e[0]['status']) + except gdata.apps.service.AppsForYourDomainException, e: + terminating_error = checkErrorCode(e) + if not terminating_error: + try_count = try_count + 1 + if try_count > 5: sys.stderr.write('Temporary error %s. 
Retry %s in %s seconds\n' % (str(e.error_code), try_count, str(wait_on_fail))) + time.sleep(wait_on_fail) + wait_on_fail = wait_on_fail * 2 if wait_on_fail < 32 else 60 + continue + else: + sys.stderr.write('Error: %s\n' % terminating_error) + sys.exit(e.error_code) + if try_count == 10: + sys.stderr.write('Giving up\n') + sys.exit(e.error_code) + for setting in settings: + setting_key = re.sub(r'([A-Z])', r'_\1', setting.keys()[0]) + if setting_key == 'email' or setting_key == 'name' or setting_key == 'description': + continue + setting_value = setting.values()[0] + if setting_value == None: + setting_value = '' + group.update({setting_key: setting_value}) + group_attributes.append(group) + csv.register_dialect('nixstdout', lineterminator='\n') + writer = csv.DictWriter(sys.stdout, fieldnames=titles, dialect='nixstdout', quoting=csv.QUOTE_MINIMAL, extrasaction='ignore') + writer.writerows(group_attributes) + +def doPrintOrgs(): + i = 3 + printname = printdesc = printparent = printinherit = False + org_attributes = [] + org_attributes.append({'Path': 'Path'}) + titles = ['Path'] + while i < len(sys.argv): + if sys.argv[i].lower() == 'name': + printname = True + org_attributes[0].update(Name='Name') + titles.append('Name') + i = i + 1 + elif sys.argv[i].lower() == 'description': + printdesc = True + org_attributes[0].update(Description='Description') + titles.append('Description') + i = i + 1 + elif sys.argv[i].lower() == 'parent': + printparent = True + org_attributes[0].update(Parent='Parent') + titles.append('Parent') + i = i + 1 + elif sys.argv[i].lower() == 'inherit': + printinherit = True + org_attributes[0].update(InheritanceBlocked='InheritanceBlocked') + titles.append('InheritanceBlocked') + i = i + 1 + else: + showUsage() + exit(8) + org = getOrgObject() + sys.stderr.write("Retrieving All Organizational Units for your account (may take some time on large domain)...") + try_count = 0 + wait_on_fail = .5 + while try_count < 10: + try: + orgs = org.RetrieveAllOrganizationUnits() + break + except gdata.apps.service.AppsForYourDomainException, e: + terminating_error = checkErrorCode(e) + if not terminating_error: + try_count = try_count + 1 + if try_count > 5: sys.stderr.write('Temporary error %s. 
Retry %s in %s seconds\n' % (str(e.error_code), try_count, str(wait_on_fail))) + time.sleep(wait_on_fail) + wait_on_fail = wait_on_fail * 2 if wait_on_fail < 32 else 60 + continue + else: + sys.stderr.write('Error: %s\n' % terminating_error) + sys.exit(e.error_code) + if try_count == 10: + sys.stderr.write('Giving up\n') + sys.exit(e.error_code) + for org_vals in orgs: + orgUnit = {} + orgUnit.update({'Path': org_vals['orgUnitPath']}) + if printname: + name = org_vals['name'] + if name == None: + name = '' + orgUnit.update({'Name': name}) + if printdesc: + desc = org_vals['description'] + if desc == None: + desc = '' + orgUnit.update({'Description': desc}) + if printparent: + parent = org_vals['parentOrgUnitPath'] + if parent == None: + parent = '' + orgUnit.update({'Parent': parent}) + if printinherit: + orgUnit.update({'InheritanceBlocked': org_vals['blockInheritance']}) + org_attributes.append(orgUnit) + csv.register_dialect('nixstdout', lineterminator='\n') + writer = csv.DictWriter(sys.stdout, fieldnames=titles, dialect='nixstdout', quoting=csv.QUOTE_MINIMAL) + writer.writerows(org_attributes) + +def doPrintNicknames(): + multi = getMultiDomainObject() + sys.stderr.write("Retrieving All Aliases for %s organization (may take some time on large domain)...\r\n\r\n" % multi.domain) + alias_attributes = [] + alias_attributes.append({'Alias': 'Alias'}) + alias_attributes[0].update(User='User') + titles = ['Alias', 'User'] + try_count = 0 + wait_on_fail = .5 + while try_count < 10: + try: + nicknames = multi.RetrieveAllAliases() + break + except gdata.apps.service.AppsForYourDomainException, e: + terminating_error = checkErrorCode(e) + if not terminating_error: + try_count = try_count + 1 + if try_count > 5: sys.stderr.write('Temporary error %s. Retry %s in %s seconds\n' % (str(e.error_code), try_count, str(wait_on_fail))) + time.sleep(wait_on_fail) + wait_on_fail = wait_on_fail * 2 if wait_on_fail < 32 else 60 + continue + else: + sys.stderr.write('Error: %s\n' % terminating_error) + sys.exit(e.error_code) + if try_count == 10: + sys.stderr.write('Giving up\n') + sys.exit(e.error_code) + for nickname in nicknames: + alias_attributes.append({'Alias': nickname['aliasEmail'], 'User': nickname['userEmail']}) + csv.register_dialect('nixstdout', lineterminator='\n') + writer = csv.DictWriter(sys.stdout, fieldnames=titles, dialect='nixstdout', quoting=csv.QUOTE_MINIMAL) + writer.writerows(alias_attributes) + +def doPrintResources(): + i = 3 + res_attributes = [] + res_attributes.append({'Name': 'Name'}) + titles = ['Name'] + printid = printdesc = printemail = False + while i < len(sys.argv): + if sys.argv[i].lower() == 'id': + printid = True + res_attributes[0].update(ID='ID') + titles.append('ID') + i = i + 1 + elif sys.argv[i].lower() == 'description': + printdesc = True + res_attributes[0].update(Description='Description') + titles.append('Description') + i = i + 1 + elif sys.argv[i].lower() == 'email': + printemail = True + res_attributes[0].update(Email='Email') + titles.append('Email') + i = i + 1 + else: + showUsage() + sys.exit(2) + resObj = getResCalObject() + sys.stderr.write("Retrieving All Resource Calendars for your account (may take some time on a large domain)") + try_count = 0 + wait_on_fail = .5 + while try_count < 10: + try: + resources = resObj.RetrieveAllResourceCalendars() + break + except gdata.apps.service.AppsForYourDomainException, e: + terminating_error = checkErrorCode(e) + if not terminating_error: + try_count = try_count + 1 + if try_count > 5: 
sys.stderr.write('Temporary error %s. Retry %s in %s seconds\n' % (str(e.error_code), try_count, str(wait_on_fail))) + time.sleep(wait_on_fail) + wait_on_fail = wait_on_fail * 2 if wait_on_fail < 32 else 60 + continue + else: + sys.stderr.write('Error: %s\n' % terminating_error) + sys.exit(e.error_code) + if try_count == 10: + sys.stderr.write('Giving up\n') + sys.exit(e.error_code) + for resource in resources: + resUnit = {} + resUnit.update({'Name': resource['resourceCommonName']}) + if printid: + resUnit.update({'ID': resource['resourceId']}) + if printdesc: + try: + desc = resource['resourceDescription'] + except KeyError: + desc = '' + resUnit.update({'Description': desc}) + if printemail: + resUnit.update({'Email': resource['resourceEmail']}) + res_attributes.append(resUnit) + csv.register_dialect('nixstdout', lineterminator='\n') + writer = csv.DictWriter(sys.stdout, fieldnames=titles, dialect='nixstdout', quoting=csv.QUOTE_MINIMAL) + writer.writerows(res_attributes) + +def hasAgreed2TOS(user_email): + multi = getMultiDomainObject() + if user_email.find('@') == -1: + user_email = '%s@%s' % (user_email, multi.domain) + try_count = 0 + wait_on_fail = .5 + hard_fail = False + while try_count < 10: + try: + userInfo = multi.RetrieveUser(user_email) + break + except gdata.apps.service.AppsForYourDomainException, e: + terminating_error = checkErrorCode(e) + if not terminating_error: + try_count = try_count + 1 + if try_count > 5: sys.stderr.write('Temporary error %s. Retry %s in %s seconds\n' % (str(e.error_code), try_count, str(wait_on_fail))) + time.sleep(wait_on_fail) + wait_on_fail = wait_on_fail * 2 if wait_on_fail < 32 else 60 + continue + else: + sys.stderr.write('Error: %s\n' % terminating_error) + hard_fail = True + break + if try_count == 10 or hard_fail: + sys.stderr.write('Giving up\n') + return True + if userInfo['agreedToTerms'] == 'true': + return True + return False + +def doCreateMonitor(): + source_user = sys.argv[4].lower() + destination_user = sys.argv[5].lower() + #end_date defaults to 30 days in the future... 
+ end_date = (datetime.datetime.now() + datetime.timedelta(days=30)).strftime("%Y-%m-%d %H:%M") + begin_date = None + incoming_headers_only = outgoing_headers_only = drafts_headers_only = chats_headers_only = False + drafts = chats = True + i = 6 + while i < len(sys.argv): + if sys.argv[i].lower() == 'end': + end_date = sys.argv[i+1] + i = i + 2 + elif sys.argv[i].lower() == 'begin': + begin_date = sys.argv[i+1] + i = i + 2 + elif sys.argv[i].lower() == 'incoming_headers': + incoming_headers_only = True + i = i + 1 + elif sys.argv[i].lower() == 'outgoing_headers': + outgoing_headers_only = True + i = i + 1 + elif sys.argv[i].lower() == 'nochats': + chats = False + i = i + 1 + elif sys.argv[i].lower() == 'nodrafts': + drafts = False + i = i + 1 + elif sys.argv[i].lower() == 'chat_headers': + chats_headers_only = True + i = i + 1 + elif sys.argv[i].lower() == 'draft_headers': + drafts_headers_only = True + i = i + 1 + else: + showUsage() + sys.exit(2) + audit = getAuditObject() + if source_user.find('@') > 0: + audit.domain = source_user[source_user.find('@')+1:] + source_user = source_user[:source_user.find('@')] + try_count = 0 + wait_on_fail = .5 + while try_count < 10: + try: + results = audit.createEmailMonitor(source_user=source_user, destination_user=destination_user, end_date=end_date, begin_date=begin_date, + incoming_headers_only=incoming_headers_only, outgoing_headers_only=outgoing_headers_only, + drafts=drafts, drafts_headers_only=drafts_headers_only, chats=chats, chats_headers_only=chats_headers_only) + break + except gdata.apps.service.AppsForYourDomainException, e: + terminating_error = checkErrorCode(e) + if not terminating_error: + try_count = try_count + 1 + if try_count > 5: sys.stderr.write('Temporary error %s. Retry %s in %s seconds\n' % (str(e.error_code), try_count, str(wait_on_fail))) + time.sleep(wait_on_fail) + wait_on_fail = wait_on_fail * 2 if wait_on_fail < 32 else 60 + continue + else: + sys.stderr.write('Error: %s\n' % terminating_error) + sys.exit(e.error_code) + if try_count == 10: + sys.stderr.write('Giving up\n') + sys.exit(e.error_code) + +def doShowMonitors(): + user = sys.argv[4].lower() + audit = getAuditObject() + if user.find('@') > 0: + audit.domain = user[user.find('@')+1:] + user = user[:user.find('@')] + try_count = 0 + wait_on_fail = .5 + while try_count < 10: + try: + results = audit.getEmailMonitors(user) + break + except gdata.apps.service.AppsForYourDomainException, e: + terminating_error = checkErrorCode(e) + if not terminating_error: + try_count = try_count + 1 + if try_count > 5: sys.stderr.write('Temporary error %s. 
Retry %s in %s seconds\n' % (str(e.error_code), try_count, str(wait_on_fail))) + time.sleep(wait_on_fail) + wait_on_fail = wait_on_fail * 2 if wait_on_fail < 32 else 60 + continue + else: + sys.stderr.write('Error: %s\n' % terminating_error) + sys.exit(e.error_code) + if try_count == 10: + sys.stderr.write('Giving up\n') + sys.exit(e.error_code) + print sys.argv[4].lower()+' has the following monitors:' + print '' + for monitor in results: + print ' Destination: '+monitor['destUserName'] + try: + print ' Begin: '+monitor['beginDate'] + except KeyError: + print ' Begin: immediately' + print ' End: '+monitor['endDate'] + print ' Monitor Incoming: '+monitor['outgoingEmailMonitorLevel'] + print ' Monitor Outgoing: '+monitor['incomingEmailMonitorLevel'] + print ' Monitor Chats: '+monitor['chatMonitorLevel'] + print ' Monitor Drafts: '+monitor['draftMonitorLevel'] + print '' + +def doDeleteMonitor(): + source_user = sys.argv[4].lower() + destination_user = sys.argv[5].lower() + audit = getAuditObject() + if source_user.find('@') > 0: + audit.domain = source_user[source_user.find('@')+1:] + source_user = source_user[:source_user.find('@')] + try_count = 0 + wait_on_fail = .5 + while try_count < 10: + try: + results = audit.deleteEmailMonitor(source_user=source_user, destination_user=destination_user) + break + except gdata.apps.service.AppsForYourDomainException, e: + terminating_error = checkErrorCode(e) + if not terminating_error: + try_count = try_count + 1 + if try_count > 5: sys.stderr.write('Temporary error %s. Retry %s in %s seconds\n' % (str(e.error_code), try_count, str(wait_on_fail))) + time.sleep(wait_on_fail) + wait_on_fail = wait_on_fail * 2 if wait_on_fail < 32 else 60 + continue + else: + sys.stderr.write('Error: %s\n' % terminating_error) + sys.exit(e.error_code) + if try_count == 10: + sys.stderr.write('Giving up\n') + sys.exit(e.error_code) + +def doRequestActivity(): + user = sys.argv[4].lower() + audit = getAuditObject() + if user.find('@') > 0: + audit.domain = user[user.find('@')+1:] + user = user[:user.find('@')] + try_count = 0 + wait_on_fail = .5 + while try_count < 10: + try: + results = audit.createAccountInformationRequest(user) + break + except gdata.apps.service.AppsForYourDomainException, e: + terminating_error = checkErrorCode(e) + if not terminating_error: + try_count = try_count + 1 + if try_count > 5: sys.stderr.write('Temporary error %s. 
Retry %s in %s seconds\n' % (str(e.error_code), try_count, str(wait_on_fail))) + time.sleep(wait_on_fail) + wait_on_fail = wait_on_fail * 2 if wait_on_fail < 32 else 60 + continue + else: + sys.stderr.write('Error: %s\n' % terminating_error) + sys.exit(e.error_code) + if try_count == 10: + sys.stderr.write('Giving up\n') + sys.exit(e.error_code) + print 'Request successfully submitted:' + print ' Request ID: '+results['requestId'] + print ' User: '+results['userEmailAddress'] + print ' Status: '+results['status'] + print ' Request Date: '+results['requestDate'] + print ' Requested By: '+results['adminEmailAddress'] + +def doStatusActivityRequests(): + audit = getAuditObject() + try: + user = sys.argv[4].lower() + if user.find('@') > 0: + audit.domain = user[user.find('@')+1:] + user = user[:user.find('@')] + request_id = sys.argv[5].lower() + try_count = 0 + wait_on_fail = .5 + while try_count < 10: + try: + results = audit.getAccountInformationRequestStatus(user, request_id) + break + except gdata.apps.service.AppsForYourDomainException, e: + terminating_error = checkErrorCode(e) + if not terminating_error: + try_count = try_count + 1 + if try_count > 5: sys.stderr.write('Temporary error %s. Retry %s in %s seconds\n' % (str(e.error_code), try_count, str(wait_on_fail))) + time.sleep(wait_on_fail) + wait_on_fail = wait_on_fail * 2 if wait_on_fail < 32 else 60 + continue + else: + sys.stderr.write('Error: %s\n' % terminating_error) + sys.exit(e.error_code) + if try_count == 10: + sys.stderr.write('Giving up\n') + sys.exit(e.error_code) + print '' + print ' Request ID: '+results['requestId'] + print ' User: '+results['userEmailAddress'] + print ' Status: '+results['status'] + print ' Request Date: '+results['requestDate'] + print ' Requested By: '+results['adminEmailAddress'] + try: + print ' Number Of Files: '+results['numberOfFiles'] + for i in range(int(results['numberOfFiles'])): + print ' Url%s: %s' % (i, results['fileUrl%s' % i]) + except KeyError: + pass + print '' + except IndexError: + try_count = 0 + wait_on_fail = .5 + while try_count < 10: + try: + results = audit.getAllAccountInformationRequestsStatus() + break + except gdata.apps.service.AppsForYourDomainException, e: + terminating_error = checkErrorCode(e) + if not terminating_error: + try_count = try_count + 1 + if try_count > 5: sys.stderr.write('Temporary error %s. 
Retry %s in %s seconds\n' % (str(e.error_code), try_count, str(wait_on_fail))) + time.sleep(wait_on_fail) + wait_on_fail = wait_on_fail * 2 if wait_on_fail < 32 else 60 + continue + else: + sys.stderr.write('Error: %s\n' % terminating_error) + sys.exit(e.error_code) + if try_count == 10: + sys.stderr.write('Giving up\n') + sys.exit(e.error_code) + print 'Current Activity Requests:' + print '' + for request in results: + print ' Request ID: '+request['requestId'] + print ' User: '+request['userEmailAddress'] + print ' Status: '+request['status'] + print ' Request Date: '+request['requestDate'] + print ' Requested By: '+request['adminEmailAddress'] + print '' + +def doDownloadActivityRequest(): + user = sys.argv[4].lower() + request_id = sys.argv[5].lower() + audit = getAuditObject() + if user.find('@') > 0: + audit.domain = user[user.find('@')+1:] + user = user[:user.find('@')] + try_count = 0 + wait_on_fail = .5 + while try_count < 10: + try: + results = audit.getAccountInformationRequestStatus(user, request_id) + break + except gdata.apps.service.AppsForYourDomainException, e: + terminating_error = checkErrorCode(e) + if not terminating_error: + try_count = try_count + 1 + if try_count > 5: sys.stderr.write('Temporary error %s. Retry %s in %s seconds\n' % (str(e.error_code), try_count, str(wait_on_fail))) + time.sleep(wait_on_fail) + wait_on_fail = wait_on_fail * 2 if wait_on_fail < 32 else 60 + continue + else: + sys.stderr.write('Error: %s\n' % terminating_error) + sys.exit(e.error_code) + if try_count == 10: + sys.stderr.write('Giving up\n') + sys.exit(e.error_code) + if results['status'] != 'COMPLETED': + print 'Request needs to be completed before downloading, current status is: '+results['status'] + sys.exit(4) + try: + if int(results['numberOfFiles']) < 1: + print 'ERROR: Request completed but no results were returned, try requesting again' + sys.exit(4) + except KeyError: + print 'ERROR: Request completed but no files were returned, try requesting again' + sys.exit(4) + for i in range(0, int(results['numberOfFiles'])): + url = results['fileUrl'+str(i)] + filename = 'activity-'+user+'-'+request_id+'-'+str(i)+'.txt.gpg' + print 'Downloading '+filename+' ('+str(i+1)+' of '+results['numberOfFiles']+')' + geturl(url, filename) + +def doRequestExport(): + begin_date = end_date = search_query = None + headers_only = include_deleted = False + user = sys.argv[4].lower() + i = 5 + while i < len(sys.argv): + if sys.argv[i].lower() == 'begin': + begin_date = sys.argv[i+1] + i = i + 2 + elif sys.argv[i].lower() == 'end': + end_date = sys.argv[i+1] + i = i + 2 + elif sys.argv[i].lower() == 'search': + search_query = sys.argv[i+1] + i = i + 2 + elif sys.argv[i].lower() == 'headersonly': + headers_only = True + i = i + 1 + elif sys.argv[i].lower() == 'includedeleted': + include_deleted = True + i = i + 1 + else: + showUsage() + sys.exit(2) + audit = getAuditObject() + if user.find('@') > 0: + audit.domain = user[user.find('@')+1:] + user = user[:user.find('@')] + try_count = 0 + wait_on_fail = .5 + while try_count < 10: + try: + results = audit.createMailboxExportRequest(user=user, begin_date=begin_date, end_date=end_date, include_deleted=include_deleted, + search_query=search_query, headers_only=headers_only) + break + except gdata.apps.service.AppsForYourDomainException, e: + terminating_error = checkErrorCode(e) + if not terminating_error: + try_count = try_count + 1 + if try_count > 5: sys.stderr.write('Temporary error %s. 
Retry %s in %s seconds\n' % (str(e.error_code), try_count, str(wait_on_fail))) + time.sleep(wait_on_fail) + wait_on_fail = wait_on_fail * 2 if wait_on_fail < 32 else 60 + continue + else: + sys.stderr.write('Error: %s\n' % terminating_error) + sys.exit(e.error_code) + if try_count == 10: + sys.stderr.write('Giving up\n') + sys.exit(e.error_code) + print 'Export request successfully submitted:' + print ' Request ID: '+results['requestId'] + print ' User: '+results['userEmailAddress'] + print ' Status: '+results['status'] + print ' Request Date: '+results['requestDate'] + print ' Requested By: '+results['adminEmailAddress'] + print ' Include Deleted: '+results['includeDeleted'] + print ' Requested Parts: '+results['packageContent'] + try: + print ' Begin: '+results['beginDate'] + except KeyError: + print ' Begin: account creation date' + try: + print ' End: '+results['endDate'] + except KeyError: + print ' End: export request date' + +def doDeleteExport(): + audit = getAuditObject() + user = sys.argv[4].lower() + if user.find('@') > 0: + audit.domain = user[user.find('@')+1:] + user = user[:user.find('@')] + request_id = sys.argv[5].lower() + try_count = 0 + wait_on_fail = .5 + while try_count < 10: + try: + results = audit.deleteMailboxExportRequest(user=user, request_id=request_id) + break + except gdata.apps.service.AppsForYourDomainException, e: + terminating_error = checkErrorCode(e) + if not terminating_error: + try_count = try_count + 1 + if try_count > 5: sys.stderr.write('Temporary error %s. Retry %s in %s seconds\n' % (str(e.error_code), try_count, str(wait_on_fail))) + time.sleep(wait_on_fail) + wait_on_fail = wait_on_fail * 2 if wait_on_fail < 32 else 60 + continue + else: + sys.stderr.write('Error: %s\n' % terminating_error) + sys.exit(e.error_code) + if try_count == 10: + sys.stderr.write('Giving up\n') + sys.exit(e.error_code) + +def doDeleteActivityRequest(): + audit = getAuditObject() + user = sys.argv[4].lower() + if user.find('@') > 0: + audit.domain = user[user.find('@')+1:] + user = user[:user.find('@')] + request_id = sys.argv[5].lower() + try_count = 0 + wait_on_fail = .5 + while try_count < 10: + try: + results = audit.deleteAccountInformationRequest(user=user, request_id=request_id) + break + except gdata.apps.service.AppsForYourDomainException, e: + terminating_error = checkErrorCode(e) + if not terminating_error: + try_count = try_count + 1 + if try_count > 5: sys.stderr.write('Temporary error %s. Retry %s in %s seconds\n' % (str(e.error_code), try_count, str(wait_on_fail))) + time.sleep(wait_on_fail) + wait_on_fail = wait_on_fail * 2 if wait_on_fail < 32 else 60 + continue + else: + sys.stderr.write('Error: %s\n' % terminating_error) + sys.exit(e.error_code) + if try_count == 10: + sys.stderr.write('Giving up\n') + sys.exit(e.error_code) + +def doStatusExportRequests(): + audit = getAuditObject() + try: + user = sys.argv[4].lower() + if user.find('@') > 0: + audit.domain = user[user.find('@')+1:] + user = user[:user.find('@')] + request_id = sys.argv[5].lower() + try_count = 0 + wait_on_fail = .5 + while try_count < 10: + try: + results = audit.getMailboxExportRequestStatus(user, request_id) + break + except gdata.apps.service.AppsForYourDomainException, e: + terminating_error = checkErrorCode(e) + if not terminating_error: + try_count = try_count + 1 + if try_count > 5: sys.stderr.write('Temporary error %s. 
Retry %s in %s seconds\n' % (str(e.error_code), try_count, str(wait_on_fail))) + time.sleep(wait_on_fail) + wait_on_fail = wait_on_fail * 2 if wait_on_fail < 32 else 60 + continue + else: + sys.stderr.write('Error: %s\n' % terminating_error) + sys.exit(e.error_code) + if try_count == 10: + sys.stderr.write('Giving up\n') + sys.exit(e.error_code) + print '' + print ' Request ID: '+results['requestId'] + print ' User: '+results['userEmailAddress'] + print ' Status: '+results['status'] + print ' Request Date: '+results['requestDate'] + print ' Requested By: '+results['adminEmailAddress'] + print ' Requested Parts: '+results['packageContent'] + try: + print ' Request Filter: '+results['searchQuery'] + except KeyError: + print ' Request Filter: None' + print ' Include Deleted: '+results['includeDeleted'] + try: + print ' Number Of Files: '+results['numberOfFiles'] + for i in range(int(results['numberOfFiles'])): + print ' Url%s: %s' % (i, results['fileUrl%s' % i]) + except KeyError: + pass + except IndexError: + try_count = 0 + wait_on_fail = .5 + while try_count < 10: + try: + results = audit.getAllMailboxExportRequestsStatus() + break + except gdata.apps.service.AppsForYourDomainException, e: + terminating_error = checkErrorCode(e) + if not terminating_error: + try_count = try_count + 1 + if try_count > 5: sys.stderr.write('Temporary error %s. Retry %s in %s seconds\n' % (str(e.error_code), try_count, str(wait_on_fail))) + time.sleep(wait_on_fail) + wait_on_fail = wait_on_fail * 2 if wait_on_fail < 32 else 60 + continue + else: + sys.stderr.write('Error: %s\n' % terminating_error) + sys.exit(e.error_code) + if try_count == 10: + sys.stderr.write('Giving up\n') + sys.exit(e.error_code) + print 'Current Export Requests:' + print '' + for request in results: + print ' Request ID: '+request['requestId'] + print ' User: '+request['userEmailAddress'] + print ' Status: '+request['status'] + print ' Request Date: '+request['requestDate'] + print ' Requested By: '+request['adminEmailAddress'] + print ' Requested Parts: '+request['packageContent'] + try: + print ' Request Filter: '+request['searchQuery'] + except KeyError: + print ' Request Filter: None' + print ' Include Deleted: '+request['includeDeleted'] + try: + print ' Number Of Files: '+request['numberOfFiles'] + except KeyError: + pass + print '' + +def doDownloadExportRequest(): + user = sys.argv[4].lower() + request_id = sys.argv[5].lower() + audit = getAuditObject() + if user.find('@') > 0: + audit.domain = user[user.find('@')+1:] + user = user[:user.find('@')] + try_count = 0 + wait_on_fail = .5 + while try_count < 10: + try: + results = audit.getMailboxExportRequestStatus(user, request_id) + break + except gdata.apps.service.AppsForYourDomainException, e: + terminating_error = checkErrorCode(e) + if not terminating_error: + try_count = try_count + 1 + if try_count > 5: sys.stderr.write('Temporary error %s. 
Retry %s in %s seconds\n' % (str(e.error_code), try_count, str(wait_on_fail))) + time.sleep(wait_on_fail) + wait_on_fail = wait_on_fail * 2 if wait_on_fail < 32 else 60 + continue + else: + sys.stderr.write('Error: %s\n' % terminating_error) + sys.exit(e.error_code) + if try_count == 10: + sys.stderr.write('Giving up\n') + sys.exit(e.error_code) + if results['status'] != 'COMPLETED': + print 'Request needs to be completed before downloading, current status is: '+results['status'] + sys.exit(4) + try: + if int(results['numberOfFiles']) < 1: + print 'ERROR: Request completed but no results were returned, try requesting again' + sys.exit(4) + except KeyError: + print 'ERROR: Request completed but no files were returned, try requesting again' + sys.exit(4) + for i in range(0, int(results['numberOfFiles'])): + url = results['fileUrl'+str(i)] + filename = 'export-'+user+'-'+request_id+'-'+str(i)+'.mbox.gpg' + #don't download existing files. This does not check validity of existing local + #file so partial/corrupt downloads will need to be deleted manually. + if os.path.isfile(filename): + continue + print 'Downloading '+filename+' ('+str(i+1)+' of '+results['numberOfFiles']+')' + geturl(url, filename) + +def doUploadAuditKey(): + auditkey = sys.stdin.read() + audit = getAuditObject() + try_count = 0 + wait_on_fail = .5 + while try_count < 10: + try: + results = audit.updatePGPKey(auditkey) + break + except gdata.apps.service.AppsForYourDomainException, e: + terminating_error = checkErrorCode(e) + if not terminating_error: + try_count = try_count + 1 + if try_count > 5: sys.stderr.write('Temporary error %s. Retry %s in %s seconds\n' % (str(e.error_code), try_count, str(wait_on_fail))) + time.sleep(wait_on_fail) + wait_on_fail = wait_on_fail * 2 if wait_on_fail < 32 else 60 + continue + else: + sys.stderr.write('Error: %s\n' % terminating_error) + sys.exit(e.error_code) + if try_count == 10: + sys.stderr.write('Giving up\n') + sys.exit(e.error_code) + +def getUsersToModify(): + entity = sys.argv[1].lower() + if entity == 'user': + users = [sys.argv[2].lower(),] + elif entity == 'group': + groupsObj = getGroupsObject() + group = sys.argv[2].lower() + sys.stderr.write("Getting all members of %s (may take some time for large groups)..." % group) + try_count = 0 + wait_on_fail = .5 + while try_count < 10: + try: + members = groupsObj.RetrieveAllMembers(group) + break + except gdata.apps.service.AppsForYourDomainException, e: + terminating_error = checkErrorCode(e) + if not terminating_error: + try_count = try_count + 1 + if try_count > 5: sys.stderr.write('Temporary error %s. Retry %s in %s seconds\n' % (str(e.error_code), try_count, str(wait_on_fail))) + time.sleep(wait_on_fail) + wait_on_fail = wait_on_fail * 2 if wait_on_fail < 32 else 60 + continue + else: + sys.stderr.write('Error: %s\n' % terminating_error) + sys.exit(e.error_code) + if try_count == 10: + sys.stderr.write('Giving up\n') + sys.exit(e.error_code) + print "done.\r\n" + users = [] + for member in members: + users.append(member['memberId'][0:member['memberId'].find('@')]) + elif entity == 'ou': + orgObj = getOrgObject() + ou = sys.argv[2] + sys.stderr.write("Getting all users of %s Organizational Unit (May take some time for large OUs)..." 
% ou) + try_count = 0 + wait_on_fail = .5 + while try_count < 10: + try: + members = orgObj.RetrieveAllOrganizationUnitUsers(ou) + break + except gdata.apps.service.AppsForYourDomainException, e: + terminating_error = checkErrorCode(e) + if not terminating_error: + try_count = try_count + 1 + if try_count > 5: sys.stderr.write('Temporary error %s. Retry %s in %s seconds\n' % (str(e.error_code), try_count, str(wait_on_fail))) + time.sleep(wait_on_fail) + wait_on_fail = wait_on_fail * 2 if wait_on_fail < 32 else 60 + continue + else: + sys.stderr.write('Error: %s\n' % terminating_error) + sys.exit(e.error_code) + if try_count == 10: + sys.stderr.write('Giving up\n') + sys.exit(e.error_code) + print "done.\r\n" + users = [] + for member in members: + users.append(member['orgUserEmail']) + elif entity == 'all': + orgObj = getOrgObject() + users = [] + sys.stderr.write("Getting all users in the Google Apps %s organization (may take some time on a large domain)..." % orgObj.domain) + try_count = 0 + wait_on_fail = .5 + while try_count < 10: + try: + members = orgObj.RetrieveAllOrganizationUsers() + break + except gdata.apps.service.AppsForYourDomainException, e: + terminating_error = checkErrorCode(e) + if not terminating_error: + try_count = try_count + 1 + if try_count > 5: sys.stderr.write('Temporary error %s. Retry %s in %s seconds\n' % (str(e.error_code), try_count, str(wait_on_fail))) + time.sleep(wait_on_fail) + wait_on_fail = wait_on_fail * 2 if wait_on_fail < 32 else 60 + continue + else: + sys.stderr.write('Error: %s\n' % terminating_error) + sys.exit(e.error_code) + if try_count == 10: + sys.stderr.write('Giving up\n') + sys.exit(e.error_code) + for member in members: + if member['orgUserEmail'][:2] == '.@' or member['orgUserEmail'][:11] == 'gcc_websvc@' or member['orgUserEmail'][:27] == 'secure-data-connector-user@' or member['orgUserEmail'][-16:] == '@gtempaccount.com': # not real users, skip em + continue + users.append(member['orgUserEmail']) + sys.stderr.write("done.\r\n") + else: + showUsage() + sys.exit(2) + return users + +def OAuthInfo(): + selected_file = 'oauth.txt' + try: + selected_file = os.environ['OAUTHFILE'] + except KeyError: + pass + print "\nOAuth File: %s%s" % (getGamPath(), selected_file) + if os.path.isfile(getGamPath()+selected_file): + oauthfile = open(getGamPath()+selected_file, 'rb') + domain = oauthfile.readline()[0:-1] + try: + token = pickle.load(oauthfile) + oauthfile.close() + except ImportError: # Deals with tokens created by windows on old GAM versions. 
Rewrites them with binary mode set + oauthfile = open(getGamPath()+oauth_filename, 'r') + domain = oauthfile.readline()[0:-1] + token = pickle.load(oauthfile) + oauthfile.close() + f = open(getGamPath()+oauth_filename, 'wb') + f.write('%s\n' % (domain,)) + pickle.dump(token, f) + f.close() + print 'Google Apps Domain: %s' % domain + print "Client ID: %s\nSecret: %s" % (token.oauth_input_params._consumer.key, token.oauth_input_params._consumer.secret) + print 'Scopes:' + email_scope_present = False + for scope in token.scopes: + print ' %s' % scope + if scope == 'https://www.googleapis.com/auth/userinfo.email': + email_scope_present = True + apps = getAppsObject() + try: + is_valid = apps.Post(' ', 'https://www.google.com/accounts/AuthSubTokenInfo', converter=str) + except gdata.service.RequestError, e: + print 'Error: %s' % e[0]['reason'] + exit(0) + if email_scope_present: + admin_email = apps.Get('https://www.googleapis.com/userinfo/email', converter=str) + admin_email = admin_email[6:admin_email.find('&')] + print 'Google Apps Admin: %s' % admin_email + else: + print 'Google Apps Admin: Not authorized to check (recreate GAM OAuth token to correct)' + else: + print 'Error: That OAuth file doesn\'t exist!' + +def doDeleteOAuth(): + sys.stderr.write('This OAuth token will self-destruct in 3...') + time.sleep(1) + sys.stderr.write('2...') + time.sleep(1) + sys.stderr.write('1...') + time.sleep(1) + sys.stderr.write('boom!\n') + selected_file = 'oauth.txt' + try: + selected_file = os.environ['OAUTHFILE'] + except KeyError: + pass + if os.path.isfile(getGamPath()+selected_file): + multi = getMultiDomainObject() + os.remove(getGamPath()+selected_file) + try: + revoke_result = multi.Get('https://www.google.com/accounts/AuthSubRevokeToken', converter=str) + except gdata.service.RequestError, e: + print 'Error: %s' % e[0]['reason'] + sys.exit(e[0]['status']) + print revoke_result + else: + print 'Error: The OAuth token %s does not exist' % (getGamPath()+selected_file) + sys.exit(1) + +def doRequestOAuth(): + if not os.path.isfile(getGamPath()+'nodito.txt'): + print "\n\nGAM is made possible and maintained by the work of Dito. Who is Dito?\n\nDito is solely focused on moving organizations to Google's cloud. After hundreds of successful deployments over the last 5 years, we have gained notoriety for our complete understanding of the platform, our change management & training ability, and our rock-star deployment engineers. We are known worldwide as the Google Apps Experts.\n" + visit_dito = raw_input("Want to learn more about Dito? Hit Y to visit our website (you can switch back to this window when you're done). Hit Enter to continue without visiting Dito: ") + if visit_dito.lower() == 'y': + webbrowser.open('http://www.ditoweb.com?s=gam') + domain = raw_input("\nEnter your Primary Google Apps Domain (e.g. 
example.com): ") + if os.path.isfile(getGamPath()+'key-and-secret.txt'): + secret_file = open(getGamPath()+'key-and-secret.txt', 'rb') + client_key = secret_file.readline() + if client_key[-1:] == "\n" or client_key[-1:] == "\r": + client_key = client_key[:-1] + client_secret = secret_file.readline() + if client_secret[-1:] == "\n" or client_secret[-1:] == "\r": + client_secret = client_secret[:-1] + secret_file.close() + print "\nUsing Client Key and Secret from %s:\n\tClient Key: \"%s\"\n\tClient Secret: \"%s\"\n\nPress Enter to Continue...\n" % (getGamPath()+'key-and-secret.txt', client_key, client_secret) + raw_input() + else: + print "\nIf you plan to use Group Settings commands, you\'ll need an Client ID and secret from the Google API console, see http://code.google.com/p/google-apps-manager/wiki/GettingAnOAuthConsoleKey for details. If you don\'t plan to use Group Settings commands you can just press enter here." + client_key = raw_input("\nEnter your Client ID (e.g. XXXXXX.apps.googleusercontent.com or leave blank): ") + if client_key == '': + client_key = 'anonymous' + client_secret = 'anonymous' + else: + client_secret = raw_input("\nEnter your Client Secret: ") + save_secret = raw_input('Do you wish to save the Client Key and Secret to %s for future use? (y/N): ' % (getGamPath()+'key-and-secret.txt')) + if save_secret.lower() == 'y': + secret_file = open(getGamPath()+'key-and-secret.txt', 'wb') + secret_file.write('%s\n%s' % (client_key, client_secret)) + secret_file.close() + fetch_params = {'xoauth_displayname':'Google Apps Manager'} + selected_scopes = ['*', '*', '*', '*', '*', '*', '*', '*', '*', '*', '*', '*', '*'] + menu = '''Select the authorized scopes for this OAuth token and set the token name: + +[%s] 0) Group Provisioning +[%s] 1) Email Alias Provisioning +[%s] 2) Organizational Unit Provisioning +[%s] 3) User Provisioning +[%s] 4) User Email Settings +[%s] 5) Calendar Resources +[%s] 6) Audit Monitors, Activity and Mailbox Exports +[%s] 7) Admin Settings +[%s] 8) Admin Auditing +[%s] 9) Group Settings API +[%s] 10) Profiles API (Hide / Unhide from contact sharing) +[%s] 11) Calendar Data API +[%s] 12) Reporting API + + 13) Select all scopes + 14) Unselect all scopes + 15) Set OAuth token name (currently: %s) + 16) Continue +''' + os.system(['clear','cls'][os.name == 'nt']) + while True: + selection = raw_input(menu % (selected_scopes[0], selected_scopes[1], selected_scopes[2], selected_scopes[3], selected_scopes[4], selected_scopes[5], selected_scopes[6], selected_scopes[7], selected_scopes[8], selected_scopes[9], selected_scopes[10], selected_scopes[11], selected_scopes[12], fetch_params['xoauth_displayname'])) + try: + if int(selection) > -1 and int(selection) < 13: + if selected_scopes[int(selection)] == ' ': + selected_scopes[int(selection)] = '*' + else: + selected_scopes[int(selection)] = ' ' + elif selection == '13': + for i in range(0, len(selected_scopes)): + selected_scopes[i] = '*' + elif selection == '14': + for i in range(0, len(selected_scopes)): + selected_scopes[i] = ' ' + elif selection == '15': + fetch_params['xoauth_displayname'] = raw_input('Enter the name for your OAuth token: ') + elif selection == '16': + at_least_one = False + for i in range(0, len(selected_scopes)): + if selected_scopes[i] == '*': + at_least_one = True + if at_least_one: + break + else: + os.system(['clear','cls'][os.name == 'nt']) + print "You must select at least one scope!\n" + continue + else: + os.system(['clear','cls'][os.name == 'nt']) + print 'Not a valid selection.' 
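+        # selection was numeric but not a menu option; redraw the menu and ask again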
+ continue + os.system(['clear','cls'][os.name == 'nt']) + except ValueError: + os.system(['clear','cls'][os.name == 'nt']) + print 'Not a valid selection.' + continue + + possible_scopes = ['https://apps-apis.google.com/a/feeds/groups/', # Groups Provisioning API + 'https://apps-apis.google.com/a/feeds/alias/', # Nickname Provisioning API + 'https://apps-apis.google.com/a/feeds/policies/', # Organization Provisioning API + 'https://apps-apis.google.com/a/feeds/user/', # Users Provisioning API + 'https://apps-apis.google.com/a/feeds/emailsettings/2.0/', # Email Settings API + 'https://apps-apis.google.com/a/feeds/calendar/resource/', # Calendar Resource API + 'https://apps-apis.google.com/a/feeds/compliance/audit/', # Audit API + 'https://apps-apis.google.com/a/feeds/domain/', # Admin Settings API + 'https://www.googleapis.com/auth/apps/reporting/audit.readonly', # Admin Audit API + 'https://www.googleapis.com/auth/apps.groups.settings', # Group Settings API + 'https://www.google.com/m8/feeds', # Contacts / Profiles API + 'https://www.google.com/calendar/feeds/', # Calendar Data API + 'https://www.google.com/hosted/services/v1.0/reports/ReportingData'] # Reporting API + scopes = ['https://www.googleapis.com/auth/userinfo.email',] # Email Display Scope, always included + for i in range(0, len(selected_scopes)): + if selected_scopes[i] == '*': + scopes.append(possible_scopes[i]) + if possible_scopes[i] == 'https://www.google.com/calendar/feeds/': + scopes.append('https://www.googleapis.com/auth/calendar') # Get the new Calendar API scope also + apps = gdata.apps.service.AppsService(domain=domain) + apps = commonAppsObjInit(apps) + apps.SetOAuthInputParameters(gdata.auth.OAuthSignatureMethod.HMAC_SHA1, consumer_key=client_key, consumer_secret=client_secret) + try: + request_token = apps.FetchOAuthRequestToken(scopes=scopes, extra_parameters=fetch_params) + except gdata.service.FetchingOAuthRequestTokenFailed, e: + if str(e).find('Timestamp') != -1: + print "In order to use OAuth, your system time needs to be correct.\nPlease fix your time and try again." + sys.exit(5) + else: + print "Error: %s" % e + sys.exit(6) + url_params = {'hd': domain} + url = apps.GenerateOAuthAuthorizationURL(request_token=request_token, extra_params=url_params) + raw_input("\nNow GAM will open a web page in order for you to grant %s access. Make sure you are logged in as an Administrator of your Google Apps domain before granting access. Press the Enter key to open your browser." % fetch_params['xoauth_displayname']) + try: + webbrowser.open(str(url)) + except Exception, e: + pass + raw_input("\n\nYou should now see a web page asking you to grant %s\n" + 'access. If the page didn\'t open, you can manually\n' + 'go to\n\n%s\n\nto grant access.\n' + '\n' + 'Once you\'ve granted access, press the Enter key.' % (fetch_params['xoauth_displayname'], url)) + try: + final_token = apps.UpgradeToOAuthAccessToken(request_token) + except gdata.service.TokenUpgradeFailed: + print 'Failed to upgrade the token. Did you grant GAM access in your browser?' 
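+    # the request token could not be upgraded to an access token; stop here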
+ exit(4) + oauth_filename = 'oauth.txt' + try: + oauth_filename = os.environ['OAUTHFILE'] + except KeyError: + pass + f = open(getGamPath()+oauth_filename, 'wb') + f.write('%s\n' % (domain,)) + pickle.dump(final_token, f) + f.close() + +try: + # perform update check + doGAMCheckForUpdates() + if sys.argv[1].lower() == 'version': + doGAMVersion() + exit(0) + if sys.argv[1].lower() == 'create': + if sys.argv[2].lower() == 'user': + doCreateUser() + elif sys.argv[2].lower() == 'group': + doCreateGroup() + elif sys.argv[2].lower() == 'nickname' or sys.argv[2].lower() == 'alias': + doCreateNickName() + elif sys.argv[2].lower() == 'org': + doCreateOrg() + elif sys.argv[2].lower() == 'resource': + doCreateResource() + sys.exit(0) + elif sys.argv[1].lower() == 'update': + if sys.argv[2].lower() == 'user': + doUpdateUser() + elif sys.argv[2].lower() == 'group': + doUpdateGroup() + elif sys.argv[2].lower() == 'nickname' or sys.argv[2].lower() == 'alias': + doUpdateNickName() + elif sys.argv[2].lower() == 'org': + doUpdateOrg() + elif sys.argv[2].lower() == 'resource': + doUpdateResourceCalendar() + elif sys.argv[2].lower() == 'domain': + doUpdateDomain() + else: + showUsage() + print 'Error: invalid argument to "gam update..."' + sys.exit(2) + sys.exit(0) + elif sys.argv[1].lower() == 'info': + if sys.argv[2].lower() == 'user': + doGetUserInfo() + elif sys.argv[2].lower() == 'group': + doGetGroupInfo() + elif sys.argv[2].lower() == 'nickname' or sys.argv[2].lower() == 'alias': + doGetNickNameInfo() + elif sys.argv[2].lower() == 'domain': + doGetDomainInfo() + elif sys.argv[2].lower() == 'org': + doGetOrgInfo() + elif sys.argv[2].lower() == 'resource': + doGetResourceCalendarInfo() + sys.exit(0) + elif sys.argv[1].lower() == 'delete': + if sys.argv[2].lower() == 'user': + doDeleteUser() + elif sys.argv[2].lower() == 'group': + doDeleteGroup() + elif sys.argv[2].lower() == 'nickname' or sys.argv[2].lower() == 'alias': + doDeleteNickName() + elif sys.argv[2].lower() == 'org': + doDeleteOrg() + elif sys.argv[2].lower() == 'resource': + doDeleteResourceCalendar() + sys.exit(0) + elif sys.argv[1].lower() == 'audit': + if sys.argv[2].lower() == 'monitor': + if sys.argv[3].lower() == 'create': + doCreateMonitor() + elif sys.argv[3].lower() == 'list': + doShowMonitors() + elif sys.argv[3].lower() == 'delete': + doDeleteMonitor() + elif sys.argv[2].lower() == 'activity': + if sys.argv[3].lower() == 'request': + doRequestActivity() + elif sys.argv[3].lower() == 'status': + doStatusActivityRequests() + elif sys.argv[3].lower() == 'download': + doDownloadActivityRequest() + elif sys.argv[3].lower() == 'delete': + doDeleteActivityRequest() + elif sys.argv[2].lower() == 'export': + if sys.argv[3].lower() == 'status': + doStatusExportRequests() + elif sys.argv[3].lower() == 'download': + doDownloadExportRequest() + elif sys.argv[3].lower() == 'request': + doRequestExport() + elif sys.argv[3].lower() == 'delete': + doDeleteExport() + elif sys.argv[2].lower() == 'uploadkey': + doUploadAuditKey() + elif sys.argv[2].lower() == 'admin': + doAdminAudit() + sys.exit(0) + elif sys.argv[1].lower() == 'print': + if sys.argv[2].lower() == 'users': + doPrintUsers() + elif sys.argv[2].lower() == 'nicknames' or sys.argv[2].lower() == 'aliases': + doPrintNicknames() + elif sys.argv[2].lower() == 'groups': + doPrintGroups() + elif sys.argv[2].lower() == 'orgs' or sys.argv[2].lower() == 'ous': + doPrintOrgs() + elif sys.argv[2].lower() == 'resources': + doPrintResources() + elif sys.argv[2].lower() == 'postini': + doPrintPostini() 
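+  # end of the 'gam print ...' dispatch; each top-level command block exits once it has run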
+ sys.exit(0) + elif sys.argv[1].lower() == 'oauth': + if sys.argv[2].lower() == 'request' or sys.argv[2].lower() == 'create': + doRequestOAuth() + elif sys.argv[2].lower() == 'info': + OAuthInfo() + elif sys.argv[2].lower() == 'delete' or sys.argv[2].lower() == 'revoke': + doDeleteOAuth() + elif sys.argv[2].lower() == 'select': + doOAuthSelect() + sys.exit(0) + elif sys.argv[1].lower() == 'calendar': + if sys.argv[3].lower() == 'showacl': + doCalendarShowACL() + elif sys.argv[3].lower() == 'add': + doCalendarAddACL() + elif sys.argv[3].lower() == 'del' or sys.argv[3].lower() == 'delete': + doCalendarDelACL() + elif sys.argv[3].lower() == 'update': + doCalendarUpdateACL() + elif sys.argv[3].lower() == 'wipe': + doCalendarWipeData() + sys.exit(0) + elif sys.argv[1].lower() == 'report': + showReport() + sys.exit(0) + elif sys.argv[1].lower() == 'whatis': + doWhatIs() + sys.exit(0) + users = getUsersToModify() + command = sys.argv[3].lower() + if command == 'print': + for user in users: + print user + elif command == 'show': + readWhat = sys.argv[4].lower() + if readWhat == 'labels' or readWhat == 'label': + showLabels(users) + elif readWhat == 'profile': + showProfile(users) + elif readWhat == 'calendars': + showCalendars(users) + elif readWhat == 'calsettings': + showCalSettings(users) + elif readWhat == 'sendas': + showSendAs(users) + elif readWhat == 'sig' or readWhat == 'signature': + getSignature(users) + elif readWhat == 'forward': + getForward(users) + elif readWhat == 'pop' or readWhat == 'pop3': + getPop(users) + elif readWhat == 'imap' or readWhat == 'imap4': + getImap(users) + elif readWhat == 'vacation': + getVacation(users) + elif readWhat == 'delegate' or readWhat == 'delegates': + getDelegates(users) + elif command == 'delete' or command == 'del': + delWhat = sys.argv[4].lower() + if delWhat == 'delegate': + deleteDelegate(users) + elif delWhat == 'calendar': + deleteCalendar(users) + elif delWhat == 'label': + doDeleteLabel(users) + elif delWhat == 'photo': + deletePhoto(users) + elif command == 'add': + addWhat = sys.argv[4].lower() + if addWhat == 'calendar': + addCalendar(users) + elif command == 'update': + if sys.argv[4].lower() == 'calendar': + updateCalendar(users) + elif sys.argv[4].lower() == 'photo': + doPhoto(users) + elif command == 'get': + if sys.argv[4].lower() == 'photo': + getPhoto(users) + elif command == 'profile': + doProfile(users) + elif command == 'imap': + doImap(users) + elif command == 'pop' or command == 'pop3': + doPop(users) + elif command == 'sendas': + doSendAs(users) + elif command == 'language': + doLanguage(users) + elif command == 'utf' or command == 'utf8' or command == 'utf-8' or command == 'unicode': + doUTF(users) + elif command == 'pagesize': + doPageSize(users) + elif command == 'shortcuts': + doShortCuts(users) + elif command == 'arrows': + doArrows(users) + elif command == 'snippets': + doSnippets(users) + elif command == 'label': + doLabel(users) + elif command == 'filter': + doFilter(users) + elif command == 'forward': + doForward(users) + elif command == 'sig' or command == 'signature': + doSignature(users) + elif command == 'vacation': + doVacation(users) + elif command == 'webclips': + doWebClips(users) + elif command == 'delegate' or command == 'delegates': + doDelegates(users) + else: + showUsage() + sys.exit(2) +except IndexError: + showUsage() + sys.exit(2) +except KeyboardInterrupt: + sys.exit(50) +except socket.error, e: + print '\nError: %s' % e + sys.exit(3) diff --git a/gam/gdata/Crypto/Cipher/AES.pyd 
b/gam/gdata/Crypto/Cipher/AES.pyd new file mode 100755 index 0000000000000000000000000000000000000000..707b36ff2d69f7f0013b6da4dd3374b7ec87d969 GIT binary patch literal 27648 zcmeI*33yHC+W7w+*$6^{gdo_6Impf&Nr;SQF$6_KBteKIO?D775wg1lRa9G4P_){b zQZxvDLObnrRr%?SrtF&oPX)wuBvJ@ zt7cNcUDbv;V;CwhSe6Vve5x{IlxgP6ahov6rxIEGWWuqtiE>^lmBzTOY#?P2%Euy= z%2y^;i`0P+ zgRIk@p-lLwR9Wp4j4Q5g$AVx6S;vu~O!!bj`;m!>GV7%Lm?=O0F=k46QO5F=I$Fd! zvi}hbWx_|wi#D#O)ZuCtR$>^*P$qnoSg5^Hw)SIYW>}%>-S{+0mk*)$@)2EDwyjRn!kb5E174!o`z5}LoahflI4Cm z6pI}-v#;A&?C4^WebrKXfRPH>SIv|WQ|$qLZHpa!)y5Snmab5#bcM>S&|l3OX2vzD zl&(>=bd74Vh8b&^8`r2_x<-xCH7sNe^I0!vax6*X_o+ru4L!^)cKFpWsOL6kX0em0 z#g3m0R?B8xXF{btX|dy!af%*Eds2Ti`woY1wM=``N&TH_67}h4>Ex%@`&l{pnd$v( zw1@S6wm~VyRgU<&ShY$ItF|1LC2Lq2*RU>KqfY4> zb!82!SueBXz^9cQxD^Lp=ifhY8|A=ll>^r}`I+ne>^O2Wj$Ce`e0z@EAZKT%_iN?k zXQlVEPsy<wza1aue5m!#c6s?Pii`CDg}DKD*_}$Coqer>N3Xln`WOQ4a!3aC z0z>Fsy?T}2YJp+EDz(9Xfm-jsO06HTK+Uun(`HN~aDf@q=1f!aDs!gIQ*zY$(7P%5 zYL8=6w^BY8t1<)>dIa3HNcU!9zuf96hb#>>lnSueVLR{jiE~5GGpJXpA~R#0)Js)k zGGj2n%nxNMCNpN%HK`tEER-4X-1LebsQ{Xr(|pxtsvX` zT5@md3uV(^|8~=#N;cI$GZtNTdn)fZXpIBN*FT3XJ7e=NsF zACRZCv~nILHm#P*8R$!!t=N5UYa zkVYF~<;(in$xA8A5RxZ_F`*QeEn^hs)C&dQdw6Edc37EmaovR$H@ z64~%IFhj}FpK-D+*<+#XkqP6bcV$$LU%AxEsdLYj-)rf)b690hJ|M63?Af}jd4BO< z${ovP*sb($UbAQ2rW&}5U)O+oHk}iT=!??)OqCLC49>G>-7@arbw!o#fE%5rw4`(<0yuGsFS+xTn@!56 z*_P8y^Q$PI?4XjqFwL*BA%GHX47Rgp-H;uWsnF8$(Lsri4C-yh6Tl2jp6nt=&S1Ei zaz7f9tkMFNQOmTzDl*F~EwF0Z@M=nXVKARP>zeTdUe|f)4zg*I%hnLp6F`-*gKXJB zNt0s;u}bq*S|7_a-|AATS(7HyE3ZGPfLtr8uS4pt#qqhH^9kccs~&FsDwX&Bp7Ssbo{7 z*-&~{rP(lUmnY4J9E8#r7+a3AR7#jdA771vUS;(<2A#Qjg^)|*|@aj zFz&msv;vGR$6sYy4jpBdwj5e_P98Z2kVLEeGG&^r4URNk#f;FSu9ver)JvxrcjHmGacmVrf>& zQyYt=Jg3UH;KnIWHM;t_JaD#}lo^vTHG}J@;`?2(^%zDAhO@4&K0jtuW?NmA0>(8u z%Yxc_GZjUvXn27 zCV$fJgg!DP6&a?z(BD;_g;VZY(l{6&gyrc(c^;lE#VU`)eB<8qxVZR;JU0LHRc9Sg zcEbkSp1s<@jIY31T{2^etur~dTpu&aRpt6P%kh?!*{W>C=^v&{cUI}@7y_M(%~IS| zN#=|hg5~RPaX*_}KV`%8DON9(uP3|I6!UOX`V_m;F+X?e)$AsFk@}Ur$aycZVcO49 zCUgCy`uH%D52~uzdW&3uaZPQWXOaH@0@g!90!; z1-}2}2J3G{g&T{NYps@)RP#Dg12^@4X53v;9r|DOCsPgv4=wX;v!`L2S$e=(HZe~R zI4K3lNyTzfrFTGZ=~a*){VZ)1(e0cS0At|>Fj zOqHMNH!r)cT#dcH>=UGPwn_>%1X-qLC7Y&XRn{NKer9Uubyj~^f99hn&&km_y6_pThoNKGx!FOewc#gYFJ7>Iu?{aP&NhOPU(-<=ZnHAfm zZke1JvrkUhGwG~F`bOrbhq)U9j++QIGku|{Qh##A^1PlG7xR^w6{lX#pU#@Ew~J>= z*@q$Eq=_`@mX7{)t7AelQVA2nqfJUvrp0(dL1{YYsj(q*DxMP4da2-kOL{`&6XQ0Ck2&W zz?|&oraQ%aYdImabpOUB$6+OxB)&G6q^R}zlq@^SO}DC{bPpw&!Uz_=aU3Rxr1Iyt z0ZV=hQ-0GU<+cqn_Y62Y)w8scr!_N#nWwAW(*12LEp`N}IXOqG8flCK98a^V6eySF zsv*eSl$#>RJpVGk6;L)c3^LcBG9Ky4lp@viHu_`fR&MD7s#1!tQi{4p8Y3ac)6{hW z^}m#yltsu`4p^mJWJAbF6TXZ3TUa@& ztETahrmof{<*CK=Tas-lDQU)zx0ZCFNWQ6^ZC%I-C?#^d77MI6lfN%oS>NOEjO+Q5 z<0-8yi}cG(z4aEEpzP$lTxi-VZa5k)?l)SLAWgYjr|gX*xA7k(x8sKVnQ!bXfvM7# z!w}@Gw6%KlkJ_{1a%SlUX2^$BpR|M08LeO%yWUcQB^om(F{6y#^35hU zl;r&{ag!KXTeAacQLUG-A>ETMfTCXYM_%QMl!oaaiVQt2z~6gSo~}X0e*FU+DQpnyM#{i~og=;qw1y$FR4wV{kD9nx*8K z@(Y6rZ#)kCeZMeE3ZJg@3xWTvUwF0fU+NdyzU~(;@X%VOUpOxH{MLTq3C$@-Lcfs4 z7$-u%U_T`@X1}QRoJPN}T^iisO21Gf*G*qkreEm8gKkQ$B`eY|=vnjic2kt{tn`6) zhCmyAU@JqQtv=A+5U9}yI+W;qRaAPQ(kIAcv(aI%<0emON!pxu^shrRQd9aCl4WYk zG^Uk)-tw(AX{;3r~JjS~&f~bLk(Rmg^tN*0$0=OfT&pLQWdg zb@hSt4|Uc0Ksu_rX8J%ns=DU-Kutj}L!k5zfq&aSG%7g`rGE&n&ZQ|-`Um|p(~y&8 zy9a-kQCLYqTy&FMxd!vwO83y_Rre66bPt_M>v~!X!*KI-t4`?y>eH}yr&l;hRaK^C zcQ~xH?RA6X0$nl$Dm_G?dHxmJRb^|#Ky&?>GCf2){V%Ty8dA2P^YBhnHxAMtGp>Eu zqiDvN+`zJ(MA7tK=}L{8r3V^231h{p9+p}P%;&c^(ue%E4U1V(T#b!yzDAQC@U)gk7@fiZ{qUQQ~IpZlB0X|tkPjB z+m%VtygJH)R;Am?1Bt(n>FZqj^$40}XhUr_&)^JiylA;D*GiBwO99Y2^*6hhi z?5b97O$)wJO6kSc%6dMmH%QjwLcdX>tjDa<^=zbL;E1?6)GQg%MUp!kr3%WWP`Gog 
zGFG!-?LpR|PAwmDJMd{tz);6hs@w^yWx3jJ40|uO{*o>9Im|Yuida0E`O5jsVUaAL zRaLf2kp=RUS-L=OHz{y_`2}>o%7Ufk7fAKV7OToHkT;WL!G`h+q{?K0)ae^LXTGvP zD)H5VSE+{ToZuFg6kD(3EK>MgWoXW#;^uNQFIps*XG!AtF_3P7lOuKEG~YBOQp?9@jl2R zRY6rBgKeserd~*TRIu;O_jiot8yaeOd57gL%AR&92VGs}$xiK*oz{@|k^XUi&n^@f z4=%qyE93r7v%j)$^KkyVR!`$*`>nvrco>|w!6YTy%w)E_^*HOPEKRt>jaK@;PsSn+ zg8DObRTp2=KMvXLtJG(Ii*K`1?ncU2s`QYJ+6yHw5EWZnQT@9cvyO5OywSzC!pa5S zCP%tYsxfnFn~hzP9#uKURPuuq&jCr5^*oNfIFdux`5nzpXE{Q7i3u}#4WtD? zI`j72mtvKrlBebsr@X&_UA{hf-)z?1y{u~RmSayX)|r*O)0LAOu)GVAaTQkS)0XE3 ztdPmvfYtJ$57?P;lLff}>x^?Z8t3lK$W!KiX`H*oI5#Wft}-{%IC(ImkV$>O;fzPh z=kbgp<@0353+3~y#kUnzoWm$RPxM|iK$g^#Qip23UZ=jx1?MaWaMXGEjLQYLn6r>E znlbH>eX>A#r%PvNmRBSz`>Er(qm)X{^)t(Iq0jN*Y;#RzRfDy^jQh1>T*Yfl8H>2D zrK7Q4A|rmb8NteOd+9v&{WW?&hm7T-z#0BAj!yIMYtuL0)h3-ioa&EvfOLDJTO zt$CkNPALvPi<@iSXm5&}JCvJtE;rq(+_ZbSX`gb_Ugf6!%T3#ro3<}EEg)aFhsDk1 z#w$B574~|X_AWHP@_F85GzQgLyhU0uLdNCuzY?)g6I+doO19Swl{d3P`Rq0@v~@>K zzbIW&LaZiS!wQ-%0sS>mu@MQ$@w(`^SWVo>u~8Aa_Nu`=t;iFLJQqeX#4wcKcYtv- zjc!boycj+qIx;F!lc-CGjvcLu8>JZ;9UGpItdVz!<%Q*cjL0Zux2=quaA5w%8UJd5 ziOI1Mt=KFuE}`VO6E#VRvUfS6;Gkg5sHmvOk>L^JlvmU3nawNg1MM|Z+L(m6*yzbpLbOgZF7K2P)2o?Y#B^MT%ov&s)m5g;*T4wS-PUBr$a9eDEz4!Q(uibTTX~Wqg4sqs zJ87Bc@Q>xnvi}?6WXZi-_E<{qh77=l1&eW+zA5EP2d-{|qkE=g; zQ2gDvap$(i#a$lo{rB$&J2;FmH8H91`RLI=WlLp+SKdg2@~!H z96h>l40w>%S}^;pWh+tWOSn`)!i#Uw>Wu`hyR=$F^wEbBL>JOqb4`r&oCL(Q*Avg5-_&2L9ogC1x{bM7ZPqV0cWG3!W`VoZ5f(@4tS`sZ+`?6728Y%O0W8_%wd;;>O{vTa)h% z9lB%R*s*5=j~z?@W#7KhJv=-TE_d&~$V?s@ZbjzHf#u)J$?F%AaC!=6NB5V1qS@ll! z>9cCa#fxi4HErrYy-k||n-(qdOi4?tb~G?>e(T?V&poqq=QuAbt5(UEFK@U$eE8mm z)vDE*aN)vNo>#A480hF2*}G#$U8Aa1tuA!ywzz7gO6D#PjvxQ){Pc8_$e(`N zdb)S-6<0t1yc@mJ`DX(Mep&g^qleA!-1%{HRMgQ0lP5pQ|N8478#6P@pLXw#OPe&Q zFs^y??g{PN53{qib-%xU{h-{;%*4Kj4t>}xFYnmYOP4ZM=yZ8a8aM8AsYj1x7r*;1 ze6GPzJ@vi!EEdJbU-j+OY3jjmzIpeHQKOEZ@7i@yMN`vCCXXLKsa2zf?b`7017R*M zW1kKh^!3SIyQ1HpJGbuO?Cg(PHf-2&*Q!-5-LGByrFKzK!S?0L+d9>(*>q2I^y&Ct ze)(L#U_s>#OO`n8_~eszO&c}xT0C!_wI)CRNVgw;m{%MU^2MZ9t%4_Cx$@b<`SVSS zGBVzosn=JjTfKVyU-$2i(%!oD^C4f~IXlOUIhoU?%j_*5ebjnlyLLlrT39qXwq?td z7Xt=tJhx}h_>Ohz*u(!b_|Jj=NAORFe|7kO2mh_`9{~Sg_?yE2bNEk#e?9n*g#Ru0 ze+>Ts_^*V&5B%rB-x~e}@IM0oYVf}c|IzUO3;uWDzaRcj;J*z1L*ah_{B*le+&431OF=U zcZL5a@b`v)3;0LF{{;L$g?|J1uY$i0{_Wx44*uKV|2zDj!~Ze-d%-^({y)Hf3j904 z|6TZh2>(CfKSo+(_}jyO1pFJr|1kXT!G9n81L6M*{CmLvGW^fNKM(%h;Xe-kZQ%bK z{OiE~d-(qh{}A{uf&Y*2w}gKs_|JlWA^iQ}zZ?EH;QtW*FX5jB|M%ek75w+YzbpLJ z@V^58Z1^X^KLY*^@UIX58t@+v|DN!-g1-y=JHY=M{MW$$IQ%`}KM4L0;6EGwLGb?) z{fqP z{=MPf2>ut~Ulsl_@V^QFQ258f{}1@DhyOP-wyuw;hzitzVL4b|Ecg_0skiO zzXbn_@Sh9+RQNA~zc2g`!v72SpND@%_?y7L7W~)3KMej);eQhT@56sE{9D3*7yRAf zUmO10;qL_hJ@Ai*zaIV@;J*X@P2s;7{u=mqgMTsnC&7O*{1?K%2>vtSUl;zr!e0yj zL-5}T{~Y*lf&WDK*M$Eu_`iVvIrw*ke+2xe!ha(Cr@;RK{KvsR4*uW6-vRz6@E;BT zw(x%m|DWN%3H}q{e-!?+;eQ1F)8PLM{_ns)2>yle{|f$R;lBp{o8i9#{_EhM0RJrb ze+&P=;Qs;qTfpBH{+;3f1pde2?+pLX;Qt%^@5BFH_@9Tr7XH@oSHnLS{>|W@1AiO% zFN1$Y_x4{QcnH3;s^D5 z&xU_P_^*QhHTW07e>wbX!ao}RzrcS1{FlK06ZkiR|2+8T!~X~PhrquT{I9@&KKwJ_ zuZMqi`0t1RE%^Jwe+>M)!2cuow}Zb0{I|e=0Q~pBzYhE(;6D}q6X8Dv{tw_k4*qfQ z{~rDh@Hc_~X!y5<|4aD)4F65=p8)@(@ShF;Bk-RF|7Y-j2mV3uFNFVB@IMRxHSpgI z{}u3G2mb{4XTkql`2Pj}58&Sd{;u%v4F4zaKMsFq_h>2{@dZ-8vZxn-vRz};NKtqihuaugTDs;FW|ow{zKtE7XHWJzYqQ% z@b3=)p70+G{~zIh68?X{{|op(hrb{Ed%@oc{;BW}fd5nYSAhQv_y@z^3;v(Ne{4L>M4F8AlKMnsA@HdBlZTR1Xe;E8f zgug%h>%#va{5QaVI{dxi9|`|Z_}7DfBK$|de-`}v!2crro5H^h{1?GL4gP`f{~i82 z;co^1%kUo#|7!5R0ROA-cZ7dO_*aF0H~3eAza9LC!T$jK&%pmn_>YAD`|w{1|C{hn zhW~B&ABMjh{KvyT9sWPTzc>6phkp$G2g3gm{O`a&3jUMf|26#0;J+LGli=SR{_Wv! 
z3;*@-&xHRW_~*g@68v@WZw&t)@c$0}2Kc`R|9JR!g8w)09|ixe@Hd73WBAvAe>nVI z;6DidyWl?;{@L(v2>(^^zXtyz_%DZlP54K{{}=c#fd3Nse**tT@Sg|&eE9zW{}A}M zg8vox&xd~o{Ppm!4*&h|zXg9^_>X~q7x;e!|90@Vfd3Zw4}kw3_}7UbrV>z$6nh=(R7UIZ>OqON0?068=P8;vlhsm`->Tkwhp_k4Pj& z5VMFr#6_Yh(T0%hbQ%#z{7&p7tcc6RaH1N4{tH(LN1`K9mFPxPBJ7A^!~xCJ zXyO-Q0kMSmglI&}Bl3wKh!CO`afO&qWDt6yII>TM?^cqg4jX~AodV- zh~N#6`u30NGCgNq-TKRhx9oUq+$PWa0V^AC8vN|b4@__L2zln9c|3l?SMN_ap4Pl) zg+E{X{9B)&kG1)JroT&zg=ooYI zOYiP#xuegueRaP|D;)Ku=16A8AEw!KzY;N9TmQEaPJKTNn9+aa)rpZ?=X^bGkL~JN zFPF46dpdYb*Z6vW1uUEwb9u?PMN|4UJCnHSdW9YdgBCWv(PmS~sLHJhwwWIIro+cm zzMuPrSyrJ+x<%b#9qkErt^lQ{l`zlsy zJkt5pdoR}?T-m?9^Xia?YaFk=S23yJ@j8#YDb>FCV}wtoPv0Hh*E06Ir|&iCtkU&fTgNZsYc}egFeK)AagDzQx~Ez+ z`sr)$mLBa~BY&}|+GPE(F{cI${iI`iSG%pFtD0Nf9`wQKWxr>xdjFFKeZJ3{xkhs_ zJi5okA$984vaP!NgX_PwS)Ju~wW(Lbx(#Yot6rtXSGk4zna&ObIJ2vLUy&VzvZ|*#LQTwxH zYJ{&((+_KFY95+1&8~ZDk@ud{H65RRJwBs#ZjTPD8a{N`cJXe%xz$hJ$etKeXM5X^ zcEx}Gtm~4RO(XMz4$gnEv3}>m)#vS}4g6z~Q`6ku-R5PVa{uM!nxpewtDRl`2-Z0I^GB#pG-jqF_7phEZapIB9JFzC;etdW1$9+Xz1H!KwRl_5CNm-tAH&_{5A3kKeDGey-PqUR(a~8_?^E<5$$n zZr=Fj)T#z)V|(swY@a=&Z&aZA&hV9?F7r(ib@QIJ?^5mgkcOkT_d9)IvC}}y{Mte1 z#?KG``Qypv8Ey-#ir&$C)h_tG<>x=R90~vS-fyR6`d0wge3o9WwYNc zH}TGiC;f+h@mH8tocEQv7yKv9dbs&$r-Ng5**n|0hxx2MkdV``j(PLzdkx*ZT6I}k zz4%>|jUJb`o13k;nR$Lu_Q1&SImf2N?{NG{H!P;B@1UJ0;U5hD{_xL%|1$U=ga3W_ zZ-V~^@DG9iWB9)h|K{+20so)jKNJ27;olAZqv5|C{@37N0sgh%zZU+7;C~zbOW|Jt z|6k$X4*qB0-x~h@@E;5RN$?*B|4Z=S0sp%29|iwR_}jpLHvC7xKLGw$;Xeodw(ws9 z|H1ID2mg8S{}%qu;C~(dgW%r={*~cx3jdGc{~7!rz<(S3cf)@e{M*8RG5ibRp8@|9 z@c#+^jp6?u{8z$%HT)gnp9Fso__4}||W@UIB}IQSRC-wysC z!v8q@C&K?b_@}|&9sU>K9|Zq=_#YAf@K1q%SNMMk|K9L_4*!AhZv_9A@Q;Ll6ZoHk ze@FOlg}(*-Ps4u|{QJOv4g90wUkCnG;r|=_{ovma{?*|B75sO>|1kVh;olSfd*J^U z{6B^NUig0xe`oklfd57KkAZ&#{Ex!l68=8$*T8=o{EOgU6aM4j-vj;+;eQwYC*dCh z|BvAR4E{~we-Qrl;eQ_ff51N%{@L(<34d4k{|Nt9@NWVCb@0Cl|LXAn1pXu8e*pe@ z@E;EUKjFU*{!`)qIsCW7-xK~D;2#VBDe$iX|3~mQf&WJMN5OwO{LSEh3;w^r{|ES= zh5tGDZ-&1X{tocZg1;O5*Ta7c{H@{N9sW6#j|uZx8<=@b3qIC-~Qf|9tpQhW`Ti>*4=9{9WLG5B^^8e+T}Z;hzlu{qVm7 z|Do`=g8y9j&w_s^_}jxj4E_o5H;2Cg{$1ezF8nXUe+B#(!9N`S@$lEd-xvNH;2#D5 zb?_e!|8ejSfd63lo5KGY{Kvz80{nZz|7Z9ggTFug*TR1Y{2Rc(2K=+&p9KFx_$!vKN9{g;ol$rA@IKj{{r~mh5sM${}ld~@P7~fmEd0y{@=kr9sZ&4 zKMQ|r_&EBqtjKOg>`;cpNBMey$p|5NZ^1OIC9p9TNR@LvG`{qSE2|Hbe> z4*x*-*M|R2_&YHwKlty1zYhLa;eQ1F2jIT|{)6Ga3;x&Op9X&o{C|M| z75LYOe_#0bhkqpe$H9LV{LSFs75)M6zYPB=@K1z)5BN8Re+c{w;QtN$zlXmY{9D0) zKm1MMUkCm_!ao`QmEj)^|6k!>5&q8bUl0HG@P7#Z_u&5+{weSu0snX59}E8`@V^BA zPVm14{{iqn2>(j(4~GA8_^BmUt(6aH=CUlaaA z;9mp&sqp_A{_Wsz0smp}9}0h0_*aGhAo%|d|4-nb1^z&`>0qu~D>{(kUp2LF5TzX^XW{3GE1A^Z=)zdQW* z!2c=yTf=`9{I|h>F8s6Mza9SZ@LvM|eE7eBet|0no2hyR!Gw}Jl(_?1^nNE|Hts} z1OKJ)FNXhK_*=vOHvHq@|0(>pz<(Y5JH!7o_^*L~Z}`uK|04KrfWI34cj12m{_n%T z7ySQ#{}=FI2LDs=9}9nb_(#EiIQ-|se;)j+!GARTFTmdt{^#KTGyF5)Uj+Z!@c$hC z;qbSGe-Qjn!~X&Nt>B*v|0nSO3;y2l_lN&x_>Y0V9sJkAzajju!`}=3)#1Mp{^sz{ zg#SSJAA`Rm{A1w1Q}SDq1CuKZkQ}4r`Xt9AIULE`NDf5uFOnCL{E6g`B*!N?O3A}X{!wyE zk|UDbjpXbk=OcMY$-hXxMsiz{pOL(YLp9lB<;5ljL+IHzqkI$^S{NRdREZACugl z*O8UrNqZ@|TjIlU$nQ*Cg*Mxhu)DN}gEqx{`a9+^*!sB*!YbXUS8DzVwr=)`7yqE$U8 z_4Pj$EpO$(f({kd{T@q zIzA>!-adBcAE(w$jEaiYXf@%nk+Mz8l7nrji4W%=v5w-nUu_!BamK`ikCcs-SD-nD zx2+!+86JBy{-ny=FrPnWc%Rd25RU6MlYPkG$lQ*UR;o5l`-@{HE6PFaD z?0Zaje0)@_N_DAKX3SW-%$PL{HJfL~JY#TfnHjU1;h&se$!p|)bAIKInd6O)jfhJ~ z;5GL$*FeR4+Oym6S1=6PwwW>Y>@#D++GWNZW031=eAiqP7ax@nuGE{Hdt}t8=vb;_ z6hxGF%inn4TvhT~y{hy8r3@)6HZHbZ)THP{9bhyg`KPnTmEPvYTTc_N<0bl$NxCRa z%jStKRes@eg(9P3xMMY-dA(TPvj4~JWgq{-YOn6jl1nc4%HNVyc4O2Hf8z*$KA>VF zm7o0aKwNu2<>mPHGN)G`|A4RnKVRdBpRZrZNMOkv-px18=~Fs~`%Ohl!Ffi)*X+_jW-8y7p_=t6T3NX0y>mC&#eF 
zG2sbOk@32OcAXSD-rfqKxKXWGCoG_ASpTkF`UUv6mVLBu-+})|M?|!9YVX|MMaDWr z#Kn$^9^E0@$=$UB<+U@G*Ny_)E7vn9O4mgfH@-JbN*7J5a7OuS0{-I^*hgaP)|w{X z8i%)?!GHg}(tms3k-u7$&Y*N?#k4(x14A1ssw1B&xti_clOt7f4gRjy|ND>mfA{d8 zseu8jUzd}%PNg!>;Gg9$GcSvI8s;r8Gtd1q{=S5He-~LSJI-*v%sP3D7cr>U@PNT! z!(hi?&*0AB&k)Q|8p9ZmVVKFVnjw?nAj5fvyA0~Jl)+%n;KMM0VGP3@h7}B37)~-= zWhi7&a~@UF^(Q(3Ys<80HME*{R@#U(psDy;rIH?Eg3gIK;nCnns)To%GMCI9|bA?d}QBf*Y zLWS`>|H%KvL8Y2hfk%mm_+%MRmhtg%6IewxS;iBiq*x+2xl6+6u;B27L|JB1c6vY( z4`K4)S!Nw&UYFQN9tkJLB}B@f1U6u~U&&KfnBSPFh;dwN<^?1qgz-0B%@9OXl}d9*&GNKP{QpKbbCg zym8)|l6eD?es)<=1u43kqejxzrw^Nj}|)+MpzEH1pBC&e4*Qf2++nVPu#dbu*|%Ij%4 zY*noa!O1cb)-ADr(ny`mQdyPGQ`S(ahA{kOZl@nRh&$Bqxm3yUU7<~M+~#QSG|uUzlehD5msu`pF3VgtxEysk<#NTP(8bENfop5m zj;_O9qg)eR=eQ1ci*k!|OLqInZMEAMZoAxmal7jFz|F*6)kiI?{Yure%bxD`y=;acUzC<9*!Q~9{oL*d9L)_;JL%|oaZIa+n)D5YkJvuwefQH z>hCqoYmCCwixQ2X_wXJfgEo zqf*J=nY%i8Ih=Fw(RS4i)lSe((azO=tKF;3)#hpMX^XV&9i1FIIR-lpa}0Nkah&Qn z%W=Nr5y!qxp-!nzUpsAg+UKNpuHtOv?C$LC+|_xM^FFszZs*YnF*&%MaKgU48p2_CCF4tX5&IOk#8u}Me!j=>#6I*#Z#y<@)j3vbg-wL3NL zWY?)fr_fGgJH>UH+-YN{YMm`RH|X52^YqT|b+3 zA=DwsVX{N2!zWy$%?^7UvK)>%Ty}WqP~>2$t)qQMJ6pRzyIH$a`?L0{wpd%+u^!np zH^)Ja!yO|XCpgY-Vk~T<^F(aDC=l z!Oh&Qp4(8j(QYwrlicRHEpl7VeX_-Er(3q$5x0wO*W9YQTXVOxb$4~|LfsBWr%Ma`18Rcj)TS z(_w@*l6yE_tJ6-^&eYD)&eyKiuG4<0J+3`ZE#1`4bX3Jl-^lYq$IOm9JMQh6)$w4* Z!yWBC9Xy>q-95cLeLVd=S^WQO{2$QKpBexF literal 0 HcmV?d00001 diff --git a/gam/gdata/Crypto/Cipher/ARC2.pyd b/gam/gdata/Crypto/Cipher/ARC2.pyd new file mode 100755 index 0000000000000000000000000000000000000000..a9dfbf691c4fdf7e4ba78894f38e6cc4835b72f8 GIT binary patch literal 15872 zcmeHO4P2AgxqkyuqN2eTlv-MYMFp!!_)LKC6#{BSQ9!Lq5g{1XkkF7==|pJ*EAM?x zXT5H7o9m=?o$Y#CyLI@{t_AcK?Z>^;x;3q|#SYsrXh+p5we52M&v_FFQM=yXZ@=ws zx9^XW_nhZE=Q+>Ud7g9L_oZO%PA-t+I4O$T&2fiurz?y7e*I4)x~I%MI)!^~!t0TT zB}-qAEH+fvDQYd|O_uT+MMb&EWVR|c>J=87Nl|T5mOtcrXY1vr+%`!PpyFl$l8O?565=UYxN`<0YXeSvOHk;}u6_7L;}QfRxj0TC zj->%a0~fp1avYnmk>eJPW8iE5?8Iy||KIqEwd%K6K~t*04>3ftjtGG|t_}E$tt_{e zqkfQh$U-5UIVe6?7RR;6T13RG@#rW-k)g~(@wu`9W+qF zpIg1E$UYb>Gz8n-5_!|FFe$$^N-C6w2n8~JeUyw3j&e260Q6;`5zg*TWqb1_`R?}S zPU|`$jN4!^HlMQUgj+&T8<@P)_p%R2)o)sE5(1mstry(gA~plo4LGcGI6gGWb>LQx zQ=Z}(p)P3@N$qL^tl4b~@shXYDK9bnA&36ry1U9sJA*lD1HaLW{}|(>-D>OtgyS9~ zc3fw`xvL64{d3%I_fC;X$7as)!dyc72K*`AuJPc8*ARIxe|@)rq8SujS5WKXfk#U@Emwbm{s?7=HM2ccha)A9+R6&FgvvU7t`QlOK|HR`I$Mfx2GF3E=W4 zI$UyxbP=lpe~7;l`HF~N(g(Tahl;xc_-;aXVk+L*;hN%*?qOA+5A&}?zB7h=Zl@IV z572-iFv|~hxF$QKVif~U@b5&P_oF{vq(2N{`QeVj60|LR&3}lRrG{-XzMw&sG!L929Il26ycmLvc3%ZaN8L2=f zl!Ub8NLtngkc6_9oIrl4B}Yo!qcM&jZpj%B-l!2|Kfa!P`Iy&g^RHpGc3#)#Z#c5# z7{iezeGTXA()G$=IH91g+VX2_&k#y6T&-qU~&Q2i_12Diz))Y$mM055CHqU^e zfRO>8@^5#XmGjOwT#^oON8UmMn0xgIrQ(24=Lr7W_J-bZ%zV9*yynySnh&F6I^~Bt zp)7uA)T%LE@W)qFbUm!DzmJ1}y zNj;VuCvPk5Ygs# zd#k%ecbR|1kxjlQZ_`~4kWQ1gwaeQYxk7^xeJ#=iEkv-aa5>+WKXWgnCwXtie>jP9`K?nea6lF^2#D~ko% zljM6|j`<7rZBd+cnvfUemf2ODEf_VSkRp|PWq6mCjp)*b68Bx&aPbx)-V}6mXrsGw zqhyV!;w~i+MY*7x2tS0HU0RBixY7QEgHqgK2f@@RyHH+nmu`q5>0FFc-LTlYNH>RW z80w66=q}>b0_|I5fmSts zrDN?L0s6jfGivweF&q)sexEGRwhFr|Av>hGy*|+19>jNcTnQ8k2Kf{Gn?Jt%1-4Vk z6hZR5{T;)BBs=j{cwO@Ai9A*U^lb}+GITw#GV|o!G^Q3ZT;<8pYz)aVUjASkgch=7 z?x^N9MF!(bBzjuIpxn_%@KTxmSTqG>T7NQrIFKzivY_k1%Wk*5O9tGn@AV%7^61bf z&~n&%nT2602{M zXllGPzD0Ut&KQ$G!13|U|uN9AVd=Yq<95Kd8 zN`kE&xic6m@JsLn%M9E|Mv}!i?d=glL60L;&II@D?>?s9QkkIjF}$RUjIbjTE2w`h z&a`X-c*pH|!D+g|tyx}WH_R693I=J(up>7-MBY{?g`DxB@)lHdJuRU@`OH9IO2`Qg zgrDRD_fG^pHlAcz5zN0Ka^Gzq=C;MY$d=Tr7$CXgD4qzCIgF%KjzkD`v`DAt^KbJ* zF;4aH_P0QxE1Ly(_@Lb-Qx9)jHDU_UC=p|gQuvwM%`zdgKLVoU3dBNMoDv!@o!Ej~ 
zi*!=9{qvx0e}vV&RJT9*^;EB1q|Ml140a$gh{z^@IP!;X#S!*vXJVt&C$_)ee*y8< zt5&(=99r<<>-iSXCZw$tpA#zNN%8?0eD1lGhNC!?fhL0N}Ia{o^h;q~#kumE+jr z=%0ZJC?tpN#z15@s}7__5?Y@hNeXONk+k*DPUrVX_996-i!qWrfbA&hWv8DpYjg1h zc=ok2f?(^B{6#2rNxpCt^g@R>l2=Hlv6$h}p?t@y*tQMiF{hT%nj_gG5WQNhqp}mD zSZj2UP4_Z3p!7&TZv-+E4#3pOWbg$2VhP?7I1?@6QDuFsNVCtMcY30N7NGm!gR=$%0 zowNUU!1oYg&XraIA_s^FP7L{>e13$GAI|41#BH~ilp~*|fIXllrhtN;zyh3fr8DAl zrz6Bn1%1p4Ku8M!%)QRCc?+Cr1%q3E(XShw_j$&ADdvr%dBv#Uz}#><&qVb@-ibpNk$^&^TP+00T&r2|2i4Qkvr2f zHA^lJO@@hv#oSWj3@mZ^=9Y)iIc8>w&MA~S(;&=_WJ)e~d6UcS`7E<6^D|h(LZJ)^ z#_0$Raum-%I#`0l;u376%kfAFaVJ<(n3l)eoUrfK(?UMWGx@>&fgtUnCaezT-}Ff{ z$^6^k!Vx-COfhTyXgWt}@~?`+PN;{rztWZOlWWp+&TT71E65!AEX{a~G5Cqs7#^#y zpN@^Buf5}(Z~Wp>orQe=M3aNwziFb;_0ZnurHS6G5QYiHBpQ~p_E1RqYdNc=hqnIK5mK)}f@>uh zj{^)0UO&48+Gqg;mb$K$Unuaf03hG-Cgm6{0@>Fl7%`cinc$IeAQAbClE=vr$SiMy zVe0TE7&tgd^eE8$a*X#}PKFwSZoH?KV&pUCdU&Y1)xHe}keP&Xk<{mt_D0UD1FP-Z zf@RiRAupIDaNHG&xFF@v$8^%RG=fZ1jM;@%!Jc*LD2}H^BtCY;4q+8~yYko(tMxD@ z$0;w2?sVE3ea;ZgmtN#;!+gV+^zaxDI(0*WE%>DgI6@x5`scp9@VNX(o$_7ns!nT+ zP!jAt_huD?ZzJVs@tjTR`Y^2K?mvW4&1b|jOsyXlNhe}GOg>yBO$*xtkTq#!TLi6+ z?1`9y_7e#||6(KtR*Z4RxzOgwuIDWph3Uxmjyho1bDFW7>a}kkQXmp%&T+?m!fAwHa${0 zfSLSd#WS4H)_v%B>^N|s%#aHd|M4J(sF89#cj-Rrmqy)y+1+7JVFZ7Sx)B{%beat~ zlcxz?)Io2daF@nX2W?cn9pqosk>cM$5so^t{5wc})UnvVgWh0K$4dVW;+i^0r(-4u zoOO^AM><9xcT9$W>(Jv4+Yc%1$3yXWV&nhjBlH)?%O7d`nE}F zqwSKz;n7lvFqs;{3aKMo`WRK4PucoU9&T0+h2CFXJ>M(m-0n60^M#7@ zox*&hzQg1{lC=s>cWxk5WCL;w5_^YC@&LUhzyEXUwft$+;YwO}f(htPSR{3yS z&0+d1k{@cf4^~o0c9a}YoY?;!yb>4w&+rpuN;*n9mJb4Vt+YgsNdKm)`pQdF&FTJpKTsKoBK==V)&wol}Suee2#49n^ zS6JZw`ou}={=pZ3DrkuJX17)5c?Wo`OSdN*S8F3?a5U`c((R>Ymu?^3c-`UFCUkV^ z_KVO1BJ_pU76yGvgdP;3?X5c)^f|G4ymcpBZ ziIY!9APRw=m=2{lsW;yPgG0(109Lu0TQ+!g8sujYgfrBdFG$@N2#(TnO7u6>UUoA?u${S(7hP@Y=Z6 zJ+unK_OkxO)(zMr4m|7nDd>zuT9jlW|P9a@jLno zYb?_mcA~T!rA>Y?N+^o2Hg)1Ch1H;^55qQBSL!PjbyiEYX_LZSrPx?)D!0@t=o=!D zobOnwSL)fU(c%y=mMa3jDZ#pWQ$;j}<(VxWw(AtOI+~q0D$FlbRO$7V8_O&1W}l|b z2N<8Q)#$ATb0tBltE%;N40rLW`3jgWf`) zcacQV*eg22MV)NOG8p5~PM#^Y_o1yq`zNyvMx+3)7j2T+_nLqp`p`6kk@mOTHTr3s z_nHjc1D-|z#!$6tuECf;=W4p%G5_#;+||5!D(j zm+W)??B#7K_S-iXJ+yIYWlOH`#iP@Ia_46c{$}}&<+>Wjg!>!ffA{^Yf$tsq?xdj~ zM9x}&_p%3y|DgZ(|Jv~Dh`Lqnp}*YmY0nL|S7xTY*!M<`=5*X+>1}h&FHLzTuR3V; z-1E2m^o8Bpz~QM~mv;U+D%e!uWU)va&ITz`76apfnMo1U~hl~nMZc|V+S>YlY> zi>-J3y!$R``2NAp-fWY6#jCGHJ^V~z>&062kGG~a-*))9`@RfFUN^hsN6H1ISN8nz z-*!Dc?){jMO~#`q-{e00@3W5_m|rnXlDXys{)x$|#G8MW;6C{LqTsS4&%Pe}pBK*L zpZmq$dnZ1wF#KhE$A2L9aTNKu#jCgqv(04HTev#?rW(D;T2@zmpPs9`ZEJPiZOf~< zJM{HFpR`i+FxiYoPb|Z5MUAbFeeF8CTCuU-il|{Un>LZll$h=|neR1uBE#5#qnKSi zX1)Pd zA~krp7QWeJtVb}^SJ{kWAk!AFWKj@86W@zXwKl6qK*U6kc;%>#vIb%CWv@|h+GI6+ z(=ntwkB$|jW5_dx$TBkmaE%#xp?q_BwI~QC@y$XUvFcHQqRM8fV0o#sy3#b)s<3!- zmzXrJjVmIJCL{V(8LAc~Ar1T6jUxJ0wo4Q9MV-NHGqSl2<+ZhX6ZZ4B8jLs3Fc?>% zyomCfn+?Vpw-}5MpnRQto_zPscm}`KrV6vgf^6(#gWHi$j0%JCS(KPagE3^5!FbNqU$Njtw{D8ZC1Tv?(Dj`oVJ`SRH-+@V-;95qusO{xOzOz@eR;MywhWH z<}24-lWZPwd1L*H73_038pUa8mt&6AvNRscKn2Tlb!9qj4#z6m9IdC4=Ye1wAwrgW zA#hI)-@ilDYE!kfy4*-^jyPk(nq(wNF1i*hU7VAz)viv6U!YsGAYnn#lBG))rDLv&22Po7QCZQV?3KFQ7@`>)yAXey zDk>Hv#3sfjQEg#`*;G}%X<>DODtRI3EfDD~0KwP_8k4WLW?RiQ%MePk718CWVz+}o z@Qpd@Vqz4L8busoetZ3^X~6rx7xA^?|5DJNl1en%^HJhZR^rh^8txohB(b&il^l8Y z7+ zlx~z>lpz!;Q(ZH2&QuB^PS4(C+V_i$x;i^XInJ%k06Z!`lM7f@AYwAHaaU1mU)MX%>LOF#|w zmGoac9JeI^TeOPWdaBn`y~ey5DBOKiuhSD*T!r=7mQ7`a<(4|?llZoaZP?t>zZLo5 zFj%&!5?k$i&6Y}f#CJ3LwVvHq?z>| zSdrdZ#GW<))`@P_xV~2J8OlBG zfvqYx+PuK`09%anL!QN4v($42h?Er87qI+a27{qjYc|zc%T1)H8uTq^8}%|0RjdNu z4YOrvLZUG;CgSgpMl;VZ4=wv4FQGqR4H3rS7h60B@h?tg)t0{ zTZ?vC9v;aUMJexMZMaqedo044sl6D{Ulb^j=G`4oHX=l>-pD=T(_V(HRTs6(mcjqP 
zvaLe&k46milsj}Qm+6)!#K*!(0p&sg|0boTZgYjjDvpZAn7>!4t;Z80Q~U_O#PdJp z6~b}*!MHZAAnu;H9dVDwy&l&ScRp^aa=vn-(x}{}+^gJ^^mLLd>60XT@`uTvBui2v zQX*64r>slakaBm*j+BQ}_NJUlc`N0El#3~xYMd%WrBKaQ#i?pk7gTQ5Ty>Fpjk-ep zYxN;@r}{(nfclbpdg^1EHqGxeuWR1cyr=2cOwX8~q0C6n_)f;}Gv3KKml2U!oVg*h zCG%kBZ!_C7!JGoQK!SfQjGGiUEiN@KGj2(oAuc#^X5yU0~4sUhi!q~9jJmh?taPf~nxYH~$#Q!<~tJNe1vXOfR5zn*+L`E2sX$>)>5 zN}ilDD`jpI6nBbDHCL6U%2usam8rI?zNgxudPMb0)ge{8s$2E8 zs$X?p6`-D^zF8fmPE>2u1?pAm_3BOP8ubI}UFt_6ZKwK_`UCZObwKJ3sk2h!Q*%;R zrfy8Nq&|?kEA`RTuGG`1@23u@-k5evT5Os+?as7~X*Fr~G)LMaX-}keroEHan|3}e zGJS1&W%`!%2hyKTe>wgA^v}{KX=Z3HkfmV5T(*>k~F4R3;b_j0v>~rxTnBJqf)DeF?6F!31=Fd;K5O!2bZz CDWGrw literal 0 HcmV?d00001 diff --git a/gam/gdata/Crypto/Cipher/ARC4.pyd b/gam/gdata/Crypto/Cipher/ARC4.pyd new file mode 100755 index 0000000000000000000000000000000000000000..4cd51f209b382223f06a5104fbd2d5198c9f3561 GIT binary patch literal 8704 zcmeHM4{%e*nO`~Yf|3~Qwo)-Eghwk9yTwAXY+07%J3F$#fFTYxshh-+W$S_E%90~J zB`C#Lg3yd#Rkf61=4da;MIpm+)7;#pLn+WqqS_RPwzNK)Yng<}q@89e*W4XPe8j!s z?%nU(CmCUAd!5N$|7FMD+x`CR_iuNV@v?3Dr3jdQ)}VRU;o$*_L7w+ zm$0L^on3WY*Ku}L_h2w4jzl9nqW)oVz#k4rWU*foqw%m942x~|b&11~fMj00_;yo4 zb*GoH4&5Rq@BC3)(b`*V$;a-{l{wfFQ0@Yh0V)e1qGvrr90#5J)&Z#JZ8x4kuD;E9 zT#Sj@T$(^S(0JF$7&q9**z)oW)Z zvY$jK2O@xEJ_smjEsW*NQB5#MzFGhz|6xE$Yr(Rb`(rU8P0^!;9yhk#q)*cH@f>wR zhV)fkFWW-;f|?x9VITOn0`3Hqv=;846p9QKaybHKF`nz??kz#zAO1gNV4L#xCm-la zUMx?KmM2wR#on*NsIto>q(@KI6fqWP#q|`@aec+HFHmi)IQEPV-7DmkYqWASoVj18Or_2xZp}PN{M9J@ zs`5`Y->Nt!STlyI0&fp)g97h|H4_!b3|8=MV}fX)!CZyw6`4mMPuG~ z>V&%Vw(>Dy>3Ue(wSSB|F~)sJo)T#7CNwY8z4{a6)L8kl)+c{S=KgZ_bdp?Rr<3He z=JU%KFfItSu(1@@m+7W52Fs0}{;1|OaUpfrn6PZja93u%?#u6vRw_R(TKHzsf#gL! z7E@f+Go%^^{XIix$Sa%#1Ki4Z^pkBSeHnZ}&&x8c)@N_SHdt;_jRD9fPZ=KKkzkG~ z|5eK2M8-R(w3!SU?<~4XOf9b#v1g!4u+Z99f-E3YP^KEIp(K@yFIDeIx>)?Sv_~M_ zomD3(-WgwVbdJSWl6gJmX5ElZGd{5%Lrut;<_7g?Zcu>REAG4i%>DpX@986BVb z3^@c(%-ER9X#~mRa8n!(NZb&|CM^KOLDmJO-ju%uQwvl@rljeUIUe1e|D~#hcp+CX zIYCpNr6Tn@1tvc92w$52yU4o9)_i1rzfEK{n0nL5TCV=8$WG%{y*@J36Qy@niO(sc zSJS=a%A2nofDb27i`dn74p;ouc*XvlV?17gm7#;A8e5R}nHI?X4as36-yjE4QLc*W zK9wpg1~lZbiB;pnv~s<(>G)iF^r~|4T7t9P;2bio;ybo( zMVMEOeVCz4@ZH=_!_(lL;+zgb3p?-!pzt+b10w07P-xFlto$Ih6&OV$6x=*RI|DcY zi=q9Wx%M8&7q*91FxAy`_V?{01=Q5lbhb-zI;N#KoxW0>&RHo=N2L^}BT52OQ;Ji5N^we1DNea5#VIAFIE8!--*%jGh)VHCFDt8j>hsdMq^GZUGkcT;|}8YyJO6__xm$~CE+G9iCRxl*|ARKwH6+7!QnoUy(Gzkypi!JS@AG%{-9 zMwih2f1|z6!Cucodj`$kIoK<`JrVzjoE2NSKx?NCzqFmW_l3rQibj3Gv$%_HH+cVpaSl=jI*sAejABy!#$nY5O28C+4 zXr@_rX5-E!yLWy3NNardw(kNWE5enCXhT6ET4QA6OVV3hxF2bou9vh zj+bB>1MQptPHH)qUeoYOk^dLGRxiva-tV5g_!30Z&E$JZmGy;psEJAM!B(^^gSO1* z!Ab8^gid;&rB3l4&kA5ndY{**M>Oh7Sv{w|pi%!(qvo>ZoO)D4Ph~5CD&AMK2Ht-! zTgCgS>~h|}R`HEL6EWJZc+V?tl<_?B#B(U%ib*|+1xJ+K@H#(@F)et--Y;T=nqfWj zGBNl&lx))H^CavMrk10yVTiHbvJ6*E3!z>il;+4zv@-~erFPOXN1^PDTGL0~+~N-9J{O0AL_p&{i{Ny#+i zxlHY(Ax~#&7Y((H?k;3>@3oAMWm05?m)D>5QC=@4FAAwE@q4Z3aGLKzh0N2h!1=_3 zi`?he;%Y@zUP0R=;}H(+=Dq&Vv(#&^-4n{CydGjr=atLx7iV6~r=h3t?n#}xdJYD! 
[... base85-encoded binary patch data omitted ...]

literal 0
HcmV?d00001

diff --git a/gam/gdata/Crypto/Cipher/CAST.pyd b/gam/gdata/Crypto/Cipher/CAST.pyd
new file mode 100755
index 0000000000000000000000000000000000000000..c3e1fa6233dee810949052844150f8ba1275e0b1
GIT binary patch
literal 26112

[... base85-encoded binary patch data omitted ...]

literal 0
HcmV?d00001

diff --git a/gam/gdata/Crypto/Cipher/DES.pyd b/gam/gdata/Crypto/Cipher/DES.pyd
new file mode 100755
index 0000000000000000000000000000000000000000..b9967dcb1a0b50410c391efd4cea16b38cdeb023
GIT binary patch
literal 20480

[... base85-encoded binary patch data omitted ...]

literal 0
HcmV?d00001

diff --git a/gam/gdata/Crypto/Cipher/DES3.pyd b/gam/gdata/Crypto/Cipher/DES3.pyd
new file mode 100755
index 0000000000000000000000000000000000000000..6768126507cbe50f0f43974a4bfa5b8d262a2808
GIT binary patch
literal 20992

[... base85-encoded binary patch data omitted ...]

literal 0
HcmV?d00001

diff --git a/gam/gdata/Crypto/Cipher/IDEA.pyd b/gam/gdata/Crypto/Cipher/IDEA.pyd
new file mode 100755
index 0000000000000000000000000000000000000000..860c12177453abcb1b7807cb624c28af0f242edb
GIT binary patch
literal 15360

[... base85-encoded binary patch data omitted ...]

literal 0
HcmV?d00001

diff --git a/gam/gdata/Crypto/Cipher/RC5.pyd b/gam/gdata/Crypto/Cipher/RC5.pyd
new file mode 100755
index 0000000000000000000000000000000000000000..ef39b3ea0f99fe59557eac8420c45d4b92d487d0
GIT binary patch
literal 15872

[... base85-encoded binary patch data omitted ...]

literal 0
HcmV?d00001

diff --git a/gam/gdata/Crypto/Cipher/XOR.pyd b/gam/gdata/Crypto/Cipher/XOR.pyd
new file mode 100755
index 0000000000000000000000000000000000000000..fb53d53e29d9cae5a590f14b016a623949456bc8
GIT binary patch
literal 8704

[... base85-encoded binary patch data omitted ...]

[...]

+        if len(key) > blocksize:
+            key = digestmod.new(key).digest()
+
+        key = key + chr(0) * (blocksize - len(key))
+        self.outer.update(_strxor(key, opad))
+        self.inner.update(_strxor(key, ipad))
+        if (msg):
+            self.update(msg)
+
+##    def clear(self):
+##        raise NotImplementedError, "clear() method not available in HMAC."
+
+    def update(self, msg):
+        """Update this hashing object with the string msg.
+        """
+        self.inner.update(msg)
+
+    def copy(self):
+        """Return a separate copy of this hashing object.
+
+        An update to this copy won't affect the original object.
+        """
+        other = HMAC("")
+        other.digestmod = self.digestmod
+        other.inner = self.inner.copy()
+        other.outer = self.outer.copy()
+        return other
+
+    def digest(self):
+        """Return the hash value of this hashing object.
+
+        This returns a string containing 8-bit data.  The object is
+        not altered in any way by this function; you can continue
+        updating the object after calling this function.
+        """
+        h = self.outer.copy()
+        h.update(self.inner.digest())
+        return h.digest()
+
+    def hexdigest(self):
+        """Like digest(), but returns a string of hexadecimal digits instead.
+        """
+        return "".join([string.zfill(hex(ord(x))[2:], 2)
+                        for x in tuple(self.digest())])
+
+def new(key, msg = None, digestmod = None):
+    """Create a new hashing object and return it.
+
+    key: The starting key for the hash.
+    msg: if available, will immediately be hashed into the object's starting
+    state.
+
+    You can now feed arbitrary strings into the object using its update()
+    method, and can ask for the hash value at any time by calling its digest()
+    method.
+    """
+    return HMAC(key, msg, digestmod)
+
diff --git a/gam/gdata/Crypto/Hash/MD2.pyd b/gam/gdata/Crypto/Hash/MD2.pyd
new file mode 100755
index 0000000000000000000000000000000000000000..d11706263ee485ae287607511773126100dc456e
GIT binary patch
literal 8704

[... base85-encoded binary patch data omitted ...]

literal 0
HcmV?d00001

diff --git a/gam/gdata/Crypto/Hash/MD4.pyd b/gam/gdata/Crypto/Hash/MD4.pyd
new file mode 100755
index 0000000000000000000000000000000000000000..4ba243eccd17d97f91875e27668fb8c6c9ed7c2f
GIT binary patch
literal 9728

[... base85-encoded binary patch data omitted ...]

literal 0
HcmV?d00001

diff --git a/gam/gdata/Crypto/Hash/SHA.py b/gam/gdata/Crypto/Hash/SHA.py
new file mode 100755
index 00000000000..ea3c6a34f72
--- /dev/null
+++ b/gam/gdata/Crypto/Hash/SHA.py
@@ -0,0 +1,11 @@
+
+# Just use the SHA module from the Python standard library
+
+__revision__ = "$Id: SHA.py,v 1.4 2002/07/11 14:31:19 akuchling Exp $"
+
+from sha import *
+import sha
+if hasattr(sha, 'digestsize'):
+    digest_size = digestsize
+    del digestsize
+del sha
diff --git a/gam/gdata/Crypto/Hash/SHA256.pyd b/gam/gdata/Crypto/Hash/SHA256.pyd
new file mode 100755
index 0000000000000000000000000000000000000000..865a16c4d41a80e801a628d32fb40acddb50ee7d
GIT binary patch
literal 9216

[... base85-encoded binary patch data omitted ...]
zLkN67{cPeouju&^(4-l`#KuAs%b74Sm)9{(oZ>7bj%YzE!-B3(q$WnHpjNRqUx1A& zCM1G$D7kycrZ~IC2$v*f1p-HRvY`Gl(C3GoCzMiALqa^6U~JcJm=si0+%?$$&|^lh zhn+xs88ee!B#IzlF`)=FkjVs@m_%AD3@~m-Hx+VKP33cdeCG-v&caQ)X%m{pCeFfr z8II$=3&(Mrh2vPm;W(CPIF73pj^kQ|Dq_kibpT;X=6Tx+pG#1!KKV^#Nd!k;xh%*@Z*bdB@nu@w@MDl6~P} z1}<;Fe}Z~CLW7=)G<2$@PGj*t`elDNJ`=_7PxKcr`8x~#!f$gh|09|#SpQE(;-xTP z-a>@aCy#iZ5Tkkh7s$1JT7O%q&a(-Efcyy+GVvmRz3wP}go2kcFvws$5`|`fOTGV7S=i@@k2uW%~So5 zyxmZJM?VL}!{@L<@e&I=Aut7NT&9#mTyI`Mvy_V;8{mo?_#M2uC*~7Q-1Vc!KvYm; z>@Ja$2k-R#e%-cGXr6BH;O^Sy*X=;muiJ?ouRHAN0ft|xtsisTLYb*JolmYsAL&-#(l9g`gnNtBO3p@Xf=A3uo z4S{BvcbRdBspVbmIK+JNE)It@m#z*R(sa7k;!w(HcQB(pcQU%c?Lsf~R6Ey*dA$sK z)^*)c#-4@U{0Lr}@LP%7i3cv^p5FiXe*R%B6Sr2jtDE-nT|2RtJ_f?|J0OR*gKCJs z>Ue$R^?(3&!ag5~vjX7o&Uquw8lQmQoPb}RfRnq50xp|?k514Zn}Fj6437s3 zEgZ)L-NE5}tG_huDtvO48l+L&iKiAc9Ap3Iv?gnNXKGpjv(&&jY!*h(Ac3*AFr3lA z81;6e$2GNelWVV-pqhuOgwm3L2XwjH7bS!Rui zGt%k97n!R~&4%V+^k3;j2AhI=$Y_Qw58uxj&C!7SgSZCpIKXcO@ovDU1OCA@Nq!h` z8syhJsNEnx9dz-lGi(~LiG6$P z|1INf^L|wJVtL-P|MYwB!I#e;ytKUUd#Wq)+D@{224+X?_dmDnt!q<;PtJewm0vGudi%cCw6u8FN--kiK@b;hZ_^{%-slHuH%>Jan<}=Fse)_)aiP*9B zovY#=^6gHNoAVy2TvIXo;IaKV{}7X`TDW&;(c-svEWc*WF#h}VKjExRFL%Uu4D`q& z{TpwH$2X0bt9O6C>x<)8JDxpB>@x`-c|6 zZ0D>Q6>v&s1X%=r^q9>-ebKT49sI$=VKxM7{mZMKUb&!bzE=Bmc2>HsB0W33W?|*R z^!c?FOVg_sESe8wP+60m)nGU3ZHDG{&X!(`p)hJSgT1vS6=WK86%7!;QeAl(ddSF_ z2Y;PC41>$Frx`|<{P+D*eHwiZH7`NRS(0t zj2-?YANI5~CPmF;qU5*j|62w^zrAHuOTVSzS%;7c_*{TYfCWi}sG%ppO_0&v*-Ws^ zLxqp5Z`=R54AeG;g$yUbIlzr!v>c!X0N)Zu>i}B3p4_ul18)U@F-)cd#v1^(0PF&84 zq@{$sMxHcTg2;utPFO;eq@khNY_?hq9VQL}w~_|E-EOeqcpGUj*lZRn&L<&KztRfS z%*d7&v%_vgT1jM!&0rwJ76~_C`l~7-9g%H@wkCLwLHi+&?FKXj`?$KZ)YjTit+&|? zwT^Z(=q&@9&Sq;^+|=Z-!CM98J_OnV3)k?Z&DvIJwX`BFC8D~sioU)YU{b)FmRg!?;3b0gKD@)Hr1;3{&f3m)Ljyj5{{z4qo`equ zW{A#eBhOGAQdni+jMipiMSKCAIZ{wLXmd+sLv`n3`uU`x++Zeu9fmK0ceEezhDDHP z2*=Sxb3z(HF$hoTmM+p&W@lx26N^Q-dB`Q7<1qF$_kbitSqnUoQSd z@xkJcimw(&mON1MXh~K{S;^9pl_j pool.entropy: + pool.add_event() + + # we now have enough entropy in the pool to get a key_size'd key + return pool.get_bytes(key_size) + + def __newcipher(self, key): + if self.__mode is None and self.__IV is None: + return self.__ciphermodule.new(key) + elif self.__IV is None: + return self.__ciphermodule.new(key, self.__mode) + else: + return self.__ciphermodule.new(key, self.__mode, self.__IV) + + + +if __name__ == '__main__': + import sys + import getopt + import base64 + + usagemsg = '''\ +Test module usage: %(program)s [-c cipher] [-l] [-h] + +Where: + --cipher module + -c module + Cipher module to use. 
Default: %(ciphermodule)s + + --aslong + -l + Print the encoded message blocks as long integers instead of base64 + encoded strings + + --help + -h + Print this help message +''' + + ciphermodule = 'AES' + aslong = 0 + + def usage(code, msg=None): + if msg: + print msg + print usagemsg % {'program': sys.argv[0], + 'ciphermodule': ciphermodule} + sys.exit(code) + + try: + opts, args = getopt.getopt(sys.argv[1:], + 'c:l', ['cipher=', 'aslong']) + except getopt.error, msg: + usage(1, msg) + + if args: + usage(1, 'Too many arguments') + + for opt, arg in opts: + if opt in ('-h', '--help'): + usage(0) + elif opt in ('-c', '--cipher'): + ciphermodule = arg + elif opt in ('-l', '--aslong'): + aslong = 1 + + # ugly hack to force __import__ to give us the end-path module + module = __import__('Crypto.Cipher.'+ciphermodule, None, None, ['new']) + + a = AllOrNothing(module) + print 'Original text:\n==========' + print __doc__ + print '==========' + msgblocks = a.digest(__doc__) + print 'message blocks:' + for i, blk in map(None, range(len(msgblocks)), msgblocks): + # base64 adds a trailing newline + print ' %3d' % i, + if aslong: + print bytes_to_long(blk) + else: + print base64.encodestring(blk)[:-1] + # + # get a new undigest-only object so there's no leakage + b = AllOrNothing(module) + text = b.undigest(msgblocks) + if text == __doc__: + print 'They match!' + else: + print 'They differ!' diff --git a/gam/gdata/Crypto/Protocol/Chaffing.py b/gam/gdata/Crypto/Protocol/Chaffing.py new file mode 100755 index 00000000000..fdfb82d0c39 --- /dev/null +++ b/gam/gdata/Crypto/Protocol/Chaffing.py @@ -0,0 +1,229 @@ +"""This file implements the chaffing algorithm. + +Winnowing and chaffing is a technique for enhancing privacy without requiring +strong encryption. In short, the technique takes a set of authenticated +message blocks (the wheat) and adds a number of chaff blocks which have +randomly chosen data and MAC fields. This means that to an adversary, the +chaff blocks look as valid as the wheat blocks, and so the authentication +would have to be performed on every block. By tailoring the number of chaff +blocks added to the message, the sender can make breaking the message +computationally infeasible. There are many other interesting properties of +the winnow/chaff technique. + +For example, say Alice is sending a message to Bob. She packetizes the +message and performs an all-or-nothing transformation on the packets. Then +she authenticates each packet with a message authentication code (MAC). The +MAC is a hash of the data packet, and there is a secret key which she must +share with Bob (key distribution is an exercise left to the reader). She then +adds a serial number to each packet, and sends the packets to Bob. + +Bob receives the packets, and using the shared secret authentication key, +authenticates the MACs for each packet. Those packets that have bad MACs are +simply discarded. The remainder are sorted by serial number, and passed +through the reverse all-or-nothing transform. The transform means that an +eavesdropper (say Eve) must acquire all the packets before any of the data can +be read. If even one packet is missing, the data is useless. + +There's one twist: by adding chaff packets, Alice and Bob can make Eve's job +much harder, since Eve now has to break the shared secret key, or try every +combination of wheat and chaff packet to read any of the message. 
The cool +thing is that Bob doesn't need to add any additional code; the chaff packets +are already filtered out because their MACs don't match (in all likelihood -- +since the data and MACs for the chaff packets are randomly chosen it is +possible, but very unlikely that a chaff MAC will match the chaff data). And +Alice need not even be the party adding the chaff! She could be completely +unaware that a third party, say Charles, is adding chaff packets to her +messages as they are transmitted. + +For more information on winnowing and chaffing see this paper: + +Ronald L. Rivest, "Chaffing and Winnowing: Confidentiality without Encryption" +http://theory.lcs.mit.edu/~rivest/chaffing.txt + +""" + +__revision__ = "$Id: Chaffing.py,v 1.7 2003/02/28 15:23:21 akuchling Exp $" + +from Crypto.Util.number import bytes_to_long + +class Chaff: + """Class implementing the chaff adding algorithm. + + Methods for subclasses: + + _randnum(size): + Returns a randomly generated number with a byte-length equal + to size. Subclasses can use this to implement better random + data and MAC generating algorithms. The default algorithm is + probably not very cryptographically secure. It is most + important that the chaff data does not contain any patterns + that can be used to discern it from wheat data without running + the MAC. + + """ + + def __init__(self, factor=1.0, blocksper=1): + """Chaff(factor:float, blocksper:int) + + factor is the number of message blocks to add chaff to, + expressed as a percentage between 0.0 and 1.0. blocksper is + the number of chaff blocks to include for each block being + chaffed. Thus the defaults add one chaff block to every + message block. By changing the defaults, you can adjust how + computationally difficult it could be for an adversary to + brute-force crack the message. The difficulty is expressed + as: + + pow(blocksper, int(factor * number-of-blocks)) + + For ease of implementation, when factor < 1.0, only the first + int(factor*number-of-blocks) message blocks are chaffed. + """ + + if not (0.0<=factor<=1.0): + raise ValueError, "'factor' must be between 0.0 and 1.0" + if blocksper < 0: + raise ValueError, "'blocksper' must be zero or more" + + self.__factor = factor + self.__blocksper = blocksper + + + def chaff(self, blocks): + """chaff( [(serial-number:int, data:string, MAC:string)] ) + : [(int, string, string)] + + Add chaff to message blocks. blocks is a list of 3-tuples of the + form (serial-number, data, MAC). + + Chaff is created by choosing a random number of the same + byte-length as data, and another random number of the same + byte-length as MAC. The message block's serial number is + placed on the chaff block and all the packet's chaff blocks + are randomly interspersed with the single wheat block. This + method then returns a list of 3-tuples of the same form. + Chaffed blocks will contain multiple instances of 3-tuples + with the same serial number, but the only way to figure out + which blocks are wheat and which are chaff is to perform the + MAC hash and compare values. + """ + + chaffedblocks = [] + + # count is the number of blocks to add chaff to. blocksper is the + # number of chaff blocks to add per message block that is being + # chaffed. 
+ count = len(blocks) * self.__factor + blocksper = range(self.__blocksper) + for i, wheat in map(None, range(len(blocks)), blocks): + # it shouldn't matter which of the n blocks we add chaff to, so for + # ease of implementation, we'll just add them to the first count + # blocks + if i < count: + serial, data, mac = wheat + datasize = len(data) + macsize = len(mac) + addwheat = 1 + # add chaff to this block + for j in blocksper: + import sys + chaffdata = self._randnum(datasize) + chaffmac = self._randnum(macsize) + chaff = (serial, chaffdata, chaffmac) + # mix up the order, if the 5th bit is on then put the + # wheat on the list + if addwheat and bytes_to_long(self._randnum(16)) & 0x40: + chaffedblocks.append(wheat) + addwheat = 0 + chaffedblocks.append(chaff) + if addwheat: + chaffedblocks.append(wheat) + else: + # just add the wheat + chaffedblocks.append(wheat) + return chaffedblocks + + def _randnum(self, size): + # TBD: Not a very secure algorithm. + # TBD: size * 2 to work around possible bug in RandomPool + from Crypto.Util import randpool + import time + pool = randpool.RandomPool(size * 2) + while size > pool.entropy: + pass + + # we now have enough entropy in the pool to get size bytes of random + # data... well, probably + return pool.get_bytes(size) + + + +if __name__ == '__main__': + text = """\ +We hold these truths to be self-evident, that all men are created equal, that +they are endowed by their Creator with certain unalienable Rights, that among +these are Life, Liberty, and the pursuit of Happiness. That to secure these +rights, Governments are instituted among Men, deriving their just powers from +the consent of the governed. That whenever any Form of Government becomes +destructive of these ends, it is the Right of the People to alter or to +abolish it, and to institute new Government, laying its foundation on such +principles and organizing its powers in such form, as to them shall seem most +likely to effect their Safety and Happiness. +""" + print 'Original text:\n==========' + print text + print '==========' + + # first transform the text into packets + blocks = [] ; size = 40 + for i in range(0, len(text), size): + blocks.append( text[i:i+size] ) + + # now get MACs for all the text blocks. The key is obvious... + print 'Calculating MACs...' + from Crypto.Hash import HMAC, SHA + key = 'Jefferson' + macs = [HMAC.new(key, block, digestmod=SHA).digest() + for block in blocks] + + assert len(blocks) == len(macs) + + # put these into a form acceptable as input to the chaffing procedure + source = [] + m = map(None, range(len(blocks)), blocks, macs) + print m + for i, data, mac in m: + source.append((i, data, mac)) + + # now chaff these + print 'Adding chaff...' + c = Chaff(factor=0.5, blocksper=2) + chaffed = c.chaff(source) + + from base64 import encodestring + + # print the chaffed message blocks. meanwhile, separate the wheat from + # the chaff + + wheat = [] + print 'chaffed message blocks:' + for i, data, mac in chaffed: + # do the authentication + h = HMAC.new(key, data, digestmod=SHA) + pmac = h.digest() + if pmac == mac: + tag = '-->' + wheat.append(data) + else: + tag = ' ' + # base64 adds a trailing newline + print tag, '%3d' % i, \ + repr(data), encodestring(mac)[:-1] + + # now decode the message packets and check it against the original text + print 'Undigesting wheat...' + newtext = "".join(wheat) + if newtext == text: + print 'They match!' + else: + print 'They differ!' 
diff --git a/gam/gdata/Crypto/Protocol/__init__.py b/gam/gdata/Crypto/Protocol/__init__.py new file mode 100755 index 00000000000..a6d68bcf8de --- /dev/null +++ b/gam/gdata/Crypto/Protocol/__init__.py @@ -0,0 +1,17 @@ + +"""Cryptographic protocols + +Implements various cryptographic protocols. (Don't expect to find +network protocols here.) + +Crypto.Protocol.AllOrNothing Transforms a message into a set of message + blocks, such that the blocks can be + recombined to get the message back. + +Crypto.Protocol.Chaffing Takes a set of authenticated message blocks + (the wheat) and adds a number of + randomly generated blocks (the chaff). +""" + +__all__ = ['AllOrNothing', 'Chaffing'] +__revision__ = "$Id: __init__.py,v 1.4 2003/02/28 15:23:21 akuchling Exp $" diff --git a/gam/gdata/Crypto/PublicKey/DSA.py b/gam/gdata/Crypto/PublicKey/DSA.py new file mode 100755 index 00000000000..7947b6f5fb1 --- /dev/null +++ b/gam/gdata/Crypto/PublicKey/DSA.py @@ -0,0 +1,238 @@ + +# +# DSA.py : Digital Signature Algorithm +# +# Part of the Python Cryptography Toolkit +# +# Distribute and use freely; there are no restrictions on further +# dissemination and usage except those imposed by the laws of your +# country of residence. This software is provided "as is" without +# warranty of fitness for use or suitability for any purpose, express +# or implied. Use at your own risk or not at all. +# + +__revision__ = "$Id: DSA.py,v 1.16 2004/05/06 12:52:54 akuchling Exp $" + +from Crypto.PublicKey.pubkey import * +from Crypto.Util import number +from Crypto.Util.number import bytes_to_long, long_to_bytes +from Crypto.Hash import SHA + +try: + from Crypto.PublicKey import _fastmath +except ImportError: + _fastmath = None + +class error (Exception): + pass + +def generateQ(randfunc): + S=randfunc(20) + hash1=SHA.new(S).digest() + hash2=SHA.new(long_to_bytes(bytes_to_long(S)+1)).digest() + q = bignum(0) + for i in range(0,20): + c=ord(hash1[i])^ord(hash2[i]) + if i==0: + c=c | 128 + if i==19: + c= c | 1 + q=q*256+c + while (not isPrime(q)): + q=q+2 + if pow(2,159L) < q < pow(2,160L): + return S, q + raise error, 'Bad q value generated' + +def generate(bits, randfunc, progress_func=None): + """generate(bits:int, randfunc:callable, progress_func:callable) + + Generate a DSA key of length 'bits', using 'randfunc' to get + random data and 'progress_func', if present, to display + the progress of the key generation. 
+ """ + + if bits<160: + raise error, 'Key length <160 bits' + obj=DSAobj() + # Generate string S and prime q + if progress_func: + progress_func('p,q\n') + while (1): + S, obj.q = generateQ(randfunc) + n=(bits-1)/160 + C, N, V = 0, 2, {} + b=(obj.q >> 5) & 15 + powb=pow(bignum(2), b) + powL1=pow(bignum(2), bits-1) + while C<4096: + for k in range(0, n+1): + V[k]=bytes_to_long(SHA.new(S+str(N)+str(k)).digest()) + W=V[n] % powb + for k in range(n-1, -1, -1): + W=(W<<160L)+V[k] + X=W+powL1 + p=X-(X%(2*obj.q)-1) + if powL1<=p and isPrime(p): + break + C, N = C+1, N+n+1 + if C<4096: + break + if progress_func: + progress_func('4096 multiples failed\n') + + obj.p = p + power=(p-1)/obj.q + if progress_func: + progress_func('h,g\n') + while (1): + h=bytes_to_long(randfunc(bits)) % (p-1) + g=pow(h, power, p) + if 11: + break + obj.g=g + if progress_func: + progress_func('x,y\n') + while (1): + x=bytes_to_long(randfunc(20)) + if 0 < x < obj.q: + break + obj.x, obj.y = x, pow(g, x, p) + return obj + +def construct(tuple): + """construct(tuple:(long,long,long,long)|(long,long,long,long,long)):DSAobj + Construct a DSA object from a 4- or 5-tuple of numbers. + """ + obj=DSAobj() + if len(tuple) not in [4,5]: + raise error, 'argument for construct() wrong length' + for i in range(len(tuple)): + field = obj.keydata[i] + setattr(obj, field, tuple[i]) + return obj + +class DSAobj(pubkey): + keydata=['y', 'g', 'p', 'q', 'x'] + + def _encrypt(self, s, Kstr): + raise error, 'DSA algorithm cannot encrypt data' + + def _decrypt(self, s): + raise error, 'DSA algorithm cannot decrypt data' + + def _sign(self, M, K): + if (K<2 or self.q<=K): + raise error, 'K is not between 2 and q' + r=pow(self.g, K, self.p) % self.q + s=(inverse(K, self.q)*(M+self.x*r)) % self.q + return (r,s) + + def _verify(self, M, sig): + r, s = sig + if r<=0 or r>=self.q or s<=0 or s>=self.q: + return 0 + w=inverse(s, self.q) + u1, u2 = (M*w) % self.q, (r*w) % self.q + v1 = pow(self.g, u1, self.p) + v2 = pow(self.y, u2, self.p) + v = ((v1*v2) % self.p) + v = v % self.q + if v==r: + return 1 + return 0 + + def size(self): + "Return the maximum number of bits that can be handled by this key." 
+ return number.size(self.p) - 1 + + def has_private(self): + """Return a Boolean denoting whether the object contains + private components.""" + if hasattr(self, 'x'): + return 1 + else: + return 0 + + def can_sign(self): + """Return a Boolean value recording whether this algorithm can generate signatures.""" + return 1 + + def can_encrypt(self): + """Return a Boolean value recording whether this algorithm can encrypt data.""" + return 0 + + def publickey(self): + """Return a new key object containing only the public information.""" + return construct((self.y, self.g, self.p, self.q)) + +object=DSAobj + +generate_py = generate +construct_py = construct + +class DSAobj_c(pubkey): + keydata = ['y', 'g', 'p', 'q', 'x'] + + def __init__(self, key): + self.key = key + + def __getattr__(self, attr): + if attr in self.keydata: + return getattr(self.key, attr) + else: + if self.__dict__.has_key(attr): + self.__dict__[attr] + else: + raise AttributeError, '%s instance has no attribute %s' % (self.__class__, attr) + + def __getstate__(self): + d = {} + for k in self.keydata: + if hasattr(self.key, k): + d[k]=getattr(self.key, k) + return d + + def __setstate__(self, state): + y,g,p,q = state['y'], state['g'], state['p'], state['q'] + if not state.has_key('x'): + self.key = _fastmath.dsa_construct(y,g,p,q) + else: + x = state['x'] + self.key = _fastmath.dsa_construct(y,g,p,q,x) + + def _sign(self, M, K): + return self.key._sign(M, K) + + def _verify(self, M, (r, s)): + return self.key._verify(M, r, s) + + def size(self): + return self.key.size() + + def has_private(self): + return self.key.has_private() + + def publickey(self): + return construct_c((self.key.y, self.key.g, self.key.p, self.key.q)) + + def can_sign(self): + return 1 + + def can_encrypt(self): + return 0 + +def generate_c(bits, randfunc, progress_func=None): + obj = generate_py(bits, randfunc, progress_func) + y,g,p,q,x = obj.y, obj.g, obj.p, obj.q, obj.x + return construct_c((y,g,p,q,x)) + +def construct_c(tuple): + key = apply(_fastmath.dsa_construct, tuple) + return DSAobj_c(key) + +if _fastmath: + #print "using C version of DSA" + generate = generate_c + construct = construct_c + error = _fastmath.error diff --git a/gam/gdata/Crypto/PublicKey/ElGamal.py b/gam/gdata/Crypto/PublicKey/ElGamal.py new file mode 100755 index 00000000000..026881c91a4 --- /dev/null +++ b/gam/gdata/Crypto/PublicKey/ElGamal.py @@ -0,0 +1,132 @@ +# +# ElGamal.py : ElGamal encryption/decryption and signatures +# +# Part of the Python Cryptography Toolkit +# +# Distribute and use freely; there are no restrictions on further +# dissemination and usage except those imposed by the laws of your +# country of residence. This software is provided "as is" without +# warranty of fitness for use or suitability for any purpose, express +# or implied. Use at your own risk or not at all. +# + +__revision__ = "$Id: ElGamal.py,v 1.9 2003/04/04 19:44:26 akuchling Exp $" + +from Crypto.PublicKey.pubkey import * +from Crypto.Util import number + +class error (Exception): + pass + +# Generate an ElGamal key with N bits +def generate(bits, randfunc, progress_func=None): + """generate(bits:int, randfunc:callable, progress_func:callable) + + Generate an ElGamal key of length 'bits', using 'randfunc' to get + random data and 'progress_func', if present, to display + the progress of the key generation. 
+ """ + obj=ElGamalobj() + # Generate prime p + if progress_func: + progress_func('p\n') + obj.p=bignum(getPrime(bits, randfunc)) + # Generate random number g + if progress_func: + progress_func('g\n') + size=bits-1-(ord(randfunc(1)) & 63) # g will be from 1--64 bits smaller than p + if size<1: + size=bits-1 + while (1): + obj.g=bignum(getPrime(size, randfunc)) + if obj.g < obj.p: + break + size=(size+1) % bits + if size==0: + size=4 + # Generate random number x + if progress_func: + progress_func('x\n') + while (1): + size=bits-1-ord(randfunc(1)) # x will be from 1 to 256 bits smaller than p + if size>2: + break + while (1): + obj.x=bignum(getPrime(size, randfunc)) + if obj.x < obj.p: + break + size = (size+1) % bits + if size==0: + size=4 + if progress_func: + progress_func('y\n') + obj.y = pow(obj.g, obj.x, obj.p) + return obj + +def construct(tuple): + """construct(tuple:(long,long,long,long)|(long,long,long,long,long))) + : ElGamalobj + Construct an ElGamal key from a 3- or 4-tuple of numbers. + """ + + obj=ElGamalobj() + if len(tuple) not in [3,4]: + raise error, 'argument for construct() wrong length' + for i in range(len(tuple)): + field = obj.keydata[i] + setattr(obj, field, tuple[i]) + return obj + +class ElGamalobj(pubkey): + keydata=['p', 'g', 'y', 'x'] + + def _encrypt(self, M, K): + a=pow(self.g, K, self.p) + b=( M*pow(self.y, K, self.p) ) % self.p + return ( a,b ) + + def _decrypt(self, M): + if (not hasattr(self, 'x')): + raise error, 'Private key not available in this object' + ax=pow(M[0], self.x, self.p) + plaintext=(M[1] * inverse(ax, self.p ) ) % self.p + return plaintext + + def _sign(self, M, K): + if (not hasattr(self, 'x')): + raise error, 'Private key not available in this object' + p1=self.p-1 + if (GCD(K, p1)!=1): + raise error, 'Bad K value: GCD(K,p-1)!=1' + a=pow(self.g, K, self.p) + t=(M-self.x*a) % p1 + while t<0: t=t+p1 + b=(t*inverse(K, p1)) % p1 + return (a, b) + + def _verify(self, M, sig): + v1=pow(self.y, sig[0], self.p) + v1=(v1*pow(sig[0], sig[1], self.p)) % self.p + v2=pow(self.g, M, self.p) + if v1==v2: + return 1 + return 0 + + def size(self): + "Return the maximum number of bits that can be handled by this key." + return number.size(self.p) - 1 + + def has_private(self): + """Return a Boolean denoting whether the object contains + private components.""" + if hasattr(self, 'x'): + return 1 + else: + return 0 + + def publickey(self): + """Return a new key object containing only the public information.""" + return construct((self.p, self.g, self.y)) + + +object=ElGamalobj diff --git a/gam/gdata/Crypto/PublicKey/RSA.py b/gam/gdata/Crypto/PublicKey/RSA.py new file mode 100755 index 00000000000..e0e877ec16f --- /dev/null +++ b/gam/gdata/Crypto/PublicKey/RSA.py @@ -0,0 +1,256 @@ +# +# RSA.py : RSA encryption/decryption +# +# Part of the Python Cryptography Toolkit +# +# Distribute and use freely; there are no restrictions on further +# dissemination and usage except those imposed by the laws of your +# country of residence. This software is provided "as is" without +# warranty of fitness for use or suitability for any purpose, express +# or implied. Use at your own risk or not at all. 
+# + +__revision__ = "$Id: RSA.py,v 1.20 2004/05/06 12:52:54 akuchling Exp $" + +from Crypto.PublicKey import pubkey +from Crypto.Util import number + +try: + from Crypto.PublicKey import _fastmath +except ImportError: + _fastmath = None + +class error (Exception): + pass + +def generate(bits, randfunc, progress_func=None): + """generate(bits:int, randfunc:callable, progress_func:callable) + + Generate an RSA key of length 'bits', using 'randfunc' to get + random data and 'progress_func', if present, to display + the progress of the key generation. + """ + obj=RSAobj() + + # Generate the prime factors of n + if progress_func: + progress_func('p,q\n') + p = q = 1L + while number.size(p*q) < bits: + p = pubkey.getPrime(bits/2, randfunc) + q = pubkey.getPrime(bits/2, randfunc) + + # p shall be smaller than q (for calc of u) + if p > q: + (p, q)=(q, p) + obj.p = p + obj.q = q + + if progress_func: + progress_func('u\n') + obj.u = pubkey.inverse(obj.p, obj.q) + obj.n = obj.p*obj.q + + obj.e = 65537L + if progress_func: + progress_func('d\n') + obj.d=pubkey.inverse(obj.e, (obj.p-1)*(obj.q-1)) + + assert bits <= 1+obj.size(), "Generated key is too small" + + return obj + +def construct(tuple): + """construct(tuple:(long,) : RSAobj + Construct an RSA object from a 2-, 3-, 5-, or 6-tuple of numbers. + """ + + obj=RSAobj() + if len(tuple) not in [2,3,5,6]: + raise error, 'argument for construct() wrong length' + for i in range(len(tuple)): + field = obj.keydata[i] + setattr(obj, field, tuple[i]) + if len(tuple) >= 5: + # Ensure p is smaller than q + if obj.p>obj.q: + (obj.p, obj.q)=(obj.q, obj.p) + + if len(tuple) == 5: + # u not supplied, so we're going to have to compute it. + obj.u=pubkey.inverse(obj.p, obj.q) + + return obj + +class RSAobj(pubkey.pubkey): + keydata = ['n', 'e', 'd', 'p', 'q', 'u'] + def _encrypt(self, plaintext, K=''): + if self.n<=plaintext: + raise error, 'Plaintext too large' + return (pow(plaintext, self.e, self.n),) + + def _decrypt(self, ciphertext): + if (not hasattr(self, 'd')): + raise error, 'Private key not available in this object' + if self.n<=ciphertext[0]: + raise error, 'Ciphertext too large' + return pow(ciphertext[0], self.d, self.n) + + def _sign(self, M, K=''): + return (self._decrypt((M,)),) + + def _verify(self, M, sig): + m2=self._encrypt(sig[0]) + if m2[0]==M: + return 1 + else: return 0 + + def _blind(self, M, B): + tmp = pow(B, self.e, self.n) + return (M * tmp) % self.n + + def _unblind(self, M, B): + tmp = pubkey.inverse(B, self.n) + return (M * tmp) % self.n + + def can_blind (self): + """can_blind() : bool + Return a Boolean value recording whether this algorithm can + blind data. (This does not imply that this + particular key object has the private information required to + to blind a message.) + """ + return 1 + + def size(self): + """size() : int + Return the maximum number of bits that can be handled by this key. + """ + return number.size(self.n) - 1 + + def has_private(self): + """has_private() : bool + Return a Boolean denoting whether the object contains + private components. + """ + if hasattr(self, 'd'): + return 1 + else: return 0 + + def publickey(self): + """publickey(): RSAobj + Return a new key object containing only the public key information. 
+ """ + return construct((self.n, self.e)) + +class RSAobj_c(pubkey.pubkey): + keydata = ['n', 'e', 'd', 'p', 'q', 'u'] + + def __init__(self, key): + self.key = key + + def __getattr__(self, attr): + if attr in self.keydata: + return getattr(self.key, attr) + else: + if self.__dict__.has_key(attr): + self.__dict__[attr] + else: + raise AttributeError, '%s instance has no attribute %s' % (self.__class__, attr) + + def __getstate__(self): + d = {} + for k in self.keydata: + if hasattr(self.key, k): + d[k]=getattr(self.key, k) + return d + + def __setstate__(self, state): + n,e = state['n'], state['e'] + if not state.has_key('d'): + self.key = _fastmath.rsa_construct(n,e) + else: + d = state['d'] + if not state.has_key('q'): + self.key = _fastmath.rsa_construct(n,e,d) + else: + p, q, u = state['p'], state['q'], state['u'] + self.key = _fastmath.rsa_construct(n,e,d,p,q,u) + + def _encrypt(self, plain, K): + return (self.key._encrypt(plain),) + + def _decrypt(self, cipher): + return self.key._decrypt(cipher[0]) + + def _sign(self, M, K): + return (self.key._sign(M),) + + def _verify(self, M, sig): + return self.key._verify(M, sig[0]) + + def _blind(self, M, B): + return self.key._blind(M, B) + + def _unblind(self, M, B): + return self.key._unblind(M, B) + + def can_blind (self): + return 1 + + def size(self): + return self.key.size() + + def has_private(self): + return self.key.has_private() + + def publickey(self): + return construct_c((self.key.n, self.key.e)) + +def generate_c(bits, randfunc, progress_func = None): + # Generate the prime factors of n + if progress_func: + progress_func('p,q\n') + + p = q = 1L + while number.size(p*q) < bits: + p = pubkey.getPrime(bits/2, randfunc) + q = pubkey.getPrime(bits/2, randfunc) + + # p shall be smaller than q (for calc of u) + if p > q: + (p, q)=(q, p) + if progress_func: + progress_func('u\n') + u=pubkey.inverse(p, q) + n=p*q + + e = 65537L + if progress_func: + progress_func('d\n') + d=pubkey.inverse(e, (p-1)*(q-1)) + key = _fastmath.rsa_construct(n,e,d,p,q,u) + obj = RSAobj_c(key) + +## print p +## print q +## print number.size(p), number.size(q), number.size(q*p), +## print obj.size(), bits + assert bits <= 1+obj.size(), "Generated key is too small" + return obj + + +def construct_c(tuple): + key = apply(_fastmath.rsa_construct, tuple) + return RSAobj_c(key) + +object = RSAobj + +generate_py = generate +construct_py = construct + +if _fastmath: + #print "using C version of RSA" + generate = generate_c + construct = construct_c + error = _fastmath.error diff --git a/gam/gdata/Crypto/PublicKey/__init__.py b/gam/gdata/Crypto/PublicKey/__init__.py new file mode 100755 index 00000000000..ad1c80ca14b --- /dev/null +++ b/gam/gdata/Crypto/PublicKey/__init__.py @@ -0,0 +1,17 @@ +"""Public-key encryption and signature algorithms. + +Public-key encryption uses two different keys, one for encryption and +one for decryption. The encryption key can be made public, and the +decryption key is kept private. Many public-key algorithms can also +be used to sign messages, and some can *only* be used for signatures. + +Crypto.PublicKey.DSA Digital Signature Algorithm. 
(Signature only) +Crypto.PublicKey.ElGamal (Signing and encryption) +Crypto.PublicKey.RSA (Signing, encryption, and blinding) +Crypto.PublicKey.qNEW (Signature only) + +""" + +__all__ = ['RSA', 'DSA', 'ElGamal', 'qNEW'] +__revision__ = "$Id: __init__.py,v 1.4 2003/04/03 20:27:13 akuchling Exp $" + diff --git a/gam/gdata/Crypto/PublicKey/pubkey.py b/gam/gdata/Crypto/PublicKey/pubkey.py new file mode 100755 index 00000000000..5c75c3e3ad7 --- /dev/null +++ b/gam/gdata/Crypto/PublicKey/pubkey.py @@ -0,0 +1,172 @@ +# +# pubkey.py : Internal functions for public key operations +# +# Part of the Python Cryptography Toolkit +# +# Distribute and use freely; there are no restrictions on further +# dissemination and usage except those imposed by the laws of your +# country of residence. This software is provided "as is" without +# warranty of fitness for use or suitability for any purpose, express +# or implied. Use at your own risk or not at all. +# + +__revision__ = "$Id: pubkey.py,v 1.11 2003/04/03 20:36:14 akuchling Exp $" + +import types, warnings +from Crypto.Util.number import * + +# Basic public key class +class pubkey: + def __init__(self): + pass + + def __getstate__(self): + """To keep key objects platform-independent, the key data is + converted to standard Python long integers before being + written out. It will then be reconverted as necessary on + restoration.""" + d=self.__dict__ + for key in self.keydata: + if d.has_key(key): d[key]=long(d[key]) + return d + + def __setstate__(self, d): + """On unpickling a key object, the key data is converted to the big +number representation being used, whether that is Python long +integers, MPZ objects, or whatever.""" + for key in self.keydata: + if d.has_key(key): self.__dict__[key]=bignum(d[key]) + + def encrypt(self, plaintext, K): + """encrypt(plaintext:string|long, K:string|long) : tuple + Encrypt the string or integer plaintext. K is a random + parameter required by some algorithms. + """ + wasString=0 + if isinstance(plaintext, types.StringType): + plaintext=bytes_to_long(plaintext) ; wasString=1 + if isinstance(K, types.StringType): + K=bytes_to_long(K) + ciphertext=self._encrypt(plaintext, K) + if wasString: return tuple(map(long_to_bytes, ciphertext)) + else: return ciphertext + + def decrypt(self, ciphertext): + """decrypt(ciphertext:tuple|string|long): string + Decrypt 'ciphertext' using this key. + """ + wasString=0 + if not isinstance(ciphertext, types.TupleType): + ciphertext=(ciphertext,) + if isinstance(ciphertext[0], types.StringType): + ciphertext=tuple(map(bytes_to_long, ciphertext)) ; wasString=1 + plaintext=self._decrypt(ciphertext) + if wasString: return long_to_bytes(plaintext) + else: return plaintext + + def sign(self, M, K): + """sign(M : string|long, K:string|long) : tuple + Return a tuple containing the signature for the message M. + K is a random parameter required by some algorithms. + """ + if (not self.has_private()): + raise error, 'Private key not available in this object' + if isinstance(M, types.StringType): M=bytes_to_long(M) + if isinstance(K, types.StringType): K=bytes_to_long(K) + return self._sign(M, K) + + def verify (self, M, signature): + """verify(M:string|long, signature:tuple) : bool + Verify that the signature is valid for the message M; + returns true if the signature checks out. 
+ """ + if isinstance(M, types.StringType): M=bytes_to_long(M) + return self._verify(M, signature) + + # alias to compensate for the old validate() name + def validate (self, M, signature): + warnings.warn("validate() method name is obsolete; use verify()", + DeprecationWarning) + + def blind(self, M, B): + """blind(M : string|long, B : string|long) : string|long + Blind message M using blinding factor B. + """ + wasString=0 + if isinstance(M, types.StringType): + M=bytes_to_long(M) ; wasString=1 + if isinstance(B, types.StringType): B=bytes_to_long(B) + blindedmessage=self._blind(M, B) + if wasString: return long_to_bytes(blindedmessage) + else: return blindedmessage + + def unblind(self, M, B): + """unblind(M : string|long, B : string|long) : string|long + Unblind message M using blinding factor B. + """ + wasString=0 + if isinstance(M, types.StringType): + M=bytes_to_long(M) ; wasString=1 + if isinstance(B, types.StringType): B=bytes_to_long(B) + unblindedmessage=self._unblind(M, B) + if wasString: return long_to_bytes(unblindedmessage) + else: return unblindedmessage + + + # The following methods will usually be left alone, except for + # signature-only algorithms. They both return Boolean values + # recording whether this key's algorithm can sign and encrypt. + def can_sign (self): + """can_sign() : bool + Return a Boolean value recording whether this algorithm can + generate signatures. (This does not imply that this + particular key object has the private information required to + to generate a signature.) + """ + return 1 + + def can_encrypt (self): + """can_encrypt() : bool + Return a Boolean value recording whether this algorithm can + encrypt data. (This does not imply that this + particular key object has the private information required to + to decrypt a message.) + """ + return 1 + + def can_blind (self): + """can_blind() : bool + Return a Boolean value recording whether this algorithm can + blind data. (This does not imply that this + particular key object has the private information required to + to blind a message.) + """ + return 0 + + # The following methods will certainly be overridden by + # subclasses. + + def size (self): + """size() : int + Return the maximum number of bits that can be handled by this key. + """ + return 0 + + def has_private (self): + """has_private() : bool + Return a Boolean denoting whether the object contains + private components. + """ + return 0 + + def publickey (self): + """publickey(): object + Return a new key object containing only the public information. + """ + return self + + def __eq__ (self, other): + """__eq__(other): 0, 1 + Compare us to other for equality. + """ + return self.__getstate__() == other.__getstate__() diff --git a/gam/gdata/Crypto/PublicKey/qNEW.py b/gam/gdata/Crypto/PublicKey/qNEW.py new file mode 100755 index 00000000000..65f8ae36b31 --- /dev/null +++ b/gam/gdata/Crypto/PublicKey/qNEW.py @@ -0,0 +1,170 @@ +# +# qNEW.py : The q-NEW signature algorithm. +# +# Part of the Python Cryptography Toolkit +# +# Distribute and use freely; there are no restrictions on further +# dissemination and usage except those imposed by the laws of your +# country of residence. This software is provided "as is" without +# warranty of fitness for use or suitability for any purpose, express +# or implied. Use at your own risk or not at all. 
+# + +__revision__ = "$Id: qNEW.py,v 1.8 2003/04/04 15:13:35 akuchling Exp $" + +from Crypto.PublicKey import pubkey +from Crypto.Util.number import * +from Crypto.Hash import SHA + +class error (Exception): + pass + +HASHBITS = 160 # Size of SHA digests + +def generate(bits, randfunc, progress_func=None): + """generate(bits:int, randfunc:callable, progress_func:callable) + + Generate a qNEW key of length 'bits', using 'randfunc' to get + random data and 'progress_func', if present, to display + the progress of the key generation. + """ + obj=qNEWobj() + + # Generate prime numbers p and q. q is a 160-bit prime + # number. p is another prime number (the modulus) whose bit + # size is chosen by the caller, and is generated so that p-1 + # is a multiple of q. + # + # Note that only a single seed is used to + # generate p and q; if someone generates a key for you, you can + # use the seed to duplicate the key generation. This can + # protect you from someone generating values of p,q that have + # some special form that's easy to break. + if progress_func: + progress_func('p,q\n') + while (1): + obj.q = getPrime(160, randfunc) + # assert pow(2, 159L)1. g is kept; h can be discarded. + if progress_func: + progress_func('h,g\n') + while (1): + h=bytes_to_long(randfunc(bits)) % (p-1) + g=pow(h, power, p) + if 11: + break + obj.g=g + + # x is the private key information, and is + # just a random number between 0 and q. + # y=g**x mod p, and is part of the public information. + if progress_func: + progress_func('x,y\n') + while (1): + x=bytes_to_long(randfunc(20)) + if 0 < x < obj.q: + break + obj.x, obj.y=x, pow(g, x, p) + + return obj + +# Construct a qNEW object +def construct(tuple): + """construct(tuple:(long,long,long,long)|(long,long,long,long,long) + Construct a qNEW object from a 4- or 5-tuple of numbers. + """ + obj=qNEWobj() + if len(tuple) not in [4,5]: + raise error, 'argument for construct() wrong length' + for i in range(len(tuple)): + field = obj.keydata[i] + setattr(obj, field, tuple[i]) + return obj + +class qNEWobj(pubkey.pubkey): + keydata=['p', 'q', 'g', 'y', 'x'] + + def _sign(self, M, K=''): + if (self.q<=K): + raise error, 'K is greater than q' + if M<0: + raise error, 'Illegal value of M (<0)' + if M>=pow(2,161L): + raise error, 'Illegal value of M (too large)' + r=pow(self.g, K, self.p) % self.q + s=(K- (r*M*self.x % self.q)) % self.q + return (r,s) + def _verify(self, M, sig): + r, s = sig + if r<=0 or r>=self.q or s<=0 or s>=self.q: + return 0 + if M<0: + raise error, 'Illegal value of M (<0)' + if M<=0 or M>=pow(2,161L): + return 0 + v1 = pow(self.g, s, self.p) + v2 = pow(self.y, M*r, self.p) + v = ((v1*v2) % self.p) + v = v % self.q + if v==r: + return 1 + return 0 + + def size(self): + "Return the maximum number of bits that can be handled by this key." 
+ return 160 + + def has_private(self): + """Return a Boolean denoting whether the object contains + private components.""" + return hasattr(self, 'x') + + def can_sign(self): + """Return a Boolean value recording whether this algorithm can generate signatures.""" + return 1 + + def can_encrypt(self): + """Return a Boolean value recording whether this algorithm can encrypt data.""" + return 0 + + def publickey(self): + """Return a new key object containing only the public information.""" + return construct((self.p, self.q, self.g, self.y)) + +object = qNEWobj + diff --git a/gam/gdata/Crypto/Util/RFC1751.py b/gam/gdata/Crypto/Util/RFC1751.py new file mode 100755 index 00000000000..0a47952495b --- /dev/null +++ b/gam/gdata/Crypto/Util/RFC1751.py @@ -0,0 +1,342 @@ +#!/usr/local/bin/python +# rfc1751.py : Converts between 128-bit strings and a human-readable +# sequence of words, as defined in RFC1751: "A Convention for +# Human-Readable 128-bit Keys", by Daniel L. McDonald. + +__revision__ = "$Id: RFC1751.py,v 1.6 2003/04/04 15:15:10 akuchling Exp $" + + +import string, binascii + +binary={0:'0000', 1:'0001', 2:'0010', 3:'0011', 4:'0100', 5:'0101', + 6:'0110', 7:'0111', 8:'1000', 9:'1001', 10:'1010', 11:'1011', + 12:'1100', 13:'1101', 14:'1110', 15:'1111'} + +def _key2bin(s): + "Convert a key into a string of binary digits" + kl=map(lambda x: ord(x), s) + kl=map(lambda x: binary[x/16]+binary[x&15], kl) + return ''.join(kl) + +def _extract(key, start, length): + """Extract a bitstring from a string of binary digits, and return its + numeric value.""" + k=key[start:start+length] + return reduce(lambda x,y: x*2+ord(y)-48, k, 0) + +def key_to_english (key): + """key_to_english(key:string) : string + Transform an arbitrary key into a string containing English words. + The key length must be a multiple of 8. + """ + english='' + for index in range(0, len(key), 8): # Loop over 8-byte subkeys + subkey=key[index:index+8] + # Compute the parity of the key + skbin=_key2bin(subkey) ; p=0 + for i in range(0, 64, 2): p=p+_extract(skbin, i, 2) + # Append parity bits to the subkey + skbin=_key2bin(subkey+chr((p<<6) & 255)) + for i in range(0, 64, 11): + english=english+wordlist[_extract(skbin, i, 11)]+' ' + + return english[:-1] # Remove the trailing space + +def english_to_key (str): + """english_to_key(string):string + Transform a string into a corresponding key. + The string must contain words separated by whitespace; the number + of words must be a multiple of 6. 
+ """ + + L=string.split(string.upper(str)) ; key='' + for index in range(0, len(L), 6): + sublist=L[index:index+6] ; char=9*[0] ; bits=0 + for i in sublist: + index = wordlist.index(i) + shift = (8-(bits+11)%8) %8 + y = index << shift + cl, cc, cr = (y>>16), (y>>8)&0xff, y & 0xff + if (shift>5): + char[bits/8] = char[bits/8] | cl + char[bits/8+1] = char[bits/8+1] | cc + char[bits/8+2] = char[bits/8+2] | cr + elif shift>-3: + char[bits/8] = char[bits/8] | cc + char[bits/8+1] = char[bits/8+1] | cr + else: char[bits/8] = char[bits/8] | cr + bits=bits+11 + subkey=reduce(lambda x,y:x+chr(y), char, '') + + # Check the parity of the resulting key + skbin=_key2bin(subkey) + p=0 + for i in range(0, 64, 2): p=p+_extract(skbin, i, 2) + if (p&3) != _extract(skbin, 64, 2): + raise ValueError, "Parity error in resulting key" + key=key+subkey[0:8] + return key + +wordlist=[ "A", "ABE", "ACE", "ACT", "AD", "ADA", "ADD", + "AGO", "AID", "AIM", "AIR", "ALL", "ALP", "AM", "AMY", "AN", "ANA", + "AND", "ANN", "ANT", "ANY", "APE", "APS", "APT", "ARC", "ARE", "ARK", + "ARM", "ART", "AS", "ASH", "ASK", "AT", "ATE", "AUG", "AUK", "AVE", + "AWE", "AWK", "AWL", "AWN", "AX", "AYE", "BAD", "BAG", "BAH", "BAM", + "BAN", "BAR", "BAT", "BAY", "BE", "BED", "BEE", "BEG", "BEN", "BET", + "BEY", "BIB", "BID", "BIG", "BIN", "BIT", "BOB", "BOG", "BON", "BOO", + "BOP", "BOW", "BOY", "BUB", "BUD", "BUG", "BUM", "BUN", "BUS", "BUT", + "BUY", "BY", "BYE", "CAB", "CAL", "CAM", "CAN", "CAP", "CAR", "CAT", + "CAW", "COD", "COG", "COL", "CON", "COO", "COP", "COT", "COW", "COY", + "CRY", "CUB", "CUE", "CUP", "CUR", "CUT", "DAB", "DAD", "DAM", "DAN", + "DAR", "DAY", "DEE", "DEL", "DEN", "DES", "DEW", "DID", "DIE", "DIG", + "DIN", "DIP", "DO", "DOE", "DOG", "DON", "DOT", "DOW", "DRY", "DUB", + "DUD", "DUE", "DUG", "DUN", "EAR", "EAT", "ED", "EEL", "EGG", "EGO", + "ELI", "ELK", "ELM", "ELY", "EM", "END", "EST", "ETC", "EVA", "EVE", + "EWE", "EYE", "FAD", "FAN", "FAR", "FAT", "FAY", "FED", "FEE", "FEW", + "FIB", "FIG", "FIN", "FIR", "FIT", "FLO", "FLY", "FOE", "FOG", "FOR", + "FRY", "FUM", "FUN", "FUR", "GAB", "GAD", "GAG", "GAL", "GAM", "GAP", + "GAS", "GAY", "GEE", "GEL", "GEM", "GET", "GIG", "GIL", "GIN", "GO", + "GOT", "GUM", "GUN", "GUS", "GUT", "GUY", "GYM", "GYP", "HA", "HAD", + "HAL", "HAM", "HAN", "HAP", "HAS", "HAT", "HAW", "HAY", "HE", "HEM", + "HEN", "HER", "HEW", "HEY", "HI", "HID", "HIM", "HIP", "HIS", "HIT", + "HO", "HOB", "HOC", "HOE", "HOG", "HOP", "HOT", "HOW", "HUB", "HUE", + "HUG", "HUH", "HUM", "HUT", "I", "ICY", "IDA", "IF", "IKE", "ILL", + "INK", "INN", "IO", "ION", "IQ", "IRA", "IRE", "IRK", "IS", "IT", + "ITS", "IVY", "JAB", "JAG", "JAM", "JAN", "JAR", "JAW", "JAY", "JET", + "JIG", "JIM", "JO", "JOB", "JOE", "JOG", "JOT", "JOY", "JUG", "JUT", + "KAY", "KEG", "KEN", "KEY", "KID", "KIM", "KIN", "KIT", "LA", "LAB", + "LAC", "LAD", "LAG", "LAM", "LAP", "LAW", "LAY", "LEA", "LED", "LEE", + "LEG", "LEN", "LEO", "LET", "LEW", "LID", "LIE", "LIN", "LIP", "LIT", + "LO", "LOB", "LOG", "LOP", "LOS", "LOT", "LOU", "LOW", "LOY", "LUG", + "LYE", "MA", "MAC", "MAD", "MAE", "MAN", "MAO", "MAP", "MAT", "MAW", + "MAY", "ME", "MEG", "MEL", "MEN", "MET", "MEW", "MID", "MIN", "MIT", + "MOB", "MOD", "MOE", "MOO", "MOP", "MOS", "MOT", "MOW", "MUD", "MUG", + "MUM", "MY", "NAB", "NAG", "NAN", "NAP", "NAT", "NAY", "NE", "NED", + "NEE", "NET", "NEW", "NIB", "NIL", "NIP", "NIT", "NO", "NOB", "NOD", + "NON", "NOR", "NOT", "NOV", "NOW", "NU", "NUN", "NUT", "O", "OAF", + "OAK", "OAR", "OAT", "ODD", "ODE", "OF", "OFF", "OFT", "OH", "OIL", + 
"OK", "OLD", "ON", "ONE", "OR", "ORB", "ORE", "ORR", "OS", "OTT", + "OUR", "OUT", "OVA", "OW", "OWE", "OWL", "OWN", "OX", "PA", "PAD", + "PAL", "PAM", "PAN", "PAP", "PAR", "PAT", "PAW", "PAY", "PEA", "PEG", + "PEN", "PEP", "PER", "PET", "PEW", "PHI", "PI", "PIE", "PIN", "PIT", + "PLY", "PO", "POD", "POE", "POP", "POT", "POW", "PRO", "PRY", "PUB", + "PUG", "PUN", "PUP", "PUT", "QUO", "RAG", "RAM", "RAN", "RAP", "RAT", + "RAW", "RAY", "REB", "RED", "REP", "RET", "RIB", "RID", "RIG", "RIM", + "RIO", "RIP", "ROB", "ROD", "ROE", "RON", "ROT", "ROW", "ROY", "RUB", + "RUE", "RUG", "RUM", "RUN", "RYE", "SAC", "SAD", "SAG", "SAL", "SAM", + "SAN", "SAP", "SAT", "SAW", "SAY", "SEA", "SEC", "SEE", "SEN", "SET", + "SEW", "SHE", "SHY", "SIN", "SIP", "SIR", "SIS", "SIT", "SKI", "SKY", + "SLY", "SO", "SOB", "SOD", "SON", "SOP", "SOW", "SOY", "SPA", "SPY", + "SUB", "SUD", "SUE", "SUM", "SUN", "SUP", "TAB", "TAD", "TAG", "TAN", + "TAP", "TAR", "TEA", "TED", "TEE", "TEN", "THE", "THY", "TIC", "TIE", + "TIM", "TIN", "TIP", "TO", "TOE", "TOG", "TOM", "TON", "TOO", "TOP", + "TOW", "TOY", "TRY", "TUB", "TUG", "TUM", "TUN", "TWO", "UN", "UP", + "US", "USE", "VAN", "VAT", "VET", "VIE", "WAD", "WAG", "WAR", "WAS", + "WAY", "WE", "WEB", "WED", "WEE", "WET", "WHO", "WHY", "WIN", "WIT", + "WOK", "WON", "WOO", "WOW", "WRY", "WU", "YAM", "YAP", "YAW", "YE", + "YEA", "YES", "YET", "YOU", "ABED", "ABEL", "ABET", "ABLE", "ABUT", + "ACHE", "ACID", "ACME", "ACRE", "ACTA", "ACTS", "ADAM", "ADDS", + "ADEN", "AFAR", "AFRO", "AGEE", "AHEM", "AHOY", "AIDA", "AIDE", + "AIDS", "AIRY", "AJAR", "AKIN", "ALAN", "ALEC", "ALGA", "ALIA", + "ALLY", "ALMA", "ALOE", "ALSO", "ALTO", "ALUM", "ALVA", "AMEN", + "AMES", "AMID", "AMMO", "AMOK", "AMOS", "AMRA", "ANDY", "ANEW", + "ANNA", "ANNE", "ANTE", "ANTI", "AQUA", "ARAB", "ARCH", "AREA", + "ARGO", "ARID", "ARMY", "ARTS", "ARTY", "ASIA", "ASKS", "ATOM", + "AUNT", "AURA", "AUTO", "AVER", "AVID", "AVIS", "AVON", "AVOW", + "AWAY", "AWRY", "BABE", "BABY", "BACH", "BACK", "BADE", "BAIL", + "BAIT", "BAKE", "BALD", "BALE", "BALI", "BALK", "BALL", "BALM", + "BAND", "BANE", "BANG", "BANK", "BARB", "BARD", "BARE", "BARK", + "BARN", "BARR", "BASE", "BASH", "BASK", "BASS", "BATE", "BATH", + "BAWD", "BAWL", "BEAD", "BEAK", "BEAM", "BEAN", "BEAR", "BEAT", + "BEAU", "BECK", "BEEF", "BEEN", "BEER", + "BEET", "BELA", "BELL", "BELT", "BEND", "BENT", "BERG", "BERN", + "BERT", "BESS", "BEST", "BETA", "BETH", "BHOY", "BIAS", "BIDE", + "BIEN", "BILE", "BILK", "BILL", "BIND", "BING", "BIRD", "BITE", + "BITS", "BLAB", "BLAT", "BLED", "BLEW", "BLOB", "BLOC", "BLOT", + "BLOW", "BLUE", "BLUM", "BLUR", "BOAR", "BOAT", "BOCA", "BOCK", + "BODE", "BODY", "BOGY", "BOHR", "BOIL", "BOLD", "BOLO", "BOLT", + "BOMB", "BONA", "BOND", "BONE", "BONG", "BONN", "BONY", "BOOK", + "BOOM", "BOON", "BOOT", "BORE", "BORG", "BORN", "BOSE", "BOSS", + "BOTH", "BOUT", "BOWL", "BOYD", "BRAD", "BRAE", "BRAG", "BRAN", + "BRAY", "BRED", "BREW", "BRIG", "BRIM", "BROW", "BUCK", "BUDD", + "BUFF", "BULB", "BULK", "BULL", "BUNK", "BUNT", "BUOY", "BURG", + "BURL", "BURN", "BURR", "BURT", "BURY", "BUSH", "BUSS", "BUST", + "BUSY", "BYTE", "CADY", "CAFE", "CAGE", "CAIN", "CAKE", "CALF", + "CALL", "CALM", "CAME", "CANE", "CANT", "CARD", "CARE", "CARL", + "CARR", "CART", "CASE", "CASH", "CASK", "CAST", "CAVE", "CEIL", + "CELL", "CENT", "CERN", "CHAD", "CHAR", "CHAT", "CHAW", "CHEF", + "CHEN", "CHEW", "CHIC", "CHIN", "CHOU", "CHOW", "CHUB", "CHUG", + "CHUM", "CITE", "CITY", "CLAD", "CLAM", "CLAN", "CLAW", "CLAY", + "CLOD", "CLOG", "CLOT", "CLUB", "CLUE", 
"COAL", "COAT", "COCA", + "COCK", "COCO", "CODA", "CODE", "CODY", "COED", "COIL", "COIN", + "COKE", "COLA", "COLD", "COLT", "COMA", "COMB", "COME", "COOK", + "COOL", "COON", "COOT", "CORD", "CORE", "CORK", "CORN", "COST", + "COVE", "COWL", "CRAB", "CRAG", "CRAM", "CRAY", "CREW", "CRIB", + "CROW", "CRUD", "CUBA", "CUBE", "CUFF", "CULL", "CULT", "CUNY", + "CURB", "CURD", "CURE", "CURL", "CURT", "CUTS", "DADE", "DALE", + "DAME", "DANA", "DANE", "DANG", "DANK", "DARE", "DARK", "DARN", + "DART", "DASH", "DATA", "DATE", "DAVE", "DAVY", "DAWN", "DAYS", + "DEAD", "DEAF", "DEAL", "DEAN", "DEAR", "DEBT", "DECK", "DEED", + "DEEM", "DEER", "DEFT", "DEFY", "DELL", "DENT", "DENY", "DESK", + "DIAL", "DICE", "DIED", "DIET", "DIME", "DINE", "DING", "DINT", + "DIRE", "DIRT", "DISC", "DISH", "DISK", "DIVE", "DOCK", "DOES", + "DOLE", "DOLL", "DOLT", "DOME", "DONE", "DOOM", "DOOR", "DORA", + "DOSE", "DOTE", "DOUG", "DOUR", "DOVE", "DOWN", "DRAB", "DRAG", + "DRAM", "DRAW", "DREW", "DRUB", "DRUG", "DRUM", "DUAL", "DUCK", + "DUCT", "DUEL", "DUET", "DUKE", "DULL", "DUMB", "DUNE", "DUNK", + "DUSK", "DUST", "DUTY", "EACH", "EARL", "EARN", "EASE", "EAST", + "EASY", "EBEN", "ECHO", "EDDY", "EDEN", "EDGE", "EDGY", "EDIT", + "EDNA", "EGAN", "ELAN", "ELBA", "ELLA", "ELSE", "EMIL", "EMIT", + "EMMA", "ENDS", "ERIC", "EROS", "EVEN", "EVER", "EVIL", "EYED", + "FACE", "FACT", "FADE", "FAIL", "FAIN", "FAIR", "FAKE", "FALL", + "FAME", "FANG", "FARM", "FAST", "FATE", "FAWN", "FEAR", "FEAT", + "FEED", "FEEL", "FEET", "FELL", "FELT", "FEND", "FERN", "FEST", + "FEUD", "FIEF", "FIGS", "FILE", "FILL", "FILM", "FIND", "FINE", + "FINK", "FIRE", "FIRM", "FISH", "FISK", "FIST", "FITS", "FIVE", + "FLAG", "FLAK", "FLAM", "FLAT", "FLAW", "FLEA", "FLED", "FLEW", + "FLIT", "FLOC", "FLOG", "FLOW", "FLUB", "FLUE", "FOAL", "FOAM", + "FOGY", "FOIL", "FOLD", "FOLK", "FOND", "FONT", "FOOD", "FOOL", + "FOOT", "FORD", "FORE", "FORK", "FORM", "FORT", "FOSS", "FOUL", + "FOUR", "FOWL", "FRAU", "FRAY", "FRED", "FREE", "FRET", "FREY", + "FROG", "FROM", "FUEL", "FULL", "FUME", "FUND", "FUNK", "FURY", + "FUSE", "FUSS", "GAFF", "GAGE", "GAIL", "GAIN", "GAIT", "GALA", + "GALE", "GALL", "GALT", "GAME", "GANG", "GARB", "GARY", "GASH", + "GATE", "GAUL", "GAUR", "GAVE", "GAWK", "GEAR", "GELD", "GENE", + "GENT", "GERM", "GETS", "GIBE", "GIFT", "GILD", "GILL", "GILT", + "GINA", "GIRD", "GIRL", "GIST", "GIVE", "GLAD", "GLEE", "GLEN", + "GLIB", "GLOB", "GLOM", "GLOW", "GLUE", "GLUM", "GLUT", "GOAD", + "GOAL", "GOAT", "GOER", "GOES", "GOLD", "GOLF", "GONE", "GONG", + "GOOD", "GOOF", "GORE", "GORY", "GOSH", "GOUT", "GOWN", "GRAB", + "GRAD", "GRAY", "GREG", "GREW", "GREY", "GRID", "GRIM", "GRIN", + "GRIT", "GROW", "GRUB", "GULF", "GULL", "GUNK", "GURU", "GUSH", + "GUST", "GWEN", "GWYN", "HAAG", "HAAS", "HACK", "HAIL", "HAIR", + "HALE", "HALF", "HALL", "HALO", "HALT", "HAND", "HANG", "HANK", + "HANS", "HARD", "HARK", "HARM", "HART", "HASH", "HAST", "HATE", + "HATH", "HAUL", "HAVE", "HAWK", "HAYS", "HEAD", "HEAL", "HEAR", + "HEAT", "HEBE", "HECK", "HEED", "HEEL", "HEFT", "HELD", "HELL", + "HELM", "HERB", "HERD", "HERE", "HERO", "HERS", "HESS", "HEWN", + "HICK", "HIDE", "HIGH", "HIKE", "HILL", "HILT", "HIND", "HINT", + "HIRE", "HISS", "HIVE", "HOBO", "HOCK", "HOFF", "HOLD", "HOLE", + "HOLM", "HOLT", "HOME", "HONE", "HONK", "HOOD", "HOOF", "HOOK", + "HOOT", "HORN", "HOSE", "HOST", "HOUR", "HOVE", "HOWE", "HOWL", + "HOYT", "HUCK", "HUED", "HUFF", "HUGE", "HUGH", "HUGO", "HULK", + "HULL", "HUNK", "HUNT", "HURD", "HURL", "HURT", "HUSH", "HYDE", + "HYMN", "IBIS", "ICON", 
"IDEA", "IDLE", "IFFY", "INCA", "INCH", + "INTO", "IONS", "IOTA", "IOWA", "IRIS", "IRMA", "IRON", "ISLE", + "ITCH", "ITEM", "IVAN", "JACK", "JADE", "JAIL", "JAKE", "JANE", + "JAVA", "JEAN", "JEFF", "JERK", "JESS", "JEST", "JIBE", "JILL", + "JILT", "JIVE", "JOAN", "JOBS", "JOCK", "JOEL", "JOEY", "JOHN", + "JOIN", "JOKE", "JOLT", "JOVE", "JUDD", "JUDE", "JUDO", "JUDY", + "JUJU", "JUKE", "JULY", "JUNE", "JUNK", "JUNO", "JURY", "JUST", + "JUTE", "KAHN", "KALE", "KANE", "KANT", "KARL", "KATE", "KEEL", + "KEEN", "KENO", "KENT", "KERN", "KERR", "KEYS", "KICK", "KILL", + "KIND", "KING", "KIRK", "KISS", "KITE", "KLAN", "KNEE", "KNEW", + "KNIT", "KNOB", "KNOT", "KNOW", "KOCH", "KONG", "KUDO", "KURD", + "KURT", "KYLE", "LACE", "LACK", "LACY", "LADY", "LAID", "LAIN", + "LAIR", "LAKE", "LAMB", "LAME", "LAND", "LANE", "LANG", "LARD", + "LARK", "LASS", "LAST", "LATE", "LAUD", "LAVA", "LAWN", "LAWS", + "LAYS", "LEAD", "LEAF", "LEAK", "LEAN", "LEAR", "LEEK", "LEER", + "LEFT", "LEND", "LENS", "LENT", "LEON", "LESK", "LESS", "LEST", + "LETS", "LIAR", "LICE", "LICK", "LIED", "LIEN", "LIES", "LIEU", + "LIFE", "LIFT", "LIKE", "LILA", "LILT", "LILY", "LIMA", "LIMB", + "LIME", "LIND", "LINE", "LINK", "LINT", "LION", "LISA", "LIST", + "LIVE", "LOAD", "LOAF", "LOAM", "LOAN", "LOCK", "LOFT", "LOGE", + "LOIS", "LOLA", "LONE", "LONG", "LOOK", "LOON", "LOOT", "LORD", + "LORE", "LOSE", "LOSS", "LOST", "LOUD", "LOVE", "LOWE", "LUCK", + "LUCY", "LUGE", "LUKE", "LULU", "LUND", "LUNG", "LURA", "LURE", + "LURK", "LUSH", "LUST", "LYLE", "LYNN", "LYON", "LYRA", "MACE", + "MADE", "MAGI", "MAID", "MAIL", "MAIN", "MAKE", "MALE", "MALI", + "MALL", "MALT", "MANA", "MANN", "MANY", "MARC", "MARE", "MARK", + "MARS", "MART", "MARY", "MASH", "MASK", "MASS", "MAST", "MATE", + "MATH", "MAUL", "MAYO", "MEAD", "MEAL", "MEAN", "MEAT", "MEEK", + "MEET", "MELD", "MELT", "MEMO", "MEND", "MENU", "MERT", "MESH", + "MESS", "MICE", "MIKE", "MILD", "MILE", "MILK", "MILL", "MILT", + "MIMI", "MIND", "MINE", "MINI", "MINK", "MINT", "MIRE", "MISS", + "MIST", "MITE", "MITT", "MOAN", "MOAT", "MOCK", "MODE", "MOLD", + "MOLE", "MOLL", "MOLT", "MONA", "MONK", "MONT", "MOOD", "MOON", + "MOOR", "MOOT", "MORE", "MORN", "MORT", "MOSS", "MOST", "MOTH", + "MOVE", "MUCH", "MUCK", "MUDD", "MUFF", "MULE", "MULL", "MURK", + "MUSH", "MUST", "MUTE", "MUTT", "MYRA", "MYTH", "NAGY", "NAIL", + "NAIR", "NAME", "NARY", "NASH", "NAVE", "NAVY", "NEAL", "NEAR", + "NEAT", "NECK", "NEED", "NEIL", "NELL", "NEON", "NERO", "NESS", + "NEST", "NEWS", "NEWT", "NIBS", "NICE", "NICK", "NILE", "NINA", + "NINE", "NOAH", "NODE", "NOEL", "NOLL", "NONE", "NOOK", "NOON", + "NORM", "NOSE", "NOTE", "NOUN", "NOVA", "NUDE", "NULL", "NUMB", + "OATH", "OBEY", "OBOE", "ODIN", "OHIO", "OILY", "OINT", "OKAY", + "OLAF", "OLDY", "OLGA", "OLIN", "OMAN", "OMEN", "OMIT", "ONCE", + "ONES", "ONLY", "ONTO", "ONUS", "ORAL", "ORGY", "OSLO", "OTIS", + "OTTO", "OUCH", "OUST", "OUTS", "OVAL", "OVEN", "OVER", "OWLY", + "OWNS", "QUAD", "QUIT", "QUOD", "RACE", "RACK", "RACY", "RAFT", + "RAGE", "RAID", "RAIL", "RAIN", "RAKE", "RANK", "RANT", "RARE", + "RASH", "RATE", "RAVE", "RAYS", "READ", "REAL", "REAM", "REAR", + "RECK", "REED", "REEF", "REEK", "REEL", "REID", "REIN", "RENA", + "REND", "RENT", "REST", "RICE", "RICH", "RICK", "RIDE", "RIFT", + "RILL", "RIME", "RING", "RINK", "RISE", "RISK", "RITE", "ROAD", + "ROAM", "ROAR", "ROBE", "ROCK", "RODE", "ROIL", "ROLL", "ROME", + "ROOD", "ROOF", "ROOK", "ROOM", "ROOT", "ROSA", "ROSE", "ROSS", + "ROSY", "ROTH", "ROUT", "ROVE", "ROWE", "ROWS", "RUBE", "RUBY", + "RUDE", 
"RUDY", "RUIN", "RULE", "RUNG", "RUNS", "RUNT", "RUSE", + "RUSH", "RUSK", "RUSS", "RUST", "RUTH", "SACK", "SAFE", "SAGE", + "SAID", "SAIL", "SALE", "SALK", "SALT", "SAME", "SAND", "SANE", + "SANG", "SANK", "SARA", "SAUL", "SAVE", "SAYS", "SCAN", "SCAR", + "SCAT", "SCOT", "SEAL", "SEAM", "SEAR", "SEAT", "SEED", "SEEK", + "SEEM", "SEEN", "SEES", "SELF", "SELL", "SEND", "SENT", "SETS", + "SEWN", "SHAG", "SHAM", "SHAW", "SHAY", "SHED", "SHIM", "SHIN", + "SHOD", "SHOE", "SHOT", "SHOW", "SHUN", "SHUT", "SICK", "SIDE", + "SIFT", "SIGH", "SIGN", "SILK", "SILL", "SILO", "SILT", "SINE", + "SING", "SINK", "SIRE", "SITE", "SITS", "SITU", "SKAT", "SKEW", + "SKID", "SKIM", "SKIN", "SKIT", "SLAB", "SLAM", "SLAT", "SLAY", + "SLED", "SLEW", "SLID", "SLIM", "SLIT", "SLOB", "SLOG", "SLOT", + "SLOW", "SLUG", "SLUM", "SLUR", "SMOG", "SMUG", "SNAG", "SNOB", + "SNOW", "SNUB", "SNUG", "SOAK", "SOAR", "SOCK", "SODA", "SOFA", + "SOFT", "SOIL", "SOLD", "SOME", "SONG", "SOON", "SOOT", "SORE", + "SORT", "SOUL", "SOUR", "SOWN", "STAB", "STAG", "STAN", "STAR", + "STAY", "STEM", "STEW", "STIR", "STOW", "STUB", "STUN", "SUCH", + "SUDS", "SUIT", "SULK", "SUMS", "SUNG", "SUNK", "SURE", "SURF", + "SWAB", "SWAG", "SWAM", "SWAN", "SWAT", "SWAY", "SWIM", "SWUM", + "TACK", "TACT", "TAIL", "TAKE", "TALE", "TALK", "TALL", "TANK", + "TASK", "TATE", "TAUT", "TEAL", "TEAM", "TEAR", "TECH", "TEEM", + "TEEN", "TEET", "TELL", "TEND", "TENT", "TERM", "TERN", "TESS", + "TEST", "THAN", "THAT", "THEE", "THEM", "THEN", "THEY", "THIN", + "THIS", "THUD", "THUG", "TICK", "TIDE", "TIDY", "TIED", "TIER", + "TILE", "TILL", "TILT", "TIME", "TINA", "TINE", "TINT", "TINY", + "TIRE", "TOAD", "TOGO", "TOIL", "TOLD", "TOLL", "TONE", "TONG", + "TONY", "TOOK", "TOOL", "TOOT", "TORE", "TORN", "TOTE", "TOUR", + "TOUT", "TOWN", "TRAG", "TRAM", "TRAY", "TREE", "TREK", "TRIG", + "TRIM", "TRIO", "TROD", "TROT", "TROY", "TRUE", "TUBA", "TUBE", + "TUCK", "TUFT", "TUNA", "TUNE", "TUNG", "TURF", "TURN", "TUSK", + "TWIG", "TWIN", "TWIT", "ULAN", "UNIT", "URGE", "USED", "USER", + "USES", "UTAH", "VAIL", "VAIN", "VALE", "VARY", "VASE", "VAST", + "VEAL", "VEDA", "VEIL", "VEIN", "VEND", "VENT", "VERB", "VERY", + "VETO", "VICE", "VIEW", "VINE", "VISE", "VOID", "VOLT", "VOTE", + "WACK", "WADE", "WAGE", "WAIL", "WAIT", "WAKE", "WALE", "WALK", + "WALL", "WALT", "WAND", "WANE", "WANG", "WANT", "WARD", "WARM", + "WARN", "WART", "WASH", "WAST", "WATS", "WATT", "WAVE", "WAVY", + "WAYS", "WEAK", "WEAL", "WEAN", "WEAR", "WEED", "WEEK", "WEIR", + "WELD", "WELL", "WELT", "WENT", "WERE", "WERT", "WEST", "WHAM", + "WHAT", "WHEE", "WHEN", "WHET", "WHOA", "WHOM", "WICK", "WIFE", + "WILD", "WILL", "WIND", "WINE", "WING", "WINK", "WINO", "WIRE", + "WISE", "WISH", "WITH", "WOLF", "WONT", "WOOD", "WOOL", "WORD", + "WORE", "WORK", "WORM", "WORN", "WOVE", "WRIT", "WYNN", "YALE", + "YANG", "YANK", "YARD", "YARN", "YAWL", "YAWN", "YEAH", "YEAR", + "YELL", "YOGA", "YOKE" ] + +if __name__=='__main__': + data = [('EB33F77EE73D4053', 'TIDE ITCH SLOW REIN RULE MOT'), + ('CCAC2AED591056BE4F90FD441C534766', + 'RASH BUSH MILK LOOK BAD BRIM AVID GAFF BAIT ROT POD LOVE'), + ('EFF81F9BFBC65350920CDD7416DE8009', + 'TROD MUTE TAIL WARM CHAR KONG HAAG CITY BORE O TEAL AWL') + ] + + for key, words in data: + print 'Trying key', key + key=binascii.a2b_hex(key) + w2=key_to_english(key) + if w2!=words: + print 'key_to_english fails on key', repr(key), ', producing', str(w2) + k2=english_to_key(words) + if k2!=key: + print 'english_to_key fails on key', repr(key), ', producing', repr(k2) + + diff --git 
a/gam/gdata/Crypto/Util/__init__.py b/gam/gdata/Crypto/Util/__init__.py new file mode 100755 index 00000000000..0d147681815 --- /dev/null +++ b/gam/gdata/Crypto/Util/__init__.py @@ -0,0 +1,16 @@ +"""Miscellaneous modules + +Contains useful modules that don't belong into any of the +other Crypto.* subpackages. + +Crypto.Util.number Number-theoretic functions (primality testing, etc.) +Crypto.Util.randpool Random number generation +Crypto.Util.RFC1751 Converts between 128-bit keys and human-readable + strings of words. + +""" + +__all__ = ['randpool', 'RFC1751', 'number'] + +__revision__ = "$Id: __init__.py,v 1.4 2003/02/28 15:26:00 akuchling Exp $" + diff --git a/gam/gdata/Crypto/Util/number.py b/gam/gdata/Crypto/Util/number.py new file mode 100755 index 00000000000..9d50563e904 --- /dev/null +++ b/gam/gdata/Crypto/Util/number.py @@ -0,0 +1,201 @@ +# +# number.py : Number-theoretic functions +# +# Part of the Python Cryptography Toolkit +# +# Distribute and use freely; there are no restrictions on further +# dissemination and usage except those imposed by the laws of your +# country of residence. This software is provided "as is" without +# warranty of fitness for use or suitability for any purpose, express +# or implied. Use at your own risk or not at all. +# + +__revision__ = "$Id: number.py,v 1.13 2003/04/04 18:21:07 akuchling Exp $" + +bignum = long +try: + from Crypto.PublicKey import _fastmath +except ImportError: + _fastmath = None + +# Commented out and replaced with faster versions below +## def long2str(n): +## s='' +## while n>0: +## s=chr(n & 255)+s +## n=n>>8 +## return s + +## import types +## def str2long(s): +## if type(s)!=types.StringType: return s # Integers will be left alone +## return reduce(lambda x,y : x*256+ord(y), s, 0L) + +def size (N): + """size(N:long) : int + Returns the size of the number N in bits. + """ + bits, power = 0,1L + while N >= power: + bits += 1 + power = power << 1 + return bits + +def getRandomNumber(N, randfunc): + """getRandomNumber(N:int, randfunc:callable):long + Return an N-bit random number.""" + + S = randfunc(N/8) + odd_bits = N % 8 + if odd_bits != 0: + char = ord(randfunc(1)) >> (8-odd_bits) + S = chr(char) + S + value = bytes_to_long(S) + value |= 2L ** (N-1) # Ensure high bit is set + assert size(value) >= N + return value + +def GCD(x,y): + """GCD(x:long, y:long): long + Return the GCD of x and y. + """ + x = abs(x) ; y = abs(y) + while x > 0: + x, y = y % x, x + return y + +def inverse(u, v): + """inverse(u:long, u:long):long + Return the inverse of u mod v. + """ + u3, v3 = long(u), long(v) + u1, v1 = 1L, 0L + while v3 > 0: + q=u3 / v3 + u1, v1 = v1, u1 - v1*q + u3, v3 = v3, u3 - v3*q + while u1<0: + u1 = u1 + v + return u1 + +# Given a number of bits to generate and a random generation function, +# find a prime number of the appropriate size. + +def getPrime(N, randfunc): + """getPrime(N:int, randfunc:callable):long + Return a random N-bit prime number. + """ + + number=getRandomNumber(N, randfunc) | 1 + while (not isPrime(number)): + number=number+2 + return number + +def isPrime(N): + """isPrime(N:long):bool + Return true if N is prime. 
+ """ + if N == 1: + return 0 + if N in sieve: + return 1 + for i in sieve: + if (N % i)==0: + return 0 + + # Use the accelerator if available + if _fastmath is not None: + return _fastmath.isPrime(N) + + # Compute the highest bit that's set in N + N1 = N - 1L + n = 1L + while (n> 1L + + # Rabin-Miller test + for c in sieve[:7]: + a=long(c) ; d=1L ; t=n + while (t): # Iterate over the bits in N1 + x=(d*d) % N + if x==1L and d!=1L and d!=N1: + return 0 # Square root of 1 found + if N1 & t: + d=(x*a) % N + else: + d=x + t = t >> 1L + if d!=1L: + return 0 + return 1 + +# Small primes used for checking primality; these are all the primes +# less than 256. This should be enough to eliminate most of the odd +# numbers before needing to do a Rabin-Miller test at all. + +sieve=[2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, + 61, 67, 71, 73, 79, 83, 89, 97, 101, 103, 107, 109, 113, 127, + 131, 137, 139, 149, 151, 157, 163, 167, 173, 179, 181, 191, 193, + 197, 199, 211, 223, 227, 229, 233, 239, 241, 251] + +# Improved conversion functions contributed by Barry Warsaw, after +# careful benchmarking + +import struct + +def long_to_bytes(n, blocksize=0): + """long_to_bytes(n:long, blocksize:int) : string + Convert a long integer to a byte string. + + If optional blocksize is given and greater than zero, pad the front of the + byte string with binary zeros so that the length is a multiple of + blocksize. + """ + # after much testing, this algorithm was deemed to be the fastest + s = '' + n = long(n) + pack = struct.pack + while n > 0: + s = pack('>I', n & 0xffffffffL) + s + n = n >> 32 + # strip off leading zeros + for i in range(len(s)): + if s[i] != '\000': + break + else: + # only happens when n == 0 + s = '\000' + i = 0 + s = s[i:] + # add back some pad bytes. this could be done more efficiently w.r.t. the + # de-padding being done above, but sigh... + if blocksize > 0 and len(s) % blocksize: + s = (blocksize - len(s) % blocksize) * '\000' + s + return s + +def bytes_to_long(s): + """bytes_to_long(string) : long + Convert a byte string to a long integer. + + This is (essentially) the inverse of long_to_bytes(). + """ + acc = 0L + unpack = struct.unpack + length = len(s) + if length % 4: + extra = (4 - length % 4) + s = '\000' * extra + s + length = length + extra + for i in range(0, length, 4): + acc = (acc << 32) + unpack('>I', s[i:i+4])[0] + return acc + +# For backwards compatibility... +import warnings +def long2str(n, blocksize=0): + warnings.warn("long2str() has been replaced by long_to_bytes()") + return long_to_bytes(n, blocksize) +def str2long(s): + warnings.warn("str2long() has been replaced by bytes_to_long()") + return bytes_to_long(s) diff --git a/gam/gdata/Crypto/Util/randpool.py b/gam/gdata/Crypto/Util/randpool.py new file mode 100755 index 00000000000..467501c5442 --- /dev/null +++ b/gam/gdata/Crypto/Util/randpool.py @@ -0,0 +1,421 @@ +# +# randpool.py : Cryptographically strong random number generation +# +# Part of the Python Cryptography Toolkit +# +# Distribute and use freely; there are no restrictions on further +# dissemination and usage except those imposed by the laws of your +# country of residence. This software is provided "as is" without +# warranty of fitness for use or suitability for any purpose, express +# or implied. Use at your own risk or not at all. 
+# + +__revision__ = "$Id: randpool.py,v 1.14 2004/05/06 12:56:54 akuchling Exp $" + +import time, array, types, warnings, os.path +from Crypto.Util.number import long_to_bytes +try: + import Crypto.Util.winrandom as winrandom +except: + winrandom = None + +STIRNUM = 3 + +class RandomPool: + """randpool.py : Cryptographically strong random number generation. + + The implementation here is similar to the one in PGP. To be + cryptographically strong, it must be difficult to determine the RNG's + output, whether in the future or the past. This is done by using + a cryptographic hash function to "stir" the random data. + + Entropy is gathered in the same fashion as PGP; the highest-resolution + clock around is read and the data is added to the random number pool. + A conservative estimate of the entropy is then kept. + + If a cryptographically secure random source is available (/dev/urandom + on many Unixes, Windows CryptGenRandom on most Windows), then use + it. + + Instance Attributes: + bits : int + Maximum size of pool in bits + bytes : int + Maximum size of pool in bytes + entropy : int + Number of bits of entropy in this pool. + + Methods: + add_event([s]) : add some entropy to the pool + get_bytes(int) : get N bytes of random data + randomize([N]) : get N bytes of randomness from external source + """ + + + def __init__(self, numbytes = 160, cipher=None, hash=None): + if hash is None: + from Crypto.Hash import SHA as hash + + # The cipher argument is vestigial; it was removed from + # version 1.1 so RandomPool would work even in the limited + # exportable subset of the code + if cipher is not None: + warnings.warn("'cipher' parameter is no longer used") + + if isinstance(hash, types.StringType): + # ugly hack to force __import__ to give us the end-path module + hash = __import__('Crypto.Hash.'+hash, + None, None, ['new']) + warnings.warn("'hash' parameter should now be a hashing module") + + self.bytes = numbytes + self.bits = self.bytes*8 + self.entropy = 0 + self._hash = hash + + # Construct an array to hold the random pool, + # initializing it to 0. + self._randpool = array.array('B', [0]*self.bytes) + + self._event1 = self._event2 = 0 + self._addPos = 0 + self._getPos = hash.digest_size + self._lastcounter=time.time() + self.__counter = 0 + + self._measureTickSize() # Estimate timer resolution + self._randomize() + + def _updateEntropyEstimate(self, nbits): + self.entropy += nbits + if self.entropy < 0: + self.entropy = 0 + elif self.entropy > self.bits: + self.entropy = self.bits + + def _randomize(self, N = 0, devname = '/dev/urandom'): + """_randomize(N, DEVNAME:device-filepath) + collects N bits of randomness from some entropy source (e.g., + /dev/urandom on Unixes that have it, Windows CryptoAPI + CryptGenRandom, etc) + DEVNAME is optional, defaults to /dev/urandom. You can change it + to /dev/random if you want to block till you get enough + entropy. + """ + data = '' + if N <= 0: + nbytes = int((self.bits - self.entropy)/8+0.5) + else: + nbytes = int(N/8+0.5) + if winrandom: + # Windows CryptGenRandom provides random data. + data = winrandom.new().get_bytes(nbytes) + elif os.path.exists(devname): + # Many OSes support a /dev/urandom device + try: + f=open(devname) + data=f.read(nbytes) + f.close() + except IOError, (num, msg): + if num!=2: raise IOError, (num, msg) + # If the file wasn't found, ignore the error + if data: + self._addBytes(data) + # Entropy estimate: The number of bits of + # data obtained from the random source. 
+            self._updateEntropyEstimate(8*len(data))
+            self.stir_n()        # Wash the random pool
+
+    def randomize(self, N=0):
+        """randomize(N:int)
+        use the class entropy source to get some entropy data.
+        This is overridden by KeyboardRandomize().
+        """
+        return self._randomize(N)
+
+    def stir_n(self, N = STIRNUM):
+        """stir_n(N)
+        stirs the random pool N times
+        """
+        for i in xrange(N):
+            self.stir()
+
+    def stir (self, s = ''):
+        """stir(s:string)
+        Mix up the randomness pool.  This will call add_event() twice,
+        but out of paranoia the entropy attribute will not be
+        increased.  The optional 's' parameter is a string that will
+        be hashed with the randomness pool.
+        """
+
+        entropy=self.entropy    # Save inital entropy value
+        self.add_event()
+
+        # Loop over the randomness pool: hash its contents
+        # along with a counter, and add the resulting digest
+        # back into the pool.
+        for i in range(self.bytes / self._hash.digest_size):
+            h = self._hash.new(self._randpool)
+            h.update(str(self.__counter) + str(i) + str(self._addPos) + s)
+            self._addBytes( h.digest() )
+            self.__counter = (self.__counter + 1) & 0xFFFFffffL
+
+        self._addPos, self._getPos = 0, self._hash.digest_size
+        self.add_event()
+
+        # Restore the old value of the entropy.
+        self.entropy=entropy
+
+
+    def get_bytes (self, N):
+        """get_bytes(N:int) : string
+        Return N bytes of random data.
+        """
+
+        s=''
+        i, pool = self._getPos, self._randpool
+        h=self._hash.new()
+        dsize = self._hash.digest_size
+        num = N
+        while num > 0:
+            h.update( self._randpool[i:i+dsize] )
+            s = s + h.digest()
+            num = num - dsize
+            i = (i + dsize) % self.bytes
+            if i<dsize:
+                self.stir()
+                i=self._getPos
+        self._getPos = i
+        self._updateEntropyEstimate(- 8*N)
+        return s[:N]
+
+    def add_event(self, s=''):
+        """add_event(s:string)
+        Add an event to the random pool.  The current time is stored
+        between calls and used to estimate the entropy.  The optional
+        's' parameter is a string that will also be XORed into the pool.
+        Returns the estimated number of additional bits of entropy gain.
+        """
+        event = time.time()*1e3
+        delta = self._noise()
+        s = (s + long_to_bytes(event) +
+             4*chr(0xaa) + long_to_bytes(delta) )
+        self._addBytes(s)
+        if event==self._event1 and event==self._event2:
+            # If events are coming too closely together, assume there's
+            # no effective entropy being added.
+            bits=0
+        else:
+            # Count the number of bits in delta, and assume that's the entropy.
+            bits=0
+            while delta:
+                delta, bits = delta>>1, bits+1
+            if bits>8: bits=8
+
+        self._event1, self._event2 = event, self._event1
+
+        self._updateEntropyEstimate(bits)
+        return bits
+
+    # Private functions
+    def _noise(self):
+        # Adds a bit of noise to the random pool, by adding in the
+        # current time and CPU usage of this process.
+        # The difference from the previous call to _noise() is taken
+        # in an effort to estimate the entropy.
+        t=time.time()
+        delta = (t - self._lastcounter)/self._ticksize*1e6
+        self._lastcounter = t
+        self._addBytes(long_to_bytes(long(1000*time.time())))
+        self._addBytes(long_to_bytes(long(1000*time.clock())))
+        self._addBytes(long_to_bytes(long(1000*time.time())))
+        self._addBytes(long_to_bytes(long(delta)))
+
+        # Reduce delta to a maximum of 8 bits so we don't add too much
+        # entropy as a result of this call.
+        delta=delta % 0xff
+        return int(delta)
+
+
+    def _measureTickSize(self):
+        # _measureTickSize() tries to estimate a rough average of the
+        # resolution of time that you can see from Python.  It does
+        # this by measuring the time 100 times, computing the delay
+        # between measurements, and taking the median of the resulting
+        # list.
(We also hash all the times and add them to the pool) + interval = [None] * 100 + h = self._hash.new(`(id(self),id(interval))`) + + # Compute 100 differences + t=time.time() + h.update(`t`) + i = 0 + j = 0 + while i < 100: + t2=time.time() + h.update(`(i,j,t2)`) + j += 1 + delta=int((t2-t)*1e6) + if delta: + interval[i] = delta + i += 1 + t=t2 + + # Take the median of the array of intervals + interval.sort() + self._ticksize=interval[len(interval)/2] + h.update(`(interval,self._ticksize)`) + # mix in the measurement times and wash the random pool + self.stir(h.digest()) + + def _addBytes(self, s): + "XOR the contents of the string S into the random pool" + i, pool = self._addPos, self._randpool + for j in range(0, len(s)): + pool[i]=pool[i] ^ ord(s[j]) + i=(i+1) % self.bytes + self._addPos = i + + # Deprecated method names: remove in PCT 2.1 or later. + def getBytes(self, N): + warnings.warn("getBytes() method replaced by get_bytes()", + DeprecationWarning) + return self.get_bytes(N) + + def addEvent (self, event, s=""): + warnings.warn("addEvent() method replaced by add_event()", + DeprecationWarning) + return self.add_event(s + str(event)) + +class PersistentRandomPool (RandomPool): + def __init__ (self, filename=None, *args, **kwargs): + RandomPool.__init__(self, *args, **kwargs) + self.filename = filename + if filename: + try: + # the time taken to open and read the file might have + # a little disk variability, modulo disk/kernel caching... + f=open(filename, 'rb') + self.add_event() + data = f.read() + self.add_event() + # mix in the data from the file and wash the random pool + self.stir(data) + f.close() + except IOError: + # Oh, well; the file doesn't exist or is unreadable, so + # we'll just ignore it. + pass + + def save(self): + if self.filename == "": + raise ValueError, "No filename set for this object" + # wash the random pool before save, provides some forward secrecy for + # old values of the pool. + self.stir_n() + f=open(self.filename, 'wb') + self.add_event() + f.write(self._randpool.tostring()) + f.close() + self.add_event() + # wash the pool again, provide some protection for future values + self.stir() + +# non-echoing Windows keyboard entry +_kb = 0 +if not _kb: + try: + import msvcrt + class KeyboardEntry: + def getch(self): + c = msvcrt.getch() + if c in ('\000', '\xe0'): + # function key + c += msvcrt.getch() + return c + def close(self, delay = 0): + if delay: + time.sleep(delay) + while msvcrt.kbhit(): + msvcrt.getch() + _kb = 1 + except: + pass + +# non-echoing Posix keyboard entry +if not _kb: + try: + import termios + class KeyboardEntry: + def __init__(self, fd = 0): + self._fd = fd + self._old = termios.tcgetattr(fd) + new = termios.tcgetattr(fd) + new[3]=new[3] & ~termios.ICANON & ~termios.ECHO + termios.tcsetattr(fd, termios.TCSANOW, new) + def getch(self): + termios.tcflush(0, termios.TCIFLUSH) # XXX Leave this in? + return os.read(self._fd, 1) + def close(self, delay = 0): + if delay: + time.sleep(delay) + termios.tcflush(self._fd, termios.TCIFLUSH) + termios.tcsetattr(self._fd, termios.TCSAFLUSH, self._old) + _kb = 1 + except: + pass + +class KeyboardRandomPool (PersistentRandomPool): + def __init__(self, *args, **kwargs): + PersistentRandomPool.__init__(self, *args, **kwargs) + + def randomize(self, N = 0): + "Adds N bits of entropy to random pool. If N is 0, fill up pool." + import os, string, time + if N <= 0: + bits = self.bits - self.entropy + else: + bits = N*8 + if bits == 0: + return + print bits,'bits of entropy are now required. 
Please type on the keyboard' + print 'until enough randomness has been accumulated.' + kb = KeyboardEntry() + s='' # We'll save the characters typed and add them to the pool. + hash = self._hash + e = 0 + try: + while e < bits: + temp=str(bits-e).rjust(6) + os.write(1, temp) + s=s+kb.getch() + e += self.add_event(s) + os.write(1, 6*chr(8)) + self.add_event(s+hash.new(s).digest() ) + finally: + kb.close() + print '\n\007 Enough. Please wait a moment.\n' + self.stir_n() # wash the random pool. + kb.close(4) + +if __name__ == '__main__': + pool = RandomPool() + print 'random pool entropy', pool.entropy, 'bits' + pool.add_event('something') + print `pool.get_bytes(100)` + import tempfile, os + fname = tempfile.mktemp() + pool = KeyboardRandomPool(filename=fname) + print 'keyboard random pool entropy', pool.entropy, 'bits' + pool.randomize() + print 'keyboard random pool entropy', pool.entropy, 'bits' + pool.randomize(128) + pool.save() + saved = open(fname, 'rb').read() + print 'saved', `saved` + print 'pool ', `pool._randpool.tostring()` + newpool = PersistentRandomPool(fname) + print 'persistent random pool entropy', pool.entropy, 'bits' + os.remove(fname) diff --git a/gam/gdata/Crypto/Util/test.py b/gam/gdata/Crypto/Util/test.py new file mode 100755 index 00000000000..7b23e9f5e47 --- /dev/null +++ b/gam/gdata/Crypto/Util/test.py @@ -0,0 +1,453 @@ +# +# test.py : Functions used for testing the modules +# +# Part of the Python Cryptography Toolkit +# +# Distribute and use freely; there are no restrictions on further +# dissemination and usage except those imposed by the laws of your +# country of residence. This software is provided "as is" without +# warranty of fitness for use or suitability for any purpose, express +# or implied. Use at your own risk or not at all. +# + +__revision__ = "$Id: test.py,v 1.16 2004/08/13 22:24:18 akuchling Exp $" + +import binascii +import string +import testdata + +from Crypto.Cipher import * + +def die(string): + import sys + print '***ERROR: ', string +# sys.exit(0) # Will default to continuing onward... 
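The exercise functions below all rely on the same round-trip pattern: build a cipher object, encrypt, build a second object with the same key and IV, decrypt, and compare with the original plaintext. A minimal sketch of that pattern using AES (PyCrypto 2.x API as vendored in this patch; the key, IV, and plaintext are made-up example values, Python 2 syntax):

    # Illustrative round trip in the style of exerciseBlockCipher (hypothetical values).
    from Crypto.Cipher import AES

    key = '0123456789abcdef'        # 16 bytes -> AES-128
    IV  = 'ABCDEFGHIJKLMNOP'        # one 16-byte block, used for CBC mode
    plaintext = 'sixteen byte msg'  # length must be a multiple of the block size

    enc = AES.new(key, AES.MODE_CBC, IV)
    dec = AES.new(key, AES.MODE_CBC, IV)
    assert dec.decrypt(enc.encrypt(plaintext)) == plaintext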
+ +def print_timing (size, delta, verbose): + if verbose: + if delta == 0: + print 'Unable to measure time -- elapsed time too small' + else: + print '%.2f K/sec' % (size/delta) + +def exerciseBlockCipher(cipher, verbose): + import string, time + try: + ciph = eval(cipher) + except NameError: + print cipher, 'module not available' + return None + print cipher+ ':' + str='1' # Build 128K of test data + for i in xrange(0, 17): + str=str+str + if ciph.key_size==0: ciph.key_size=16 + password = 'password12345678Extra text for password'[0:ciph.key_size] + IV = 'Test IV Test IV Test IV Test'[0:ciph.block_size] + + if verbose: print ' ECB mode:', + obj=ciph.new(password, ciph.MODE_ECB) + if obj.block_size != ciph.block_size: + die("Module and cipher object block_size don't match") + + text='1234567812345678'[0:ciph.block_size] + c=obj.encrypt(text) + if (obj.decrypt(c)!=text): die('Error encrypting "'+text+'"') + text='KuchlingKuchling'[0:ciph.block_size] + c=obj.encrypt(text) + if (obj.decrypt(c)!=text): die('Error encrypting "'+text+'"') + text='NotTodayNotEver!'[0:ciph.block_size] + c=obj.encrypt(text) + if (obj.decrypt(c)!=text): die('Error encrypting "'+text+'"') + + start=time.time() + s=obj.encrypt(str) + s2=obj.decrypt(s) + end=time.time() + if (str!=s2): + die('Error in resulting plaintext from ECB mode') + print_timing(256, end-start, verbose) + del obj + + if verbose: print ' CFB mode:', + obj1=ciph.new(password, ciph.MODE_CFB, IV) + obj2=ciph.new(password, ciph.MODE_CFB, IV) + start=time.time() + ciphertext=obj1.encrypt(str[0:65536]) + plaintext=obj2.decrypt(ciphertext) + end=time.time() + if (plaintext!=str[0:65536]): + die('Error in resulting plaintext from CFB mode') + print_timing(64, end-start, verbose) + del obj1, obj2 + + if verbose: print ' CBC mode:', + obj1=ciph.new(password, ciph.MODE_CBC, IV) + obj2=ciph.new(password, ciph.MODE_CBC, IV) + start=time.time() + ciphertext=obj1.encrypt(str) + plaintext=obj2.decrypt(ciphertext) + end=time.time() + if (plaintext!=str): + die('Error in resulting plaintext from CBC mode') + print_timing(256, end-start, verbose) + del obj1, obj2 + + if verbose: print ' PGP mode:', + obj1=ciph.new(password, ciph.MODE_PGP, IV) + obj2=ciph.new(password, ciph.MODE_PGP, IV) + start=time.time() + ciphertext=obj1.encrypt(str) + plaintext=obj2.decrypt(ciphertext) + end=time.time() + if (plaintext!=str): + die('Error in resulting plaintext from PGP mode') + print_timing(256, end-start, verbose) + del obj1, obj2 + + if verbose: print ' OFB mode:', + obj1=ciph.new(password, ciph.MODE_OFB, IV) + obj2=ciph.new(password, ciph.MODE_OFB, IV) + start=time.time() + ciphertext=obj1.encrypt(str) + plaintext=obj2.decrypt(ciphertext) + end=time.time() + if (plaintext!=str): + die('Error in resulting plaintext from OFB mode') + print_timing(256, end-start, verbose) + del obj1, obj2 + + def counter(length=ciph.block_size): + return length * 'a' + + if verbose: print ' CTR mode:', + obj1=ciph.new(password, ciph.MODE_CTR, counter=counter) + obj2=ciph.new(password, ciph.MODE_CTR, counter=counter) + start=time.time() + ciphertext=obj1.encrypt(str) + plaintext=obj2.decrypt(ciphertext) + end=time.time() + if (plaintext!=str): + die('Error in resulting plaintext from CTR mode') + print_timing(256, end-start, verbose) + del obj1, obj2 + + # Test the IV handling + if verbose: print ' Testing IV handling' + obj1=ciph.new(password, ciph.MODE_CBC, IV) + plaintext='Test'*(ciph.block_size/4)*3 + ciphertext1=obj1.encrypt(plaintext) + obj1.IV=IV + ciphertext2=obj1.encrypt(plaintext) + if 
ciphertext1!=ciphertext2: + die('Error in setting IV') + + # Test keyword arguments + obj1=ciph.new(key=password) + obj1=ciph.new(password, mode=ciph.MODE_CBC) + obj1=ciph.new(mode=ciph.MODE_CBC, key=password) + obj1=ciph.new(IV=IV, mode=ciph.MODE_CBC, key=password) + + return ciph + +def exerciseStreamCipher(cipher, verbose): + import string, time + try: + ciph = eval(cipher) + except (NameError): + print cipher, 'module not available' + return None + print cipher + ':', + str='1' # Build 128K of test data + for i in xrange(0, 17): + str=str+str + key_size = ciph.key_size or 16 + password = 'password12345678Extra text for password'[0:key_size] + + obj1=ciph.new(password) + obj2=ciph.new(password) + if obj1.block_size != ciph.block_size: + die("Module and cipher object block_size don't match") + if obj1.key_size != ciph.key_size: + die("Module and cipher object key_size don't match") + + text='1234567812345678Python' + c=obj1.encrypt(text) + if (obj2.decrypt(c)!=text): die('Error encrypting "'+text+'"') + text='B1FF I2 A R3A11Y |<00L D00D!!!!!' + c=obj1.encrypt(text) + if (obj2.decrypt(c)!=text): die('Error encrypting "'+text+'"') + text='SpamSpamSpamSpamSpamSpamSpamSpamSpam' + c=obj1.encrypt(text) + if (obj2.decrypt(c)!=text): die('Error encrypting "'+text+'"') + + start=time.time() + s=obj1.encrypt(str) + str=obj2.decrypt(s) + end=time.time() + print_timing(256, end-start, verbose) + del obj1, obj2 + + return ciph + +def TestStreamModules(args=['arc4', 'XOR'], verbose=1): + import sys, string + args=map(string.lower, args) + + if 'arc4' in args: + # Test ARC4 stream cipher + arc4=exerciseStreamCipher('ARC4', verbose) + if (arc4!=None): + for entry in testdata.arc4: + key,plain,cipher=entry + key=binascii.a2b_hex(key) + plain=binascii.a2b_hex(plain) + cipher=binascii.a2b_hex(cipher) + obj=arc4.new(key) + ciphertext=obj.encrypt(plain) + if (ciphertext!=cipher): + die('ARC4 failed on entry '+`entry`) + + if 'xor' in args: + # Test XOR stream cipher + XOR=exerciseStreamCipher('XOR', verbose) + if (XOR!=None): + for entry in testdata.xor: + key,plain,cipher=entry + key=binascii.a2b_hex(key) + plain=binascii.a2b_hex(plain) + cipher=binascii.a2b_hex(cipher) + obj=XOR.new(key) + ciphertext=obj.encrypt(plain) + if (ciphertext!=cipher): + die('XOR failed on entry '+`entry`) + + +def TestBlockModules(args=['aes', 'arc2', 'des', 'blowfish', 'cast', 'des3', + 'idea', 'rc5'], + verbose=1): + import string + args=map(string.lower, args) + if 'aes' in args: + ciph=exerciseBlockCipher('AES', verbose) # AES + if (ciph!=None): + if verbose: print ' Verifying against test suite...' 
+ for entry in testdata.aes: + key,plain,cipher=entry + key=binascii.a2b_hex(key) + plain=binascii.a2b_hex(plain) + cipher=binascii.a2b_hex(cipher) + obj=ciph.new(key, ciph.MODE_ECB) + ciphertext=obj.encrypt(plain) + if (ciphertext!=cipher): + die('AES failed on entry '+`entry`) + for i in ciphertext: + if verbose: print hex(ord(i)), + if verbose: print + + for entry in testdata.aes_modes: + mode, key, plain, cipher, kw = entry + key=binascii.a2b_hex(key) + plain=binascii.a2b_hex(plain) + cipher=binascii.a2b_hex(cipher) + obj=ciph.new(key, mode, **kw) + obj2=ciph.new(key, mode, **kw) + ciphertext=obj.encrypt(plain) + if (ciphertext!=cipher): + die('AES encrypt failed on entry '+`entry`) + for i in ciphertext: + if verbose: print hex(ord(i)), + if verbose: print + + plain2=obj2.decrypt(ciphertext) + if plain2!=plain: + die('AES decrypt failed on entry '+`entry`) + for i in plain2: + if verbose: print hex(ord(i)), + if verbose: print + + + if 'arc2' in args: + ciph=exerciseBlockCipher('ARC2', verbose) # Alleged RC2 + if (ciph!=None): + if verbose: print ' Verifying against test suite...' + for entry in testdata.arc2: + key,plain,cipher=entry + key=binascii.a2b_hex(key) + plain=binascii.a2b_hex(plain) + cipher=binascii.a2b_hex(cipher) + obj=ciph.new(key, ciph.MODE_ECB) + ciphertext=obj.encrypt(plain) + if (ciphertext!=cipher): + die('ARC2 failed on entry '+`entry`) + for i in ciphertext: + if verbose: print hex(ord(i)), + print + + if 'blowfish' in args: + ciph=exerciseBlockCipher('Blowfish',verbose)# Bruce Schneier's Blowfish cipher + if (ciph!=None): + if verbose: print ' Verifying against test suite...' + for entry in testdata.blowfish: + key,plain,cipher=entry + key=binascii.a2b_hex(key) + plain=binascii.a2b_hex(plain) + cipher=binascii.a2b_hex(cipher) + obj=ciph.new(key, ciph.MODE_ECB) + ciphertext=obj.encrypt(plain) + if (ciphertext!=cipher): + die('Blowfish failed on entry '+`entry`) + for i in ciphertext: + if verbose: print hex(ord(i)), + if verbose: print + + if 'cast' in args: + ciph=exerciseBlockCipher('CAST', verbose) # CAST-128 + if (ciph!=None): + if verbose: print ' Verifying against test suite...' + for entry in testdata.cast: + key,plain,cipher=entry + key=binascii.a2b_hex(key) + plain=binascii.a2b_hex(plain) + cipher=binascii.a2b_hex(cipher) + obj=ciph.new(key, ciph.MODE_ECB) + ciphertext=obj.encrypt(plain) + if (ciphertext!=cipher): + die('CAST failed on entry '+`entry`) + for i in ciphertext: + if verbose: print hex(ord(i)), + if verbose: print + + if 0: + # The full-maintenance test; it requires 4 million encryptions, + # and correspondingly is quite time-consuming. I've disabled + # it; it's faster to compile block/cast.c with -DTEST and run + # the resulting program. 
+ a = b = '\x01\x23\x45\x67\x12\x34\x56\x78\x23\x45\x67\x89\x34\x56\x78\x9A' + + for i in range(0, 1000000): + obj = cast.new(b, cast.MODE_ECB) + a = obj.encrypt(a[:8]) + obj.encrypt(a[-8:]) + obj = cast.new(a, cast.MODE_ECB) + b = obj.encrypt(b[:8]) + obj.encrypt(b[-8:]) + + if a!="\xEE\xA9\xD0\xA2\x49\xFD\x3B\xA6\xB3\x43\x6F\xB8\x9D\x6D\xCA\x92": + if verbose: print 'CAST test failed: value of "a" doesn\'t match' + if b!="\xB2\xC9\x5E\xB0\x0C\x31\xAD\x71\x80\xAC\x05\xB8\xE8\x3D\x69\x6E": + if verbose: print 'CAST test failed: value of "b" doesn\'t match' + + if 'des' in args: + # Test/benchmark DES block cipher + des=exerciseBlockCipher('DES', verbose) + if (des!=None): + # Various tests taken from the DES library packaged with Kerberos V4 + obj=des.new(binascii.a2b_hex('0123456789abcdef'), des.MODE_ECB) + s=obj.encrypt('Now is t') + if (s!=binascii.a2b_hex('3fa40e8a984d4815')): + die('DES fails test 1') + obj=des.new(binascii.a2b_hex('08192a3b4c5d6e7f'), des.MODE_ECB) + s=obj.encrypt('\000\000\000\000\000\000\000\000') + if (s!=binascii.a2b_hex('25ddac3e96176467')): + die('DES fails test 2') + obj=des.new(binascii.a2b_hex('0123456789abcdef'), des.MODE_CBC, + binascii.a2b_hex('1234567890abcdef')) + s=obj.encrypt("Now is the time for all ") + if (s!=binascii.a2b_hex('e5c7cdde872bf27c43e934008c389c0f683788499a7c05f6')): + die('DES fails test 3') + obj=des.new(binascii.a2b_hex('0123456789abcdef'), des.MODE_CBC, + binascii.a2b_hex('fedcba9876543210')) + s=obj.encrypt("7654321 Now is the time for \000\000\000\000") + if (s!=binascii.a2b_hex("ccd173ffab2039f4acd8aefddfd8a1eb468e91157888ba681d269397f7fe62b4")): + die('DES fails test 4') + del obj,s + + # R. Rivest's test: see http://theory.lcs.mit.edu/~rivest/destest.txt + x=binascii.a2b_hex('9474B8E8C73BCA7D') + for i in range(0, 16): + obj=des.new(x, des.MODE_ECB) + if (i & 1): x=obj.decrypt(x) + else: x=obj.encrypt(x) + if x!=binascii.a2b_hex('1B1A2DDB4C642438'): + die("DES fails Rivest's test") + + if verbose: print ' Verifying against test suite...' + for entry in testdata.des: + key,plain,cipher=entry + key=binascii.a2b_hex(key) + plain=binascii.a2b_hex(plain) + cipher=binascii.a2b_hex(cipher) + obj=des.new(key, des.MODE_ECB) + ciphertext=obj.encrypt(plain) + if (ciphertext!=cipher): + die('DES failed on entry '+`entry`) + for entry in testdata.des_cbc: + key, iv, plain, cipher=entry + key, iv, cipher=binascii.a2b_hex(key),binascii.a2b_hex(iv),binascii.a2b_hex(cipher) + obj1=des.new(key, des.MODE_CBC, iv) + obj2=des.new(key, des.MODE_CBC, iv) + ciphertext=obj1.encrypt(plain) + if (ciphertext!=cipher): + die('DES CBC mode failed on entry '+`entry`) + + if 'des3' in args: + ciph=exerciseBlockCipher('DES3', verbose) # Triple DES + if (ciph!=None): + if verbose: print ' Verifying against test suite...' 
+ for entry in testdata.des3: + key,plain,cipher=entry + key=binascii.a2b_hex(key) + plain=binascii.a2b_hex(plain) + cipher=binascii.a2b_hex(cipher) + obj=ciph.new(key, ciph.MODE_ECB) + ciphertext=obj.encrypt(plain) + if (ciphertext!=cipher): + die('DES3 failed on entry '+`entry`) + for i in ciphertext: + if verbose: print hex(ord(i)), + if verbose: print + for entry in testdata.des3_cbc: + key, iv, plain, cipher=entry + key, iv, cipher=binascii.a2b_hex(key),binascii.a2b_hex(iv),binascii.a2b_hex(cipher) + obj1=ciph.new(key, ciph.MODE_CBC, iv) + obj2=ciph.new(key, ciph.MODE_CBC, iv) + ciphertext=obj1.encrypt(plain) + if (ciphertext!=cipher): + die('DES3 CBC mode failed on entry '+`entry`) + + if 'idea' in args: + ciph=exerciseBlockCipher('IDEA', verbose) # IDEA block cipher + if (ciph!=None): + if verbose: print ' Verifying against test suite...' + for entry in testdata.idea: + key,plain,cipher=entry + key=binascii.a2b_hex(key) + plain=binascii.a2b_hex(plain) + cipher=binascii.a2b_hex(cipher) + obj=ciph.new(key, ciph.MODE_ECB) + ciphertext=obj.encrypt(plain) + if (ciphertext!=cipher): + die('IDEA failed on entry '+`entry`) + + if 'rc5' in args: + # Ronald Rivest's RC5 algorithm + ciph=exerciseBlockCipher('RC5', verbose) + if (ciph!=None): + if verbose: print ' Verifying against test suite...' + for entry in testdata.rc5: + key,plain,cipher=entry + key=binascii.a2b_hex(key) + plain=binascii.a2b_hex(plain) + cipher=binascii.a2b_hex(cipher) + obj=ciph.new(key[4:], ciph.MODE_ECB, + version =ord(key[0]), + word_size=ord(key[1]), + rounds =ord(key[2]) ) + ciphertext=obj.encrypt(plain) + if (ciphertext!=cipher): + die('RC5 failed on entry '+`entry`) + for i in ciphertext: + if verbose: print hex(ord(i)), + if verbose: print + + + diff --git a/gam/gdata/Crypto/__init__.py b/gam/gdata/Crypto/__init__.py new file mode 100755 index 00000000000..2324ae8c378 --- /dev/null +++ b/gam/gdata/Crypto/__init__.py @@ -0,0 +1,25 @@ + +"""Python Cryptography Toolkit + +A collection of cryptographic modules implementing various algorithms +and protocols. + +Subpackages: +Crypto.Cipher Secret-key encryption algorithms (AES, DES, ARC4) +Crypto.Hash Hashing algorithms (MD5, SHA, HMAC) +Crypto.Protocol Cryptographic protocols (Chaffing, all-or-nothing + transform). This package does not contain any + network protocols. +Crypto.PublicKey Public-key encryption and signature algorithms + (RSA, DSA) +Crypto.Util Various useful modules and functions (long-to-string + conversion, random number generation, number + theoretic functions) +""" + +__all__ = ['Cipher', 'Hash', 'Protocol', 'PublicKey', 'Util'] + +__version__ = '2.0.1' +__revision__ = "$Id: __init__.py,v 1.12 2005/06/14 01:20:22 akuchling Exp $" + + diff --git a/gam/gdata/Crypto/test.py b/gam/gdata/Crypto/test.py new file mode 100755 index 00000000000..c5ed0616216 --- /dev/null +++ b/gam/gdata/Crypto/test.py @@ -0,0 +1,38 @@ +# +# Test script for the Python Cryptography Toolkit. 
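The subpackage summary in Crypto/__init__.py above maps directly onto import paths; a short orientation sketch (hypothetical usage, Python 2 syntax, not part of the patch):

    from Crypto.Hash import SHA
    from Crypto.Cipher import ARC4
    from Crypto.Util.randpool import RandomPool

    print SHA.new('some data').hexdigest()                # Crypto.Hash: message digests
    print repr(ARC4.new('secret key').encrypt('hello'))   # Crypto.Cipher: stream ciphers
    pool = RandomPool()                                    # Crypto.Util: random-number pool
    print pool.entropy, 'bits of estimated entropy'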
+# + +__revision__ = "$Id: test.py,v 1.7 2002/07/11 14:31:19 akuchling Exp $" + +import os, sys + + +# Add the build directory to the front of sys.path +from distutils.util import get_platform +s = "build/lib.%s-%.3s" % (get_platform(), sys.version) +s = os.path.join(os.getcwd(), s) +sys.path.insert(0, s) +s = os.path.join(os.getcwd(), 'test') +sys.path.insert(0, s) + +from Crypto.Util import test + +args = sys.argv[1:] +quiet = "--quiet" in args +if quiet: args.remove('--quiet') + +if not quiet: + print '\nStream Ciphers:' + print '===============' + +if args: test.TestStreamModules(args, verbose= not quiet) +else: test.TestStreamModules(verbose= not quiet) + +if not quiet: + print '\nBlock Ciphers:' + print '==============' + +if args: test.TestBlockModules(args, verbose= not quiet) +else: test.TestBlockModules(verbose= not quiet) + + diff --git a/gam/gdata/__init__.py b/gam/gdata/__init__.py new file mode 100755 index 00000000000..634889b060f --- /dev/null +++ b/gam/gdata/__init__.py @@ -0,0 +1,835 @@ +#!/usr/bin/python +# +# Copyright (C) 2006 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +"""Contains classes representing Google Data elements. + + Extends Atom classes to add Google Data specific elements. +""" + + +__author__ = 'j.s@google.com (Jeffrey Scudder)' + +import os +import atom +try: + from xml.etree import cElementTree as ElementTree +except ImportError: + try: + import cElementTree as ElementTree + except ImportError: + try: + from xml.etree import ElementTree + except ImportError: + from elementtree import ElementTree + + +# XML namespaces which are often used in GData entities. +GDATA_NAMESPACE = 'http://schemas.google.com/g/2005' +GDATA_TEMPLATE = '{http://schemas.google.com/g/2005}%s' +OPENSEARCH_NAMESPACE = 'http://a9.com/-/spec/opensearchrss/1.0/' +OPENSEARCH_TEMPLATE = '{http://a9.com/-/spec/opensearchrss/1.0/}%s' +BATCH_NAMESPACE = 'http://schemas.google.com/gdata/batch' +GACL_NAMESPACE = 'http://schemas.google.com/acl/2007' +GACL_TEMPLATE = '{http://schemas.google.com/acl/2007}%s' + + +# Labels used in batch request entries to specify the desired CRUD operation. +BATCH_INSERT = 'insert' +BATCH_UPDATE = 'update' +BATCH_DELETE = 'delete' +BATCH_QUERY = 'query' + +class Error(Exception): + pass + + +class MissingRequiredParameters(Error): + pass + + +class MediaSource(object): + """GData Entries can refer to media sources, so this class provides a + place to store references to these objects along with some metadata. + """ + + def __init__(self, file_handle=None, content_type=None, content_length=None, + file_path=None, file_name=None): + """Creates an object of type MediaSource. + + Args: + file_handle: A file handle pointing to the file to be encapsulated in the + MediaSource + content_type: string The MIME type of the file. Required if a file_handle + is given. + content_length: int The size of the file. Required if a file_handle is + given. + file_path: string (optional) A full path name to the file. Used in + place of a file_handle. 
+ file_name: string The name of the file without any path information. + Required if a file_handle is given. + """ + self.file_handle = file_handle + self.content_type = content_type + self.content_length = content_length + self.file_name = file_name + + if (file_handle is None and content_type is not None and + file_path is not None): + self.setFile(file_path, content_type) + + def setFile(self, file_name, content_type): + """A helper function which can create a file handle from a given filename + and set the content type and length all at once. + + Args: + file_name: string The path and file name to the file containing the media + content_type: string A MIME type representing the type of the media + """ + + self.file_handle = open(file_name, 'rb') + self.content_type = content_type + self.content_length = os.path.getsize(file_name) + self.file_name = os.path.basename(file_name) + + +class LinkFinder(atom.LinkFinder): + """An "interface" providing methods to find link elements + + GData Entry elements often contain multiple links which differ in the rel + attribute or content type. Often, developers are interested in a specific + type of link so this class provides methods to find specific classes of + links. + + This class is used as a mixin in GData entries. + """ + + def GetSelfLink(self): + """Find the first link with rel set to 'self' + + Returns: + An atom.Link or none if none of the links had rel equal to 'self' + """ + + for a_link in self.link: + if a_link.rel == 'self': + return a_link + return None + + def GetEditLink(self): + for a_link in self.link: + if a_link.rel == 'edit': + return a_link + return None + + def GetEditMediaLink(self): + """The Picasa API mistakenly returns media-edit rather than edit-media, but + this may change soon. + """ + for a_link in self.link: + if a_link.rel == 'edit-media': + return a_link + if a_link.rel == 'media-edit': + return a_link + return None + + def GetHtmlLink(self): + """Find the first link with rel of alternate and type of text/html + + Returns: + An atom.Link or None if no links matched + """ + for a_link in self.link: + if a_link.rel == 'alternate' and a_link.type == 'text/html': + return a_link + return None + + def GetPostLink(self): + """Get a link containing the POST target URL. + + The POST target URL is used to insert new entries. + + Returns: + A link object with a rel matching the POST type. 
+ """ + for a_link in self.link: + if a_link.rel == 'http://schemas.google.com/g/2005#post': + return a_link + return None + + def GetAclLink(self): + for a_link in self.link: + if a_link.rel == 'http://schemas.google.com/acl/2007#accessControlList': + return a_link + return None + + def GetFeedLink(self): + for a_link in self.link: + if a_link.rel == 'http://schemas.google.com/g/2005#feed': + return a_link + return None + + def GetNextLink(self): + for a_link in self.link: + if a_link.rel == 'next': + return a_link + return None + + def GetPrevLink(self): + for a_link in self.link: + if a_link.rel == 'previous': + return a_link + return None + + +class TotalResults(atom.AtomBase): + """opensearch:TotalResults for a GData feed""" + + _tag = 'totalResults' + _namespace = OPENSEARCH_NAMESPACE + _children = atom.AtomBase._children.copy() + _attributes = atom.AtomBase._attributes.copy() + + def __init__(self, extension_elements=None, + extension_attributes=None, text=None): + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + +def TotalResultsFromString(xml_string): + return atom.CreateClassFromXMLString(TotalResults, xml_string) + + +class StartIndex(atom.AtomBase): + """The opensearch:startIndex element in GData feed""" + + _tag = 'startIndex' + _namespace = OPENSEARCH_NAMESPACE + _children = atom.AtomBase._children.copy() + _attributes = atom.AtomBase._attributes.copy() + + def __init__(self, extension_elements=None, + extension_attributes=None, text=None): + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + +def StartIndexFromString(xml_string): + return atom.CreateClassFromXMLString(StartIndex, xml_string) + + +class ItemsPerPage(atom.AtomBase): + """The opensearch:itemsPerPage element in GData feed""" + + _tag = 'itemsPerPage' + _namespace = OPENSEARCH_NAMESPACE + _children = atom.AtomBase._children.copy() + _attributes = atom.AtomBase._attributes.copy() + + def __init__(self, extension_elements=None, + extension_attributes=None, text=None): + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + +def ItemsPerPageFromString(xml_string): + return atom.CreateClassFromXMLString(ItemsPerPage, xml_string) + + +class ExtendedProperty(atom.AtomBase): + """The Google Data extendedProperty element. + + Used to store arbitrary key-value information specific to your + application. The value can either be a text string stored as an XML + attribute (.value), or an XML node (XmlBlob) as a child element. + + This element is used in the Google Calendar data API and the Google + Contacts data API. + """ + + _tag = 'extendedProperty' + _namespace = GDATA_NAMESPACE + _children = atom.AtomBase._children.copy() + _attributes = atom.AtomBase._attributes.copy() + _attributes['name'] = 'name' + _attributes['value'] = 'value' + + def __init__(self, name=None, value=None, extension_elements=None, + extension_attributes=None, text=None): + self.name = name + self.value = value + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + def GetXmlBlobExtensionElement(self): + """Returns the XML blob as an atom.ExtensionElement. + + Returns: + An atom.ExtensionElement representing the blob's XML, or None if no + blob was set. 
+ """ + if len(self.extension_elements) < 1: + return None + else: + return self.extension_elements[0] + + def GetXmlBlobString(self): + """Returns the XML blob as a string. + + Returns: + A string containing the blob's XML, or None if no blob was set. + """ + blob = self.GetXmlBlobExtensionElement() + if blob: + return blob.ToString() + return None + + def SetXmlBlob(self, blob): + """Sets the contents of the extendedProperty to XML as a child node. + + Since the extendedProperty is only allowed one child element as an XML + blob, setting the XML blob will erase any preexisting extension elements + in this object. + + Args: + blob: str, ElementTree Element or atom.ExtensionElement representing + the XML blob stored in the extendedProperty. + """ + # Erase any existing extension_elements, clears the child nodes from the + # extendedProperty. + self.extension_elements = [] + if isinstance(blob, atom.ExtensionElement): + self.extension_elements.append(blob) + elif ElementTree.iselement(blob): + self.extension_elements.append(atom._ExtensionElementFromElementTree( + blob)) + else: + self.extension_elements.append(atom.ExtensionElementFromString(blob)) + + +def ExtendedPropertyFromString(xml_string): + return atom.CreateClassFromXMLString(ExtendedProperty, xml_string) + + +class GDataEntry(atom.Entry, LinkFinder): + """Extends Atom Entry to provide data processing""" + + _tag = atom.Entry._tag + _namespace = atom.Entry._namespace + _children = atom.Entry._children.copy() + _attributes = atom.Entry._attributes.copy() + + def __GetId(self): + return self.__id + + # This method was created to strip the unwanted whitespace from the id's + # text node. + def __SetId(self, id): + self.__id = id + if id is not None and id.text is not None: + self.__id.text = id.text.strip() + + id = property(__GetId, __SetId) + + def IsMedia(self): + """Determines whether or not an entry is a GData Media entry. + """ + if (self.GetEditMediaLink()): + return True + else: + return False + + def GetMediaURL(self): + """Returns the URL to the media content, if the entry is a media entry. + Otherwise returns None. + """ + if not self.IsMedia(): + return None + else: + return self.content.src + + +def GDataEntryFromString(xml_string): + """Creates a new GDataEntry instance given a string of XML.""" + return atom.CreateClassFromXMLString(GDataEntry, xml_string) + + +class GDataFeed(atom.Feed, LinkFinder): + """A Feed from a GData service""" + + _tag = 'feed' + _namespace = atom.ATOM_NAMESPACE + _children = atom.Feed._children.copy() + _attributes = atom.Feed._attributes.copy() + _children['{%s}totalResults' % OPENSEARCH_NAMESPACE] = ('total_results', + TotalResults) + _children['{%s}startIndex' % OPENSEARCH_NAMESPACE] = ('start_index', + StartIndex) + _children['{%s}itemsPerPage' % OPENSEARCH_NAMESPACE] = ('items_per_page', + ItemsPerPage) + # Add a conversion rule for atom:entry to make it into a GData + # Entry. 
+ _children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry', [GDataEntry]) + + def __GetId(self): + return self.__id + + def __SetId(self, id): + self.__id = id + if id is not None and id.text is not None: + self.__id.text = id.text.strip() + + id = property(__GetId, __SetId) + + def __GetGenerator(self): + return self.__generator + + def __SetGenerator(self, generator): + self.__generator = generator + if generator is not None: + self.__generator.text = generator.text.strip() + + generator = property(__GetGenerator, __SetGenerator) + + def __init__(self, author=None, category=None, contributor=None, + generator=None, icon=None, atom_id=None, link=None, logo=None, + rights=None, subtitle=None, title=None, updated=None, entry=None, + total_results=None, start_index=None, items_per_page=None, + extension_elements=None, extension_attributes=None, text=None): + """Constructor for Source + + Args: + author: list (optional) A list of Author instances which belong to this + class. + category: list (optional) A list of Category instances + contributor: list (optional) A list on Contributor instances + generator: Generator (optional) + icon: Icon (optional) + id: Id (optional) The entry's Id element + link: list (optional) A list of Link instances + logo: Logo (optional) + rights: Rights (optional) The entry's Rights element + subtitle: Subtitle (optional) The entry's subtitle element + title: Title (optional) the entry's title element + updated: Updated (optional) the entry's updated element + entry: list (optional) A list of the Entry instances contained in the + feed. + text: String (optional) The text contents of the element. This is the + contents of the Entry's XML text node. + (Example: This is the text) + extension_elements: list (optional) A list of ExtensionElement instances + which are children of this element. + extension_attributes: dict (optional) A dictionary of strings which are + the values for additional XML attributes of this element. 
+ """ + + self.author = author or [] + self.category = category or [] + self.contributor = contributor or [] + self.generator = generator + self.icon = icon + self.id = atom_id + self.link = link or [] + self.logo = logo + self.rights = rights + self.subtitle = subtitle + self.title = title + self.updated = updated + self.entry = entry or [] + self.total_results = total_results + self.start_index = start_index + self.items_per_page = items_per_page + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + +def GDataFeedFromString(xml_string): + return atom.CreateClassFromXMLString(GDataFeed, xml_string) + + +class BatchId(atom.AtomBase): + _tag = 'id' + _namespace = BATCH_NAMESPACE + _children = atom.AtomBase._children.copy() + _attributes = atom.AtomBase._attributes.copy() + + +def BatchIdFromString(xml_string): + return atom.CreateClassFromXMLString(BatchId, xml_string) + + +class BatchOperation(atom.AtomBase): + _tag = 'operation' + _namespace = BATCH_NAMESPACE + _children = atom.AtomBase._children.copy() + _attributes = atom.AtomBase._attributes.copy() + _attributes['type'] = 'type' + + def __init__(self, op_type=None, extension_elements=None, + extension_attributes=None, + text=None): + self.type = op_type + atom.AtomBase.__init__(self, + extension_elements=extension_elements, + extension_attributes=extension_attributes, + text=text) + + +def BatchOperationFromString(xml_string): + return atom.CreateClassFromXMLString(BatchOperation, xml_string) + + +class BatchStatus(atom.AtomBase): + """The batch:status element present in a batch response entry. + + A status element contains the code (HTTP response code) and + reason as elements. In a single request these fields would + be part of the HTTP response, but in a batch request each + Entry operation has a corresponding Entry in the response + feed which includes status information. + + See http://code.google.com/apis/gdata/batch.html#Handling_Errors + """ + + _tag = 'status' + _namespace = BATCH_NAMESPACE + _children = atom.AtomBase._children.copy() + _attributes = atom.AtomBase._attributes.copy() + _attributes['code'] = 'code' + _attributes['reason'] = 'reason' + _attributes['content-type'] = 'content_type' + + def __init__(self, code=None, reason=None, content_type=None, + extension_elements=None, extension_attributes=None, text=None): + self.code = code + self.reason = reason + self.content_type = content_type + atom.AtomBase.__init__(self, extension_elements=extension_elements, + extension_attributes=extension_attributes, + text=text) + + +def BatchStatusFromString(xml_string): + return atom.CreateClassFromXMLString(BatchStatus, xml_string) + + +class BatchEntry(GDataEntry): + """An atom:entry for use in batch requests. + + The BatchEntry contains additional members to specify the operation to be + performed on this entry and a batch ID so that the server can reference + individual operations in the response feed. 
+  For more information, see:
+  http://code.google.com/apis/gdata/batch.html
+  """
+
+  _tag = GDataEntry._tag
+  _namespace = GDataEntry._namespace
+  _children = GDataEntry._children.copy()
+  _children['{%s}operation' % BATCH_NAMESPACE] = ('batch_operation', BatchOperation)
+  _children['{%s}id' % BATCH_NAMESPACE] = ('batch_id', BatchId)
+  _children['{%s}status' % BATCH_NAMESPACE] = ('batch_status', BatchStatus)
+  _attributes = GDataEntry._attributes.copy()
+
+  def __init__(self, author=None, category=None, content=None,
+      contributor=None, atom_id=None, link=None, published=None, rights=None,
+      source=None, summary=None, control=None, title=None, updated=None,
+      batch_operation=None, batch_id=None, batch_status=None,
+      extension_elements=None, extension_attributes=None, text=None):
+    self.batch_operation = batch_operation
+    self.batch_id = batch_id
+    self.batch_status = batch_status
+    GDataEntry.__init__(self, author=author, category=category,
+        content=content, contributor=contributor, atom_id=atom_id, link=link,
+        published=published, rights=rights, source=source, summary=summary,
+        control=control, title=title, updated=updated,
+        extension_elements=extension_elements,
+        extension_attributes=extension_attributes, text=text)
+
+
+def BatchEntryFromString(xml_string):
+  return atom.CreateClassFromXMLString(BatchEntry, xml_string)
+
+
+class BatchInterrupted(atom.AtomBase):
+  """The batch:interrupted element sent if the batch request was interrupted.
+
+  Only appears in a feed if some of the batch entries could not be processed.
+  See: http://code.google.com/apis/gdata/batch.html#Handling_Errors
+  """
+
+  _tag = 'interrupted'
+  _namespace = BATCH_NAMESPACE
+  _children = atom.AtomBase._children.copy()
+  _attributes = atom.AtomBase._attributes.copy()
+  _attributes['reason'] = 'reason'
+  _attributes['success'] = 'success'
+  _attributes['failures'] = 'failures'
+  _attributes['parsed'] = 'parsed'
+
+  def __init__(self, reason=None, success=None, failures=None, parsed=None,
+      extension_elements=None, extension_attributes=None, text=None):
+    self.reason = reason
+    self.success = success
+    self.failures = failures
+    self.parsed = parsed
+    atom.AtomBase.__init__(self, extension_elements=extension_elements,
+        extension_attributes=extension_attributes, text=text)
+
+
+def BatchInterruptedFromString(xml_string):
+  return atom.CreateClassFromXMLString(BatchInterrupted, xml_string)
+
+
+class BatchFeed(GDataFeed):
+  """A feed containing a list of batch request entries."""
+
+  _tag = GDataFeed._tag
+  _namespace = GDataFeed._namespace
+  _children = GDataFeed._children.copy()
+  _attributes = GDataFeed._attributes.copy()
+  _children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry', [BatchEntry])
+  _children['{%s}interrupted' % BATCH_NAMESPACE] = ('interrupted', BatchInterrupted)
+
+  def __init__(self, author=None, category=None, contributor=None,
+      generator=None, icon=None, atom_id=None, link=None, logo=None,
+      rights=None, subtitle=None, title=None, updated=None, entry=None,
+      total_results=None, start_index=None, items_per_page=None,
+      interrupted=None,
+      extension_elements=None, extension_attributes=None, text=None):
+    self.interrupted = interrupted
+    GDataFeed.__init__(self, author=author, category=category,
+        contributor=contributor, generator=generator,
+        icon=icon, atom_id=atom_id, link=link,
+        logo=logo, rights=rights, subtitle=subtitle,
+        title=title, updated=updated, entry=entry,
+        total_results=total_results, start_index=start_index,
+        items_per_page=items_per_page,
+        extension_elements=extension_elements,
+        extension_attributes=extension_attributes,
+        text=text)
+
+  def AddBatchEntry(self, entry=None, id_url_string=None,
+      batch_id_string=None, operation_string=None):
+    """Logic for populating members of a BatchEntry and adding to the feed.
+
+    If the entry is not a BatchEntry, it is converted to a BatchEntry so
+    that the batch specific members will be present.
+
+    The id_url_string can be used in place of an entry if the batch operation
+    applies to a URL. For example, query and delete operations require just
+    the URL of an entry; no body is sent in the HTTP request. If an
+    id_url_string is sent instead of an entry, a BatchEntry is created and
+    added to the feed.
+
+    This method also assigns the desired batch id to the entry so that it
+    can be referenced in the server's response. If the batch_id_string is
+    None, this method will assign a batch_id to be the index at which this
+    entry will be in the feed's entry list.
+
+    Args:
+      entry: BatchEntry, atom.Entry, or another Entry flavor (optional) The
+          entry which will be sent to the server as part of the batch request.
+          The item must have a valid atom id so that the server knows which
+          entry this request references.
+      id_url_string: str (optional) The URL of the entry to be acted on. You
+          can find this URL in the text member of the atom id for an entry.
+          If an entry is not sent, this id will be used to construct a new
+          BatchEntry which will be added to the request feed.
+      batch_id_string: str (optional) The batch ID to be used to reference
+          this batch operation in the results feed. If this parameter is None,
+          the current length of the feed's entry array will be used as a
+          count. Note that batch_ids should either always be specified or
+          never; mixing the two could result in duplicate batch ids.
+      operation_string: str (optional) The desired batch operation which will
+          set the batch_operation.type member of the entry. Options are
+          'insert', 'update', 'delete', and 'query'.
+
+    Raises:
+      MissingRequiredParameters: Raised if neither an id_url_string nor an
+          entry is provided in the request.
+
+    Returns:
+      The added entry.
+    """
+    if entry is None and id_url_string is None:
+      raise MissingRequiredParameters('supply either an entry or URL string')
+    if entry is None and id_url_string is not None:
+      entry = BatchEntry(atom_id=atom.Id(text=id_url_string))
+    # TODO: handle cases in which the entry lacks batch_... members.
+    #if not isinstance(entry, BatchEntry):
+      # Convert the entry to a batch entry.
+    if batch_id_string is not None:
+      entry.batch_id = BatchId(text=batch_id_string)
+    elif entry.batch_id is None or entry.batch_id.text is None:
+      entry.batch_id = BatchId(text=str(len(self.entry)))
+    if operation_string is not None:
+      entry.batch_operation = BatchOperation(op_type=operation_string)
+    self.entry.append(entry)
+    return entry
+
+  def AddInsert(self, entry, batch_id_string=None):
+    """Add an insert request to the operations in this batch request feed.
+
+    If the entry doesn't yet have an operation or a batch id, these will
+    be set to the insert operation and a batch_id specified as a parameter.
+
+    Args:
+      entry: BatchEntry The entry which will be sent in the batch feed as an
+          insert request.
+      batch_id_string: str (optional) The batch ID to be used to reference
+          this batch operation in the results feed. If this parameter is None,
+          the current length of the feed's entry array will be used as a
+          count.
+          Note that batch_ids should either always be specified or
+          never; mixing the two could result in duplicate batch ids.
+    """
+    entry = self.AddBatchEntry(entry=entry, batch_id_string=batch_id_string,
+        operation_string=BATCH_INSERT)
+
+  def AddUpdate(self, entry, batch_id_string=None):
+    """Add an update request to the list of batch operations in this feed.
+
+    Sets the operation type of the entry to update if it is not already set
+    and assigns the desired batch id to the entry so that it can be
+    referenced in the server's response.
+
+    Args:
+      entry: BatchEntry The entry which will be sent to the server as an
+          update (HTTP PUT) request. The item must have a valid atom id
+          so that the server knows which entry to replace.
+      batch_id_string: str (optional) The batch ID to be used to reference
+          this batch operation in the results feed. If this parameter is None,
+          the current length of the feed's entry array will be used as a
+          count. See also comments for AddInsert.
+    """
+    entry = self.AddBatchEntry(entry=entry, batch_id_string=batch_id_string,
+        operation_string=BATCH_UPDATE)
+
+  def AddDelete(self, url_string=None, entry=None, batch_id_string=None):
+    """Adds a delete request to the batch request feed.
+
+    This method takes either the url_string, which is the atom id of the item
+    to be deleted, or the entry itself. The atom id of the entry must be
+    present so that the server knows which entry should be deleted.
+
+    Args:
+      url_string: str (optional) The URL of the entry to be deleted. You can
+          find this URL in the text member of the atom id for an entry.
+      entry: BatchEntry (optional) The entry to be deleted.
+      batch_id_string: str (optional)
+
+    Raises:
+      MissingRequiredParameters: Raised if neither a url_string nor an entry
+          is provided in the request.
+    """
+    entry = self.AddBatchEntry(entry=entry, id_url_string=url_string,
+        batch_id_string=batch_id_string,
+        operation_string=BATCH_DELETE)
+
+  def AddQuery(self, url_string=None, entry=None, batch_id_string=None):
+    """Adds a query request to the batch request feed.
+
+    This method takes either the url_string, which is the query URL whose
+    results will be added to the result feed, or a BatchEntry that already
+    carries the query URL. A plain url_string is encapsulated in a BatchEntry
+    before it is added to the feed.
+
+    Args:
+      url_string: str (optional)
+      entry: BatchEntry (optional)
+      batch_id_string: str (optional)
+
+    Raises:
+      MissingRequiredParameters
+    """
+    entry = self.AddBatchEntry(entry=entry, id_url_string=url_string,
+        batch_id_string=batch_id_string,
+        operation_string=BATCH_QUERY)
+
+  def GetBatchLink(self):
+    for link in self.link:
+      if link.rel == 'http://schemas.google.com/g/2005#batch':
+        return link
+    return None
+
+
+def BatchFeedFromString(xml_string):
+  return atom.CreateClassFromXMLString(BatchFeed, xml_string)
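The methods above make up the request side of the GData batch protocol added by this patch. A minimal sketch of how they might be called is shown below; it is not part of the patch, and it assumes the gam/ directory is on sys.path so the vendored packages import as `atom` and `gdata` (Python 2, like the library itself). The URL and batch id strings are made-up examples.

    import atom
    import gdata

    # Build a batch request feed with one insert and one delete operation.
    request_feed = gdata.BatchFeed()

    # Queue an insert; the entry body is sent with operation type 'insert'.
    new_entry = gdata.BatchEntry()
    new_entry.title = atom.Title(text='a new item')  # assumes atom.Title from the vendored atom package
    request_feed.AddInsert(new_entry, batch_id_string='insert-1')

    # A delete only needs the entry's atom id, so a URL string is enough;
    # AddBatchEntry wraps it in a BatchEntry internally.
    request_feed.AddDelete(url_string='http://example.com/feeds/items/1',
                           batch_id_string='delete-1')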
+
+
+class EntryLink(atom.AtomBase):
+  """The gd:entryLink element"""
+
+  _tag = 'entryLink'
+  _namespace = GDATA_NAMESPACE
+  _children = atom.AtomBase._children.copy()
+  _attributes = atom.AtomBase._attributes.copy()
+  # The entry used to be an atom.Entry, now it is a GDataEntry.
+  _children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry', GDataEntry)
+  _attributes['rel'] = 'rel'
+  _attributes['readOnly'] = 'read_only'
+  _attributes['href'] = 'href'
+
+  def __init__(self, href=None, read_only=None, rel=None,
+      entry=None, extension_elements=None,
+      extension_attributes=None, text=None):
+    self.href = href
+    self.read_only = read_only
+    self.rel = rel
+    self.entry = entry
+    self.text = text
+    self.extension_elements = extension_elements or []
+    self.extension_attributes = extension_attributes or {}
+
+
+def EntryLinkFromString(xml_string):
+  return atom.CreateClassFromXMLString(EntryLink, xml_string)
+
+
+class FeedLink(atom.AtomBase):
+  """The gd:feedLink element"""
+
+  _tag = 'feedLink'
+  _namespace = GDATA_NAMESPACE
+  _children = atom.AtomBase._children.copy()
+  _attributes = atom.AtomBase._attributes.copy()
+  _children['{%s}feed' % atom.ATOM_NAMESPACE] = ('feed', GDataFeed)
+  _attributes['rel'] = 'rel'
+  _attributes['readOnly'] = 'read_only'
+  _attributes['countHint'] = 'count_hint'
+  _attributes['href'] = 'href'
+
+  def __init__(self, count_hint=None, href=None, read_only=None, rel=None,
+      feed=None, extension_elements=None, extension_attributes=None,
+      text=None):
+    self.count_hint = count_hint
+    self.href = href
+    self.read_only = read_only
+    self.rel = rel
+    self.feed = feed
+    self.text = text
+    self.extension_elements = extension_elements or []
+    self.extension_attributes = extension_attributes or {}
+
+
+def FeedLinkFromString(xml_string):
+  return atom.CreateClassFromXMLString(FeedLink, xml_string)
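On the response side, the server returns a feed that can be parsed with the helpers defined in this file. A rough sketch follows (again illustrative, not part of the patch); `response_xml` is a placeholder for the XML body returned by the server's batch endpoint.

    result_feed = gdata.BatchFeedFromString(response_xml)

    # batch:interrupted is only present when some entries could not be processed.
    if result_feed.interrupted is not None:
      print 'batch interrupted: %s' % result_feed.interrupted.reason

    for entry in result_feed.entry:
      # Each response entry echoes the request's batch id and carries a
      # batch:status element with the per-operation HTTP code and reason.
      print entry.batch_id.text, entry.batch_status.code, entry.batch_status.reason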
diff --git a/gam/gdata/__init__.pyc b/gam/gdata/__init__.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..0f373706d9face6a583d358fd16ae1073cb2ba82
GIT binary patch
literal 33816
[base85 binary delta omitted: compiled bytecode for gam/gdata/__init__.py]

diff --git a/gam/gdata/analytics/Crypto/Cipher/ARC2.pyd b/gam/gdata/analytics/Crypto/Cipher/ARC2.pyd
new file mode 100755
index 0000000000000000000000000000000000000000..a9dfbf691c4fdf7e4ba78894f38e6cc4835b72f8
GIT binary patch
literal 15872
[base85 binary delta omitted]

diff --git a/gam/gdata/analytics/Crypto/Cipher/ARC4.pyd b/gam/gdata/analytics/Crypto/Cipher/ARC4.pyd
new file mode 100755
index 0000000000000000000000000000000000000000..4cd51f209b382223f06a5104fbd2d5198c9f3561
GIT binary patch
literal 8704
[base85 binary delta omitted]

diff --git a/gam/gdata/analytics/Crypto/Cipher/CAST.pyd b/gam/gdata/analytics/Crypto/Cipher/CAST.pyd
new file mode 100755
index 0000000000000000000000000000000000000000..c3e1fa6233dee810949052844150f8ba1275e0b1
GIT binary patch
literal 26112
[base85 binary delta omitted]

diff --git a/gam/gdata/analytics/Crypto/Cipher/DES.pyd b/gam/gdata/analytics/Crypto/Cipher/DES.pyd
new file mode 100755
index 0000000000000000000000000000000000000000..b9967dcb1a0b50410c391efd4cea16b38cdeb023
GIT binary patch
literal 20480
[base85 binary delta omitted; the delta continues beyond this excerpt]
z9kyS3jG|CJ@eNKpO8TnPpp+n|tCA%}N)Gs;{DLp2yQbrWP%Aw;EPpI4zY6jtLcWaT zPeddb<5$-I4!Wn&!sBUxBh{pCmy|C%`z7<)-5NNY#x0J5v?psrvd)m8v?PDi(h)rK#>F3=9)CDb*V$)?J%NTE z>xwsGI{T(kW2FS@6~yjD<4R&vV;r$Nm9l_;XFypasm=r?KRJ#TQ9c$wwqwPf9FV^b zh19Zux<+c7!RLEhfsSfpTY;YHX>A2DZL1}|l-dfe;ESiN;7Y2&8mEPw6b{=rXzYd) zT9&jc!IAI>?{Ty@&Bk8#0`*T5#MB&H@JkE_7vv|2ff4}=&Y}3j7Icm0;(0W9E~vqC zfyQ0Kx#$L!=YkqM7v0Lm&H>5%`RzzJ3pF2RK{L@*-X^H6zoJ7iF#3^ zejuo@@Y(>0^8M+3zjQ73r_VbF3?vw$jf@2QXd@d#@T=WturspGGe#vi7s4#IiyCMl z-^x3&-O3O!Hq;rAV!L<$4ylUob`FBV)Y8G0W2_igRL>0MT<52HWvH`ny7G4H^U7C* z1qp+Jf$7S**FGiYwSox*ylaUk4DS9tnN`mEK3qnIuJoOWfp}n?ug@OarN;T$a%_}w z53m*3;J}~gI}=a*NxnW)Y!@+B3PytOjE)$IzP>Bb4YaQE^`#4u??L3E@1tcD+cb-R1~~Dw;U=}R)j*+;NFG6kIr0d(HDDmA#`tGmi#?4kR?+*q zBpfwL59KZt+u7MEDdRy?N{2#pJ`Bdk|L{f3pXfD(D@lS{7I%y?>Mz1nlCZ8?*zG}g zn>0Sp_tm(&2gRh7{LUeotvkO7g?uLyZsjY|H)y~?-M2RA4kbQ)A5QHY1I^y<9Dcdr zbCoVVM%8OjKKI?7%sf}A1<4_UFP(Yg&{B)(bPlbX*6rXL-442Tvu?*UQ6-5=rpoU~ z!7?(mpoyBUwT$AKa}CKG3GH_9Mb_=0J2_KQlS7G5At>KTHJm zBheOiDDftE=b3Hoh~M)H{P}i%Z19*>ZX=GAC{pUpA}F`-fVfJX3MJki*6jZ~sHx5& zU(gW$Kqu@x&=r5U6BojTk?N~2tGn zPRtaZ=INVhPS7qNN-Pc!DNTkXJicL3GH(*~9OmePO;spyALNv-{;#fJ?1FP}oF@b8 z&$rzW!MIuFkJw)S^MMru9Ee{6vl&^!& znO(4|#=x)QNKi|A)%>d^#W79u^JM>;QGRlTAH_p6&*1BzD+HE2THKZuwnPl%N-YZL zzEp}%4@^>vP9vbn7Rk~kP0brMBuIJ2>0R$rzyFId$2)~Wkgld!L?8njh4`Uy-lFg* zp6I+7bA%@1f*wi|#@e4hori*XiI^}lfvGe|h6O#ojwH3H*Pjs2JFY$Ayi43NgX)MF zcwILQ!roa}LH)PH;4Kwj`d-8J1#vd#;EtSdvUko^Z^Nz0pbPr*ry1i97fa};B_X~I z4QFp#!ni^{Ky*XNi;KZL$&2eB4|`mn`m#1oc}+xr+81PZFL{FPq|x606ho{ZqGcVMAASG!OR`T~ZG;N2UBhe8ZW(&#~Xsvi>t&CHro#bk$%*@+h||sEgsI{4Fc4*u&cjPgdV6uk2VTe7 zFHF+w2ySvOr2VCR`~|?MwfW$A%J8jzK zbg0BQ#fItZi8xkD&*4~c_F(1r&_>tO|Id)ujPS{2D}|9mWTdQ5Elf}flhnd#N})_U zDwH%+{z85PjDt0C1Qhkg6!l;}O}FG~N9P#@b z9()=6n#ix%75wmxX{R~rJ{xva)*DmSqZakV6a@w*L~H`1#sG)R$G%-SVG?mZLRd}E zDI0Of=p?0Z+Q4ea;)E%@%n9SqpQ*X#hl@H9LlhKeOKDF`X&~}!xf{$;o^{S1wXDa6 z&g{LB4wu`)hs*Va{A5`jHDEiJs^vIfdi)9F{3X+I9IV8_5>UOA%PVmtm`?FS;i>|~ z+k^4l@uphH&zZuw{ur3ntf?F0ls6)dnH!bgA_{-POmW2A5@oYz#HR9+P&<_|wClz0 z!iclR=Ipt9tr!J^zmOj@+F)c3g$F|$^-WjfB1uytJl>`rOaVUVXT)TqV+ICy{o|Mh z`#{{8Z&ON^HmKnz|yjd8xv z#=|#&_`Pq?8RNi4hE>k*;A0*z;xO%r)%rM)it~#OkLQOev%?1rPe=HG!33us{TO0V zXH57pod)VWaua@PDZzQhw=#UMu)}vZ9zbT2NRZMOaqJD>@&`8h?v68f^3{Sk>Vf~Z z1ndiva&AEvT}zW_Xx75Jx*<+mm;Mq9Eh6#e9XA9AxZMT(5v%^NKg9?EIR(Kfzly=MY{BJ{OBe$fH>Q?CZ~e68~UV{QVv2U7iK>K2dw_&96e- zwRA?0JZB%lsD=8UfmZ8#;u*#hB}KgxQuomuUN6}a4}j#kVfb2vtPbCac!Ca*iKzL- zO$@9ZH5un4o5Q!B_ed$~Er3u;8lHA49%+V@wFy`=O71lNI!7%#uX=;(&aah0?X65? 
zdgz{x^eFF-nH})}%CGr&$-ti*#ANBepC}T6)8D2%e(*O7E}Q4}$U7 z$B;oXji1y0uf1=jm*tG5#Gs36 zm_hRk0#bT3gLXJ#dXn1~K;0w#o(}^#vLUW%6W#2y7BXP>u2wr-GCI-Ir!}p(*LH>H)(0*_o z(wffjUdgCVBt%^+`E#YmsM-3uxBrC`{-@_rf+3HoDYXw-=;{obXIlE@tuY{ z3H}Y-(@`GuM52+?WbSkleSdqYzZcJhLN`VGOA!8Ehrh^gKhgiAIa@H}J{d?5VW8it z&ev(s-9x`6vinQoHhx+*tW?^*4?!S*>T)Ucp>m=0SRT*k`uJx$e1lcAANxxWncqW0 zJWwVe{l{iPhW7#AfaB?M+(4G@g_YYUwd`1UDA#*F_9bA==X|yj?>Gg=>DMouK=K~P zd_VRD6py?~YXXL8(+FHk+W__+#PyrAWR`R3Yj%7k9QX#SJ360u%-TJ89HMF##oik7 z7_{%ePjovEN0JEVh{Q9Q}r4#UtAoH#k?ZFdZbs)_m3 zcBG)q@cya&sdWf${WLZ4X=*r<+KV}V5gnPzdRcLp+K+&vCLZVf)b`uKSJtPiiB;5a z*xMVpLcUPQQ5?(F#FLP1?;wTs#eB#W$10^~)+zq|e6p;2LCS60vv4_7RyO~h=Ui#6 z2FA0jwANCSQmeE!P?Pehw0fw?bJ@C`nmnDYJE$o#+M;E2&qzkw{H^4LZ~JjPuR~a~ ztrxs`=GU>BXJCU3&~Nb!CmvknKCqO(lvb{zY2tTCkG1gH@Yq4B#jj1AP=*2qh-LIB z=e++k@K1ps!H)L315}0P%A@w1&|Fg#o*IRxMB(XCcuo{0bQ4~%Ka%4XY z&83MKiBrNxaO`*Z{fPJRn;)Vet)V^(b}BIX#$q&`5?CE=Mh*gOA;d+ZdMBc&LcS?tvV&$p+ftLOP^RAK`d}FVKibEc$>R zkBtK0Hb5snw~aQ@@I$)7AR|Q3z~i=wH*B*-k|n&@LXjnWNJe}bC7E#iRvyvNmIR&C z&?Z^va9Pq}kcS>mHVSUiCDR`=pSRbIZUU^0SSW+FXYZC70pDLdho^ zG(@D!hk|GTDq&kRh-i}yk|8>`E84^r(}Ua6;@}3zmhj1SAjiFc$8CjUBD*3E5vS%C zV%(4Bi(=ruC^|jM!6(vd$Ypv%4sac6o?XGCyFEZpS;ljTkbx}7+6ZwP@yL$QMU!;6 zuAq?~gD39=Ag^>B`&&nBg`Q(TUTxAsyJGxJ4Y?CIV`KSPK$D zt7W$lHPLvWU=WQ1n#3bn)YRUMHu0!aq(iz?!nVkUj^t324(U>xXe}hj{h=Zr$q|WU zsRmEb!up)Zc@!s)gM6b<4CIsig>CGHJ+cw@8;*~3i6(q9CNtT^X(6vH#hT|q0(ndu_En{?m>wxNj>twRR=ct&gz`+Ng z#yrx=GGojHKrN#Nt^=+Et^=-<;fa|5sAbf^!3UrQu7eupEjdB=#2k+W9t%7ccr5(! z`3^SCTZC=^AAlNoEc_YZ4~C(S8h9-5Sm3e1!H2#HfExb5hpq{L8aV8WqK5*4WP17l z07os$eEXKLPl)rHwsV;QfTIQ<2<@bLmW(BLPXZl$0Q{l5 zj7@=AI>)ELDGdW~)UrGU>U2;{ff}9Nz-8c|gAbSj-$uX`_%;H7Lr(%d8Fez`zz0l$ zZzBLW^uUL2;E+Sn(Fcs^04O@(_>oXcItnlbKpl+%0M`KrT|zDCIDv$a862i0h(i_n zdgue_Y8C^~gBJq;t^*D_^a0Q`-~$stLM_omhy{)TmQYKvP&L3G-g-%>C3^c63mkL_ zwG<0~2KY0;A8P39p$`C#8v5YDp8@_*LmxcgphF)3e<(;fMWWMYB5)oQaAGkdKJW!G zn+!Ud2H*zZ2H+@Sh$wkFBXt0PnmPj<`T*3EQBMXPIP@jx188W`BMcj5l9>dC$ubV;%0HCI10Ea#R^<>nOK?e?f_(lz!I)H+X zhy)NJvO_>+fC+rmIum#@0JRRa4s^=FNP?m}3?B51keiHpGW3klGoqf1da_BVXaZ0p!N9?X9{d~O-w6M}0jS|0 zID)5w9ReTpBNAXRpay_MnI#_hyGFMI?P}Y_|TyDM(}~_fP)Sm_>9S* z0O*gBbO0uU3t%azQy_=5>j2;z!3PdK0QkTuJ=z8X-!TA6R}6qs8v`HU8?oSx=)Dnq z;5y)-g9kp-%g_Vp;X4MPhwm7G9=2oP1AHU+M(~Z`1BV^}eBg+a7mOBuurpIQ0MvXS zu+@Sf1Cha(!3PdN4L)$erd^*zC22Oq0jMR^;6ng_8hpsl1`a?CK5(R!QjZ$;4X{rs zpjjp+fJA8ppawq$eCi@_J#c^|Lmq${_Mtx;H~=;51D7EWKn*_RX9EYI1|K*EjhAo* zZ=;Iab!W;QuI5GZt@M|BWvEwdC|i&zuXB04jc&O@hJc*!Tra!am5t2}o|<~MT)+9R zT$P?BjNvO9`kIEm{fYx507e-H;uLDKr`kn-IJm8*%2g#dc^YfnTjlyK^5zHo-X`)+5fv8}%UfKos?8OZckn-5 zTm&-y#IerhsjjagO3juUR}+_Av0;%+)~Xxp-8FZSg&L21XN{*?3&mCGsv>G-O+&S- zk^cOTdb9{%q`Y^n$V*Fc(47iAW46FMfZKsToDy{FsPAhp&0ic|GTw7d&>aJ|40#fZ zEL=|`7$JK|YhRcqWNQ`%-76PdsuyYFPujL#8e90iU3C2?jq}gcIWB3qla3o_Fie;* zapI&&M&s0}iHTQTbw6{O{=;M$7^rwdo{ru;@_{Eb?KK0bEe)a2LA3l8a=&@tRk3aWZXJ=Q}$&=mP zr%t`}(krjL`s(S^Z@lr=TW`Pp+uy$XZg1}gAN2Ko^wB4u{O))C{hxk1IQZFTXV0EH zcmDhrUj%~}FMjjQAODEGjnU4=R%~FE^X-l*?Yt}Qigq+<&XLw?SUI{TP*#ItaKWP@zE>)nf8 z+iRLUSTyox{F(P1;b|_Wo?PL_3L2p*S1o3&j3qO|rW5~Cee&@g z#D-_5Hsmx{F1sMP8?nN%1&9W&G7L)0K^K|&B@TX2FCj|NntW%u(~&0{jy#9fD9|Xl zCkV=#Fa>jy;Xgn?KWp4Io|=kUn&a4LyjYNAF>f#%cg!y3S`Qi%gfg<&t2!tUqE)3 zELn=5t(BFFtxHmu+(50RmG$l|HCva~SkrG@3VVx%y~QxNgpXmN%aiM=uUn06Bv($U zKohqa{2*-jXj-s9o|Pq=!cqUF{(mV0;s1NX(t@V}bSw-hz!w2bfYR$3%S6p+g)C`k zu3|K=BWE)4{!;$jGElN_B;vOHjK$r@Sak%g12h@50}-_J2N){`?Q*39yq5tz5i$X^ zg8=D%Jj(;70g?fW0O^2yKrtX(HlbY$XaO7qJPkMr=m87@q`!p?z#>2npaf72*aLV7 z@D$)QpbszvkkF3=Kr$c|upDp`pc)V^e~xkMSZ-An?cBMH{Ua-=acf8~i`>!CZ&qGj zRg23i4r&iNT+hlYnwsz&iuM6k?rLmw*Ha!6^%a}zK{e^OY^n7&@%!8|{gy_Ti?K$1 
z9qyF$zql~AU61QoWkWNyo2gw_zYQYnE^0Tq$Sg{6b8h3-^5TldCgSNL@e(gCw)Czg zLWa|F-Bq|~-&x;S)dZbu!FOmk*>XpxR{ zvd*zuthv@A>n7_R*1N3FTK~oRru8H1XVyPjC#EK)7N)LFElaIU-Jbe`)W1vp`_zqT z$J0)v^`?c=u1lYnena}|^e5AM)6b;enelGMnT$VV%*;&AT$*`XW@V;3^WMz;nLo~a zEA!pVPcpyAjIkMPQ*Co>DK@LE!FIu>w=cABu-|ImV*j=MnEhYu{q}#ie`%kdg$~F# zM{sfZiRq}R!*m*b_|PcdPBkwx*O`0F|7H%DXIeH|DlMwzSLn?NORRO0^%`rI zH4iTtYFsE z?CY|ZWM^dGoV_`_F58#w&weEPXW3oZ?`HR9pUa-L?3QI!%eF7OXW5g>o?rICvOg?C z-b^@WQB5#SH_bLJG+9h`(+ZPndeHQc>5%Cenxisjh1^X_gfyd{L<28dEIi}0w={d zdr@sN-EOKfRhw!}EzsX<-e=yAS$5F;xcLclhxw%WW%HmpXlD3}wa40P?X!Mt4Oj=QLsqc=QvMe+@IL^u-m4G* literal 0 HcmV?d00001 diff --git a/gam/gdata/analytics/Crypto/Cipher/DES3.pyd b/gam/gdata/analytics/Crypto/Cipher/DES3.pyd new file mode 100755 index 0000000000000000000000000000000000000000..6768126507cbe50f0f43974a4bfa5b8d262a2808 GIT binary patch literal 20992 zcmeHv4SZD9nfIB=RVEOa0TYdw$^`-h1e}j(6Y`zOB!C10Bw*DLLNbsnlSww2LE=jw zI7D;zG8HLo*{%8#buHa?TiT`w)YW|O8vI<;u0~5WBDNEguHmB^#X9f*Ip0|?{&-7ZaHtoYPqJi(z$Tr#PNxm=^_VX z%XH&d!`3(RLf-n=q?r?R`V=-Bl;r?Dz@$MoTGsC3IOr6&4nQrx4&xKZU=!oxVoVmg zq(C-s@urzE?r;xdi?8A2D}Nkh>?HaBPrept;f&8%5qOR z`lqJgBO4G4m#}sTd}7?!(bDC) zTa9CN?XH&BJr4C-CiDg-?g_tq15)Z6^*5?fEghZvP4Od5(AII%nQC@-HjdHLapT3i2U)8{4&TF z3;7z7?+!~a=C8E>4;Y?C3(cnij#RU{RZ>3d?3XO(cD`@KWqDOO+1V#qdX#gW{U*z2 zJKv|8Z}~T(nk>(gte#|TxGb;wB|nWsNE^5`3u@8^l76k9giCOda5Mxzd#zLX^tB%4 zRn=iq9D}~5SZ3UHiw+rb43RD6l3FsPItFbCdz8;?%l1f}oe9c|zB5^fAYkzIWvQ|I zSx&oaLTn4OBX6LZKqb^6(0C0k0NShdv#BriNitWUA;mm_h7|MLU6wbM3tAla>@g_k z5J=2lCy=*%jv>j{G&Ifl-S@u?2K#4=RAD~%6c(>i;m zU1<$sXrcp~`6svNd#Lq~+69y_n-Ju&UK`k+;-WcPNsm3F@y3RgB zbT>MB+*iC4)!7$Mouv|}*ATlCook6ropHqOR7wN>rhu|SQXR2MUSbU0M0vRRQI3`L z#DM%oFsPOW)D=?e&3wJL=If|6wdU)ojc?75YF#ezTd6hw8h-P%=3h%KSmU&?6GCC5 z>L;qDh8{SgThs5OgB1DmJBH98h!`qSH=(4F449B<#;1@8kPL}J=2J{a$PR58B5-g6 z#C;oF--*P#d9uR1l0}jp>+i>5wyx&yK~07%?Cj=oU*@$U+$-PX3^YX;lw-(v#EPWz zS$ZBFK14DxW3l@LB3nE_Dt_mJ2&S;1%^l3!NWKAu3}pzic6D zUAqP%jx>z8gq<-E{2U?9ODl~@w&qY`@s!OwS|LCt2JXH3fQ zEC{pcF6y9%m7u&8-J=WvV?&(*DY|Fp?=gGE9>*XkOf4B~KF*48c=p_^obUWduMBne zO;z5EeqQ;UuwlYrU|_0p{&dyFr84sFLG8CkpYtU%??u%GMk#{?tmdvWxpccm9jO4py zGVbs2U5YpE?=l{t6VX)M2ofuAceQPM82dfL9T1GGLU-Wa<1`DY?=;04k4WQ4v%E&p1Uq&jf%iG&~cAr(E!D zPh_6!)%?Vu!I#3kG3cp9WHh)czQ@irdhGN#!g}oSqDc^qOpV{3#DjqYo)M5Jpa}@5 z$8I8t9y>2QJ$8ClVM;RcbQnSTPMEZC5lXRkaOiOac%+VDuIac)jo&eh1SR<$7tth& zG9vpOgTNA~_T(kT`yFS|#QGh5XiNwsW<72I3ak)Qq}3@=>4I^0!aJX%mJZ{tXW`G+ zbe4H%Q21-EFri$u|L-BEI);412IKxtDAGFU#5$NqPH1GL?uk%eAM(_{meflE{@kE% z_?j)V>2OF2d#PesmX zVcvs4S~|Qbyv$Y-7Q=O77TmLvVsGjd`P0yQCx}28-f(QU~(Ba%HWKMZ$$)nrn$3BD$}q z90U}*0n3UKqP(M=@|`FayAH4a>0)Wbh^4{PoQolaX1^#jCp7yBc#cI4VBQ28In2?M z1~tLBJ&;qn`ai#hu}>U><2#!hi;el&{(cDK6nI!RSIkr`@ z=K4M#xBU-l>1o|1{@6(s?4+-QubJ*oarB9TU&S7)mYi1eZjcmvyyoYp{VT@!$q{}~ zP_gn1z79G|;WUFDw{&Yn8^gqG^>TmKd$~p^-JLj`7n3EL3^;+Dhc*x|M2AHx;O*5KyzFv}+E4 zGM z8rjfHC<@^wsjyvU(t8@4df;tr_QE8+m*FONL)u@`$KQmExi^1%0W14TJA&#xt@{!W zKe|i)!an5iI$9Od^}J?iIW+Em3CFd8e7>j+bkEV)7gL~~(XmsYxYy{VR(2?`0gE>J zg(H+{SO7eod<?)J`HFi=iiiXo$_uE< z7zj2LR*4gsqnGaXXXzu&i+$yfRJ6-zQ;EVMmjg(@}-JSWi=}c{iZMOcq{2 zFtvifm)HB;Z((m*;o!D^>UWI#{WTB16n@R*S5yT*d}G;ZjwT@N&|VW&+N&1!MimAI zCWIA%F>`=T=B#fAc9=Nqj}TU4b;??7GCD~qh#y!ES?n+cSJ`3wdDAr4d~?}55mPiQ z_Lh>~sFFbV-m)LeF`jjfUbVD04TITzJ8drOLYvDi1$<{&9x-7}x2k2>V0!(r@=~(+!bN7chsixl2ALDI<(`(o`SHwCe6{ieWjQMgTH`pGumWq|2Q-m+N|Gm0}hh3 zG(z)j?!^+|lYT}lHrl*kaL3QCXm9}xqE6g`1zQlqrTMTXPt=HVCZ2aa%aGDprilR5mPT> z1XtQ%v<1-g$E)uyQC_+MqVNXp3i}Hc9$o;9yWgNa#*RWpHR`GxjMz-yi`K?Ckc|C{ zHjh_EC^JGE3{OXBgV9Fx6vQI-n9y@MxlBL-4L!9KBQJdE;R}{+zU_DbnMNWqM8dZ{ 
zN*@;+SnJy!WANmu`7tyC|GHSz1xYzSw~LOY2{bi#!n?XAM!PTl#a6nB#8(hFA>0LS zPd1iwU;O-ktQc5o>-eINb3;H4nUwSx(@OJdM+wh2a4EI0#;=)tL z?{^vZcBFK9=F*E*?YY-Qal2qnJF3ujeH616?0*_sEx#4dFpoq?(ddNK!?cE1OKEX* z9`f8aaxB6+895X21f3!i5oaQ9VqoQ%#W)Aq969wwIiVExT4yA>frOD|r{a-jNLd++ zdq&BL=WmbH(u=BhSZ(@38Ps0Ggr|qj=}3?A?iI5m9zgjqA1@jBbAwnc{d;M#zx$k- zQi@$RJf+^r^z}xrfu97Mk%@sSNKQxe$0kHb8u469)`M|eE*o<5Ne;6-#v;jq6q1V< z`zn7?3{LY(dZly-o+8JJcQ~&k+Dm6jE?Gq@br4@l%!Kz~+)tpP zbQ2rZfwNbF&d0M0EMlO_a>iZ6po40NLF1-d4Q-KP{X_ z|3bIFeKzG6@>ro#8%w5#oS<&ev(s-A}(Yv-30J)_*j*=t|o5APD47 zT`UFPS3W5@p3C#O+W2&bZ?KZevcKez<+tdF=gL^5|M)b>@G;;w0`QPIZXiSV!qTmi znzzk6l;gb^{SvUo^S-oV?>Gg!>K89`BY9`B?2ms6#iOwlAq>+wIdBtI11WJaXum#3 zW;vIB?Eqhq2mXwkJTjkn+S)nz3`EroioGT1F=)RN(B0!Wkb|qO1HSxC2YMWj5ZdE7 zNR8q++I9kr9>8X*SJ4(e^}!YFV+H+m6y*5cdq{C%3H!Us;`^##K_sp59i+74n2awqjqb#+`s{TL&quF5*+J z*q152(@*l>S4fm~FGxA9yXGy1%F@O^@SH0xRlsbTqs>}Ica3JW)!#y1c=eCrc^$$%+wzGw*YY~<=9^Jx1N2)>BZ&tWxewgR z-%TsuqGjT@OHVZO*6_qZYQ?XSbSpyv1H@8$mGj=85Bxmv1K0_x$iZ3in0gbOWsbm; zBk-gMJS76pj==LG@bm~gF#?|-fzu5+S`UM>XyJw9l(118^{%wP=Xdx`7;z!3!67U( zT1eEV_M7YfqYUV=G4NmRYi?%x`ZfAqG__+7LSK?G#_L`$nqGuUlJr80JkwZ9v& z#&WWmPL~Z5AP2uqOI`XPI?;6e2Qr{%g9bX0IScZ1>4?r1vo#r^!|jngc*G_dGQ%%U z15OOOvcabhY6<9r+ZQsVrvuK#@j-na55%~fMiViEm;)JM6au6R8h$uEn>wKB^jdoK zz|g1k;W9~jBwNp?3Vohh;NVi9>+q@MGJIMn`DBBph;;c>5Dh?A$QDf^`ecJ-h|cYb zK5@nJ;I_0lxB;>yd~zMgaWCL;TcMc9u82d#sriK%_oMlu7`QKrPS3LOiS!zBnBI^L zT!)rtSMcar2cV}c<8e&LKo(?ehB%FQWJlMkB_7G5rT$Lz ziARGX9nz&MWQ%O*NDeLOkS_I!)=Yxj9~$D39Fa(tTJQudq|b?*M{)8v$Tu#Efqas` zkd2+NM>ax!L-CO=(S%RNWF{L}vuu!?Ax>8_C0a&@XaMwZkqjdl(5U5!BpJvH8tD*S z=!jW`EH3mzI*=D~kPX@9dPEhv&CuXF_=G^X z(6x)~jKobeq>U~at&G_Lt^=wAu9L|MU!!8J0tX*}7VAhS%ZxEI0IiG`xDL1uxDL2Z zh9_nQpq0@A2OoeIxDIMqx8wxF6Kgygcr@^6;L-5M*E`s>ZV|cxd;nVD(eQ_7BLG_X zGXReU9t}JiIQY;v1JJ@B_|P>2&;o~jap|FeAeo*%0Kn18GB4i}>V(*@shrCU030p& zK&X=HSt8m*v_cR#^b;WmK0r?|&j7%oCxH$=0RAvs#-_k5?c-D6l%@eVT3Ma~b=oMV zK#lfp;4*N~!3Ru%Zxdh&e47Bkp(lZ!h&B;&-~*<>w+R3odf>x1aLD1((Fcs^0JwC( z@gt#?bQE9|fCd@`0ImZLx`bBJaRLb;GuTW?5SuFW_0R{<(JTs}2QLZ$Tn8L<=mVf> zzy}t9gjS-35Dgp?ETNU6p=y9XyzGI zKLh-sg+6$|L5Ds7{%|4X6p2n-iNJYKz=_37_`vt#Y%=KR8h{&s8-U{yQ$)$rnP>n2 zv@{st&pqtQ6MmrgF;0E9Z;0EA4 z@t9VgeBBgE001o|132^nXeXnc3_5V=!#7&sGyq%}h)4ht!UqIY2AIJ|t22Wq1JLTw z>OiL)j3y{@z~Dj81i8s*CqvH!Jrml=XeXP6IzCPCZ$iRg4uBR31P+c2fSe93`~wG| zg@54OwF&-Bm=F^ZiY@>x5)2%C=)u1U{!Q=?9Do-7fg^ajP$BR^KOzAJ16lw`G(URq zX#)AyL9(+Ccdhmfuz|jK6!?zwV9{%-!@$jt&jE8SM0DL|8dhqq&Lmqkn@PVU+ zZ$1D%dL4y>;m|({U56D60-qX;-UL2y9dOXW1D`P&6aY&?3t%$10G5O{335og4gkIh zeBjUnfDfF~qZJtVjsj4+q5zcIDEI*1gd5(3(VM^rt^*D_c;GX=3_XAzzM}wo_>KbT zVLJ*wz&C+!0^bBaaOeTR2aY)Th1tRnDl>%xK+7irr4|Gkhz!0AK5zh9@PP|9Rechb zq!|zgpq0>q4*>vL@F70~H~=m9z>!u;JzCf|z&@pbR+(4;5~US@7W^deX^6n}zyXpB zc>r42hyD!U0JN|VT!uUVE%=b10UUr9eBhWgeuWx%8*{mx_a<$2HqMvtroY@QMZ088 z>D)!~DyPR=@0QDD2*`Pk)i&AbuBdOU^HkTm<=PE@2D0Vxo}n9uXJ*+NkRpI%Lc7yC~Anz1WQ9+Ts(dn$*P+qZ_|LN*{kntz3HBL`eZ6#5vH�v zxa^WO^JTJDRbT6_zK<+ad*pkoJylvL&I)HGQ7fwJs+{%o=btp9VtfsGHbLa1xv1TB z5AfWX0`CA`3H+(K?JgaScIDXI5$QI(RQ<=P?XD>FWyljP{Nj2VAr-QRG{Pk45V`)A zcGv28m+OVw_-pn}^bJeI6?#t@*+z(cgMN(+;E;wh>9}zQ!-NSFCr+AVGF^9FT-^27 z-+1HHsR;?wrpfZmnX_iio;`Q&ym|BI-+HUXVznkGfBV}hDQRgL8JU^c*>=0bk)OY0 z$sa-g)QUcbAr~TUTDbVMAr*rcG5W|gxZQPi4GmuJmMx8q z_uaQ`+m0R0%|2gC%L5N6%HRFn?%fYP^zg%b_wL*G{qG+*@Pi*b`sibiwY5F|_>)im z@P~&E{rJcK@-ILA>EXk_{N=BHb>zsgW5V}yss@WYdIQr-BR7u=kmrJW_P#^I&t2*2P=BvcL*Yu1r@GGNq>7n>Ke+eY>vXzhi(KxmB%L{0uyf?Pa{OVw6LF8I zmLpD=t9%0~^Qw&)t|VXUb~U0fIyZVAoO1igzZR-Le)|rZ}hq= z_#RbRUFn|Vk?TYInAkwBEGvXYHzxXT4UlRv63TGoyis+(Z0^#;-qBE1>veJ8RpoVc zPB&w3%xHJbxw+l70q`Q=xoPdL+0)xy-vxZt__V$6>&1+CtKAi~_4U}9!{*@I*ju(H 
zw!8inaNDeQSHkRe*8_9fT^9f}M`C}JYwMi#`S`5FaB3PD*BqF{M46blTs+Fk9bE_cx~N1gHFp5!KJTvxjmEL)OWV7ITe zSr<4KFR(3Gy>!{q1qH>6S1l-9vb+FfDBf$cmNitB*E=igJoO7Q`E8Vu!EdM9jY*Ix zb1W`fy*Ou;BX2JGS-9|4{A{YISYTV2yzn;a-C9xW-dMfq)@obI?YF|-0%30f3@+qz zTj2EMcxr2wqrBwEN#*F`G=v|F1wI<)&XuQU$mXk!;v4#3GXtUjd&Sy}rvU^Kf+XPc z0cOCu`S?9;w2UwOg>{XUj8^MkT%V-z&Gp|m1I2qrV{6^VSj@wWRfW+yK$Af`5JpSc z&sY&?SGzjEdl}FhCKEt^7$EJ%vpgUkkO-I$NCD&liU6VO9`s#+X23x}JKzMM7cdBr zz6TqC`G9OdF`x>t3-AcwFyIuR4=@CfFpgM2A|M&C7;q<`3J|*fC&sN~IhB=Ed2<;1 z8OyJBYshI9UXd|wR#sN&!Z8*bum=OKWo6|J4R{Vk{{Sm<*4MjhDGzb_@(s11n)Mqu zy1WhioR+2ESnqV=Sy5kuvm*U3K8$VEetk6fe5>g`VCGpi>s(Hr+!me zQF(m>@pR#Mu@?tcde;&r!)ZD0N*tf>t*x(YfX+?e+qDyFnZ3$cu^Dp>nxnqH4A1N} z&R^4A`$mhdI0{$fDMJnb|Bt*(sMQGEZZ0(ct$DZkaq}zY)8_N$$(H$+4HlPWujLWT_pJYD z{gL&k^@Q~e>ly1gYl3a2&1%cB7258xZMNNKd)D@UY`?L6VEfqirEOwzLUKX!^5oKF zSMt{62b2Fk`Ja-{eS1wxY08$Ae@J;cr6c9vQUWP!Q}0RLl-iVfG4=D*hZdb*^!cKR zX-R1d(o)hkrMc4XO*@eGXxgE)chf#d`;Ro19+N&fJt2L5x+Oh5y)iwQJ|V-FQI=7e zQJe8>Mt8=m8Rs)T&CqAg%w#g^91br(vvgTru)J;g(DISxPnMb16l;ccv2}~}ee3TL zeUfdx&1KtXd&bsjd)YQ6`G({f$@$55B(F*KB*&-Rl44ECOdeQTX-d@zV=#Ps&TlD3k z=(HQt7N*(KGSil)`O}W1bzvNXX%o_8({1Sm>4oVV(>JH9>3h=;r2ljJFVnlzUrs-r z{$cu`(!WfflyO7GoD6eDcE*y7?__MqsLE)_Xw2A~@p#5BGP*Ke&p3mz>NBTg&d9W8 z=4P(S+>lwHd4J~K%*Qi-k@;5U+02hK4Oug@7Gz~+Ey=3Rs?Tc5`d-$fSwGD>mi2np zM_EHzm}oQhR5W_C#r!vBkGa*n*Zi#cP4ma*Yb-Ze=2@(kMV4I4GRt=?Kd}7J@)OHZ zOONH0r5BkTvJ6{v)@W;jb-Hz_wZyvKx*0j$Y29tzXMN22bL;EY53PT+{>eH6Ya`dT z%C^B)Z@b^N*Y>#W7q%|j>$WpCi7G`A_FpvX&6VaVv&&p(Zno^Q?6&N&?88bsXn7Jo zJ1i$GFIxuTkKvE84_euJ+P_f0ZLPN5wmq1;1GYzOELlo6B*!G1l4FzOc@2WdH`o7T G2L3NHP{`8& literal 0 HcmV?d00001 diff --git a/gam/gdata/analytics/Crypto/Cipher/IDEA.pyd b/gam/gdata/analytics/Crypto/Cipher/IDEA.pyd new file mode 100755 index 0000000000000000000000000000000000000000..860c12177453abcb1b7807cb624c28af0f242edb GIT binary patch literal 15360 zcmeHO4^*4wx&H!biAo!)V5_1{E4AQSOwv*k0t5&lv_gvoTBoIyLMW+_u;fb%uC%zo z#&^EP%X;1H?wl*?cHFL4o!3*^+N%_LrS;DlS3O%?nKFl47`ThtTT^xB{+{k2c+mB|t~zU^1V;nl{tg*RRyi_>w-0nwqwp(TY{0q&e`HyZ;^a?8-@&aNZ)i$MbT zl3pCAl4yy5c;I5UR*qu}c5>X2c?>-FpI(mRY5l+QmC0M%c<}66hCd__=^9Z2eOx>6 zFSDVJuS5SylA!>NaBf12x(YaMAk!%!7LYC#X!Fr-MvJ-%z#(&|%SE6O`m=-neERAn zepZPevr#K>Xnpl;+ydfflkk`gJ26k|YSE&u0w$le*-;;|WiKX45Ato)RlspYQNypg ze*+6t3vX}TQt6pW6gv_jkNa50qYLBlfvxXC7KHif7)w3f9Lkvn!!Yl#=!3k<3G$K8Q`0T z0pYlZNgV&3klg==>o{)WreHAGD+%e{!+B4xB&07OpC;)4_EM;yN9@D$H2^Z<07w7b z=nZp;;H4Rus0GA(lo*HjSDzp$gtt9YOO6x0XjTYSuxVMET#%>v7l8PzCm_3KU`QgX zJ|4k85ymeAewBp(+Z%vC6lGxeuV!K^bPT~He=7TH6oRkO^O0;%%P|=eAC!UuDl>GtX!oMk;ktW|~ zlP@Tr7?y!@nsIu5h7);aC>&D=phU4o@hovw9+jOMQ#2}%-X@ccGzxN;=TuyY=aYo} ze|V0jJ)f*Ops@%SJ-aEMX7MW)Z1G(vxKjE^$MxpAP3x$AZOyNy63BLsC zn*F~7bML=XCR6su!ou#p9bOMtNu0>t6hF${)<*G2P`#^){8fgX_>q4yEJ zkLZ08y^rW$jiDcslpztMFrOxNkVCSapP)V=pg99Eg5~4N?#B>lz?SsJ_6V;B*q66y z$d2G5*%xa^3|bOL76s%>nG<4A2;7>?f=H}RY82vx+%$hq6aev*ih2bR-VB}s6=0xH zw>y~PzX#=@KtWk_xPVnH#)_(GlIT_l?|b&Aas1U{Nm@|h(Q)oX^u!7Z{@~^%!zM;C zY=YRJH!YEFNaQq9(amd0ABGr|-*Bh%5=V+|xP~y2XgF-5vV$A#Lrz$m7EBq3P;9fZ zGz?LyhRxFq$?Rv6ngi14Jl(wJ08~u(n$P2w=7k4}(^Os8IMFFBDrywuL5!3 zlRy_qsbI=|Vakc`gAB3Y2`H4`I|V-98&n=Wg_Fk3L{5ql`Nm65hf%(dZ9=6 zfLljvo{YhM0CST!cskB={1TduW()I+gI9PAocl7-B*#_>RxDxem@NLxK&woq&Bqv%>siRZid2kxD3}z8 zGBSwp8)3wAy6VoE`kpIAN}?1g%*@x`Mlm4;Ls!_P6bz|&F7R|raqd*o9PH@iMz>(f zY2XQi6CdKq<$`%Cf!AZZdfF86ydHn$v;D3D|GC5F8IX(B3eVk&c=r~ZT*bnq?t%7r z&p-ll=!fxQ`IK-*copX$997q0dx|CD`BQ;-(w*e0x+ME;#9qoYKS+Tw%%kw~a-um* zG><@sP$ZL=nb9ZG84c+&U-_M*P+BZd1k<{=ZPx8(CkjKyl+rs+@EV0@C|xv<%4j<( zgg`vod*nfX&POQD!wTT;dt;&>$TOrr!^rC%V~fF6IISI()1h5kyePRxt}XI>n6Uqk 
zV$G=RBX*4E;n*{t0TwgkAEFk-V_m_NELM+-#f#;FX-R0E$0s(;TBlH2N8l;)P+^lP zMp%#WIt*Tt478M3?w4H@%10&_isd5^XpvT~?U7$qG|MG0xqjrp8>II)lM38dk_hs} zB#R`-p&(FAvS*o#A_K8im^ftcYVBf;0+;%k{Gh)c+XouEWAg6Tw;*@D?vg zQXZ|4Lr-n8vIiaWXisv2g*ga9iA9O=h?AnkiOa!{(UUH>B?|u~Nk8HVaQic#Wn1b+ z5Xe6CR$UI3MU16f*@I5?XpelcMR;A9&hY622Yw9>!-Xu@BL+Qwg+8!_e!fJX*XkSu4N12e%&i6@{LQ3HyH!uZOuF`1xmZ{Wqi<;AY^= zrhtlU0jQ(A_XfNk_{^0UmHy0uHz(dlz73mI>3tg`#PFqJE0--e#+FMKOXNb4l+dKL zDAb}tjN*Md62s!GbviH#XC4+?X)Ilzq}($^32Fln!~(ft-b(=STl(n6Oo~KGiG2iP zZ23H5a+0?EV*o^L`PjS*32FP5E#f#S8(fP8C?%(y0U@dzZw9MKLG#D3qhJzP>Cfy=Q5T9!1h*;QL&pH1|MHQWY1L*ME9ud6S2lG`^Z~91{+>QQ6ayY<&2OG zl?PwMiE*-o1vT+yDN=>()%WO(?j$Jo8oeag5f*Gx6ViTT1hNzXz(iv@gp&RcsdF=M zq}q}@>k%VY>`;7(0<-jU3(Q}_1tv?hC_MgnJa+Gyb*Q*>aTx@u=Kv~8g=h(9k58AC zH8Yc6sHQZ})D*LFR!wOmBhOh=bdn?}C})wCp$H0!57@(IAMJMbtZi_kuDnA@yOxSd zyje<^W?xiUQTz|eirI&q@1sQLoA?jlhnTS7GE{@fqr{|UOtd5mmQ>NQM6jqRqk<^J z%4=aI;KccG2`C?pFZUsyuGJPVMv7S^p#qT90{{!J-?4SeeTMR>eLtBn&#e0lX<(?q zx@uZi$_ierjV%{iXjNpPn$h?gpIGjTFZWMg5tRgH`2a;`+;adGCI$5o$YQcg*oq<} zlM9w5lN*7H3S+rMh4B_I4K2-z%To*BqJUIes(ta*{^;8B6HLxpS!VW$H9k`2;5sTU z+r!1>YZg{nHpV!t;})?N1;*!1PViP;i*m35g~czr$(Hj`64Yl{QMkH9aQoojeXofY zR%a}U6Y*dj;wElQ6kd%gGwbk-Sp%L+r4n;T3{O0dqtGy(!iy5w8U6Hu7ltiSwZ>rf z?Y~X(g2HQIWhUf|AHNsl^=*rBkfhKE`&;cp2r#F=B?TK5FEBjt#GDK_f+6z6*ATFl zL>7gtxGbmQ5(=zKEFn?B-M^hUPnllY4Xi#P%#_|Y0h}|S&b>=ZjtLf4T`ZC65(zFA zS}c~YVv77XQ-ry8-;)#HnOSUR>p2OCn5A$QEwP2B2(y3HLSr7Kv(N7@4_Ad0EHJCk zu$uJ%rIgRBS=FQTw0k(h>IG;pSAhuyz=PA5t}gLz+5qu2{<-Q4B_5Ullm}m>8e>8t z`^*9(71Q8lAsr{zqJB~Fcqal`9xgD<9N_|k04I%}2U<*x2|uQjqt3%N;ir}!)H4=( zcwxQIvmXy2O9>@F=8GzOkyHNUR?q%K1z#+d(AxrqcY8AOf?RknV~~!espOhAtS)Xz z4DCyAm6kS<^hy^egx#1OE@6*YeN$K*PkAYHr>8BvbYqWOo~d-9=Dgqz2pu2Oi+Q}y zFi(r_#OE%!N2o^6JZB$+*MbxMpz1y+ zJ;U%ZSfrhV^<9eLO1UBBC4l&>$gv1kjhu;?fgU9mG4aJ%Ox`vt7*|H0dd?A1%v%77 zC^ZsxA|7d`h1-&`XN1Bf#~=?*n&(A#KgGF&ka(r zOdJAd5Xs4;sIt)}buSXDr^O-#I|)7wOH4Ka^CF}_enkwX63?~7J($Aq3W#GB@ZjZH z90?C6pD^AMsjkvfxEwP(3h52gC{~Kq6|Z!X`(DGu!>54*V%`5*6Pz>My+I7>fA zXkMOnHC7L%jDbQJqG@CYHk|=D)8`nbXaZT5O=xL?4yxe^iZ39@4Y3oH;b@{bc7n`D z6XmfJ^mULXw!}`5)HFdhowYdGh$hI0k%`Fbjs;M#7Bj&VAGGkCPR8qrsIMcvy}=bc zCVG3pgLzd@yEwU(A@^_(b!ky%Rk?95e2U8J~_BpNtvPgGV&KGcjX2 z`$x}H0*f9~7)FmNsG`Rd8PQ|%di0nKJ2S3k-+J)T3E$ty(Ch`PqOY?E*4J@({gfmw zpn-Us+*>Grn7ZAsxF^mId7r+C8~}Vou)METLk~egKBId;7~-!IE`+|91(Sb7Q@6S= zz_SIb@B#0{crf^NxK%UY$>TA_Oo>FfODv$MaqN&Lm@RruJ3`7E~5aSD22 z0l{d|G`8qM`hNK7^)_I=+hf;DmezX(>&=d@>f;5~2+X%9lO-{rw(uh*A4{9z&HoK|t00`0Nmj_XxJdi4Q<{>^0gt zp+`gib(AfTq9E5Z?-E-!OP_i1Nz(rpB!O6a;`{JDQzwBc=8^2(L0%Dly*_L{T!^c$ z6DxQ-4iB3jpy9Cj2;BtpvA!-$44WU3ppQz>lYKo5`j`ZLQi2Zj9c0j_rQzwmUJM2E znZEnj{iVLc>^{=>0K31Y?4Nf8y+#3t#atw-wXhSjp&U>1-QR%2p=A}M_D`Ts4p(;l z6M9H_nzPT+1p5YycUt}cA)DkXL=-dfuu0Ln8ilC<+78DPs+dwteN#I699~oP^&+l? 
z$|YjTebh0f_EoU?Ra9ApN|j(z_Z_BPkn#YVU)#3>^FpOgOlhEwDYvhc5fn=V1%jzm zOnC&jeMg9(vVyr>FqH|uWoOv;;WU-(th}(N>*i9BY-s-@v$@dSgb}l{(A`X3GOf_v zN?o$2(9Khq*|NKhy3Cy2`=~2fv@>MUt{IE=c)Mv8RvpyLuG84F-519`k{67SrsEi$4 zQ9LW(1XonWjBk$_m&J^?#f%$b#yeuhO)=xrm~lnSm^S20J`Ap)z>6M}!DhzDclbU( zhF=~?7t9*mL0JqJ_38fiyzJJ!>33P%SE=r#UuM*E!o&^cy0HT{JEA8?0>AbcqCl6-$6G%FxwruTKYAiNhHZ+of{5XmygP z!cw7Xv|1Z>*45v|ewwxlVElxw#mYB18VF)*v{_vYcm0-CD&p4Ubl7co69*fw+H2#R zLZVpftqlaNx3xA|o%HiA(r6XVGi$GvY_g*OeT)yOr124qb1}YflWsRs5I2S~=^TA6 zz$QMoSht(bVB8$@L>Imm0hfK<17H(q2tJsOlj6!x^9|Gdz3-i&y$~%&cwfZcqjA3K z|8*>Yz0c9k=hkoG>K$%7Z*_7m>#i27ov(G-?zVES8~52H@71pSR38V z(q=YQZDyGOij#%c+gsgyC}ANlgtV_iZ$>oO0>5r;w%T{`O<$CTvQJ2771~bar`!D$+O?@To?Wloz2^qq?l;gr zOTSRg|6(#jUYot%;dG)RMfu=6$f;+R>2@zzjuR_d^NqUQze>~XHm#6yp;Q-Cj#jI) zj@gZLZ?HDn>@Y_oiUq3!vuXnus(|5|#1Rkjx7}f1ZEdr;cx)QgPW;UEu5g%3p{J_j zQSf%Ud8=yW3fD@`R7W0aur?!NRoF5!(^L^ZP0wQL^U)&F8FD#`l}qkOwvM#LkprX- zYCT6(oQW#EBh$o6StbLOZz?v|noUI<>zIm6p-xE%f>J6$HiaPwPY%BUhpINajknb` zQ@j$+UHicD1>5^_ta{8_G7Uwp5jFUR}O^qXl5#>|LX+bv4yF ztqrZbb9Fx3E_r!u+c+B2fm3TPt*tCA+-xq+AfB0-x8UEd`uf#tGS_BiQSX*|hrQ9Z z>lWJ@-MU-AZ?(j4H5g_xA6l$@A@69}h%{2DO0Po~)gJ#KGuCirWT=+qsno>%AMbL7 zetWNNm3~W4X9{ZR7_UN8qdgAG8gNHOM1W+rwl{F()rcQy=Bw-fp9PvaXJme<3%MT=okkl+3!o`H9G8lwMk__zhSrMKg?1S2F|;#iV`%5mlA&`N z+FGN=O4z9M&<-)TK`iHq%tJ7(BkX=&Z>UKH+RmU|pHoINy3}lRJbXu((=ZtH?Im}T{ zcx`bw($u%MQ@@@1Esi}v;qIos%Svpy?OYS@u)DYh&dxiU=p{o%d!chzZAG2aMYyu) zag`f~L;9~NQ8)}%Xm7w_b+5zOKpd{ayeV`VtTi=R>+j;YD!|N6XDyx!Tk5d#*F)n? z_4RI?B^p4+SwpZ&D__Z;SOM!ISX+HpJ*T0^AVPeZURb7b#uwNhIP zPQ_(q9G8pn->S8?r}^?T|?npK*enr6)* z%>$aOY(w@)_8Of*w^7%o>(KS+`gJFD!#cn29bG_|sb8bd*H`Gb>Fe~(`hEI)^au54 z^_z0;%<<$rk@IxUb2$qQR~c?F6d5dr%?6htn17}58e@hr%a~&{8(qd8qt|%Y_`DH9 zsF1H^_zi5M$~u(wVAjJ~ zk7tc$O=SH!>tfc*b%)m7zwV)Rudkbzy(l|9dv&%h``+yDXFr!cmi>12boPAR6}qc+ z89KEtPq!Yrd|hYL?bms9_v#+h{Z=<$ewly_s((g*PXCtPum4aV zpL11CYR;;htQ>RB#+>aryK?sCbmsKt{2=FNIZx#r%Nfc!oAXxAdpU82D-FvHT4=l3 zu+!i)e9Lgi@Q~pt!=T|6!&`>;3~{+v<}S~*;-T7kvzvMrZ|MUE(@=xZU&A-C9 z*qCZeH)b0PjHSjqj5~}?#%~zgjR%e2HJ&j3r*Xu1&iDu8yT*^nwrbQk+~n$o>SgL8 zb+x)iZB-wFzn@YMs$WsR1)s-huGB2oXf;Ke%^F?<2Fr z?$dN?x-LOU9@D&}@oC01>iWA`;C}&p*_pBc literal 0 HcmV?d00001 diff --git a/gam/gdata/analytics/Crypto/Cipher/RC5.pyd b/gam/gdata/analytics/Crypto/Cipher/RC5.pyd new file mode 100755 index 0000000000000000000000000000000000000000..ef39b3ea0f99fe59557eac8420c45d4b92d487d0 GIT binary patch literal 15872 zcmeHO4Rlo1wZ4;#FzCRHnrPH0H!w&Li1UMz%uGU($t0iz14+QY7$=#5$BQgM6@;#o-_E<6~WfArWzI7Nt4zTcoOkv-nY-alS~G% zcD=UmbuI78nS0JY`|Q87&)(mI#e(_Hix@XPaJBxdI z#;ZAd6f0lNscCBV@oiqu2Ct))Z*;ib9zWmU;=KVk-|XhguB+x-Jx*8N%$aBFWYSf3 zj$5gi&iOa}x-7wMh?_O{97S3_M@VLrG!(5|igD*6PSysT9Mg;A zcxfyRAR4&Xt&QVKsnNx8%g>_98LsUdw~hG!jjuevYm*-|ADxRI5=e(Kp#Kd)!9bNzCMKjX1jx`R(GRTGU2nZcn~~Bg%rq-iyTa?mqvq_<)4XfWDEC|8h>q(8ZR| z;W)z~fe{KTX(4bi9bnz@fHpxsP;U5$;g5w}$8Wl&zHYyoqc-rn68MiWN&5UPVL&+U z5fUeM7bK5O!_V;L@pyc@BxGnK7kYj^A$D)da9r!39vtWWvq$pL z|L!xaf9b>MuT+Kaen=tZfRRhk*|_@MXE6BL_N5Pt9jfrxq|SK!xe0QrP#KpN9Ot%> zqlC*IAQN!5f@tR$28b2P?!NZ)VBd7%#eJWrGyZIVU>_7KwJU_k11CNg>``_Po>Tx@d=1tQ`$2A7sqY z-aH1+y>` zULjNt1v`c~%{LxrI4ZGNDcD~pJKRS+gxUe¯hBQakS?J;zvmp!OXw;vH}UtjQ& zP#Y2UAAV1>yE1Y(CcHLd-`vUpp)yAPvvQ|ehyZUdu|M&VJbE-;x%Z0)9}(@Nf_hKL zrg-2)$DnYyLKsISD!dVwW&P%Da9^?!@FH2~cC}=z zoL^IH0b?@-)HD=0=@1? 
z&dW0T&G%@7eQAPytiV3n@olkEEf~arR=6y9AY+PNTG6f+^hi*xDMIVM^ zfo|C(-k=ofMn$$tpoUO)Bq2)R2t+?A){S9u7S`<4iglw4UKR!p$9mp8{N9WKu_Klf zHk=T~CBYf?(FtsIDs~{2^P=JNVCz^~uwzWo`Jh;*TIf@YL9$xTiwp14=7fWHDfxgJ zLJK6cU>-SJ(ydM_Sb_(%3EhIDyij*s2#gCIpVE@7Q$u?!OoRc=)`y@ULV>1xG3aV` zpRd_%Q(&3Q*pEuuz9B8Er0*Nbnz0|#q}g4qM8{bPjA;ps=`zMx$UY{N=~N-O7*?!c z)x}=?49;X9?U}YOrs^qIu-`QNVqy1`r(=qG%9RXw7W*YM*t(~D`tXC0ZBitKEI=9r zfIup0pN8GKsq7eiVh3JH(kF;b12M>cCSqsH#1axRYLX(;#k%8SU|j6@R2XGT5<*a53939Hbyc8yT$_ zPzlF^TXdZN01RVJE~n^ z!1513l$Jici14+95w@{(7q+RWB`iT!dJ;mh)yKR7qBtiK#UPW^KFUT)X#??*2!-v( zd3GD8R>(e%)w3jIKaQJTT2hOoC6yaO%qi39LiQuL@ge&VZrLIGAZ}S9B+@cnM#%m; zZd!<}E-r?Jph7KDr)bv#tfY8B+4FkH$wEhzZ$%hIgh6zcs;Wcp43_9SoJmgX3pyyqvZ`G+^f$ z2mA^a3w8{q2d)lwjBg3u7{u`-3)5+}Qzc7AI(82{~h&H8=t&3M>F&`Ky z`@$ttodN%l2$O3nX=ne$1vF)Yf=z|>Lh~p5Mi>YlaNabzr?5)yt(M%Isrv%dO*m^+ z!iC6LvvIg&?vPCr339akr&~O8JMe`4!zXe4{m4E#-CuxQ9NdH*Ljiu3Q{D8y8O`|| zhGU-*yi=7PSc|QISfWkrV)ySmnNC}yh;aCU6QAI`b2S2uSf1$LH=drDt?@IGznREm znqY5#CX8Vpgj2GGXI*pZ4p&-uxoiySGEMWf-B4OAR>gJQTy@0BcBGb$QBCLw!Ru7P zeoXYBg87Xwp3VXW&0y@q6Y+R3tOD-lw}+nr^5oDTpcM)XF=lWW#kgj-k~Z|lvU4+f zl*ZEF$?02uFV+nzK4E7;KPEO3>|;Lj0H})T;49d$GaMAlE>sG(ESb*}!`DsWQzG#Z z{N#D6U|$CL2&*w#m7pcbK&-^dnBrrha$sbZSUCWJ&NV8HJ<9V-r%VY0M`W1e^m zc5N!fc&IE(tJz(pgr3F>O%E#e!Jdri6>Ng&j8KYmz^G8F9zGlN*m%-qwOV*hl74{B zU3o9CCG|1}C{BiI&IZX+MpCKiL8W%EM|oj|@TM@fAX+fq`35M2OIX%|4+dkZg7GbD zC#FK0lj|ozKm1~< z4WJ6#1>=w#Dl!I8NAr!#C`qvCKLaN=svy$&&hRlr*@Rg&p%2i451+}m{(PJm;(P{n z8DhCoD3t=5#EV=l$;HUu$0mGOoWiFAtwiKuzNKT?rH5iqKLx03fPjOZ<*c0m`PcT+ zjhPgIlv3|QXk*D25|LYJ$!`K6Dc7dPT}VjFcmBB?Cq;vc!GJF=@gQILF`syuVcF*k)($ujN~?8hiZq|<6&XTB zwpfuRRPYp0F-RsAs$ePLDNtjh;0fuKQTWrv# z-C_GAU-`KZ`09zT6cs|?4VRV~y*rj*R5zGj7sd0M^vc-CIY~ia${Zlc90_(J!(<{q z0$I#Z2nNFF1I9-%fbqlSyrV?SjUxOJrYbblrcS2b1~AvHApNmF+jGQhLoU>u%vK7 zxe$nAemB1^R{pJg?ktEkh%(p%Y7hseAp5$zlyg;z? z$EPKj4uXgi3*oR8YUWQYxy+&D^2IEqQJC)E4j-pTFRccapWtSi&^rL0)}glli6t1J zf@PPAiR?1rT+FqYFQ3m8`H(5Xrgrm_!(W?ZHo5e?1ccAhS`;f%6HO_4|E7rsn{@1b zUN}3E6*9qKN}^#otC&K{U&>jvgS7ScO^|vC8k{b{$PQ5b$1`V_^Z+e@^t#yT@(TqX z763Hc4pWY?A&`Axf{~KxZ)V6kjx0g`qT~^ofXqoG7-o({f`NmRM!i5w$uWrs4m43a zVVlIMr3U$oxgOqnYz}V00c19zJVfS8N_%wlWN0Jnf?L!of0phsY_%Ple zaNTc7XPB&-saT|)gmo|ZaJABsxf3A&RTJAHSao7g#0<2ONTm1|BQdgiiZjkn-t~0S zprnTu6Vi=jPq>|kBh8qwI)k3Vmt?V*+^0UqOEwSf(_tRr?bBK(zz(3&1Bo%|+#os2 zFg^OlpxI+G7;|@!W{0k#Xg_vXAi8hFgUm-A ze@gA3SIpFLBDI60rVg^{6y|`l4l?3I$He1~nPkOUQt(k|K`#gDCSI|{=aCz4-%b?h ziqCsGrA?3hlKZbuX_Gr9_tUeeG;SCQ{Tr*^ue zBc?jWbQZl|K9%1U;CFp0zYK}rA@G}U9ri}H_SqqLmhizyh9m~`78SvLD#bnY70cZp zQ?K_Alm1rQyB$Kne&TXv{9WOr+P$UBp4FOX`tU@VB66tqF~eJ^NJq*Hn1AnV;4mHV zh92k3=_7@TgV$`D)wTJG$4UanXS|G7`_Z7KCNNzNb?h^&?^Ju@`+>Vhp9iW~NV0dw{VMq#eSg@#vjkT! 
z4{D)~oniY!)C}8q(M_=L>D8elY=2aOJ|;n*@6Bb<$0g{K611;(5raN0H4pUa(G={F z-X-k*N^d^95A<5u{dLVVXBDHip58HA5r?#}6SE=SOKZAs$Am-68b}=*Mx7>Hv-O*( zA(|NuzCa!99X%db#l{KQrYwd>F(ZdX6O$14shOM13mu^dCLY5zU5xzOE&7PGR@-9l9|t@ zRkEkh?WZcUW%nkkGIMrsrmAGoF4>}6CoS3&>LxBM@T-|!$FOF*KMIr@4q-K4h7cR0 zuZ$)Pk6ze5b|qd_3H`#Qwa5V7L$WyU@10DI> z2D-)P@l)bWe4aj~y(Fcbo6^otX&0xottsuYl(sIVy(pzk3vx0Z#^;gaCAZ07lWoMi zy5Xg7IAjnE5p>f57YJ9Xt^D zHMXmGm%Gv1-sW%ixOq>*zqlIxc}%QJu+gE-4(%c5qg;t%MJYpBh4PnVw@YMsf0K(o z9NgIKbUAsS-`niozGT>VS%Hdz^z!zHF^SWzst+{ zTpLhIT&|Q;WyFfva`j=Jwp-mRHNvMaa)F_=7mUaXP*ZM$< zw76|GYgl9eKC@-1!^z^Mqh*7~i`C!C%Vsi@2og@M+ZSkS^B@Q~c`_bP=1l6B%s3%i zLJ>Vv#CX+Ji+K95nyfIFW_g9XE#Q|eir6IE(}CKgIbdCUf!*SAZ}2yL*)cR(@`N)9 z!Ep+aRUX+@c*jOZvt&FlNy--Z9uVe{&Cah6xEomlaW*^2;k=0)BL(l%<4Q=ANr5!S z^h%mAlcx9zMb=aN#UjORUy~=$!gx11+S*)hj%z}!eg4vj^8`x$Wf5o1Z0vjah;wcZ z^4FR5lhe)_^h=c)@;190Jzg);X3`wIk{fZ()J2?YP+mfLYF@;7$^3}32ZiQH%1ykd z&E<74yOHir*ZO8R%&{H`i{+vz`H7R$G?x&NXb>&8$GzCKsoCep>g5}-&~HmDEy>+^ zhaX9|A%NvSf1YnXCxry3s|Bls$10udrmTGGc;fLT&?dZ7o^tY)GsXbMMhYy5MN$RZ z5n4l>jW#2mJR3`0s`OU@J|Zx#AbK1zuD1( zRY+?&fFM8==~}#UMQMf2w$5Z+Y+t_Ew7B}3mDen;s9C;dapj6t6#xV04wJFo*W~cJ zoNa#Z;v!nid|@G5GM@Fhz^S({udiNSvc_JvfN19BU5UR9jg5;>9{2j@ z4Ocds@~^%U^cG9>7K30Oo4*Q|zr^opU4^h!!sj|r#cl(C-~}GO1q=9`LS9eQ|N1NO zzZb?fX}>^Q1S+{`FGA6y99xLb^>JqrEU&HI$&t+xRwDRU*FU2LYPu$+?%B$5>bnsW zlVE*-@qq12g5`JPUrYcy)76K*S5Tr!I5E`6QIvS@p+(6;(V;9t$ww(esX|FyH>2Ky z(uJ}MF%%{A$UxDdEJ0b0ay?2D zO5*xwlv}}-IGq$3N;vMvTzRuwZVYnC5rh7{sJ`Bb4W1iGzz>BzT)o5R!#M=?om{=k z>vem`4w-3=25d|?ecJlO0Z3h^(Bc^2O=IX$K5N# zE7`tR0{({#t8PciY?WZ&kzqCMZ7z8z_dOZ5*3lA30N(~|1vbg$UQg>v+RWn0QB<{8 zveZ(K2}7^Vm!L3NH4*{wd3&;- zf!>*Hli1vOX;tl1sbAr%4m9`)#API441?otM7zEmNA4C$%Uf6*u2sp7y_|>ID-ixA zg_aQC9clFwN|d=;xQCP4tB|w9)UICz|AWYZM#(?AFiake6isr@XZIpF9X|(^u+m*KgB*NB^pRP=8c^u3?d( z!O&v3&+w4pyT%_He`b6#e|P@11<`_`f@1}<%;%dgGgq3|nCr}2%t7-v%m>T|&99q( zXFh5^VNSE0XSvWa$FkI7wNzMqmQ9wR<-3+2T7G7kW!+)jWsO*mTR*lc3#$vSFKjIQ zTH&_BZx`+O_{>gg6`k7T#IID0@p|P;Ea7|%Dp||j!!utxpQ}~m@{e_1L-zz*?m{xRN z(VQYt!xx literal 0 HcmV?d00001 diff --git a/gam/gdata/analytics/Crypto/Cipher/XOR.pyd b/gam/gdata/analytics/Crypto/Cipher/XOR.pyd new file mode 100755 index 0000000000000000000000000000000000000000..fb53d53e29d9cae5a590f14b016a623949456bc8 GIT binary patch literal 8704 zcmeHM4QyN06~0Ly!=-5xN=yqX^aTv0(73h}JGPTpOwXFXXBOK%{k5{e zf~MEX+PeJ_J`@hF3wwHbuO|=)ihP&Ahob@BAK)8Swer0|pP-*P^E_Qvbc>6zrh;>s z*!PEqoVNGbtjlK<6k6FVP%Z@&0!p&Lqi2I&1qYq%E&x!kv`u&d`Qxoj@ryBDT}ul{ z2Aa~fFjh}UCu8#_7ZYJh3o&*RDfpV3UKIL7$P|n4AcM%)DRy+iI=#;$dWeN$zypY9 zHXyG#7)$BHD&v!6s{ug#7XtE{1KXJ1qY3d!CY9>aG5e*{4 zmkoL|+HTLF_kbcTc7&7LHHi(Hi6Qu@-YuU!iY?D*ePD|1=hW3Wf4cOu2%R*XaE(h1 zy3&Me3|+}cgRV%&BkB!04QHU}m4M92t|A04qb-M!cq%$4Gb2{RqSIw3C(YAkTIMP) zj=Ez5<19KKYdHkUbVR>;W`Q%HKbNGS+!M{QyEVcZ@v%Fhz*Rf=taavQO=&LjJAc8?i;z z58@z6Z;@ks_C$Pho~I>7fJ@gWgU;{z$jHhzC8SPE9a~)%HbX*>okdB-@I>|Q7?9`PhC%xn|ypZ8sP)VU4+o<#$qj^sE1k`7= zome3aE2p`UhA(2~1!bm*(Ci6(1r%kE7lKH#$QAS)`AW&6ZOBgKl#p|i6KW1117pmg85m6lV>TtS(gk5iK7+<1vMY1 zz~ti;oqU|alaEtm@^K1CK28pw#M=icf+)JUf5?Be_9fphLsYCxoRcY$XA&a+kW18Y zJ82j{6g_-kII;UmiVyfvE4XCmJS-T`hzsH=X;{2KI-b2RGNljD)QaeFMBFh47ucQ? 
znam9+GsaYGu@6&h^gM?=FHXbw+tIPn=TE(FRQd8HJT4o|n-xt%iqSKPMoM*x(phwW zo~~~>^j$wyU#Y6^5cCa`+mo$*=t(0E%txc8su}QGE{MILDY%o~((gDyv*AxVCKt6Q zx55a-Pj2Kgf02&2@1ZwL7+w5aDt5$2cVnV`m+|fFg_qGD=|riX4<5w_yn&z=o;$j@ z;K1^}S(`Rqy{j%dHvJV~{l{W8ZP9b3H|36r1H%YjDKzi-918~<$U?ewaqiRqz>6nl z`#}5krzBRHrPmx>$?117J2jqoKf2?{Ua-oG$@X|g)MVd5hDTi6>(G)%ks67C?IW)H z2_11gM4jXsOpak<#Pv&+x=W?*O^z$n$5iT*Dm9gyP^i0AbbpeO)vlM4oYKFV)F}PI zWRcRpQT*IFB^a%hTyILX$m2BPq{NWJ1rxs&8xAkqV0HR1#GZJ@EU9AhRqOOr=&uBFzwvUZFH&Uwie zWqu*9&5YE_OHM;_oU%YWq0Cn$J25Y{T4k+|2F}`KNLkRJE^tWBMp?^U2Bi!SskKGf za>>~wy;=5>@_wY_3l4B~TQ*UDe}1~^K+-*lz{p6&_T<`DL^N2)VGeq>;8^6ZM7kcnHH*A zXf&q&Z2R^^V6`Ad!vWsI2ZY=B>sPJjg@8BQ9}@k+03Yo7k>C~e_@1Ed-=x+5&w9U4@bM8b><_HtgKPONf4~#&=jj^(X0+X6h4Pov#vMhF|o1dLVq}vk~d?7Jh@qMy{U%Z&7KDf3VJZ@K`yS1@ywW~pi zgI>P~f9t&7ib{Qz-b|xK-e6#@f88Q~rPZc&z*tP_L1D?PnJ9bfLj za3LSTpQ?)jWc7;0pl;z>l?z+0iQ}MIY+-F$!@u zs6;;7zLODX>pW@pW}M6qfp_Q8=$0D?zAcYdEB0`wP+nF1+E)XRDw1u9o zAgG4IwQGB#k#3^>pm1$i5Eu&=_Ie;fi9}x^O0_rCPxBjS91%zqT1$Ujc%8e&6OLdB zewN9@+oGW!p)TNCA@tuC4EyL`o<__!_`RaLRS=hnLT{_`s>j$3pu56h_bRV98b;+L z$wHO})*KtVn`nJrce8SFUg(~ zH-d;+XD|>EJppn=cXAGaBnugP3p7=+B4*Ntx6ed}o2@9mq^Gp7rN6B|B)I7Td<}5- z61>9psK(u_;MlrmLF^9tSdj2#D2J+XRU~6up}VDjmGT1ZZV-CdgL(K$TxP5jOj%gr zTD{WMRB6&EMRi3v_`ePa^~0TjY0?g|pnexysyXUT5w#|I~b!`7!e|<{|Tb^K0hU&3`th z&Ew_*%lVdzExhF#i_ubVS#9aEge`Yk?y>B!JZ;%$Ib`|3a?Da#J-hm{>ZR3f)$Z!P z>U*mnsD8Y9sQORUA5{OXdYW~n^-}8sYrS=iwbL55CarH-KeUcp3vGJa5}Vr=waK>o zY&&d^*q*lSwH>g{u+Op2vzObe?GAgReT}`--fh3t-fzFlez$#({de|*_P6aH+ds8W z*twcnHDxvRHSIMWHA2ljH9KmauGv>}sOE#3V>N}fvuiJ_HPzPF`f3?ZH5y<@8TK1q aH5@d&fw(avXDmYGtVW0NedBlG blocksize: + key = digestmod.new(key).digest() + + key = key + chr(0) * (blocksize - len(key)) + self.outer.update(_strxor(key, opad)) + self.inner.update(_strxor(key, ipad)) + if (msg): + self.update(msg) + +## def clear(self): +## raise NotImplementedError, "clear() method not available in HMAC." + + def update(self, msg): + """Update this hashing object with the string msg. + """ + self.inner.update(msg) + + def copy(self): + """Return a separate copy of this hashing object. + + An update to this copy won't affect the original object. + """ + other = HMAC("") + other.digestmod = self.digestmod + other.inner = self.inner.copy() + other.outer = self.outer.copy() + return other + + def digest(self): + """Return the hash value of this hashing object. + + This returns a string containing 8-bit data. The object is + not altered in any way by this function; you can continue + updating the object after calling this function. + """ + h = self.outer.copy() + h.update(self.inner.digest()) + return h.digest() + + def hexdigest(self): + """Like digest(), but returns a string of hexadecimal digits instead. + """ + return "".join([string.zfill(hex(ord(x))[2:], 2) + for x in tuple(self.digest())]) + +def new(key, msg = None, digestmod = None): + """Create a new hashing object and return it. + + key: The starting key for the hash. + msg: if available, will immediately be hashed into the object's starting + state. + + You can now feed arbitrary strings into the object using its update() + method, and can ask for the hash value at any time by calling its digest() + method. 
+ """ + return HMAC(key, msg, digestmod) + diff --git a/gam/gdata/analytics/Crypto/Hash/MD2.pyd b/gam/gdata/analytics/Crypto/Hash/MD2.pyd new file mode 100755 index 0000000000000000000000000000000000000000..d11706263ee485ae287607511773126100dc456e GIT binary patch literal 8704 zcmeG>4R}+>kt?AD0R}0l-56@fg9tDL8%h3?EUA?(2@J7`itUmF99foSTRmA)q$jWw z!x>~XetlN!=IDh>NrM9EahF`1q!cjWQpM&_Xu{QTXiGzy5bn~7*v%2}fs?qXGy7yL zoS)wH>wUSqFZ*fU?##~2&dkovTS-;-^pa_W5D7psNyuU7u^H*}(;uBMJ?GXVbI5@i zCl?+TRi0c})8Yy+KEJowZ*OHBc8|x)G7V0~AM`LT4`aTwnrZbmI`e1Go~cNwwps|O z6ip}IwqKejRy#}P+%`)j){r?sSpXmgkf(rwp7b~<4s^6z1b{tlo$v%?$$NzQMF=Cv zq5$fEMmsAZ<%rZ1(v1Q5q~;^UiVOUOTR!V-V?pPE+3-Mv(AR5Dz!+LReDWLZtR2Qa zoMQlP1h^R>tr-c4<@*IjCEBV4z?fwNq%|XKYkor@fTTfu81eDZ)`|L@f<77}EAXJc zdDrtAQJ+iTqcN<9d9*VJAgvjxeNMO6k&2}cCQ%;)kk*W;ElDGNKKvh!KrQ$7hObqJ zMl$%V8KI;|*7YPT$aN?re4U)Hl5(3AQZ7T0utVCMQmW79Y1T#2I|Hj_7*`Kk=A%*_2f z6t9Q`Dnjo_#<^EsI4{2R#N~c5n;|=NV-q($6c>k%i!b+!*c)UAZ|t6q=@yqq0`hTc z*rg}%M2d=2@ynYDuZw_D6wFw5CG>z%E(>o5xrs=a%$$kgO%Lz4!pJN;I4Cx&BWI=cTs?b&0 zwqHC;s6?M2HUyHhYzbF9YFiBK$9RXV%Y`Rn9Rzx_c*p6!*w^NvOx+7zh_a8BbjA}xf9f~&`2hlWs**q z1wAGM!HhnGl_Cdp8G%1^EK^Vf6(*B%De(wo(8V%=ff^5`VQJ!9;667nf!`7_O3{8e zE}@td_16CZ=3nCmuc?GI|1^#ANzosIr(8cbNUf*Ilk+K(vabE$%A~Xpr6HOapA}#- zcbrdQ3dAfU5lJzcnyL&!5IM^QDnMBTuV^Z0-`iTqZykY@(Z&)I{fuoA$%bLEMAE;z<*9p~#ZxHo>f5LDCSJAUa0 zvWNR+55+Y7K^X)Bb1IpY1G2-}_zJZ3Z6eP5IL{}{l&@{8j(cz%m;z7B)cpe;|AC- zaF+hpdWICV$xG6b+Q8!iC%k0q0L!-lD1A;UhHBEvI{d`=jL;WNR};peY^Ukm#A z`3$_QVZMm2)JKh;jWk?TK^L8a_m7XRHxt&oYwCKL!g@on-ay*h8~pE6s$l=$9?lfZ z0Ka)r=mn{02Y$=h{yxt7->JJMR2%IDBOrc!g(UeFce(Zm-q68l*`Ziyq!I5Hk=p&L zGpQF|GVAqvsIs>L5A^|VaBxhf4;w@$*0#;*>{!0PEI2yjCBWJ*hIBQ->D+0)Zv4am z1aBu~-;paIc=#A*C|+71TNAS|$0dp}pgjlQu}B}C#cLv5X%fGOYyH%CLdD%aauis3 z1KJ)=veMKYZXj;iRR%5U18tG5yW*DJh{i2@u;VO;qby9sE&BxOet~*4+D55Q3DlP4IHE@IHa-nm z`7ASzO}Q{8oGCMEg}mmfv-zwV9GG&VRyw}|>Qj=dW;l~FdKaF8tW9*jFj^1uT(ySJ zYQ%v_AN5fIvmjvPOcgK%ywQ3TsJ7BA=im}^df|)oeyLzYCnROvUCS##WNrKJXw12A z3t%+LT-c363@sP-;Sl4=g;^ZZSccnhNW&TKz@ZS)&QwIZCL`J%3F9iXs-3FGxL$xW z8@?PYR}H~wUVzsR{1$pG@GuLpPpqV6pPP?q5-~~kcG6zDcMtZ$wSR!SkdOkK_B3}f z_{{J#2_E#Mov7sE1S~jNZ<340rr_gK@C#FLa$UVjE|yHerBm>cDe|LJa6EwN_F$r= z;~1bx9O_%$`K6=qO+{!B#$+#6EoeB#{x8{<<(p1Ce!4?sD6WZ)T)6QI4S#>brC2=B zv+Hke|Mk(z9Y0&}y$$+5oI3l(=5lZLH*b0S=I7TP`{irje`C#_5C8L9&#ip-{(op` z|4!Dk)4#H_U(U^1WLwzz*L9EF)2m)u^yQa-_tPs6PfN~K{%qJ_AL?)WXsn-3)u!~|PSmNXeLBzQiOJvYW4-wmP$Kh@4!`y924`cP zz13O9f?GXhb8-6f_J1k@HT7xc!pS%VcuN|s5NHX&cc#(WfR^Jikk?6i#UVsNY_>+X+v{<*xmYmVOKkQ)!0E^FE@E@~{T?s+ zlO?t{c!8=EH#NC~ffl5FMcm|fItlTM;RZ|+woqhkucB4l22R1O z$?JhDq6ZZZ!dx}I)Y{4d!hoEaq@fytTIOl2hN6o4-k6-Hq)f53y{6sgwBZBh?*ZJl z3NFTOu+Hlz_fi}}Smk6}yp6<*_!=xb)Tlz#xl?Sjw%y6WtH+iWsmZ47!g~t6g%1?I zPLOdwxuSQAvWuBwL-D3!U-9PR=ZfRSmx_%gmJ(-)yTo6zql7Q{ zPRZ_)$4j0q=`R^7IahMAM69`4vq+=Xlxx;&8Z-}Tc4{8e9M-(5`HkjX&HI|U+C|zt ztw~#@y;tkf?$AD>-J^Y0`<8ZIsiIU(q7Y9?|X5?bkh{>(jlgdqwww?gqVF|0Vqry-KgwoAvATcKr_hclD3z zpU@BK&*?Af#fF;=iwtT*xnaGb!QeOSFzhqHjxfUC=|;6#U8%OJYt)<6_3B2oTkTV` b>JIf&>ci@ont|x7C^XWH{7iQ7&tCM8%=%{- literal 0 HcmV?d00001 diff --git a/gam/gdata/analytics/Crypto/Hash/MD4.pyd b/gam/gdata/analytics/Crypto/Hash/MD4.pyd new file mode 100755 index 0000000000000000000000000000000000000000..4ba243eccd17d97f91875e27668fb8c6c9ed7c2f GIT binary patch literal 9728 zcmeHN4R8}zeqY&2kcmOMoIP<;ldOsOO0dz&gxXlLB^!YuHgU1t+y#sgw!q5Rl95)# z&Oqu!*3{cYttUf!+zfX`kZYNQPPiGD7>-_Ln?oig6PMCEC(y*ng`S9gbLOMqOK7&g z|63V^vG1;#>!j`Ejo-fa|Ng(B`dj~%&!NW%U*blzl~muONTEZdbu8) z@G)|n@Lzj^vXCwX$Fh!-z3k9QHNdpOsFk$FcpZ$-1Sd2PM+a21+T*wNPfY4pK)*)S z&n5kVB!Dr0b(jAZ!%Of$BSehTJnq;YvuUL_-uYF`ru*XJiG49!`pz1&A!cK{N6bbW 
z4i~e*A;;AXzz(x3Wg8_Ck#-q%n0&c5UAAXPr5Un4ODfHhHByRf*Gi>Y*{+jHb+SEO zDusqYDm8>(%nt9!mIi_ovvxjoGe(<~sF&^8QfYShMLxWP2hO*o_xdiUT>HU|ffO-a z|JME6q*=jOO7L{bjRB2#pZ?hWU9;qDqm1}6;!9&JDYiH_$+bEeX#@;m?RZgXhCfw7 zIu1kMNGc>6NYIbZ)0AcfcN;i;=M}{7wIK&-Ko01h+-xYSGwe*I746Wa+6=*HHmz$6 zZxYrOdX9spX0t(4m%gOV5PZpC(}xbgO>8v-=tpqI9j6Oi=#SOtvh~MS>E`u~=rsK> z1Tpu~z7d0_pL&n=jbv&1rJC;@4)$rwf}iVOdRtzvl{f2PE4QXgZ|A%(4c;8e8Bgn{ zgbb;*hx__drJv=zD-D4?oEFo3ks3JCcY2m|Gv{a0ICz8LeV$r(v~S=6>BF3x(lB^K z;QczavZt@_o6@^EA0}MJG@qr`_D)_+m8$W1d4Jj>1^b7|VCVE*1 z7okT<=@5;=Fqa6Bh5#~4xyvyu_4j?M>#BW^T0>Xu`+f1OF2@BqvAP*$lsTSZo8!1DDDg%>B*Nc zz4)In?U#mF_U!SR{TnRxorjUr9qaFtwIj|_UW$|@+`=klB+4-s)~=vb6XzrqM#(YG z448^KD)uZbys~?_yn9T4Oj{U@Yb|LnX|>Oerf5xF+DA$ge53wuw!QGmuCoZK=z;}Y zAYHDF%cWWzB9*EvxptfY$-XmPSz*U5B$FO?OrT)^X=VUuiOf{-h$ht}q}y&PN~9cP zQ#d)3+5~bq&`EXIrND%=`<7gnE`9KiIdFZP&VjeC{f+)R{rXp<1^od%fS^^SWM;#5 zG>aWPa0XN%>JVrx6+j}_U6%tB^6p!Sa}Sn?G3K}qOD_I0VLs4H{m=z{=bxcfM}&c0 zpB$JlWniIb;34IKU=bHclgqTU2CK4|i_uD~Ra!&;{2y3knOO*qB{zd^PH^{iPJCEJ z1>jIPZNh%@4Wu<3JY`fDH2{g2;Ox*R7!+2N;Tm39(Gnum(w)w3^)31A7+r{jl}rmP zNJto=)e?wot?&@s&TbA+T+8&AgPnW;Ru<5G+TBggsVEER&P=B1&P%50MoXq?6(`fQ zNRw%rw`7`TE14z$lW7`JGED@*E=M zKhlQn+C$V0oeNw(GZ5}sNaIy^AZ_I6d>Bk9;*wBQ8W6uJ-AKHDC>h@)(bIt&fUtWW zUWlH*rYPUWlnK$GwFe?@bhb^-4pBFBA#i>4$9KQKupLe-&og1ehWsfO={QSz4)G|c zkp-Pg?_>AI`|PTsd~0gF3^m?4#7lmg!T67x>{tPpMl;kjFm72B?9*vppG!B;BYzQOfYF*1r`Ejx|bn*9zAj7A~Me4?ZRHVk`F9$DqT; zfOj6;D6>YbnltO#=639Q;;=n%J?)=BwOMQfB^ z;;>%h56~*t8YjT}(G_ybr1aX1SG@Qy@iWI%JlWIvBvfT9;#Fw)7| zvym>=4n_8`_PqYB`?}HFCgJgC0ZzlfiD9S>MfIT_m~f1;23f~1qfZmA@B9n&0IG22 z$0T6yw4zZLA1388?G~&k26C)S7k#wT8>zx*W2vMnXVy@^Y%%(rRF)Cx#=4fOv*gUZ z)G5o3Y+>@H*r61unwQE0FrCDOqRR;dOysjOVid=0vh z17uKL#im>;tB{6s2HE?zk=LBj+PgZRD2K_q_Fpi}rBD+nhO!iDrY=D%g<7dgcuFCW zx(rKpN>SltCz1rB9SKA`?;zS04p9`g{?4}&uH(TmZRkdzR5*vb`BCi7ar#y6Ht>)F z`}i`p@k{e)nS{%rp(WD<&>b54TixY{euZCd)B+fl z1GH`8*@;jZ)0T=J|!>eEPF-P0@t!|Ig*Ur`Pble#PZ;J5pjwErqvkL!iS8&&k{l9w6yeC4X;Q|2I#=j|Te+bX)f9!zdx)i{0I z11&C($hSD#>2uWNycF^7Z6I8J$d;;8ny`4m-P}x?&VcA`af*>S_QaZ>veush=6?x{X&w%k@5MTr*vpc!o!r-^w#hobx4!>zj&4~_y^HSL zPn^2)6a0F=`$ZQAz`0#80Qg*)uf0|D=9S|X&m%s*_AL^D4TA*A^rbJ*?4n6Z&Na7U zZRwh_vS;(nxsLMO{M_nw73*@>)Rb??tz5f)4cJhAA>Ul@Z*uxvjjf_Dx0pinRx3}f zciUp<)H}-StIO>h9IKX)@4UQa_}kvlkei>kJnt#$Eo<<4wz;=2bLSVVSO$N&s=r(~ z%xfTytyQXvK`%u$ zp{~KV#Fc0{S_FBm?Ts8w^_0ma@z>@56aqD^N$KJYdLDFB5^p(panO5{cx~Wi>`1bk z(yfAQ6KYeEP8<4NsC!WlpdLo;K|P5&h0?dwrGD=XccvTAN|_ZSWjEU;Q%;4FMl^Si%)`qentH6uH(kK4>>Kv?M#n{aaR61|qT9hTH`^0_yqzN-BhcD}4% zpG2?6V=6{;{d(YxbOH?w6Z)Tz$@;Wo!+J+WzBvz*LH;rz`)#$wzq7$7suqi( z|7B`xJBH*j-;PVMzmj6$xM2h?GgX>)nD&?sn%*&8G+i})Ls%+o6`F-th5bTK!BYh# z1#1hc3!W=zEbtVx6?7FmQfMwLC~PX+T__d4TX>-`ZDr<4%gS{tohw^bj<3A6a$!+k z(bGlG7rj)}S#+l8UyC%B^_FUj-}0iR!_sZ}uH~;SuUn2<`Yq=yA6u?kQmhYJ=UdIz zQtJlmR;$nY9qVh>H?4!#pId)v{lc17yrkG%Y%Q)Xt}FHww->)!{Ce@x;{M_*#ZQzh zFDWnCQ1VjAUzNOB@{^JaC7+gDE%~ej1LE2L&u}TGbkoD8ji&9UW>cH#PfgvXeWuq< zM@_w^KGPYD^dr-3AyeRmCxsP)U8oRhgw4VZjQR({-wA&&d@NiQQp^vU=bO#uQu7A$ zRuh-NIhrbU=7bI3gSsdW4h0m@px5<}LY+`OWzq`B3~{ GTK)@BR;<a-&EZe8L@97?PG z&V5f$PqKh!r=98U^bObV{+xU6cg{WM+^csl)!X_xJ;!kdq-d1m&fratll{H@pFT7% z|Jb?Z-06&$3(n}OUM{F>Y3;Ujc{+D{8rv;RjU64GUdv9m#pCO+w02l3AFs8vcQ(5V zv$B?171lKZ$5rXlxvstcQ5hR;id()SQ#B{}ittSzjJdcv(kqBna6 z@fU^DB^;BB<1BJt>Oeg3uvZtyZKgsW$6Y1`-0JD)xE(aWpZF>Cy7zj)^YlvmAwVRn zDg^R)cHm!ObECHr`7XlfL?W7xBWWHd$At<#GNFyI)*zA23XwFA6C4V6c6U?NIQ`i{ ze-=M|#Lq4BV>;>r4e_%j(mIJ>t4zmq*oSt)SA?W_oD844t+PqdWj`8;-@_UrWStyW zo>=qO(w|!cF7d4=K2z(THA%fDe^h5a_#KQW?zbAG`fRD%C~mhJMUyomSWtTbWJEJC z!iNIyng>FG5${t{F1N$l61d_Or2Ddwo5||eKK>cQhM#%vk@SI(_nqjtOwB60XM)}b 
zI5Ec>X|Zw~e??@B#tbNMT%;1OK-8BVWAEF?SW`C+Ao6)?%{&N4-(NIfscK6%b%JB$ z9CBlv3B35wwFcA#UnIr~f7dxeA-?6G%{xo|CBZ1xL8nz#gJ`fus?qt9e?fOoXjJaz zI;+y3i_uqszD}mUkLX7=3da1^U;CPcfDNkGoFWjfwlox(m?|-vYbj_I$ehqzt zbsxz;<3#2Y!kj3~Z5Zp#gTWXj!6a4!B_{Oe_d_{4L?Q_kl?hG5X!;@ga#H0t^GO5u z-Ul?Z1~Dv-gx}5!M~o{uEJL~x^Nar;p32sZh!?|Cc|lW}SoO{P3jgoS&s+#@%4Y*P ztdWy6hHFmeGOhl(KU(_CK57$Y$1o-Vlw2lKoFvTBNM*@G59WW+p(WQWn+XifV`#r% za{7Pc#K`|}m_{Zo$V1Y2KP`Z+WMd^wADBJLAQ`3d98owamFJ1Vsk5l!B*PcQ%HYv5 z|NDk#-jQo6zX%zFr%`2o=6X;#O8g$ao8wq3RI-pJRR)e8I=Ff-430EsJT$PnN6BX+ zzYqCvBo7pSkDNbj@V|f8GZ&y31I>Rh!1@`i(cHQN)|N|zuNF2VRhy)W9I-4%s>l<| z^3KWVzA75NT<*VNcxGO%sQfCxeK{Xh04JLf%zBK|tq^vPISt}>UjEIP)A)zUb4SOV zrag5<*<;S^xvN)3V+MC%c95hNjM93k#sIsLoJP!=(Qx&HDP;@YphOf=4rJGl{N4XcK0^(gc>2y8zy^JZKn2w&*%Y6mwv^MAthP z9;Kl%u#9VKj_B$`JAfHe0Lv4P?8iKS0NBzOW6Kwh(2SrR@G=&`vxrB$ax+7inG&H@ zJkq5Q5y-3*AXkY;+GG;oT%H2w8u3VrOaPqb#W-!^k!HCaIF~NM=@5_XP#};d(^KMH zFCN*h5E0Id6gZ2;BTvdC8hyIv3T}uC?~ShzW-~_gOkcs%ANl8=XSc;wdL1NlL^1~w z*TLYiyI8vl^|gZQ;>3I71q;N1pkj)H2eTPb2+ljF8;2!g>cx?Q*FeYE^g|4lC`-41 zpoiHPM^?CIK{7$CNUOo&yBQzVGLrz40jPj86x}8OWcia zoSq7^Yd{5TQY;|}Fj1=B=y0g6y6z$K~| zvEZS4G`aoc@V#FOhm!2_It38ft7(#C%akciaX%_}jpB$Bie#{NGa@yt!WeJB{A0mL z4iTq6!iZv_6%$l&9U~|a2`+i;E=DlAJOylK&9X-vfd)2Ny%zS;)Uet5aVO-6O)=O@Wyeo~%~oU; zc}hD`;+L##lNL5}Me<~L`9Rv1-`D=6@Ea;g9 zv$hLMR0|%b@Iax3MTcX0)~<#pIW|n_+1^cdF?rks7h}LEf-n|1y6Zgww;Ov&lo!45R|EsWFQ z$}UV}5+zpzQv6DKz9u|^UqF+@ClVH>us56{vT2$Ld7+q82{UvriLS}8Fb%3{qKXg| zi@Aq&j8`~AUp*BTA|QycDY*z^l3Y_N3+w==X)F*;f@l&%7l{drSgvW64l7rL=%&Nr z7;umYhqbuPx57@_p8vCFNQ8}0^-n@r-n|J9LEh~Pla3d#W-$wY}`~mW*lI z<1)h`hLg;C3NB`h;0L_q#LwqfbjVJtJ9+FPMB%hX-t^Sd%H++)7lcH4m)=2ME51A=$_s-mE*xi=Xnx6XnfRDNM59ISlW6XJc1;SQG77AhQb*y>L+ucZ^^p zA#9KVW{lf~Nxg7F_C1*haZL3+=1^G5zy+TL@auk1ho1-JfHE!OXWRjqOA*H@z3_@0 z@MJ2ZCTg%a%y5TP+)I_YVH?wnfXznB6J5t>GzuLuAGmuw>S^Gg)WS!6xdr?+qU&fP ze1w_Q@Xu)BXQhOHT7@5r*D8Fya9j&JA>wIZpVz|Ht}v?B^+JNSL6aKpDJ|UelyEOE zf*T7#6armmWN*K=r+r^IloZ?wUZP0FV&E}3qRC{~e`-t~5enOH#6P|3lpM%pDr2&t zW2t6wPwzSbc@Xeqy0~>yw1b(*aojbCki{s%>f+*>Ou&9ri+y>b=W4JIYq7(@Q)3_3 zVh6LtpcKb`QHy=4>Y0hyVIvCmc(_f(eq4(kF<%>HHP|m}v1i;0`$;YKMGHtA`zu=P zOKySvyo%kdhR#InAuV=Ha~wMlCq`I7{AJrY)gMI)4m>tt!#Y~nBCw!IU2LNlK~!L| z18Yxs#H4$Pc6QW*8uW8;^6+dUyGQ{?Wd&gMj_4Ex z%;NK~zTBV->$hP?KPhfA=shOAFCu!3`hD5@-f3|klsTtQ(+g8#dA7Lhz1<4y1!`9ku#z9+&z_F~I6JO`SU4kiE)tC<${`G5vnL71uz?E-C+Oe=G81VRNd4!% zF$MW&jX2vHEVH&mbGy_t0sm|cjs=1hMmkd20a@t;fn+8QQ%1b=afZ+adjI(x`6vL# zPdSXLhEBUoAdKcZLBP%qa0>Wfx(VM8clD9(7sV0dCC&0srZ_-MIOZ#5@Ymu>j% zeniYj)aYN&`49*rQn>*KYSEfJ{9Go56tJcMAormKdo~Vx%yD zn7V%V%V>1S3)!NaluK_#b2zOO>*qz^g4lagsyB&m{I~$D&(b01&3C?S{@RH7E1~rx zJ~I?S$4SxL&FDLmN2kT)VzP>yWn@duXp!pYCEtS7dsCcc&Il8+jtTSV$wdzQo{WKc zUEp=|!7rjtmI#8)#{_dE2^I+X?v7^qKgszrqzVJsU`-xlF;fj&B^#Q56GoO>iNRrV zx1cTX_s(wVal(JzDjPKvGPE7cUN~NcGvG>V_B(p!3#1Y933!6+ z8xal-aX`m+Fh6@1urlQf$Vix(3zvMWYxS3^6DL+8j!0W#GnI9#d z*DL4e_gQdQkM?zJq_Y(A#Vo3$L9J}i<#hjG@p#KH-jm7W<;de*!Fbx!d&qy*_GuW! 
zTQfN_49G3%{9&W+S-RzP;C*WKyj`z$at-wZ1oS6uHbk$A@4C*F6STK#%|8hFXPfD6 zLC|%Ae_gpS5Y4^WhtT^m&@dTrg9E;kK2xH*^w{3zefvLtqRcm+@e`DKuKPb(=SvqS zr23neMxnd`nB=+l(DBT9GF!T|027X6lHnVy3#j|yZ;35yrE4O%(h&KtG`MfV{Reix z_8uJ*j!|(;7^D||HxGHyFeV(AYfs3v!$W&n z?Mb=zlw2Dc+RtiF%f<6UeJJA7!O%hWzC3h@y~l?Rv-d0JA1vudZkH%bh{af<7N94l zLqi~B4tyRH4k_z^I&uwpnsD>MFChp2Cinq25 zIU((so=BzKO3GKPLY@;Va)xSP*J5p+lv_uciu|D(*1iToD=OAn#0uNcljH@t+gZC~ zXb0NG+VxUyGi54@hq_pYO1Xnmtk{eu&<^dO4z)FG%EgK*aiZWNyI*3p=q?$`4jugX zW^{S1=hsZ2(f18+g}O&R>By{|#MDp<8IG;?WA-M;>Bpfp{O8NwC83LLYk@Ul^pfa+5-w zix@$+VM4s_`|iwlBNF&%?!!b^8WZA8bY)&rIX|hKom93Yl~*N|tx085QaLB7Ob!^2 zf6XxcI~3l8*lp?D`FHLnZ_;f-8AWsJZg;m=!-4wAm6kn?Z9cc9u^U{wy&lGB zmt|*bN2905LbnEqVT*h32WEjjjm_?+*7n9W8miUXoe~-Y=A&;-Iwjc!H0;9Dh1LfzHvJ>TOlwVcK zeJFnv8DU;AaC=B@m1Q_qHnCjz2HgJcNN~5$8FgE*_%4tPp^7P)_%R#%usLH z`e=<%U70iug%(R$ho!U2+uGTI`Be<3(X-pv?(Xnf+8cZ5MsaLvK51#)g@U^qZENIN zReC(#+SW#tjXrN@d!x6tsj;oChmC~^ZSC-OvMFxuX!W+TVQA(Rv)7vYdg$B(9n(DQ zC||7WQ;KhD6&dr>Bc{KjPBK|Ed?c(r2Ed)@BiZyyL(^MCEukdTc@-NUsKnDI^zM3JE5uDi#iADPHXChQRhV6t)Bg8 zJA^c#p*fEHX`~R+Wu#Y-rjQ~?^GFXPCPXLr9@o&&+}76F;ojTog%F)wLt}Th z+e7)IT!Y)=>F6Zca?=`jcB0mnwrf|Lubb@;O4D|E+-{EZq+z$j{s#uf?M-WUw>Kf? zbVwgXGa@+zow_mv%8_D(bMg&^L4eM|7TGrcsvb{H#PY@h|EO)2I?N| z@HTAnbhcM@cI>9Q0$oi{H4FI-5Du*sogLU#bx?b}dp*o4hw8_;!nxX-aNR9Wrzwsbagom74laY_c| zAf5wh4K+QFvpsD?rMr#$rl!0F>)IHV8@52-fa7aoy5aiJQAwW?wr&xs>_vr`6x4D# zsJ~Nc@7~ko@yb0`f&Piwq6ToNtg3GP%E zD;+7lQu=1;^-}sjMW?_w&Gup2{k9@oxoxX$r|k>20o%81XKX*S{ip4}ZSULe;qT|y z@)dkF|2e*uf0q9`KgfTNzsi5K$Xc|%sJy77=pT!|RrLL$pA@}T^k&i9Mc0dN7G>B! zWq-oH-QHq<+TLeBWdFK-(0;=HUHe)4W&2O)*7v7^$l#nI?^)^Wt~ z4adJYt~lOwTz90c`|!H^*A=ZRU$=GL&UK!3&#pVZ4nUpqe}i)Jm3$Rn!`JcK`5k;S cG}Xm>`ThJ!{tO@DEq1GYjooPn{a^U=KTm-{ga7~l literal 0 HcmV?d00001 diff --git a/gam/gdata/analytics/Crypto/Hash/SHA.py b/gam/gdata/analytics/Crypto/Hash/SHA.py new file mode 100755 index 00000000000..ea3c6a34f72 --- /dev/null +++ b/gam/gdata/analytics/Crypto/Hash/SHA.py @@ -0,0 +1,11 @@ + +# Just use the SHA module from the Python standard library + +__revision__ = "$Id: SHA.py,v 1.4 2002/07/11 14:31:19 akuchling Exp $" + +from sha import * +import sha +if hasattr(sha, 'digestsize'): + digest_size = digestsize + del digestsize +del sha diff --git a/gam/gdata/analytics/Crypto/Hash/SHA256.pyd b/gam/gdata/analytics/Crypto/Hash/SHA256.pyd new file mode 100755 index 0000000000000000000000000000000000000000..865a16c4d41a80e801a628d32fb40acddb50ee7d GIT binary patch literal 9216 zcmeG?4OCNCnit5_ph##76&zY0HCC{KJfJ{;Kp>=|1x11BI7%TxAn}BRCNB!JVzD7z z-#jy8J-T&9*<(h#b*;0tyK2=sHUYpk~LWm4N6bU&DJvI$}e*L2x*s(K?#*zck@1`7% zsC+l2)@ZUb?KW$xP2a{e=`9v3$E-9kHiv~VS(x(0HB6he*^n`H>SSfmbhVC<%7{s% zz2p7zkhftHJ9A1zWIl-l3Ih-cfa4HOjAl9pI>sFVfIV&9@B}1v4x#g+22c+bXdmpV z38_GGBOx0w0rzU{7~j2^e>y#bGjwpEenJ5cj0@rRBw!4!5&klo^_(8Y?HD5ifHLs_ zVNFAbH^U|gYA{!s04SFT5Y{xXtQjlqb|ekrLxYbmw{Em&knGVMRf7!LO}<-JgZ504 z9L-@P@X_xh0AWo-<1?78O~G8Y0~75%8YToo4IyQBQ@+Lir!!E;fAaMAYMi4nLRXAa zj8OEv3Lg2jN|{g}CsfJ#6-qfDqYTtd2I@%=Lzyjotk-p2vBm4^a++&1~R|Yy_31QFj)R14M#3=`Af#wn& zaUt`LC)w*%{<7OJw)~lf`o0)~ILLR0p#8d|7zO`{ zb2RY?>I*tKUkf>{RLXdnGEfi7r<~&vN#1@*t?tNe`J*BEN|3LW|Dsa`6jlPeC@ZcFuj zl&CmVzAeo+92eaOs(}1-j&E2V-G`&cqx<+Ry#ruX%TqPy?-lEgOSa&j)9^&H^*#@j|`at(Avj zK_g&;U?lIzgT^!rGNl>zEdDHe}J3|UNw2LYNkcvZ{}{1`*+58>0?n&9;0PA^T; zEiCsBc&LS-NsJBs-os)XBq*r*399z-gEV?tK*?XDsp!EB-qC&qwRHg+@>~d+KPdz; zC1Sc!w=5S|BKW3|vdHT$Qw1s_&FTndNOh$vN9Y<=9FnQL0a;P>I+^V0kw{tQ2HEto zAYG}rn_Q~8vi3B%6y?r*;)oII0z#P#UZY}lqEL4k6OtU-98f=Q-AxE4#;qHNjwUGL z9tlbls3?>giejk+(`AQU=&69Yi9|@iKA1VM&exCej&Z*0rcfWlfB04km_CAU!kgFr 
zM)6XgVzW2D&!K=sFhCO%l0kPQ5w8@%Vg-&S1#2lEI70oH;20OWLbV1ZqV(&=K$8U4 zLkN67{cPeouju&^(4-l`#KuAs%b74Sm)9{(oZ>7bj%YzE!-B3(q$WnHpjNRqUx1A& zCM1G$D7kycrZ~IC2$v*f1p-HRvY`Gl(C3GoCzMiALqa^6U~JcJm=si0+%?$$&|^lh zhn+xs88ee!B#IzlF`)=FkjVs@m_%AD3@~m-Hx+VKP33cdeCG-v&caQ)X%m{pCeFfr z8II$=3&(Mrh2vPm;W(CPIF73pj^kQ|Dq_kibpT;X=6Tx+pG#1!KKV^#Nd!k;xh%*@Z*bdB@nu@w@MDl6~P} z1}<;Fe}Z~CLW7=)G<2$@PGj*t`elDNJ`=_7PxKcr`8x~#!f$gh|09|#SpQE(;-xTP z-a>@aCy#iZ5Tkkh7s$1JT7O%q&a(-Efcyy+GVvmRz3wP}go2kcFvws$5`|`fOTGV7S=i@@k2uW%~So5 zyxmZJM?VL}!{@L<@e&I=Aut7NT&9#mTyI`Mvy_V;8{mo?_#M2uC*~7Q-1Vc!KvYm; z>@Ja$2k-R#e%-cGXr6BH;O^Sy*X=;muiJ?ouRHAN0ft|xtsisTLYb*JolmYsAL&-#(l9g`gnNtBO3p@Xf=A3uo z4S{BvcbRdBspVbmIK+JNE)It@m#z*R(sa7k;!w(HcQB(pcQU%c?Lsf~R6Ey*dA$sK z)^*)c#-4@U{0Lr}@LP%7i3cv^p5FiXe*R%B6Sr2jtDE-nT|2RtJ_f?|J0OR*gKCJs z>Ue$R^?(3&!ag5~vjX7o&Uquw8lQmQoPb}RfRnq50xp|?k514Zn}Fj6437s3 zEgZ)L-NE5}tG_huDtvO48l+L&iKiAc9Ap3Iv?gnNXKGpjv(&&jY!*h(Ac3*AFr3lA z81;6e$2GNelWVV-pqhuOgwm3L2XwjH7bS!Rui zGt%k97n!R~&4%V+^k3;j2AhI=$Y_Qw58uxj&C!7SgSZCpIKXcO@ovDU1OCA@Nq!h` z8syhJsNEnx9dz-lGi(~LiG6$P z|1INf^L|wJVtL-P|MYwB!I#e;ytKUUd#Wq)+D@{224+X?_dmDnt!q<;PtJewm0vGudi%cCw6u8FN--kiK@b;hZ_^{%-slHuH%>Jan<}=Fse)_)aiP*9B zovY#=^6gHNoAVy2TvIXo;IaKV{}7X`TDW&;(c-svEWc*WF#h}VKjExRFL%Uu4D`q& z{TpwH$2X0bt9O6C>x<)8JDxpB>@x`-c|6 zZ0D>Q6>v&s1X%=r^q9>-ebKT49sI$=VKxM7{mZMKUb&!bzE=Bmc2>HsB0W33W?|*R z^!c?FOVg_sESe8wP+60m)nGU3ZHDG{&X!(`p)hJSgT1vS6=WK86%7!;QeAl(ddSF_ z2Y;PC41>$Frx`|<{P+D*eHwiZH7`NRS(0t zj2-?YANI5~CPmF;qU5*j|62w^zrAHuOTVSzS%;7c_*{TYfCWi}sG%ppO_0&v*-Ws^ zLxqp5Z`=R54AeG;g$yUbIlzr!v>c!X0N)Zu>i}B3p4_ul18)U@F-)cd#v1^(0PF&84 zq@{$sMxHcTg2;utPFO;eq@khNY_?hq9VQL}w~_|E-EOeqcpGUj*lZRn&L<&KztRfS z%*d7&v%_vgT1jM!&0rwJ76~_C`l~7-9g%H@wkCLwLHi+&?FKXj`?$KZ)YjTit+&|? zwT^Z(=q&@9&Sq;^+|=Z-!CM98J_OnV3)k?Z&DvIJwX`BFC8D~sioU)YU{b)FmRg!?;3b0gKD@)Hr1;3{&f3m)Ljyj5{{z4qo`equ zW{A#eBhOGAQdni+jMipiMSKCAIZ{wLXmd+sLv`n3`uU`x++Zeu9fmK0ceEezhDDHP z2*=Sxb3z(HF$hoTmM+p&W@lx26N^Q-dB`Q7<1qF$_kbitSqnUoQSd z@xkJcimw(&mON1MXh~K{S;^9pl_j pool.entropy: + pool.add_event() + + # we now have enough entropy in the pool to get a key_size'd key + return pool.get_bytes(key_size) + + def __newcipher(self, key): + if self.__mode is None and self.__IV is None: + return self.__ciphermodule.new(key) + elif self.__IV is None: + return self.__ciphermodule.new(key, self.__mode) + else: + return self.__ciphermodule.new(key, self.__mode, self.__IV) + + + +if __name__ == '__main__': + import sys + import getopt + import base64 + + usagemsg = '''\ +Test module usage: %(program)s [-c cipher] [-l] [-h] + +Where: + --cipher module + -c module + Cipher module to use. 
Default: %(ciphermodule)s + + --aslong + -l + Print the encoded message blocks as long integers instead of base64 + encoded strings + + --help + -h + Print this help message +''' + + ciphermodule = 'AES' + aslong = 0 + + def usage(code, msg=None): + if msg: + print msg + print usagemsg % {'program': sys.argv[0], + 'ciphermodule': ciphermodule} + sys.exit(code) + + try: + opts, args = getopt.getopt(sys.argv[1:], + 'c:l', ['cipher=', 'aslong']) + except getopt.error, msg: + usage(1, msg) + + if args: + usage(1, 'Too many arguments') + + for opt, arg in opts: + if opt in ('-h', '--help'): + usage(0) + elif opt in ('-c', '--cipher'): + ciphermodule = arg + elif opt in ('-l', '--aslong'): + aslong = 1 + + # ugly hack to force __import__ to give us the end-path module + module = __import__('Crypto.Cipher.'+ciphermodule, None, None, ['new']) + + a = AllOrNothing(module) + print 'Original text:\n==========' + print __doc__ + print '==========' + msgblocks = a.digest(__doc__) + print 'message blocks:' + for i, blk in map(None, range(len(msgblocks)), msgblocks): + # base64 adds a trailing newline + print ' %3d' % i, + if aslong: + print bytes_to_long(blk) + else: + print base64.encodestring(blk)[:-1] + # + # get a new undigest-only object so there's no leakage + b = AllOrNothing(module) + text = b.undigest(msgblocks) + if text == __doc__: + print 'They match!' + else: + print 'They differ!' diff --git a/gam/gdata/analytics/Crypto/Protocol/Chaffing.py b/gam/gdata/analytics/Crypto/Protocol/Chaffing.py new file mode 100755 index 00000000000..fdfb82d0c39 --- /dev/null +++ b/gam/gdata/analytics/Crypto/Protocol/Chaffing.py @@ -0,0 +1,229 @@ +"""This file implements the chaffing algorithm. + +Winnowing and chaffing is a technique for enhancing privacy without requiring +strong encryption. In short, the technique takes a set of authenticated +message blocks (the wheat) and adds a number of chaff blocks which have +randomly chosen data and MAC fields. This means that to an adversary, the +chaff blocks look as valid as the wheat blocks, and so the authentication +would have to be performed on every block. By tailoring the number of chaff +blocks added to the message, the sender can make breaking the message +computationally infeasible. There are many other interesting properties of +the winnow/chaff technique. + +For example, say Alice is sending a message to Bob. She packetizes the +message and performs an all-or-nothing transformation on the packets. Then +she authenticates each packet with a message authentication code (MAC). The +MAC is a hash of the data packet, and there is a secret key which she must +share with Bob (key distribution is an exercise left to the reader). She then +adds a serial number to each packet, and sends the packets to Bob. + +Bob receives the packets, and using the shared secret authentication key, +authenticates the MACs for each packet. Those packets that have bad MACs are +simply discarded. The remainder are sorted by serial number, and passed +through the reverse all-or-nothing transform. The transform means that an +eavesdropper (say Eve) must acquire all the packets before any of the data can +be read. If even one packet is missing, the data is useless. + +There's one twist: by adding chaff packets, Alice and Bob can make Eve's job +much harder, since Eve now has to break the shared secret key, or try every +combination of wheat and chaff packet to read any of the message. 
The cool +thing is that Bob doesn't need to add any additional code; the chaff packets +are already filtered out because their MACs don't match (in all likelihood -- +since the data and MACs for the chaff packets are randomly chosen it is +possible, but very unlikely that a chaff MAC will match the chaff data). And +Alice need not even be the party adding the chaff! She could be completely +unaware that a third party, say Charles, is adding chaff packets to her +messages as they are transmitted. + +For more information on winnowing and chaffing see this paper: + +Ronald L. Rivest, "Chaffing and Winnowing: Confidentiality without Encryption" +http://theory.lcs.mit.edu/~rivest/chaffing.txt + +""" + +__revision__ = "$Id: Chaffing.py,v 1.7 2003/02/28 15:23:21 akuchling Exp $" + +from Crypto.Util.number import bytes_to_long + +class Chaff: + """Class implementing the chaff adding algorithm. + + Methods for subclasses: + + _randnum(size): + Returns a randomly generated number with a byte-length equal + to size. Subclasses can use this to implement better random + data and MAC generating algorithms. The default algorithm is + probably not very cryptographically secure. It is most + important that the chaff data does not contain any patterns + that can be used to discern it from wheat data without running + the MAC. + + """ + + def __init__(self, factor=1.0, blocksper=1): + """Chaff(factor:float, blocksper:int) + + factor is the number of message blocks to add chaff to, + expressed as a percentage between 0.0 and 1.0. blocksper is + the number of chaff blocks to include for each block being + chaffed. Thus the defaults add one chaff block to every + message block. By changing the defaults, you can adjust how + computationally difficult it could be for an adversary to + brute-force crack the message. The difficulty is expressed + as: + + pow(blocksper, int(factor * number-of-blocks)) + + For ease of implementation, when factor < 1.0, only the first + int(factor*number-of-blocks) message blocks are chaffed. + """ + + if not (0.0<=factor<=1.0): + raise ValueError, "'factor' must be between 0.0 and 1.0" + if blocksper < 0: + raise ValueError, "'blocksper' must be zero or more" + + self.__factor = factor + self.__blocksper = blocksper + + + def chaff(self, blocks): + """chaff( [(serial-number:int, data:string, MAC:string)] ) + : [(int, string, string)] + + Add chaff to message blocks. blocks is a list of 3-tuples of the + form (serial-number, data, MAC). + + Chaff is created by choosing a random number of the same + byte-length as data, and another random number of the same + byte-length as MAC. The message block's serial number is + placed on the chaff block and all the packet's chaff blocks + are randomly interspersed with the single wheat block. This + method then returns a list of 3-tuples of the same form. + Chaffed blocks will contain multiple instances of 3-tuples + with the same serial number, but the only way to figure out + which blocks are wheat and which are chaff is to perform the + MAC hash and compare values. + """ + + chaffedblocks = [] + + # count is the number of blocks to add chaff to. blocksper is the + # number of chaff blocks to add per message block that is being + # chaffed. 
+ count = len(blocks) * self.__factor + blocksper = range(self.__blocksper) + for i, wheat in map(None, range(len(blocks)), blocks): + # it shouldn't matter which of the n blocks we add chaff to, so for + # ease of implementation, we'll just add them to the first count + # blocks + if i < count: + serial, data, mac = wheat + datasize = len(data) + macsize = len(mac) + addwheat = 1 + # add chaff to this block + for j in blocksper: + import sys + chaffdata = self._randnum(datasize) + chaffmac = self._randnum(macsize) + chaff = (serial, chaffdata, chaffmac) + # mix up the order, if the 5th bit is on then put the + # wheat on the list + if addwheat and bytes_to_long(self._randnum(16)) & 0x40: + chaffedblocks.append(wheat) + addwheat = 0 + chaffedblocks.append(chaff) + if addwheat: + chaffedblocks.append(wheat) + else: + # just add the wheat + chaffedblocks.append(wheat) + return chaffedblocks + + def _randnum(self, size): + # TBD: Not a very secure algorithm. + # TBD: size * 2 to work around possible bug in RandomPool + from Crypto.Util import randpool + import time + pool = randpool.RandomPool(size * 2) + while size > pool.entropy: + pass + + # we now have enough entropy in the pool to get size bytes of random + # data... well, probably + return pool.get_bytes(size) + + + +if __name__ == '__main__': + text = """\ +We hold these truths to be self-evident, that all men are created equal, that +they are endowed by their Creator with certain unalienable Rights, that among +these are Life, Liberty, and the pursuit of Happiness. That to secure these +rights, Governments are instituted among Men, deriving their just powers from +the consent of the governed. That whenever any Form of Government becomes +destructive of these ends, it is the Right of the People to alter or to +abolish it, and to institute new Government, laying its foundation on such +principles and organizing its powers in such form, as to them shall seem most +likely to effect their Safety and Happiness. +""" + print 'Original text:\n==========' + print text + print '==========' + + # first transform the text into packets + blocks = [] ; size = 40 + for i in range(0, len(text), size): + blocks.append( text[i:i+size] ) + + # now get MACs for all the text blocks. The key is obvious... + print 'Calculating MACs...' + from Crypto.Hash import HMAC, SHA + key = 'Jefferson' + macs = [HMAC.new(key, block, digestmod=SHA).digest() + for block in blocks] + + assert len(blocks) == len(macs) + + # put these into a form acceptable as input to the chaffing procedure + source = [] + m = map(None, range(len(blocks)), blocks, macs) + print m + for i, data, mac in m: + source.append((i, data, mac)) + + # now chaff these + print 'Adding chaff...' + c = Chaff(factor=0.5, blocksper=2) + chaffed = c.chaff(source) + + from base64 import encodestring + + # print the chaffed message blocks. meanwhile, separate the wheat from + # the chaff + + wheat = [] + print 'chaffed message blocks:' + for i, data, mac in chaffed: + # do the authentication + h = HMAC.new(key, data, digestmod=SHA) + pmac = h.digest() + if pmac == mac: + tag = '-->' + wheat.append(data) + else: + tag = ' ' + # base64 adds a trailing newline + print tag, '%3d' % i, \ + repr(data), encodestring(mac)[:-1] + + # now decode the message packets and check it against the original text + print 'Undigesting wheat...' + newtext = "".join(wheat) + if newtext == text: + print 'They match!' + else: + print 'They differ!' 
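The self-test above interleaves chaffing and winnowing; pulled out on its own, the receiving (winnowing) side is just a MAC check per block. A minimal sketch, assuming the same (serial, data, MAC) 3-tuple format and shared HMAC-SHA key used in the self-test (the helper name winnow is illustrative and not part of this module):

from Crypto.Hash import HMAC, SHA

def winnow(blocks, key):
    # Keep only the blocks whose MAC verifies under the shared key; chaff
    # blocks fail the check except with negligible probability.
    wheat = []
    for serial, data, mac in blocks:
        if HMAC.new(key, data, digestmod=SHA).digest() == mac:
            wheat.append((serial, data))
    wheat.sort()    # restore the original ordering by serial number
    return ''.join([data for serial, data in wheat])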
diff --git a/gam/gdata/analytics/Crypto/Protocol/__init__.py b/gam/gdata/analytics/Crypto/Protocol/__init__.py new file mode 100755 index 00000000000..a6d68bcf8de --- /dev/null +++ b/gam/gdata/analytics/Crypto/Protocol/__init__.py @@ -0,0 +1,17 @@ + +"""Cryptographic protocols + +Implements various cryptographic protocols. (Don't expect to find +network protocols here.) + +Crypto.Protocol.AllOrNothing Transforms a message into a set of message + blocks, such that the blocks can be + recombined to get the message back. + +Crypto.Protocol.Chaffing Takes a set of authenticated message blocks + (the wheat) and adds a number of + randomly generated blocks (the chaff). +""" + +__all__ = ['AllOrNothing', 'Chaffing'] +__revision__ = "$Id: __init__.py,v 1.4 2003/02/28 15:23:21 akuchling Exp $" diff --git a/gam/gdata/analytics/Crypto/PublicKey/DSA.py b/gam/gdata/analytics/Crypto/PublicKey/DSA.py new file mode 100755 index 00000000000..7947b6f5fb1 --- /dev/null +++ b/gam/gdata/analytics/Crypto/PublicKey/DSA.py @@ -0,0 +1,238 @@ + +# +# DSA.py : Digital Signature Algorithm +# +# Part of the Python Cryptography Toolkit +# +# Distribute and use freely; there are no restrictions on further +# dissemination and usage except those imposed by the laws of your +# country of residence. This software is provided "as is" without +# warranty of fitness for use or suitability for any purpose, express +# or implied. Use at your own risk or not at all. +# + +__revision__ = "$Id: DSA.py,v 1.16 2004/05/06 12:52:54 akuchling Exp $" + +from Crypto.PublicKey.pubkey import * +from Crypto.Util import number +from Crypto.Util.number import bytes_to_long, long_to_bytes +from Crypto.Hash import SHA + +try: + from Crypto.PublicKey import _fastmath +except ImportError: + _fastmath = None + +class error (Exception): + pass + +def generateQ(randfunc): + S=randfunc(20) + hash1=SHA.new(S).digest() + hash2=SHA.new(long_to_bytes(bytes_to_long(S)+1)).digest() + q = bignum(0) + for i in range(0,20): + c=ord(hash1[i])^ord(hash2[i]) + if i==0: + c=c | 128 + if i==19: + c= c | 1 + q=q*256+c + while (not isPrime(q)): + q=q+2 + if pow(2,159L) < q < pow(2,160L): + return S, q + raise error, 'Bad q value generated' + +def generate(bits, randfunc, progress_func=None): + """generate(bits:int, randfunc:callable, progress_func:callable) + + Generate a DSA key of length 'bits', using 'randfunc' to get + random data and 'progress_func', if present, to display + the progress of the key generation. 
+ """ + + if bits<160: + raise error, 'Key length <160 bits' + obj=DSAobj() + # Generate string S and prime q + if progress_func: + progress_func('p,q\n') + while (1): + S, obj.q = generateQ(randfunc) + n=(bits-1)/160 + C, N, V = 0, 2, {} + b=(obj.q >> 5) & 15 + powb=pow(bignum(2), b) + powL1=pow(bignum(2), bits-1) + while C<4096: + for k in range(0, n+1): + V[k]=bytes_to_long(SHA.new(S+str(N)+str(k)).digest()) + W=V[n] % powb + for k in range(n-1, -1, -1): + W=(W<<160L)+V[k] + X=W+powL1 + p=X-(X%(2*obj.q)-1) + if powL1<=p and isPrime(p): + break + C, N = C+1, N+n+1 + if C<4096: + break + if progress_func: + progress_func('4096 multiples failed\n') + + obj.p = p + power=(p-1)/obj.q + if progress_func: + progress_func('h,g\n') + while (1): + h=bytes_to_long(randfunc(bits)) % (p-1) + g=pow(h, power, p) + if 11: + break + obj.g=g + if progress_func: + progress_func('x,y\n') + while (1): + x=bytes_to_long(randfunc(20)) + if 0 < x < obj.q: + break + obj.x, obj.y = x, pow(g, x, p) + return obj + +def construct(tuple): + """construct(tuple:(long,long,long,long)|(long,long,long,long,long)):DSAobj + Construct a DSA object from a 4- or 5-tuple of numbers. + """ + obj=DSAobj() + if len(tuple) not in [4,5]: + raise error, 'argument for construct() wrong length' + for i in range(len(tuple)): + field = obj.keydata[i] + setattr(obj, field, tuple[i]) + return obj + +class DSAobj(pubkey): + keydata=['y', 'g', 'p', 'q', 'x'] + + def _encrypt(self, s, Kstr): + raise error, 'DSA algorithm cannot encrypt data' + + def _decrypt(self, s): + raise error, 'DSA algorithm cannot decrypt data' + + def _sign(self, M, K): + if (K<2 or self.q<=K): + raise error, 'K is not between 2 and q' + r=pow(self.g, K, self.p) % self.q + s=(inverse(K, self.q)*(M+self.x*r)) % self.q + return (r,s) + + def _verify(self, M, sig): + r, s = sig + if r<=0 or r>=self.q or s<=0 or s>=self.q: + return 0 + w=inverse(s, self.q) + u1, u2 = (M*w) % self.q, (r*w) % self.q + v1 = pow(self.g, u1, self.p) + v2 = pow(self.y, u2, self.p) + v = ((v1*v2) % self.p) + v = v % self.q + if v==r: + return 1 + return 0 + + def size(self): + "Return the maximum number of bits that can be handled by this key." 
+ return number.size(self.p) - 1 + + def has_private(self): + """Return a Boolean denoting whether the object contains + private components.""" + if hasattr(self, 'x'): + return 1 + else: + return 0 + + def can_sign(self): + """Return a Boolean value recording whether this algorithm can generate signatures.""" + return 1 + + def can_encrypt(self): + """Return a Boolean value recording whether this algorithm can encrypt data.""" + return 0 + + def publickey(self): + """Return a new key object containing only the public information.""" + return construct((self.y, self.g, self.p, self.q)) + +object=DSAobj + +generate_py = generate +construct_py = construct + +class DSAobj_c(pubkey): + keydata = ['y', 'g', 'p', 'q', 'x'] + + def __init__(self, key): + self.key = key + + def __getattr__(self, attr): + if attr in self.keydata: + return getattr(self.key, attr) + else: + if self.__dict__.has_key(attr): + self.__dict__[attr] + else: + raise AttributeError, '%s instance has no attribute %s' % (self.__class__, attr) + + def __getstate__(self): + d = {} + for k in self.keydata: + if hasattr(self.key, k): + d[k]=getattr(self.key, k) + return d + + def __setstate__(self, state): + y,g,p,q = state['y'], state['g'], state['p'], state['q'] + if not state.has_key('x'): + self.key = _fastmath.dsa_construct(y,g,p,q) + else: + x = state['x'] + self.key = _fastmath.dsa_construct(y,g,p,q,x) + + def _sign(self, M, K): + return self.key._sign(M, K) + + def _verify(self, M, (r, s)): + return self.key._verify(M, r, s) + + def size(self): + return self.key.size() + + def has_private(self): + return self.key.has_private() + + def publickey(self): + return construct_c((self.key.y, self.key.g, self.key.p, self.key.q)) + + def can_sign(self): + return 1 + + def can_encrypt(self): + return 0 + +def generate_c(bits, randfunc, progress_func=None): + obj = generate_py(bits, randfunc, progress_func) + y,g,p,q,x = obj.y, obj.g, obj.p, obj.q, obj.x + return construct_c((y,g,p,q,x)) + +def construct_c(tuple): + key = apply(_fastmath.dsa_construct, tuple) + return DSAobj_c(key) + +if _fastmath: + #print "using C version of DSA" + generate = generate_c + construct = construct_c + error = _fastmath.error diff --git a/gam/gdata/analytics/Crypto/PublicKey/ElGamal.py b/gam/gdata/analytics/Crypto/PublicKey/ElGamal.py new file mode 100755 index 00000000000..026881c91a4 --- /dev/null +++ b/gam/gdata/analytics/Crypto/PublicKey/ElGamal.py @@ -0,0 +1,132 @@ +# +# ElGamal.py : ElGamal encryption/decryption and signatures +# +# Part of the Python Cryptography Toolkit +# +# Distribute and use freely; there are no restrictions on further +# dissemination and usage except those imposed by the laws of your +# country of residence. This software is provided "as is" without +# warranty of fitness for use or suitability for any purpose, express +# or implied. Use at your own risk or not at all. +# + +__revision__ = "$Id: ElGamal.py,v 1.9 2003/04/04 19:44:26 akuchling Exp $" + +from Crypto.PublicKey.pubkey import * +from Crypto.Util import number + +class error (Exception): + pass + +# Generate an ElGamal key with N bits +def generate(bits, randfunc, progress_func=None): + """generate(bits:int, randfunc:callable, progress_func:callable) + + Generate an ElGamal key of length 'bits', using 'randfunc' to get + random data and 'progress_func', if present, to display + the progress of the key generation. 
+ """ + obj=ElGamalobj() + # Generate prime p + if progress_func: + progress_func('p\n') + obj.p=bignum(getPrime(bits, randfunc)) + # Generate random number g + if progress_func: + progress_func('g\n') + size=bits-1-(ord(randfunc(1)) & 63) # g will be from 1--64 bits smaller than p + if size<1: + size=bits-1 + while (1): + obj.g=bignum(getPrime(size, randfunc)) + if obj.g < obj.p: + break + size=(size+1) % bits + if size==0: + size=4 + # Generate random number x + if progress_func: + progress_func('x\n') + while (1): + size=bits-1-ord(randfunc(1)) # x will be from 1 to 256 bits smaller than p + if size>2: + break + while (1): + obj.x=bignum(getPrime(size, randfunc)) + if obj.x < obj.p: + break + size = (size+1) % bits + if size==0: + size=4 + if progress_func: + progress_func('y\n') + obj.y = pow(obj.g, obj.x, obj.p) + return obj + +def construct(tuple): + """construct(tuple:(long,long,long,long)|(long,long,long,long,long))) + : ElGamalobj + Construct an ElGamal key from a 3- or 4-tuple of numbers. + """ + + obj=ElGamalobj() + if len(tuple) not in [3,4]: + raise error, 'argument for construct() wrong length' + for i in range(len(tuple)): + field = obj.keydata[i] + setattr(obj, field, tuple[i]) + return obj + +class ElGamalobj(pubkey): + keydata=['p', 'g', 'y', 'x'] + + def _encrypt(self, M, K): + a=pow(self.g, K, self.p) + b=( M*pow(self.y, K, self.p) ) % self.p + return ( a,b ) + + def _decrypt(self, M): + if (not hasattr(self, 'x')): + raise error, 'Private key not available in this object' + ax=pow(M[0], self.x, self.p) + plaintext=(M[1] * inverse(ax, self.p ) ) % self.p + return plaintext + + def _sign(self, M, K): + if (not hasattr(self, 'x')): + raise error, 'Private key not available in this object' + p1=self.p-1 + if (GCD(K, p1)!=1): + raise error, 'Bad K value: GCD(K,p-1)!=1' + a=pow(self.g, K, self.p) + t=(M-self.x*a) % p1 + while t<0: t=t+p1 + b=(t*inverse(K, p1)) % p1 + return (a, b) + + def _verify(self, M, sig): + v1=pow(self.y, sig[0], self.p) + v1=(v1*pow(sig[0], sig[1], self.p)) % self.p + v2=pow(self.g, M, self.p) + if v1==v2: + return 1 + return 0 + + def size(self): + "Return the maximum number of bits that can be handled by this key." + return number.size(self.p) - 1 + + def has_private(self): + """Return a Boolean denoting whether the object contains + private components.""" + if hasattr(self, 'x'): + return 1 + else: + return 0 + + def publickey(self): + """Return a new key object containing only the public information.""" + return construct((self.p, self.g, self.y)) + + +object=ElGamalobj diff --git a/gam/gdata/analytics/Crypto/PublicKey/RSA.py b/gam/gdata/analytics/Crypto/PublicKey/RSA.py new file mode 100755 index 00000000000..e0e877ec16f --- /dev/null +++ b/gam/gdata/analytics/Crypto/PublicKey/RSA.py @@ -0,0 +1,256 @@ +# +# RSA.py : RSA encryption/decryption +# +# Part of the Python Cryptography Toolkit +# +# Distribute and use freely; there are no restrictions on further +# dissemination and usage except those imposed by the laws of your +# country of residence. This software is provided "as is" without +# warranty of fitness for use or suitability for any purpose, express +# or implied. Use at your own risk or not at all. 
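Before the RSA implementation, a quick sketch of how the ElGamal key type defined above might be exercised. The key size is deliberately tiny, RandomPool and getPrime are only one convenient way to obtain the random values, and a fresh K must be chosen for every encryption and every signature:

from Crypto.PublicKey import ElGamal
from Crypto.Util.randpool import RandomPool
from Crypto.Util.number import getPrime

pool = RandomPool()
key = ElGamal.generate(256, pool.get_bytes)      # toy size; real keys are much larger

K1 = getPrime(128, pool.get_bytes)               # per-message random value
ciphertext = key.encrypt('secret message', K1)
assert key.decrypt(ciphertext) == 'secret message'

K2 = getPrime(128, pool.get_bytes)               # fresh K per signature, coprime to p-1
signature = key.sign('sign me', K2)
assert key.verify('sign me', signature)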
+# + +__revision__ = "$Id: RSA.py,v 1.20 2004/05/06 12:52:54 akuchling Exp $" + +from Crypto.PublicKey import pubkey +from Crypto.Util import number + +try: + from Crypto.PublicKey import _fastmath +except ImportError: + _fastmath = None + +class error (Exception): + pass + +def generate(bits, randfunc, progress_func=None): + """generate(bits:int, randfunc:callable, progress_func:callable) + + Generate an RSA key of length 'bits', using 'randfunc' to get + random data and 'progress_func', if present, to display + the progress of the key generation. + """ + obj=RSAobj() + + # Generate the prime factors of n + if progress_func: + progress_func('p,q\n') + p = q = 1L + while number.size(p*q) < bits: + p = pubkey.getPrime(bits/2, randfunc) + q = pubkey.getPrime(bits/2, randfunc) + + # p shall be smaller than q (for calc of u) + if p > q: + (p, q)=(q, p) + obj.p = p + obj.q = q + + if progress_func: + progress_func('u\n') + obj.u = pubkey.inverse(obj.p, obj.q) + obj.n = obj.p*obj.q + + obj.e = 65537L + if progress_func: + progress_func('d\n') + obj.d=pubkey.inverse(obj.e, (obj.p-1)*(obj.q-1)) + + assert bits <= 1+obj.size(), "Generated key is too small" + + return obj + +def construct(tuple): + """construct(tuple:(long,) : RSAobj + Construct an RSA object from a 2-, 3-, 5-, or 6-tuple of numbers. + """ + + obj=RSAobj() + if len(tuple) not in [2,3,5,6]: + raise error, 'argument for construct() wrong length' + for i in range(len(tuple)): + field = obj.keydata[i] + setattr(obj, field, tuple[i]) + if len(tuple) >= 5: + # Ensure p is smaller than q + if obj.p>obj.q: + (obj.p, obj.q)=(obj.q, obj.p) + + if len(tuple) == 5: + # u not supplied, so we're going to have to compute it. + obj.u=pubkey.inverse(obj.p, obj.q) + + return obj + +class RSAobj(pubkey.pubkey): + keydata = ['n', 'e', 'd', 'p', 'q', 'u'] + def _encrypt(self, plaintext, K=''): + if self.n<=plaintext: + raise error, 'Plaintext too large' + return (pow(plaintext, self.e, self.n),) + + def _decrypt(self, ciphertext): + if (not hasattr(self, 'd')): + raise error, 'Private key not available in this object' + if self.n<=ciphertext[0]: + raise error, 'Ciphertext too large' + return pow(ciphertext[0], self.d, self.n) + + def _sign(self, M, K=''): + return (self._decrypt((M,)),) + + def _verify(self, M, sig): + m2=self._encrypt(sig[0]) + if m2[0]==M: + return 1 + else: return 0 + + def _blind(self, M, B): + tmp = pow(B, self.e, self.n) + return (M * tmp) % self.n + + def _unblind(self, M, B): + tmp = pubkey.inverse(B, self.n) + return (M * tmp) % self.n + + def can_blind (self): + """can_blind() : bool + Return a Boolean value recording whether this algorithm can + blind data. (This does not imply that this + particular key object has the private information required to + to blind a message.) + """ + return 1 + + def size(self): + """size() : int + Return the maximum number of bits that can be handled by this key. + """ + return number.size(self.n) - 1 + + def has_private(self): + """has_private() : bool + Return a Boolean denoting whether the object contains + private components. + """ + if hasattr(self, 'd'): + return 1 + else: return 0 + + def publickey(self): + """publickey(): RSAobj + Return a new key object containing only the public key information. 
+ """ + return construct((self.n, self.e)) + +class RSAobj_c(pubkey.pubkey): + keydata = ['n', 'e', 'd', 'p', 'q', 'u'] + + def __init__(self, key): + self.key = key + + def __getattr__(self, attr): + if attr in self.keydata: + return getattr(self.key, attr) + else: + if self.__dict__.has_key(attr): + self.__dict__[attr] + else: + raise AttributeError, '%s instance has no attribute %s' % (self.__class__, attr) + + def __getstate__(self): + d = {} + for k in self.keydata: + if hasattr(self.key, k): + d[k]=getattr(self.key, k) + return d + + def __setstate__(self, state): + n,e = state['n'], state['e'] + if not state.has_key('d'): + self.key = _fastmath.rsa_construct(n,e) + else: + d = state['d'] + if not state.has_key('q'): + self.key = _fastmath.rsa_construct(n,e,d) + else: + p, q, u = state['p'], state['q'], state['u'] + self.key = _fastmath.rsa_construct(n,e,d,p,q,u) + + def _encrypt(self, plain, K): + return (self.key._encrypt(plain),) + + def _decrypt(self, cipher): + return self.key._decrypt(cipher[0]) + + def _sign(self, M, K): + return (self.key._sign(M),) + + def _verify(self, M, sig): + return self.key._verify(M, sig[0]) + + def _blind(self, M, B): + return self.key._blind(M, B) + + def _unblind(self, M, B): + return self.key._unblind(M, B) + + def can_blind (self): + return 1 + + def size(self): + return self.key.size() + + def has_private(self): + return self.key.has_private() + + def publickey(self): + return construct_c((self.key.n, self.key.e)) + +def generate_c(bits, randfunc, progress_func = None): + # Generate the prime factors of n + if progress_func: + progress_func('p,q\n') + + p = q = 1L + while number.size(p*q) < bits: + p = pubkey.getPrime(bits/2, randfunc) + q = pubkey.getPrime(bits/2, randfunc) + + # p shall be smaller than q (for calc of u) + if p > q: + (p, q)=(q, p) + if progress_func: + progress_func('u\n') + u=pubkey.inverse(p, q) + n=p*q + + e = 65537L + if progress_func: + progress_func('d\n') + d=pubkey.inverse(e, (p-1)*(q-1)) + key = _fastmath.rsa_construct(n,e,d,p,q,u) + obj = RSAobj_c(key) + +## print p +## print q +## print number.size(p), number.size(q), number.size(q*p), +## print obj.size(), bits + assert bits <= 1+obj.size(), "Generated key is too small" + return obj + + +def construct_c(tuple): + key = apply(_fastmath.rsa_construct, tuple) + return RSAobj_c(key) + +object = RSAobj + +generate_py = generate +construct_py = construct + +if _fastmath: + #print "using C version of RSA" + generate = generate_c + construct = construct_c + error = _fastmath.error diff --git a/gam/gdata/analytics/Crypto/PublicKey/__init__.py b/gam/gdata/analytics/Crypto/PublicKey/__init__.py new file mode 100755 index 00000000000..ad1c80ca14b --- /dev/null +++ b/gam/gdata/analytics/Crypto/PublicKey/__init__.py @@ -0,0 +1,17 @@ +"""Public-key encryption and signature algorithms. + +Public-key encryption uses two different keys, one for encryption and +one for decryption. The encryption key can be made public, and the +decryption key is kept private. Many public-key algorithms can also +be used to sign messages, and some can *only* be used for signatures. + +Crypto.PublicKey.DSA Digital Signature Algorithm. 
(Signature only) +Crypto.PublicKey.ElGamal (Signing and encryption) +Crypto.PublicKey.RSA (Signing, encryption, and blinding) +Crypto.PublicKey.qNEW (Signature only) + +""" + +__all__ = ['RSA', 'DSA', 'ElGamal', 'qNEW'] +__revision__ = "$Id: __init__.py,v 1.4 2003/04/03 20:27:13 akuchling Exp $" + diff --git a/gam/gdata/analytics/Crypto/PublicKey/pubkey.py b/gam/gdata/analytics/Crypto/PublicKey/pubkey.py new file mode 100755 index 00000000000..5c75c3e3ad7 --- /dev/null +++ b/gam/gdata/analytics/Crypto/PublicKey/pubkey.py @@ -0,0 +1,172 @@ +# +# pubkey.py : Internal functions for public key operations +# +# Part of the Python Cryptography Toolkit +# +# Distribute and use freely; there are no restrictions on further +# dissemination and usage except those imposed by the laws of your +# country of residence. This software is provided "as is" without +# warranty of fitness for use or suitability for any purpose, express +# or implied. Use at your own risk or not at all. +# + +__revision__ = "$Id: pubkey.py,v 1.11 2003/04/03 20:36:14 akuchling Exp $" + +import types, warnings +from Crypto.Util.number import * + +# Basic public key class +class pubkey: + def __init__(self): + pass + + def __getstate__(self): + """To keep key objects platform-independent, the key data is + converted to standard Python long integers before being + written out. It will then be reconverted as necessary on + restoration.""" + d=self.__dict__ + for key in self.keydata: + if d.has_key(key): d[key]=long(d[key]) + return d + + def __setstate__(self, d): + """On unpickling a key object, the key data is converted to the big +number representation being used, whether that is Python long +integers, MPZ objects, or whatever.""" + for key in self.keydata: + if d.has_key(key): self.__dict__[key]=bignum(d[key]) + + def encrypt(self, plaintext, K): + """encrypt(plaintext:string|long, K:string|long) : tuple + Encrypt the string or integer plaintext. K is a random + parameter required by some algorithms. + """ + wasString=0 + if isinstance(plaintext, types.StringType): + plaintext=bytes_to_long(plaintext) ; wasString=1 + if isinstance(K, types.StringType): + K=bytes_to_long(K) + ciphertext=self._encrypt(plaintext, K) + if wasString: return tuple(map(long_to_bytes, ciphertext)) + else: return ciphertext + + def decrypt(self, ciphertext): + """decrypt(ciphertext:tuple|string|long): string + Decrypt 'ciphertext' using this key. + """ + wasString=0 + if not isinstance(ciphertext, types.TupleType): + ciphertext=(ciphertext,) + if isinstance(ciphertext[0], types.StringType): + ciphertext=tuple(map(bytes_to_long, ciphertext)) ; wasString=1 + plaintext=self._decrypt(ciphertext) + if wasString: return long_to_bytes(plaintext) + else: return plaintext + + def sign(self, M, K): + """sign(M : string|long, K:string|long) : tuple + Return a tuple containing the signature for the message M. + K is a random parameter required by some algorithms. + """ + if (not self.has_private()): + raise error, 'Private key not available in this object' + if isinstance(M, types.StringType): M=bytes_to_long(M) + if isinstance(K, types.StringType): K=bytes_to_long(K) + return self._sign(M, K) + + def verify (self, M, signature): + """verify(M:string|long, signature:tuple) : bool + Verify that the signature is valid for the message M; + returns true if the signature checks out. 
+ """ + if isinstance(M, types.StringType): M=bytes_to_long(M) + return self._verify(M, signature) + + # alias to compensate for the old validate() name + def validate (self, M, signature): + warnings.warn("validate() method name is obsolete; use verify()", + DeprecationWarning) + + def blind(self, M, B): + """blind(M : string|long, B : string|long) : string|long + Blind message M using blinding factor B. + """ + wasString=0 + if isinstance(M, types.StringType): + M=bytes_to_long(M) ; wasString=1 + if isinstance(B, types.StringType): B=bytes_to_long(B) + blindedmessage=self._blind(M, B) + if wasString: return long_to_bytes(blindedmessage) + else: return blindedmessage + + def unblind(self, M, B): + """unblind(M : string|long, B : string|long) : string|long + Unblind message M using blinding factor B. + """ + wasString=0 + if isinstance(M, types.StringType): + M=bytes_to_long(M) ; wasString=1 + if isinstance(B, types.StringType): B=bytes_to_long(B) + unblindedmessage=self._unblind(M, B) + if wasString: return long_to_bytes(unblindedmessage) + else: return unblindedmessage + + + # The following methods will usually be left alone, except for + # signature-only algorithms. They both return Boolean values + # recording whether this key's algorithm can sign and encrypt. + def can_sign (self): + """can_sign() : bool + Return a Boolean value recording whether this algorithm can + generate signatures. (This does not imply that this + particular key object has the private information required to + to generate a signature.) + """ + return 1 + + def can_encrypt (self): + """can_encrypt() : bool + Return a Boolean value recording whether this algorithm can + encrypt data. (This does not imply that this + particular key object has the private information required to + to decrypt a message.) + """ + return 1 + + def can_blind (self): + """can_blind() : bool + Return a Boolean value recording whether this algorithm can + blind data. (This does not imply that this + particular key object has the private information required to + to blind a message.) + """ + return 0 + + # The following methods will certainly be overridden by + # subclasses. + + def size (self): + """size() : int + Return the maximum number of bits that can be handled by this key. + """ + return 0 + + def has_private (self): + """has_private() : bool + Return a Boolean denoting whether the object contains + private components. + """ + return 0 + + def publickey (self): + """publickey(): object + Return a new key object containing only the public information. + """ + return self + + def __eq__ (self, other): + """__eq__(other): 0, 1 + Compare us to other for equality. + """ + return self.__getstate__() == other.__getstate__() diff --git a/gam/gdata/analytics/Crypto/PublicKey/qNEW.py b/gam/gdata/analytics/Crypto/PublicKey/qNEW.py new file mode 100755 index 00000000000..65f8ae36b31 --- /dev/null +++ b/gam/gdata/analytics/Crypto/PublicKey/qNEW.py @@ -0,0 +1,170 @@ +# +# qNEW.py : The q-NEW signature algorithm. +# +# Part of the Python Cryptography Toolkit +# +# Distribute and use freely; there are no restrictions on further +# dissemination and usage except those imposed by the laws of your +# country of residence. This software is provided "as is" without +# warranty of fitness for use or suitability for any purpose, express +# or implied. Use at your own risk or not at all. 
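The pubkey base class above is what gives every key type its string-friendly encrypt/decrypt/sign/verify interface. A minimal RSA round trip through that interface might look like the sketch below; the key size is an illustrative toy value and SHA is used to reduce the message before signing:

from Crypto.PublicKey import RSA
from Crypto.Hash import SHA
from Crypto.Util.randpool import RandomPool

pool = RandomPool()
key = RSA.generate(512, pool.get_bytes)          # toy size; use far larger keys in practice

ciphertext = key.encrypt('attack at dawn', '')   # RSA ignores the K parameter
assert key.decrypt(ciphertext) == 'attack at dawn'

digest = SHA.new('attack at dawn').digest()      # sign a hash, not the raw message
signature = key.sign(digest, '')

pub = key.publickey()                            # public half only
assert pub.verify(digest, signature)
assert not pub.has_private()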
+# + +__revision__ = "$Id: qNEW.py,v 1.8 2003/04/04 15:13:35 akuchling Exp $" + +from Crypto.PublicKey import pubkey +from Crypto.Util.number import * +from Crypto.Hash import SHA + +class error (Exception): + pass + +HASHBITS = 160 # Size of SHA digests + +def generate(bits, randfunc, progress_func=None): + """generate(bits:int, randfunc:callable, progress_func:callable) + + Generate a qNEW key of length 'bits', using 'randfunc' to get + random data and 'progress_func', if present, to display + the progress of the key generation. + """ + obj=qNEWobj() + + # Generate prime numbers p and q. q is a 160-bit prime + # number. p is another prime number (the modulus) whose bit + # size is chosen by the caller, and is generated so that p-1 + # is a multiple of q. + # + # Note that only a single seed is used to + # generate p and q; if someone generates a key for you, you can + # use the seed to duplicate the key generation. This can + # protect you from someone generating values of p,q that have + # some special form that's easy to break. + if progress_func: + progress_func('p,q\n') + while (1): + obj.q = getPrime(160, randfunc) + # assert pow(2, 159L)1. g is kept; h can be discarded. + if progress_func: + progress_func('h,g\n') + while (1): + h=bytes_to_long(randfunc(bits)) % (p-1) + g=pow(h, power, p) + if 11: + break + obj.g=g + + # x is the private key information, and is + # just a random number between 0 and q. + # y=g**x mod p, and is part of the public information. + if progress_func: + progress_func('x,y\n') + while (1): + x=bytes_to_long(randfunc(20)) + if 0 < x < obj.q: + break + obj.x, obj.y=x, pow(g, x, p) + + return obj + +# Construct a qNEW object +def construct(tuple): + """construct(tuple:(long,long,long,long)|(long,long,long,long,long) + Construct a qNEW object from a 4- or 5-tuple of numbers. + """ + obj=qNEWobj() + if len(tuple) not in [4,5]: + raise error, 'argument for construct() wrong length' + for i in range(len(tuple)): + field = obj.keydata[i] + setattr(obj, field, tuple[i]) + return obj + +class qNEWobj(pubkey.pubkey): + keydata=['p', 'q', 'g', 'y', 'x'] + + def _sign(self, M, K=''): + if (self.q<=K): + raise error, 'K is greater than q' + if M<0: + raise error, 'Illegal value of M (<0)' + if M>=pow(2,161L): + raise error, 'Illegal value of M (too large)' + r=pow(self.g, K, self.p) % self.q + s=(K- (r*M*self.x % self.q)) % self.q + return (r,s) + def _verify(self, M, sig): + r, s = sig + if r<=0 or r>=self.q or s<=0 or s>=self.q: + return 0 + if M<0: + raise error, 'Illegal value of M (<0)' + if M<=0 or M>=pow(2,161L): + return 0 + v1 = pow(self.g, s, self.p) + v2 = pow(self.y, M*r, self.p) + v = ((v1*v2) % self.p) + v = v % self.q + if v==r: + return 1 + return 0 + + def size(self): + "Return the maximum number of bits that can be handled by this key." 
+ return 160 + + def has_private(self): + """Return a Boolean denoting whether the object contains + private components.""" + return hasattr(self, 'x') + + def can_sign(self): + """Return a Boolean value recording whether this algorithm can generate signatures.""" + return 1 + + def can_encrypt(self): + """Return a Boolean value recording whether this algorithm can encrypt data.""" + return 0 + + def publickey(self): + """Return a new key object containing only the public information.""" + return construct((self.p, self.q, self.g, self.y)) + +object = qNEWobj + diff --git a/gam/gdata/analytics/Crypto/Util/RFC1751.py b/gam/gdata/analytics/Crypto/Util/RFC1751.py new file mode 100755 index 00000000000..0a47952495b --- /dev/null +++ b/gam/gdata/analytics/Crypto/Util/RFC1751.py @@ -0,0 +1,342 @@ +#!/usr/local/bin/python +# rfc1751.py : Converts between 128-bit strings and a human-readable +# sequence of words, as defined in RFC1751: "A Convention for +# Human-Readable 128-bit Keys", by Daniel L. McDonald. + +__revision__ = "$Id: RFC1751.py,v 1.6 2003/04/04 15:15:10 akuchling Exp $" + + +import string, binascii + +binary={0:'0000', 1:'0001', 2:'0010', 3:'0011', 4:'0100', 5:'0101', + 6:'0110', 7:'0111', 8:'1000', 9:'1001', 10:'1010', 11:'1011', + 12:'1100', 13:'1101', 14:'1110', 15:'1111'} + +def _key2bin(s): + "Convert a key into a string of binary digits" + kl=map(lambda x: ord(x), s) + kl=map(lambda x: binary[x/16]+binary[x&15], kl) + return ''.join(kl) + +def _extract(key, start, length): + """Extract a bitstring from a string of binary digits, and return its + numeric value.""" + k=key[start:start+length] + return reduce(lambda x,y: x*2+ord(y)-48, k, 0) + +def key_to_english (key): + """key_to_english(key:string) : string + Transform an arbitrary key into a string containing English words. + The key length must be a multiple of 8. + """ + english='' + for index in range(0, len(key), 8): # Loop over 8-byte subkeys + subkey=key[index:index+8] + # Compute the parity of the key + skbin=_key2bin(subkey) ; p=0 + for i in range(0, 64, 2): p=p+_extract(skbin, i, 2) + # Append parity bits to the subkey + skbin=_key2bin(subkey+chr((p<<6) & 255)) + for i in range(0, 64, 11): + english=english+wordlist[_extract(skbin, i, 11)]+' ' + + return english[:-1] # Remove the trailing space + +def english_to_key (str): + """english_to_key(string):string + Transform a string into a corresponding key. + The string must contain words separated by whitespace; the number + of words must be a multiple of 6. 
+ """ + + L=string.split(string.upper(str)) ; key='' + for index in range(0, len(L), 6): + sublist=L[index:index+6] ; char=9*[0] ; bits=0 + for i in sublist: + index = wordlist.index(i) + shift = (8-(bits+11)%8) %8 + y = index << shift + cl, cc, cr = (y>>16), (y>>8)&0xff, y & 0xff + if (shift>5): + char[bits/8] = char[bits/8] | cl + char[bits/8+1] = char[bits/8+1] | cc + char[bits/8+2] = char[bits/8+2] | cr + elif shift>-3: + char[bits/8] = char[bits/8] | cc + char[bits/8+1] = char[bits/8+1] | cr + else: char[bits/8] = char[bits/8] | cr + bits=bits+11 + subkey=reduce(lambda x,y:x+chr(y), char, '') + + # Check the parity of the resulting key + skbin=_key2bin(subkey) + p=0 + for i in range(0, 64, 2): p=p+_extract(skbin, i, 2) + if (p&3) != _extract(skbin, 64, 2): + raise ValueError, "Parity error in resulting key" + key=key+subkey[0:8] + return key + +wordlist=[ "A", "ABE", "ACE", "ACT", "AD", "ADA", "ADD", + "AGO", "AID", "AIM", "AIR", "ALL", "ALP", "AM", "AMY", "AN", "ANA", + "AND", "ANN", "ANT", "ANY", "APE", "APS", "APT", "ARC", "ARE", "ARK", + "ARM", "ART", "AS", "ASH", "ASK", "AT", "ATE", "AUG", "AUK", "AVE", + "AWE", "AWK", "AWL", "AWN", "AX", "AYE", "BAD", "BAG", "BAH", "BAM", + "BAN", "BAR", "BAT", "BAY", "BE", "BED", "BEE", "BEG", "BEN", "BET", + "BEY", "BIB", "BID", "BIG", "BIN", "BIT", "BOB", "BOG", "BON", "BOO", + "BOP", "BOW", "BOY", "BUB", "BUD", "BUG", "BUM", "BUN", "BUS", "BUT", + "BUY", "BY", "BYE", "CAB", "CAL", "CAM", "CAN", "CAP", "CAR", "CAT", + "CAW", "COD", "COG", "COL", "CON", "COO", "COP", "COT", "COW", "COY", + "CRY", "CUB", "CUE", "CUP", "CUR", "CUT", "DAB", "DAD", "DAM", "DAN", + "DAR", "DAY", "DEE", "DEL", "DEN", "DES", "DEW", "DID", "DIE", "DIG", + "DIN", "DIP", "DO", "DOE", "DOG", "DON", "DOT", "DOW", "DRY", "DUB", + "DUD", "DUE", "DUG", "DUN", "EAR", "EAT", "ED", "EEL", "EGG", "EGO", + "ELI", "ELK", "ELM", "ELY", "EM", "END", "EST", "ETC", "EVA", "EVE", + "EWE", "EYE", "FAD", "FAN", "FAR", "FAT", "FAY", "FED", "FEE", "FEW", + "FIB", "FIG", "FIN", "FIR", "FIT", "FLO", "FLY", "FOE", "FOG", "FOR", + "FRY", "FUM", "FUN", "FUR", "GAB", "GAD", "GAG", "GAL", "GAM", "GAP", + "GAS", "GAY", "GEE", "GEL", "GEM", "GET", "GIG", "GIL", "GIN", "GO", + "GOT", "GUM", "GUN", "GUS", "GUT", "GUY", "GYM", "GYP", "HA", "HAD", + "HAL", "HAM", "HAN", "HAP", "HAS", "HAT", "HAW", "HAY", "HE", "HEM", + "HEN", "HER", "HEW", "HEY", "HI", "HID", "HIM", "HIP", "HIS", "HIT", + "HO", "HOB", "HOC", "HOE", "HOG", "HOP", "HOT", "HOW", "HUB", "HUE", + "HUG", "HUH", "HUM", "HUT", "I", "ICY", "IDA", "IF", "IKE", "ILL", + "INK", "INN", "IO", "ION", "IQ", "IRA", "IRE", "IRK", "IS", "IT", + "ITS", "IVY", "JAB", "JAG", "JAM", "JAN", "JAR", "JAW", "JAY", "JET", + "JIG", "JIM", "JO", "JOB", "JOE", "JOG", "JOT", "JOY", "JUG", "JUT", + "KAY", "KEG", "KEN", "KEY", "KID", "KIM", "KIN", "KIT", "LA", "LAB", + "LAC", "LAD", "LAG", "LAM", "LAP", "LAW", "LAY", "LEA", "LED", "LEE", + "LEG", "LEN", "LEO", "LET", "LEW", "LID", "LIE", "LIN", "LIP", "LIT", + "LO", "LOB", "LOG", "LOP", "LOS", "LOT", "LOU", "LOW", "LOY", "LUG", + "LYE", "MA", "MAC", "MAD", "MAE", "MAN", "MAO", "MAP", "MAT", "MAW", + "MAY", "ME", "MEG", "MEL", "MEN", "MET", "MEW", "MID", "MIN", "MIT", + "MOB", "MOD", "MOE", "MOO", "MOP", "MOS", "MOT", "MOW", "MUD", "MUG", + "MUM", "MY", "NAB", "NAG", "NAN", "NAP", "NAT", "NAY", "NE", "NED", + "NEE", "NET", "NEW", "NIB", "NIL", "NIP", "NIT", "NO", "NOB", "NOD", + "NON", "NOR", "NOT", "NOV", "NOW", "NU", "NUN", "NUT", "O", "OAF", + "OAK", "OAR", "OAT", "ODD", "ODE", "OF", "OFF", "OFT", "OH", "OIL", + 
"OK", "OLD", "ON", "ONE", "OR", "ORB", "ORE", "ORR", "OS", "OTT", + "OUR", "OUT", "OVA", "OW", "OWE", "OWL", "OWN", "OX", "PA", "PAD", + "PAL", "PAM", "PAN", "PAP", "PAR", "PAT", "PAW", "PAY", "PEA", "PEG", + "PEN", "PEP", "PER", "PET", "PEW", "PHI", "PI", "PIE", "PIN", "PIT", + "PLY", "PO", "POD", "POE", "POP", "POT", "POW", "PRO", "PRY", "PUB", + "PUG", "PUN", "PUP", "PUT", "QUO", "RAG", "RAM", "RAN", "RAP", "RAT", + "RAW", "RAY", "REB", "RED", "REP", "RET", "RIB", "RID", "RIG", "RIM", + "RIO", "RIP", "ROB", "ROD", "ROE", "RON", "ROT", "ROW", "ROY", "RUB", + "RUE", "RUG", "RUM", "RUN", "RYE", "SAC", "SAD", "SAG", "SAL", "SAM", + "SAN", "SAP", "SAT", "SAW", "SAY", "SEA", "SEC", "SEE", "SEN", "SET", + "SEW", "SHE", "SHY", "SIN", "SIP", "SIR", "SIS", "SIT", "SKI", "SKY", + "SLY", "SO", "SOB", "SOD", "SON", "SOP", "SOW", "SOY", "SPA", "SPY", + "SUB", "SUD", "SUE", "SUM", "SUN", "SUP", "TAB", "TAD", "TAG", "TAN", + "TAP", "TAR", "TEA", "TED", "TEE", "TEN", "THE", "THY", "TIC", "TIE", + "TIM", "TIN", "TIP", "TO", "TOE", "TOG", "TOM", "TON", "TOO", "TOP", + "TOW", "TOY", "TRY", "TUB", "TUG", "TUM", "TUN", "TWO", "UN", "UP", + "US", "USE", "VAN", "VAT", "VET", "VIE", "WAD", "WAG", "WAR", "WAS", + "WAY", "WE", "WEB", "WED", "WEE", "WET", "WHO", "WHY", "WIN", "WIT", + "WOK", "WON", "WOO", "WOW", "WRY", "WU", "YAM", "YAP", "YAW", "YE", + "YEA", "YES", "YET", "YOU", "ABED", "ABEL", "ABET", "ABLE", "ABUT", + "ACHE", "ACID", "ACME", "ACRE", "ACTA", "ACTS", "ADAM", "ADDS", + "ADEN", "AFAR", "AFRO", "AGEE", "AHEM", "AHOY", "AIDA", "AIDE", + "AIDS", "AIRY", "AJAR", "AKIN", "ALAN", "ALEC", "ALGA", "ALIA", + "ALLY", "ALMA", "ALOE", "ALSO", "ALTO", "ALUM", "ALVA", "AMEN", + "AMES", "AMID", "AMMO", "AMOK", "AMOS", "AMRA", "ANDY", "ANEW", + "ANNA", "ANNE", "ANTE", "ANTI", "AQUA", "ARAB", "ARCH", "AREA", + "ARGO", "ARID", "ARMY", "ARTS", "ARTY", "ASIA", "ASKS", "ATOM", + "AUNT", "AURA", "AUTO", "AVER", "AVID", "AVIS", "AVON", "AVOW", + "AWAY", "AWRY", "BABE", "BABY", "BACH", "BACK", "BADE", "BAIL", + "BAIT", "BAKE", "BALD", "BALE", "BALI", "BALK", "BALL", "BALM", + "BAND", "BANE", "BANG", "BANK", "BARB", "BARD", "BARE", "BARK", + "BARN", "BARR", "BASE", "BASH", "BASK", "BASS", "BATE", "BATH", + "BAWD", "BAWL", "BEAD", "BEAK", "BEAM", "BEAN", "BEAR", "BEAT", + "BEAU", "BECK", "BEEF", "BEEN", "BEER", + "BEET", "BELA", "BELL", "BELT", "BEND", "BENT", "BERG", "BERN", + "BERT", "BESS", "BEST", "BETA", "BETH", "BHOY", "BIAS", "BIDE", + "BIEN", "BILE", "BILK", "BILL", "BIND", "BING", "BIRD", "BITE", + "BITS", "BLAB", "BLAT", "BLED", "BLEW", "BLOB", "BLOC", "BLOT", + "BLOW", "BLUE", "BLUM", "BLUR", "BOAR", "BOAT", "BOCA", "BOCK", + "BODE", "BODY", "BOGY", "BOHR", "BOIL", "BOLD", "BOLO", "BOLT", + "BOMB", "BONA", "BOND", "BONE", "BONG", "BONN", "BONY", "BOOK", + "BOOM", "BOON", "BOOT", "BORE", "BORG", "BORN", "BOSE", "BOSS", + "BOTH", "BOUT", "BOWL", "BOYD", "BRAD", "BRAE", "BRAG", "BRAN", + "BRAY", "BRED", "BREW", "BRIG", "BRIM", "BROW", "BUCK", "BUDD", + "BUFF", "BULB", "BULK", "BULL", "BUNK", "BUNT", "BUOY", "BURG", + "BURL", "BURN", "BURR", "BURT", "BURY", "BUSH", "BUSS", "BUST", + "BUSY", "BYTE", "CADY", "CAFE", "CAGE", "CAIN", "CAKE", "CALF", + "CALL", "CALM", "CAME", "CANE", "CANT", "CARD", "CARE", "CARL", + "CARR", "CART", "CASE", "CASH", "CASK", "CAST", "CAVE", "CEIL", + "CELL", "CENT", "CERN", "CHAD", "CHAR", "CHAT", "CHAW", "CHEF", + "CHEN", "CHEW", "CHIC", "CHIN", "CHOU", "CHOW", "CHUB", "CHUG", + "CHUM", "CITE", "CITY", "CLAD", "CLAM", "CLAN", "CLAW", "CLAY", + "CLOD", "CLOG", "CLOT", "CLUB", "CLUE", 
"COAL", "COAT", "COCA", + "COCK", "COCO", "CODA", "CODE", "CODY", "COED", "COIL", "COIN", + "COKE", "COLA", "COLD", "COLT", "COMA", "COMB", "COME", "COOK", + "COOL", "COON", "COOT", "CORD", "CORE", "CORK", "CORN", "COST", + "COVE", "COWL", "CRAB", "CRAG", "CRAM", "CRAY", "CREW", "CRIB", + "CROW", "CRUD", "CUBA", "CUBE", "CUFF", "CULL", "CULT", "CUNY", + "CURB", "CURD", "CURE", "CURL", "CURT", "CUTS", "DADE", "DALE", + "DAME", "DANA", "DANE", "DANG", "DANK", "DARE", "DARK", "DARN", + "DART", "DASH", "DATA", "DATE", "DAVE", "DAVY", "DAWN", "DAYS", + "DEAD", "DEAF", "DEAL", "DEAN", "DEAR", "DEBT", "DECK", "DEED", + "DEEM", "DEER", "DEFT", "DEFY", "DELL", "DENT", "DENY", "DESK", + "DIAL", "DICE", "DIED", "DIET", "DIME", "DINE", "DING", "DINT", + "DIRE", "DIRT", "DISC", "DISH", "DISK", "DIVE", "DOCK", "DOES", + "DOLE", "DOLL", "DOLT", "DOME", "DONE", "DOOM", "DOOR", "DORA", + "DOSE", "DOTE", "DOUG", "DOUR", "DOVE", "DOWN", "DRAB", "DRAG", + "DRAM", "DRAW", "DREW", "DRUB", "DRUG", "DRUM", "DUAL", "DUCK", + "DUCT", "DUEL", "DUET", "DUKE", "DULL", "DUMB", "DUNE", "DUNK", + "DUSK", "DUST", "DUTY", "EACH", "EARL", "EARN", "EASE", "EAST", + "EASY", "EBEN", "ECHO", "EDDY", "EDEN", "EDGE", "EDGY", "EDIT", + "EDNA", "EGAN", "ELAN", "ELBA", "ELLA", "ELSE", "EMIL", "EMIT", + "EMMA", "ENDS", "ERIC", "EROS", "EVEN", "EVER", "EVIL", "EYED", + "FACE", "FACT", "FADE", "FAIL", "FAIN", "FAIR", "FAKE", "FALL", + "FAME", "FANG", "FARM", "FAST", "FATE", "FAWN", "FEAR", "FEAT", + "FEED", "FEEL", "FEET", "FELL", "FELT", "FEND", "FERN", "FEST", + "FEUD", "FIEF", "FIGS", "FILE", "FILL", "FILM", "FIND", "FINE", + "FINK", "FIRE", "FIRM", "FISH", "FISK", "FIST", "FITS", "FIVE", + "FLAG", "FLAK", "FLAM", "FLAT", "FLAW", "FLEA", "FLED", "FLEW", + "FLIT", "FLOC", "FLOG", "FLOW", "FLUB", "FLUE", "FOAL", "FOAM", + "FOGY", "FOIL", "FOLD", "FOLK", "FOND", "FONT", "FOOD", "FOOL", + "FOOT", "FORD", "FORE", "FORK", "FORM", "FORT", "FOSS", "FOUL", + "FOUR", "FOWL", "FRAU", "FRAY", "FRED", "FREE", "FRET", "FREY", + "FROG", "FROM", "FUEL", "FULL", "FUME", "FUND", "FUNK", "FURY", + "FUSE", "FUSS", "GAFF", "GAGE", "GAIL", "GAIN", "GAIT", "GALA", + "GALE", "GALL", "GALT", "GAME", "GANG", "GARB", "GARY", "GASH", + "GATE", "GAUL", "GAUR", "GAVE", "GAWK", "GEAR", "GELD", "GENE", + "GENT", "GERM", "GETS", "GIBE", "GIFT", "GILD", "GILL", "GILT", + "GINA", "GIRD", "GIRL", "GIST", "GIVE", "GLAD", "GLEE", "GLEN", + "GLIB", "GLOB", "GLOM", "GLOW", "GLUE", "GLUM", "GLUT", "GOAD", + "GOAL", "GOAT", "GOER", "GOES", "GOLD", "GOLF", "GONE", "GONG", + "GOOD", "GOOF", "GORE", "GORY", "GOSH", "GOUT", "GOWN", "GRAB", + "GRAD", "GRAY", "GREG", "GREW", "GREY", "GRID", "GRIM", "GRIN", + "GRIT", "GROW", "GRUB", "GULF", "GULL", "GUNK", "GURU", "GUSH", + "GUST", "GWEN", "GWYN", "HAAG", "HAAS", "HACK", "HAIL", "HAIR", + "HALE", "HALF", "HALL", "HALO", "HALT", "HAND", "HANG", "HANK", + "HANS", "HARD", "HARK", "HARM", "HART", "HASH", "HAST", "HATE", + "HATH", "HAUL", "HAVE", "HAWK", "HAYS", "HEAD", "HEAL", "HEAR", + "HEAT", "HEBE", "HECK", "HEED", "HEEL", "HEFT", "HELD", "HELL", + "HELM", "HERB", "HERD", "HERE", "HERO", "HERS", "HESS", "HEWN", + "HICK", "HIDE", "HIGH", "HIKE", "HILL", "HILT", "HIND", "HINT", + "HIRE", "HISS", "HIVE", "HOBO", "HOCK", "HOFF", "HOLD", "HOLE", + "HOLM", "HOLT", "HOME", "HONE", "HONK", "HOOD", "HOOF", "HOOK", + "HOOT", "HORN", "HOSE", "HOST", "HOUR", "HOVE", "HOWE", "HOWL", + "HOYT", "HUCK", "HUED", "HUFF", "HUGE", "HUGH", "HUGO", "HULK", + "HULL", "HUNK", "HUNT", "HURD", "HURL", "HURT", "HUSH", "HYDE", + "HYMN", "IBIS", "ICON", 
"IDEA", "IDLE", "IFFY", "INCA", "INCH", + "INTO", "IONS", "IOTA", "IOWA", "IRIS", "IRMA", "IRON", "ISLE", + "ITCH", "ITEM", "IVAN", "JACK", "JADE", "JAIL", "JAKE", "JANE", + "JAVA", "JEAN", "JEFF", "JERK", "JESS", "JEST", "JIBE", "JILL", + "JILT", "JIVE", "JOAN", "JOBS", "JOCK", "JOEL", "JOEY", "JOHN", + "JOIN", "JOKE", "JOLT", "JOVE", "JUDD", "JUDE", "JUDO", "JUDY", + "JUJU", "JUKE", "JULY", "JUNE", "JUNK", "JUNO", "JURY", "JUST", + "JUTE", "KAHN", "KALE", "KANE", "KANT", "KARL", "KATE", "KEEL", + "KEEN", "KENO", "KENT", "KERN", "KERR", "KEYS", "KICK", "KILL", + "KIND", "KING", "KIRK", "KISS", "KITE", "KLAN", "KNEE", "KNEW", + "KNIT", "KNOB", "KNOT", "KNOW", "KOCH", "KONG", "KUDO", "KURD", + "KURT", "KYLE", "LACE", "LACK", "LACY", "LADY", "LAID", "LAIN", + "LAIR", "LAKE", "LAMB", "LAME", "LAND", "LANE", "LANG", "LARD", + "LARK", "LASS", "LAST", "LATE", "LAUD", "LAVA", "LAWN", "LAWS", + "LAYS", "LEAD", "LEAF", "LEAK", "LEAN", "LEAR", "LEEK", "LEER", + "LEFT", "LEND", "LENS", "LENT", "LEON", "LESK", "LESS", "LEST", + "LETS", "LIAR", "LICE", "LICK", "LIED", "LIEN", "LIES", "LIEU", + "LIFE", "LIFT", "LIKE", "LILA", "LILT", "LILY", "LIMA", "LIMB", + "LIME", "LIND", "LINE", "LINK", "LINT", "LION", "LISA", "LIST", + "LIVE", "LOAD", "LOAF", "LOAM", "LOAN", "LOCK", "LOFT", "LOGE", + "LOIS", "LOLA", "LONE", "LONG", "LOOK", "LOON", "LOOT", "LORD", + "LORE", "LOSE", "LOSS", "LOST", "LOUD", "LOVE", "LOWE", "LUCK", + "LUCY", "LUGE", "LUKE", "LULU", "LUND", "LUNG", "LURA", "LURE", + "LURK", "LUSH", "LUST", "LYLE", "LYNN", "LYON", "LYRA", "MACE", + "MADE", "MAGI", "MAID", "MAIL", "MAIN", "MAKE", "MALE", "MALI", + "MALL", "MALT", "MANA", "MANN", "MANY", "MARC", "MARE", "MARK", + "MARS", "MART", "MARY", "MASH", "MASK", "MASS", "MAST", "MATE", + "MATH", "MAUL", "MAYO", "MEAD", "MEAL", "MEAN", "MEAT", "MEEK", + "MEET", "MELD", "MELT", "MEMO", "MEND", "MENU", "MERT", "MESH", + "MESS", "MICE", "MIKE", "MILD", "MILE", "MILK", "MILL", "MILT", + "MIMI", "MIND", "MINE", "MINI", "MINK", "MINT", "MIRE", "MISS", + "MIST", "MITE", "MITT", "MOAN", "MOAT", "MOCK", "MODE", "MOLD", + "MOLE", "MOLL", "MOLT", "MONA", "MONK", "MONT", "MOOD", "MOON", + "MOOR", "MOOT", "MORE", "MORN", "MORT", "MOSS", "MOST", "MOTH", + "MOVE", "MUCH", "MUCK", "MUDD", "MUFF", "MULE", "MULL", "MURK", + "MUSH", "MUST", "MUTE", "MUTT", "MYRA", "MYTH", "NAGY", "NAIL", + "NAIR", "NAME", "NARY", "NASH", "NAVE", "NAVY", "NEAL", "NEAR", + "NEAT", "NECK", "NEED", "NEIL", "NELL", "NEON", "NERO", "NESS", + "NEST", "NEWS", "NEWT", "NIBS", "NICE", "NICK", "NILE", "NINA", + "NINE", "NOAH", "NODE", "NOEL", "NOLL", "NONE", "NOOK", "NOON", + "NORM", "NOSE", "NOTE", "NOUN", "NOVA", "NUDE", "NULL", "NUMB", + "OATH", "OBEY", "OBOE", "ODIN", "OHIO", "OILY", "OINT", "OKAY", + "OLAF", "OLDY", "OLGA", "OLIN", "OMAN", "OMEN", "OMIT", "ONCE", + "ONES", "ONLY", "ONTO", "ONUS", "ORAL", "ORGY", "OSLO", "OTIS", + "OTTO", "OUCH", "OUST", "OUTS", "OVAL", "OVEN", "OVER", "OWLY", + "OWNS", "QUAD", "QUIT", "QUOD", "RACE", "RACK", "RACY", "RAFT", + "RAGE", "RAID", "RAIL", "RAIN", "RAKE", "RANK", "RANT", "RARE", + "RASH", "RATE", "RAVE", "RAYS", "READ", "REAL", "REAM", "REAR", + "RECK", "REED", "REEF", "REEK", "REEL", "REID", "REIN", "RENA", + "REND", "RENT", "REST", "RICE", "RICH", "RICK", "RIDE", "RIFT", + "RILL", "RIME", "RING", "RINK", "RISE", "RISK", "RITE", "ROAD", + "ROAM", "ROAR", "ROBE", "ROCK", "RODE", "ROIL", "ROLL", "ROME", + "ROOD", "ROOF", "ROOK", "ROOM", "ROOT", "ROSA", "ROSE", "ROSS", + "ROSY", "ROTH", "ROUT", "ROVE", "ROWE", "ROWS", "RUBE", "RUBY", + "RUDE", 
"RUDY", "RUIN", "RULE", "RUNG", "RUNS", "RUNT", "RUSE", + "RUSH", "RUSK", "RUSS", "RUST", "RUTH", "SACK", "SAFE", "SAGE", + "SAID", "SAIL", "SALE", "SALK", "SALT", "SAME", "SAND", "SANE", + "SANG", "SANK", "SARA", "SAUL", "SAVE", "SAYS", "SCAN", "SCAR", + "SCAT", "SCOT", "SEAL", "SEAM", "SEAR", "SEAT", "SEED", "SEEK", + "SEEM", "SEEN", "SEES", "SELF", "SELL", "SEND", "SENT", "SETS", + "SEWN", "SHAG", "SHAM", "SHAW", "SHAY", "SHED", "SHIM", "SHIN", + "SHOD", "SHOE", "SHOT", "SHOW", "SHUN", "SHUT", "SICK", "SIDE", + "SIFT", "SIGH", "SIGN", "SILK", "SILL", "SILO", "SILT", "SINE", + "SING", "SINK", "SIRE", "SITE", "SITS", "SITU", "SKAT", "SKEW", + "SKID", "SKIM", "SKIN", "SKIT", "SLAB", "SLAM", "SLAT", "SLAY", + "SLED", "SLEW", "SLID", "SLIM", "SLIT", "SLOB", "SLOG", "SLOT", + "SLOW", "SLUG", "SLUM", "SLUR", "SMOG", "SMUG", "SNAG", "SNOB", + "SNOW", "SNUB", "SNUG", "SOAK", "SOAR", "SOCK", "SODA", "SOFA", + "SOFT", "SOIL", "SOLD", "SOME", "SONG", "SOON", "SOOT", "SORE", + "SORT", "SOUL", "SOUR", "SOWN", "STAB", "STAG", "STAN", "STAR", + "STAY", "STEM", "STEW", "STIR", "STOW", "STUB", "STUN", "SUCH", + "SUDS", "SUIT", "SULK", "SUMS", "SUNG", "SUNK", "SURE", "SURF", + "SWAB", "SWAG", "SWAM", "SWAN", "SWAT", "SWAY", "SWIM", "SWUM", + "TACK", "TACT", "TAIL", "TAKE", "TALE", "TALK", "TALL", "TANK", + "TASK", "TATE", "TAUT", "TEAL", "TEAM", "TEAR", "TECH", "TEEM", + "TEEN", "TEET", "TELL", "TEND", "TENT", "TERM", "TERN", "TESS", + "TEST", "THAN", "THAT", "THEE", "THEM", "THEN", "THEY", "THIN", + "THIS", "THUD", "THUG", "TICK", "TIDE", "TIDY", "TIED", "TIER", + "TILE", "TILL", "TILT", "TIME", "TINA", "TINE", "TINT", "TINY", + "TIRE", "TOAD", "TOGO", "TOIL", "TOLD", "TOLL", "TONE", "TONG", + "TONY", "TOOK", "TOOL", "TOOT", "TORE", "TORN", "TOTE", "TOUR", + "TOUT", "TOWN", "TRAG", "TRAM", "TRAY", "TREE", "TREK", "TRIG", + "TRIM", "TRIO", "TROD", "TROT", "TROY", "TRUE", "TUBA", "TUBE", + "TUCK", "TUFT", "TUNA", "TUNE", "TUNG", "TURF", "TURN", "TUSK", + "TWIG", "TWIN", "TWIT", "ULAN", "UNIT", "URGE", "USED", "USER", + "USES", "UTAH", "VAIL", "VAIN", "VALE", "VARY", "VASE", "VAST", + "VEAL", "VEDA", "VEIL", "VEIN", "VEND", "VENT", "VERB", "VERY", + "VETO", "VICE", "VIEW", "VINE", "VISE", "VOID", "VOLT", "VOTE", + "WACK", "WADE", "WAGE", "WAIL", "WAIT", "WAKE", "WALE", "WALK", + "WALL", "WALT", "WAND", "WANE", "WANG", "WANT", "WARD", "WARM", + "WARN", "WART", "WASH", "WAST", "WATS", "WATT", "WAVE", "WAVY", + "WAYS", "WEAK", "WEAL", "WEAN", "WEAR", "WEED", "WEEK", "WEIR", + "WELD", "WELL", "WELT", "WENT", "WERE", "WERT", "WEST", "WHAM", + "WHAT", "WHEE", "WHEN", "WHET", "WHOA", "WHOM", "WICK", "WIFE", + "WILD", "WILL", "WIND", "WINE", "WING", "WINK", "WINO", "WIRE", + "WISE", "WISH", "WITH", "WOLF", "WONT", "WOOD", "WOOL", "WORD", + "WORE", "WORK", "WORM", "WORN", "WOVE", "WRIT", "WYNN", "YALE", + "YANG", "YANK", "YARD", "YARN", "YAWL", "YAWN", "YEAH", "YEAR", + "YELL", "YOGA", "YOKE" ] + +if __name__=='__main__': + data = [('EB33F77EE73D4053', 'TIDE ITCH SLOW REIN RULE MOT'), + ('CCAC2AED591056BE4F90FD441C534766', + 'RASH BUSH MILK LOOK BAD BRIM AVID GAFF BAIT ROT POD LOVE'), + ('EFF81F9BFBC65350920CDD7416DE8009', + 'TROD MUTE TAIL WARM CHAR KONG HAAG CITY BORE O TEAL AWL') + ] + + for key, words in data: + print 'Trying key', key + key=binascii.a2b_hex(key) + w2=key_to_english(key) + if w2!=words: + print 'key_to_english fails on key', repr(key), ', producing', str(w2) + k2=english_to_key(words) + if k2!=key: + print 'english_to_key fails on key', repr(key), ', producing', repr(k2) + + diff --git 
a/gam/gdata/analytics/Crypto/Util/__init__.py b/gam/gdata/analytics/Crypto/Util/__init__.py new file mode 100755 index 00000000000..0d147681815 --- /dev/null +++ b/gam/gdata/analytics/Crypto/Util/__init__.py @@ -0,0 +1,16 @@ +"""Miscellaneous modules + +Contains useful modules that don't belong into any of the +other Crypto.* subpackages. + +Crypto.Util.number Number-theoretic functions (primality testing, etc.) +Crypto.Util.randpool Random number generation +Crypto.Util.RFC1751 Converts between 128-bit keys and human-readable + strings of words. + +""" + +__all__ = ['randpool', 'RFC1751', 'number'] + +__revision__ = "$Id: __init__.py,v 1.4 2003/02/28 15:26:00 akuchling Exp $" + diff --git a/gam/gdata/analytics/Crypto/Util/number.py b/gam/gdata/analytics/Crypto/Util/number.py new file mode 100755 index 00000000000..9d50563e904 --- /dev/null +++ b/gam/gdata/analytics/Crypto/Util/number.py @@ -0,0 +1,201 @@ +# +# number.py : Number-theoretic functions +# +# Part of the Python Cryptography Toolkit +# +# Distribute and use freely; there are no restrictions on further +# dissemination and usage except those imposed by the laws of your +# country of residence. This software is provided "as is" without +# warranty of fitness for use or suitability for any purpose, express +# or implied. Use at your own risk or not at all. +# + +__revision__ = "$Id: number.py,v 1.13 2003/04/04 18:21:07 akuchling Exp $" + +bignum = long +try: + from Crypto.PublicKey import _fastmath +except ImportError: + _fastmath = None + +# Commented out and replaced with faster versions below +## def long2str(n): +## s='' +## while n>0: +## s=chr(n & 255)+s +## n=n>>8 +## return s + +## import types +## def str2long(s): +## if type(s)!=types.StringType: return s # Integers will be left alone +## return reduce(lambda x,y : x*256+ord(y), s, 0L) + +def size (N): + """size(N:long) : int + Returns the size of the number N in bits. + """ + bits, power = 0,1L + while N >= power: + bits += 1 + power = power << 1 + return bits + +def getRandomNumber(N, randfunc): + """getRandomNumber(N:int, randfunc:callable):long + Return an N-bit random number.""" + + S = randfunc(N/8) + odd_bits = N % 8 + if odd_bits != 0: + char = ord(randfunc(1)) >> (8-odd_bits) + S = chr(char) + S + value = bytes_to_long(S) + value |= 2L ** (N-1) # Ensure high bit is set + assert size(value) >= N + return value + +def GCD(x,y): + """GCD(x:long, y:long): long + Return the GCD of x and y. + """ + x = abs(x) ; y = abs(y) + while x > 0: + x, y = y % x, x + return y + +def inverse(u, v): + """inverse(u:long, u:long):long + Return the inverse of u mod v. + """ + u3, v3 = long(u), long(v) + u1, v1 = 1L, 0L + while v3 > 0: + q=u3 / v3 + u1, v1 = v1, u1 - v1*q + u3, v3 = v3, u3 - v3*q + while u1<0: + u1 = u1 + v + return u1 + +# Given a number of bits to generate and a random generation function, +# find a prime number of the appropriate size. + +def getPrime(N, randfunc): + """getPrime(N:int, randfunc:callable):long + Return a random N-bit prime number. + """ + + number=getRandomNumber(N, randfunc) | 1 + while (not isPrime(number)): + number=number+2 + return number + +def isPrime(N): + """isPrime(N:long):bool + Return true if N is prime. 
+ """ + if N == 1: + return 0 + if N in sieve: + return 1 + for i in sieve: + if (N % i)==0: + return 0 + + # Use the accelerator if available + if _fastmath is not None: + return _fastmath.isPrime(N) + + # Compute the highest bit that's set in N + N1 = N - 1L + n = 1L + while (n> 1L + + # Rabin-Miller test + for c in sieve[:7]: + a=long(c) ; d=1L ; t=n + while (t): # Iterate over the bits in N1 + x=(d*d) % N + if x==1L and d!=1L and d!=N1: + return 0 # Square root of 1 found + if N1 & t: + d=(x*a) % N + else: + d=x + t = t >> 1L + if d!=1L: + return 0 + return 1 + +# Small primes used for checking primality; these are all the primes +# less than 256. This should be enough to eliminate most of the odd +# numbers before needing to do a Rabin-Miller test at all. + +sieve=[2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, + 61, 67, 71, 73, 79, 83, 89, 97, 101, 103, 107, 109, 113, 127, + 131, 137, 139, 149, 151, 157, 163, 167, 173, 179, 181, 191, 193, + 197, 199, 211, 223, 227, 229, 233, 239, 241, 251] + +# Improved conversion functions contributed by Barry Warsaw, after +# careful benchmarking + +import struct + +def long_to_bytes(n, blocksize=0): + """long_to_bytes(n:long, blocksize:int) : string + Convert a long integer to a byte string. + + If optional blocksize is given and greater than zero, pad the front of the + byte string with binary zeros so that the length is a multiple of + blocksize. + """ + # after much testing, this algorithm was deemed to be the fastest + s = '' + n = long(n) + pack = struct.pack + while n > 0: + s = pack('>I', n & 0xffffffffL) + s + n = n >> 32 + # strip off leading zeros + for i in range(len(s)): + if s[i] != '\000': + break + else: + # only happens when n == 0 + s = '\000' + i = 0 + s = s[i:] + # add back some pad bytes. this could be done more efficiently w.r.t. the + # de-padding being done above, but sigh... + if blocksize > 0 and len(s) % blocksize: + s = (blocksize - len(s) % blocksize) * '\000' + s + return s + +def bytes_to_long(s): + """bytes_to_long(string) : long + Convert a byte string to a long integer. + + This is (essentially) the inverse of long_to_bytes(). + """ + acc = 0L + unpack = struct.unpack + length = len(s) + if length % 4: + extra = (4 - length % 4) + s = '\000' * extra + s + length = length + extra + for i in range(0, length, 4): + acc = (acc << 32) + unpack('>I', s[i:i+4])[0] + return acc + +# For backwards compatibility... +import warnings +def long2str(n, blocksize=0): + warnings.warn("long2str() has been replaced by long_to_bytes()") + return long_to_bytes(n, blocksize) +def str2long(s): + warnings.warn("str2long() has been replaced by bytes_to_long()") + return bytes_to_long(s) diff --git a/gam/gdata/analytics/Crypto/Util/randpool.py b/gam/gdata/analytics/Crypto/Util/randpool.py new file mode 100755 index 00000000000..467501c5442 --- /dev/null +++ b/gam/gdata/analytics/Crypto/Util/randpool.py @@ -0,0 +1,421 @@ +# +# randpool.py : Cryptographically strong random number generation +# +# Part of the Python Cryptography Toolkit +# +# Distribute and use freely; there are no restrictions on further +# dissemination and usage except those imposed by the laws of your +# country of residence. This software is provided "as is" without +# warranty of fitness for use or suitability for any purpose, express +# or implied. Use at your own risk or not at all. 
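The conversion and number-theory helpers above are the primitives the rest of the toolkit is built on. A few illustrative round trips; the values are arbitrary, and os.urandom merely stands in for any randfunc:

import os
from Crypto.Util import number

n = number.bytes_to_long('abc')          # == 0x616263
assert number.long_to_bytes(n) == 'abc'
assert number.size(n) == 23              # 0x616263 needs 23 bits

assert number.GCD(12, 20) == 4
d = number.inverse(7, 40)                # modular inverse: (7 * d) % 40 == 1
assert (7 * d) % 40 == 1

p = number.getPrime(128, os.urandom)     # random 128-bit prime
assert number.isPrime(p)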
+# + +__revision__ = "$Id: randpool.py,v 1.14 2004/05/06 12:56:54 akuchling Exp $" + +import time, array, types, warnings, os.path +from Crypto.Util.number import long_to_bytes +try: + import Crypto.Util.winrandom as winrandom +except: + winrandom = None + +STIRNUM = 3 + +class RandomPool: + """randpool.py : Cryptographically strong random number generation. + + The implementation here is similar to the one in PGP. To be + cryptographically strong, it must be difficult to determine the RNG's + output, whether in the future or the past. This is done by using + a cryptographic hash function to "stir" the random data. + + Entropy is gathered in the same fashion as PGP; the highest-resolution + clock around is read and the data is added to the random number pool. + A conservative estimate of the entropy is then kept. + + If a cryptographically secure random source is available (/dev/urandom + on many Unixes, Windows CryptGenRandom on most Windows), then use + it. + + Instance Attributes: + bits : int + Maximum size of pool in bits + bytes : int + Maximum size of pool in bytes + entropy : int + Number of bits of entropy in this pool. + + Methods: + add_event([s]) : add some entropy to the pool + get_bytes(int) : get N bytes of random data + randomize([N]) : get N bytes of randomness from external source + """ + + + def __init__(self, numbytes = 160, cipher=None, hash=None): + if hash is None: + from Crypto.Hash import SHA as hash + + # The cipher argument is vestigial; it was removed from + # version 1.1 so RandomPool would work even in the limited + # exportable subset of the code + if cipher is not None: + warnings.warn("'cipher' parameter is no longer used") + + if isinstance(hash, types.StringType): + # ugly hack to force __import__ to give us the end-path module + hash = __import__('Crypto.Hash.'+hash, + None, None, ['new']) + warnings.warn("'hash' parameter should now be a hashing module") + + self.bytes = numbytes + self.bits = self.bytes*8 + self.entropy = 0 + self._hash = hash + + # Construct an array to hold the random pool, + # initializing it to 0. + self._randpool = array.array('B', [0]*self.bytes) + + self._event1 = self._event2 = 0 + self._addPos = 0 + self._getPos = hash.digest_size + self._lastcounter=time.time() + self.__counter = 0 + + self._measureTickSize() # Estimate timer resolution + self._randomize() + + def _updateEntropyEstimate(self, nbits): + self.entropy += nbits + if self.entropy < 0: + self.entropy = 0 + elif self.entropy > self.bits: + self.entropy = self.bits + + def _randomize(self, N = 0, devname = '/dev/urandom'): + """_randomize(N, DEVNAME:device-filepath) + collects N bits of randomness from some entropy source (e.g., + /dev/urandom on Unixes that have it, Windows CryptoAPI + CryptGenRandom, etc) + DEVNAME is optional, defaults to /dev/urandom. You can change it + to /dev/random if you want to block till you get enough + entropy. + """ + data = '' + if N <= 0: + nbytes = int((self.bits - self.entropy)/8+0.5) + else: + nbytes = int(N/8+0.5) + if winrandom: + # Windows CryptGenRandom provides random data. + data = winrandom.new().get_bytes(nbytes) + elif os.path.exists(devname): + # Many OSes support a /dev/urandom device + try: + f=open(devname) + data=f.read(nbytes) + f.close() + except IOError, (num, msg): + if num!=2: raise IOError, (num, msg) + # If the file wasn't found, ignore the error + if data: + self._addBytes(data) + # Entropy estimate: The number of bits of + # data obtained from the random source. 
+ self._updateEntropyEstimate(8*len(data)) + self.stir_n() # Wash the random pool + + def randomize(self, N=0): + """randomize(N:int) + use the class entropy source to get some entropy data. + This is overridden by KeyboardRandomize(). + """ + return self._randomize(N) + + def stir_n(self, N = STIRNUM): + """stir_n(N) + stirs the random pool N times + """ + for i in xrange(N): + self.stir() + + def stir (self, s = ''): + """stir(s:string) + Mix up the randomness pool. This will call add_event() twice, + but out of paranoia the entropy attribute will not be + increased. The optional 's' parameter is a string that will + be hashed with the randomness pool. + """ + + entropy=self.entropy # Save inital entropy value + self.add_event() + + # Loop over the randomness pool: hash its contents + # along with a counter, and add the resulting digest + # back into the pool. + for i in range(self.bytes / self._hash.digest_size): + h = self._hash.new(self._randpool) + h.update(str(self.__counter) + str(i) + str(self._addPos) + s) + self._addBytes( h.digest() ) + self.__counter = (self.__counter + 1) & 0xFFFFffffL + + self._addPos, self._getPos = 0, self._hash.digest_size + self.add_event() + + # Restore the old value of the entropy. + self.entropy=entropy + + + def get_bytes (self, N): + """get_bytes(N:int) : string + Return N bytes of random data. + """ + + s='' + i, pool = self._getPos, self._randpool + h=self._hash.new() + dsize = self._hash.digest_size + num = N + while num > 0: + h.update( self._randpool[i:i+dsize] ) + s = s + h.digest() + num = num - dsize + i = (i + dsize) % self.bytes + if i>1, bits+1 + if bits>8: bits=8 + + self._event1, self._event2 = event, self._event1 + + self._updateEntropyEstimate(bits) + return bits + + # Private functions + def _noise(self): + # Adds a bit of noise to the random pool, by adding in the + # current time and CPU usage of this process. + # The difference from the previous call to _noise() is taken + # in an effort to estimate the entropy. + t=time.time() + delta = (t - self._lastcounter)/self._ticksize*1e6 + self._lastcounter = t + self._addBytes(long_to_bytes(long(1000*time.time()))) + self._addBytes(long_to_bytes(long(1000*time.clock()))) + self._addBytes(long_to_bytes(long(1000*time.time()))) + self._addBytes(long_to_bytes(long(delta))) + + # Reduce delta to a maximum of 8 bits so we don't add too much + # entropy as a result of this call. + delta=delta % 0xff + return int(delta) + + + def _measureTickSize(self): + # _measureTickSize() tries to estimate a rough average of the + # resolution of time that you can see from Python. It does + # this by measuring the time 100 times, computing the delay + # between measurements, and taking the median of the resulting + # list. 
(We also hash all the times and add them to the pool) + interval = [None] * 100 + h = self._hash.new(`(id(self),id(interval))`) + + # Compute 100 differences + t=time.time() + h.update(`t`) + i = 0 + j = 0 + while i < 100: + t2=time.time() + h.update(`(i,j,t2)`) + j += 1 + delta=int((t2-t)*1e6) + if delta: + interval[i] = delta + i += 1 + t=t2 + + # Take the median of the array of intervals + interval.sort() + self._ticksize=interval[len(interval)/2] + h.update(`(interval,self._ticksize)`) + # mix in the measurement times and wash the random pool + self.stir(h.digest()) + + def _addBytes(self, s): + "XOR the contents of the string S into the random pool" + i, pool = self._addPos, self._randpool + for j in range(0, len(s)): + pool[i]=pool[i] ^ ord(s[j]) + i=(i+1) % self.bytes + self._addPos = i + + # Deprecated method names: remove in PCT 2.1 or later. + def getBytes(self, N): + warnings.warn("getBytes() method replaced by get_bytes()", + DeprecationWarning) + return self.get_bytes(N) + + def addEvent (self, event, s=""): + warnings.warn("addEvent() method replaced by add_event()", + DeprecationWarning) + return self.add_event(s + str(event)) + +class PersistentRandomPool (RandomPool): + def __init__ (self, filename=None, *args, **kwargs): + RandomPool.__init__(self, *args, **kwargs) + self.filename = filename + if filename: + try: + # the time taken to open and read the file might have + # a little disk variability, modulo disk/kernel caching... + f=open(filename, 'rb') + self.add_event() + data = f.read() + self.add_event() + # mix in the data from the file and wash the random pool + self.stir(data) + f.close() + except IOError: + # Oh, well; the file doesn't exist or is unreadable, so + # we'll just ignore it. + pass + + def save(self): + if self.filename == "": + raise ValueError, "No filename set for this object" + # wash the random pool before save, provides some forward secrecy for + # old values of the pool. + self.stir_n() + f=open(self.filename, 'wb') + self.add_event() + f.write(self._randpool.tostring()) + f.close() + self.add_event() + # wash the pool again, provide some protection for future values + self.stir() + +# non-echoing Windows keyboard entry +_kb = 0 +if not _kb: + try: + import msvcrt + class KeyboardEntry: + def getch(self): + c = msvcrt.getch() + if c in ('\000', '\xe0'): + # function key + c += msvcrt.getch() + return c + def close(self, delay = 0): + if delay: + time.sleep(delay) + while msvcrt.kbhit(): + msvcrt.getch() + _kb = 1 + except: + pass + +# non-echoing Posix keyboard entry +if not _kb: + try: + import termios + class KeyboardEntry: + def __init__(self, fd = 0): + self._fd = fd + self._old = termios.tcgetattr(fd) + new = termios.tcgetattr(fd) + new[3]=new[3] & ~termios.ICANON & ~termios.ECHO + termios.tcsetattr(fd, termios.TCSANOW, new) + def getch(self): + termios.tcflush(0, termios.TCIFLUSH) # XXX Leave this in? + return os.read(self._fd, 1) + def close(self, delay = 0): + if delay: + time.sleep(delay) + termios.tcflush(self._fd, termios.TCIFLUSH) + termios.tcsetattr(self._fd, termios.TCSAFLUSH, self._old) + _kb = 1 + except: + pass + +class KeyboardRandomPool (PersistentRandomPool): + def __init__(self, *args, **kwargs): + PersistentRandomPool.__init__(self, *args, **kwargs) + + def randomize(self, N = 0): + "Adds N bits of entropy to random pool. If N is 0, fill up pool." + import os, string, time + if N <= 0: + bits = self.bits - self.entropy + else: + bits = N*8 + if bits == 0: + return + print bits,'bits of entropy are now required. 
Please type on the keyboard' + print 'until enough randomness has been accumulated.' + kb = KeyboardEntry() + s='' # We'll save the characters typed and add them to the pool. + hash = self._hash + e = 0 + try: + while e < bits: + temp=str(bits-e).rjust(6) + os.write(1, temp) + s=s+kb.getch() + e += self.add_event(s) + os.write(1, 6*chr(8)) + self.add_event(s+hash.new(s).digest() ) + finally: + kb.close() + print '\n\007 Enough. Please wait a moment.\n' + self.stir_n() # wash the random pool. + kb.close(4) + +if __name__ == '__main__': + pool = RandomPool() + print 'random pool entropy', pool.entropy, 'bits' + pool.add_event('something') + print `pool.get_bytes(100)` + import tempfile, os + fname = tempfile.mktemp() + pool = KeyboardRandomPool(filename=fname) + print 'keyboard random pool entropy', pool.entropy, 'bits' + pool.randomize() + print 'keyboard random pool entropy', pool.entropy, 'bits' + pool.randomize(128) + pool.save() + saved = open(fname, 'rb').read() + print 'saved', `saved` + print 'pool ', `pool._randpool.tostring()` + newpool = PersistentRandomPool(fname) + print 'persistent random pool entropy', pool.entropy, 'bits' + os.remove(fname) diff --git a/gam/gdata/analytics/Crypto/Util/test.py b/gam/gdata/analytics/Crypto/Util/test.py new file mode 100755 index 00000000000..7b23e9f5e47 --- /dev/null +++ b/gam/gdata/analytics/Crypto/Util/test.py @@ -0,0 +1,453 @@ +# +# test.py : Functions used for testing the modules +# +# Part of the Python Cryptography Toolkit +# +# Distribute and use freely; there are no restrictions on further +# dissemination and usage except those imposed by the laws of your +# country of residence. This software is provided "as is" without +# warranty of fitness for use or suitability for any purpose, express +# or implied. Use at your own risk or not at all. +# + +__revision__ = "$Id: test.py,v 1.16 2004/08/13 22:24:18 akuchling Exp $" + +import binascii +import string +import testdata + +from Crypto.Cipher import * + +def die(string): + import sys + print '***ERROR: ', string +# sys.exit(0) # Will default to continuing onward... 
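RandomPool above is the entropy pool the rest of the toolkit draws key material from. A compact usage sketch under the same Python 2 assumptions; the entropy figures are the pool's own estimates, not guarantees.

    from Crypto.Util.randpool import RandomPool

    pool = RandomPool()          # seeds itself from /dev/urandom or CryptGenRandom
    pool.add_event('app event')  # mix an application event into the pool
    pool.stir()                  # wash the pool before drawing from it
    key_material = pool.get_bytes(32)
    assert len(key_material) == 32
    print pool.entropy, 'estimated bits of entropy remain'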
+ +def print_timing (size, delta, verbose): + if verbose: + if delta == 0: + print 'Unable to measure time -- elapsed time too small' + else: + print '%.2f K/sec' % (size/delta) + +def exerciseBlockCipher(cipher, verbose): + import string, time + try: + ciph = eval(cipher) + except NameError: + print cipher, 'module not available' + return None + print cipher+ ':' + str='1' # Build 128K of test data + for i in xrange(0, 17): + str=str+str + if ciph.key_size==0: ciph.key_size=16 + password = 'password12345678Extra text for password'[0:ciph.key_size] + IV = 'Test IV Test IV Test IV Test'[0:ciph.block_size] + + if verbose: print ' ECB mode:', + obj=ciph.new(password, ciph.MODE_ECB) + if obj.block_size != ciph.block_size: + die("Module and cipher object block_size don't match") + + text='1234567812345678'[0:ciph.block_size] + c=obj.encrypt(text) + if (obj.decrypt(c)!=text): die('Error encrypting "'+text+'"') + text='KuchlingKuchling'[0:ciph.block_size] + c=obj.encrypt(text) + if (obj.decrypt(c)!=text): die('Error encrypting "'+text+'"') + text='NotTodayNotEver!'[0:ciph.block_size] + c=obj.encrypt(text) + if (obj.decrypt(c)!=text): die('Error encrypting "'+text+'"') + + start=time.time() + s=obj.encrypt(str) + s2=obj.decrypt(s) + end=time.time() + if (str!=s2): + die('Error in resulting plaintext from ECB mode') + print_timing(256, end-start, verbose) + del obj + + if verbose: print ' CFB mode:', + obj1=ciph.new(password, ciph.MODE_CFB, IV) + obj2=ciph.new(password, ciph.MODE_CFB, IV) + start=time.time() + ciphertext=obj1.encrypt(str[0:65536]) + plaintext=obj2.decrypt(ciphertext) + end=time.time() + if (plaintext!=str[0:65536]): + die('Error in resulting plaintext from CFB mode') + print_timing(64, end-start, verbose) + del obj1, obj2 + + if verbose: print ' CBC mode:', + obj1=ciph.new(password, ciph.MODE_CBC, IV) + obj2=ciph.new(password, ciph.MODE_CBC, IV) + start=time.time() + ciphertext=obj1.encrypt(str) + plaintext=obj2.decrypt(ciphertext) + end=time.time() + if (plaintext!=str): + die('Error in resulting plaintext from CBC mode') + print_timing(256, end-start, verbose) + del obj1, obj2 + + if verbose: print ' PGP mode:', + obj1=ciph.new(password, ciph.MODE_PGP, IV) + obj2=ciph.new(password, ciph.MODE_PGP, IV) + start=time.time() + ciphertext=obj1.encrypt(str) + plaintext=obj2.decrypt(ciphertext) + end=time.time() + if (plaintext!=str): + die('Error in resulting plaintext from PGP mode') + print_timing(256, end-start, verbose) + del obj1, obj2 + + if verbose: print ' OFB mode:', + obj1=ciph.new(password, ciph.MODE_OFB, IV) + obj2=ciph.new(password, ciph.MODE_OFB, IV) + start=time.time() + ciphertext=obj1.encrypt(str) + plaintext=obj2.decrypt(ciphertext) + end=time.time() + if (plaintext!=str): + die('Error in resulting plaintext from OFB mode') + print_timing(256, end-start, verbose) + del obj1, obj2 + + def counter(length=ciph.block_size): + return length * 'a' + + if verbose: print ' CTR mode:', + obj1=ciph.new(password, ciph.MODE_CTR, counter=counter) + obj2=ciph.new(password, ciph.MODE_CTR, counter=counter) + start=time.time() + ciphertext=obj1.encrypt(str) + plaintext=obj2.decrypt(ciphertext) + end=time.time() + if (plaintext!=str): + die('Error in resulting plaintext from CTR mode') + print_timing(256, end-start, verbose) + del obj1, obj2 + + # Test the IV handling + if verbose: print ' Testing IV handling' + obj1=ciph.new(password, ciph.MODE_CBC, IV) + plaintext='Test'*(ciph.block_size/4)*3 + ciphertext1=obj1.encrypt(plaintext) + obj1.IV=IV + ciphertext2=obj1.encrypt(plaintext) + if 
ciphertext1!=ciphertext2: + die('Error in setting IV') + + # Test keyword arguments + obj1=ciph.new(key=password) + obj1=ciph.new(password, mode=ciph.MODE_CBC) + obj1=ciph.new(mode=ciph.MODE_CBC, key=password) + obj1=ciph.new(IV=IV, mode=ciph.MODE_CBC, key=password) + + return ciph + +def exerciseStreamCipher(cipher, verbose): + import string, time + try: + ciph = eval(cipher) + except (NameError): + print cipher, 'module not available' + return None + print cipher + ':', + str='1' # Build 128K of test data + for i in xrange(0, 17): + str=str+str + key_size = ciph.key_size or 16 + password = 'password12345678Extra text for password'[0:key_size] + + obj1=ciph.new(password) + obj2=ciph.new(password) + if obj1.block_size != ciph.block_size: + die("Module and cipher object block_size don't match") + if obj1.key_size != ciph.key_size: + die("Module and cipher object key_size don't match") + + text='1234567812345678Python' + c=obj1.encrypt(text) + if (obj2.decrypt(c)!=text): die('Error encrypting "'+text+'"') + text='B1FF I2 A R3A11Y |<00L D00D!!!!!' + c=obj1.encrypt(text) + if (obj2.decrypt(c)!=text): die('Error encrypting "'+text+'"') + text='SpamSpamSpamSpamSpamSpamSpamSpamSpam' + c=obj1.encrypt(text) + if (obj2.decrypt(c)!=text): die('Error encrypting "'+text+'"') + + start=time.time() + s=obj1.encrypt(str) + str=obj2.decrypt(s) + end=time.time() + print_timing(256, end-start, verbose) + del obj1, obj2 + + return ciph + +def TestStreamModules(args=['arc4', 'XOR'], verbose=1): + import sys, string + args=map(string.lower, args) + + if 'arc4' in args: + # Test ARC4 stream cipher + arc4=exerciseStreamCipher('ARC4', verbose) + if (arc4!=None): + for entry in testdata.arc4: + key,plain,cipher=entry + key=binascii.a2b_hex(key) + plain=binascii.a2b_hex(plain) + cipher=binascii.a2b_hex(cipher) + obj=arc4.new(key) + ciphertext=obj.encrypt(plain) + if (ciphertext!=cipher): + die('ARC4 failed on entry '+`entry`) + + if 'xor' in args: + # Test XOR stream cipher + XOR=exerciseStreamCipher('XOR', verbose) + if (XOR!=None): + for entry in testdata.xor: + key,plain,cipher=entry + key=binascii.a2b_hex(key) + plain=binascii.a2b_hex(plain) + cipher=binascii.a2b_hex(cipher) + obj=XOR.new(key) + ciphertext=obj.encrypt(plain) + if (ciphertext!=cipher): + die('XOR failed on entry '+`entry`) + + +def TestBlockModules(args=['aes', 'arc2', 'des', 'blowfish', 'cast', 'des3', + 'idea', 'rc5'], + verbose=1): + import string + args=map(string.lower, args) + if 'aes' in args: + ciph=exerciseBlockCipher('AES', verbose) # AES + if (ciph!=None): + if verbose: print ' Verifying against test suite...' 
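Each mode check above reduces to the same cipher-object round trip. A minimal sketch, assuming the compiled AES extension shipped in this tree is importable via Crypto.Cipher; key, IV and block are all 16 bytes, as in the exercises above.

    from Crypto.Cipher import AES

    key = 'password12345678'     # 16-byte key
    iv  = 'Test IV Test IV '     # 16-byte IV for CBC mode
    enc = AES.new(key, AES.MODE_CBC, iv)
    dec = AES.new(key, AES.MODE_CBC, iv)   # separate object: CBC mode is stateful
    block = '1234567812345678'   # exactly one 16-byte block
    assert dec.decrypt(enc.encrypt(block)) == block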
+ for entry in testdata.aes: + key,plain,cipher=entry + key=binascii.a2b_hex(key) + plain=binascii.a2b_hex(plain) + cipher=binascii.a2b_hex(cipher) + obj=ciph.new(key, ciph.MODE_ECB) + ciphertext=obj.encrypt(plain) + if (ciphertext!=cipher): + die('AES failed on entry '+`entry`) + for i in ciphertext: + if verbose: print hex(ord(i)), + if verbose: print + + for entry in testdata.aes_modes: + mode, key, plain, cipher, kw = entry + key=binascii.a2b_hex(key) + plain=binascii.a2b_hex(plain) + cipher=binascii.a2b_hex(cipher) + obj=ciph.new(key, mode, **kw) + obj2=ciph.new(key, mode, **kw) + ciphertext=obj.encrypt(plain) + if (ciphertext!=cipher): + die('AES encrypt failed on entry '+`entry`) + for i in ciphertext: + if verbose: print hex(ord(i)), + if verbose: print + + plain2=obj2.decrypt(ciphertext) + if plain2!=plain: + die('AES decrypt failed on entry '+`entry`) + for i in plain2: + if verbose: print hex(ord(i)), + if verbose: print + + + if 'arc2' in args: + ciph=exerciseBlockCipher('ARC2', verbose) # Alleged RC2 + if (ciph!=None): + if verbose: print ' Verifying against test suite...' + for entry in testdata.arc2: + key,plain,cipher=entry + key=binascii.a2b_hex(key) + plain=binascii.a2b_hex(plain) + cipher=binascii.a2b_hex(cipher) + obj=ciph.new(key, ciph.MODE_ECB) + ciphertext=obj.encrypt(plain) + if (ciphertext!=cipher): + die('ARC2 failed on entry '+`entry`) + for i in ciphertext: + if verbose: print hex(ord(i)), + print + + if 'blowfish' in args: + ciph=exerciseBlockCipher('Blowfish',verbose)# Bruce Schneier's Blowfish cipher + if (ciph!=None): + if verbose: print ' Verifying against test suite...' + for entry in testdata.blowfish: + key,plain,cipher=entry + key=binascii.a2b_hex(key) + plain=binascii.a2b_hex(plain) + cipher=binascii.a2b_hex(cipher) + obj=ciph.new(key, ciph.MODE_ECB) + ciphertext=obj.encrypt(plain) + if (ciphertext!=cipher): + die('Blowfish failed on entry '+`entry`) + for i in ciphertext: + if verbose: print hex(ord(i)), + if verbose: print + + if 'cast' in args: + ciph=exerciseBlockCipher('CAST', verbose) # CAST-128 + if (ciph!=None): + if verbose: print ' Verifying against test suite...' + for entry in testdata.cast: + key,plain,cipher=entry + key=binascii.a2b_hex(key) + plain=binascii.a2b_hex(plain) + cipher=binascii.a2b_hex(cipher) + obj=ciph.new(key, ciph.MODE_ECB) + ciphertext=obj.encrypt(plain) + if (ciphertext!=cipher): + die('CAST failed on entry '+`entry`) + for i in ciphertext: + if verbose: print hex(ord(i)), + if verbose: print + + if 0: + # The full-maintenance test; it requires 4 million encryptions, + # and correspondingly is quite time-consuming. I've disabled + # it; it's faster to compile block/cast.c with -DTEST and run + # the resulting program. 
+ a = b = '\x01\x23\x45\x67\x12\x34\x56\x78\x23\x45\x67\x89\x34\x56\x78\x9A' + + for i in range(0, 1000000): + obj = cast.new(b, cast.MODE_ECB) + a = obj.encrypt(a[:8]) + obj.encrypt(a[-8:]) + obj = cast.new(a, cast.MODE_ECB) + b = obj.encrypt(b[:8]) + obj.encrypt(b[-8:]) + + if a!="\xEE\xA9\xD0\xA2\x49\xFD\x3B\xA6\xB3\x43\x6F\xB8\x9D\x6D\xCA\x92": + if verbose: print 'CAST test failed: value of "a" doesn\'t match' + if b!="\xB2\xC9\x5E\xB0\x0C\x31\xAD\x71\x80\xAC\x05\xB8\xE8\x3D\x69\x6E": + if verbose: print 'CAST test failed: value of "b" doesn\'t match' + + if 'des' in args: + # Test/benchmark DES block cipher + des=exerciseBlockCipher('DES', verbose) + if (des!=None): + # Various tests taken from the DES library packaged with Kerberos V4 + obj=des.new(binascii.a2b_hex('0123456789abcdef'), des.MODE_ECB) + s=obj.encrypt('Now is t') + if (s!=binascii.a2b_hex('3fa40e8a984d4815')): + die('DES fails test 1') + obj=des.new(binascii.a2b_hex('08192a3b4c5d6e7f'), des.MODE_ECB) + s=obj.encrypt('\000\000\000\000\000\000\000\000') + if (s!=binascii.a2b_hex('25ddac3e96176467')): + die('DES fails test 2') + obj=des.new(binascii.a2b_hex('0123456789abcdef'), des.MODE_CBC, + binascii.a2b_hex('1234567890abcdef')) + s=obj.encrypt("Now is the time for all ") + if (s!=binascii.a2b_hex('e5c7cdde872bf27c43e934008c389c0f683788499a7c05f6')): + die('DES fails test 3') + obj=des.new(binascii.a2b_hex('0123456789abcdef'), des.MODE_CBC, + binascii.a2b_hex('fedcba9876543210')) + s=obj.encrypt("7654321 Now is the time for \000\000\000\000") + if (s!=binascii.a2b_hex("ccd173ffab2039f4acd8aefddfd8a1eb468e91157888ba681d269397f7fe62b4")): + die('DES fails test 4') + del obj,s + + # R. Rivest's test: see http://theory.lcs.mit.edu/~rivest/destest.txt + x=binascii.a2b_hex('9474B8E8C73BCA7D') + for i in range(0, 16): + obj=des.new(x, des.MODE_ECB) + if (i & 1): x=obj.decrypt(x) + else: x=obj.encrypt(x) + if x!=binascii.a2b_hex('1B1A2DDB4C642438'): + die("DES fails Rivest's test") + + if verbose: print ' Verifying against test suite...' + for entry in testdata.des: + key,plain,cipher=entry + key=binascii.a2b_hex(key) + plain=binascii.a2b_hex(plain) + cipher=binascii.a2b_hex(cipher) + obj=des.new(key, des.MODE_ECB) + ciphertext=obj.encrypt(plain) + if (ciphertext!=cipher): + die('DES failed on entry '+`entry`) + for entry in testdata.des_cbc: + key, iv, plain, cipher=entry + key, iv, cipher=binascii.a2b_hex(key),binascii.a2b_hex(iv),binascii.a2b_hex(cipher) + obj1=des.new(key, des.MODE_CBC, iv) + obj2=des.new(key, des.MODE_CBC, iv) + ciphertext=obj1.encrypt(plain) + if (ciphertext!=cipher): + die('DES CBC mode failed on entry '+`entry`) + + if 'des3' in args: + ciph=exerciseBlockCipher('DES3', verbose) # Triple DES + if (ciph!=None): + if verbose: print ' Verifying against test suite...' 
+ for entry in testdata.des3: + key,plain,cipher=entry + key=binascii.a2b_hex(key) + plain=binascii.a2b_hex(plain) + cipher=binascii.a2b_hex(cipher) + obj=ciph.new(key, ciph.MODE_ECB) + ciphertext=obj.encrypt(plain) + if (ciphertext!=cipher): + die('DES3 failed on entry '+`entry`) + for i in ciphertext: + if verbose: print hex(ord(i)), + if verbose: print + for entry in testdata.des3_cbc: + key, iv, plain, cipher=entry + key, iv, cipher=binascii.a2b_hex(key),binascii.a2b_hex(iv),binascii.a2b_hex(cipher) + obj1=ciph.new(key, ciph.MODE_CBC, iv) + obj2=ciph.new(key, ciph.MODE_CBC, iv) + ciphertext=obj1.encrypt(plain) + if (ciphertext!=cipher): + die('DES3 CBC mode failed on entry '+`entry`) + + if 'idea' in args: + ciph=exerciseBlockCipher('IDEA', verbose) # IDEA block cipher + if (ciph!=None): + if verbose: print ' Verifying against test suite...' + for entry in testdata.idea: + key,plain,cipher=entry + key=binascii.a2b_hex(key) + plain=binascii.a2b_hex(plain) + cipher=binascii.a2b_hex(cipher) + obj=ciph.new(key, ciph.MODE_ECB) + ciphertext=obj.encrypt(plain) + if (ciphertext!=cipher): + die('IDEA failed on entry '+`entry`) + + if 'rc5' in args: + # Ronald Rivest's RC5 algorithm + ciph=exerciseBlockCipher('RC5', verbose) + if (ciph!=None): + if verbose: print ' Verifying against test suite...' + for entry in testdata.rc5: + key,plain,cipher=entry + key=binascii.a2b_hex(key) + plain=binascii.a2b_hex(plain) + cipher=binascii.a2b_hex(cipher) + obj=ciph.new(key[4:], ciph.MODE_ECB, + version =ord(key[0]), + word_size=ord(key[1]), + rounds =ord(key[2]) ) + ciphertext=obj.encrypt(plain) + if (ciphertext!=cipher): + die('RC5 failed on entry '+`entry`) + for i in ciphertext: + if verbose: print hex(ord(i)), + if verbose: print + + + diff --git a/gam/gdata/analytics/Crypto/__init__.py b/gam/gdata/analytics/Crypto/__init__.py new file mode 100755 index 00000000000..2324ae8c378 --- /dev/null +++ b/gam/gdata/analytics/Crypto/__init__.py @@ -0,0 +1,25 @@ + +"""Python Cryptography Toolkit + +A collection of cryptographic modules implementing various algorithms +and protocols. + +Subpackages: +Crypto.Cipher Secret-key encryption algorithms (AES, DES, ARC4) +Crypto.Hash Hashing algorithms (MD5, SHA, HMAC) +Crypto.Protocol Cryptographic protocols (Chaffing, all-or-nothing + transform). This package does not contain any + network protocols. +Crypto.PublicKey Public-key encryption and signature algorithms + (RSA, DSA) +Crypto.Util Various useful modules and functions (long-to-string + conversion, random number generation, number + theoretic functions) +""" + +__all__ = ['Cipher', 'Hash', 'Protocol', 'PublicKey', 'Util'] + +__version__ = '2.0.1' +__revision__ = "$Id: __init__.py,v 1.12 2005/06/14 01:20:22 akuchling Exp $" + + diff --git a/gam/gdata/analytics/Crypto/test.py b/gam/gdata/analytics/Crypto/test.py new file mode 100755 index 00000000000..c5ed0616216 --- /dev/null +++ b/gam/gdata/analytics/Crypto/test.py @@ -0,0 +1,38 @@ +# +# Test script for the Python Cryptography Toolkit. 
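TestStreamModules() and TestBlockModules() from Crypto/Util/test.py above can also be driven directly, which is essentially what the wrapper script below does. A minimal sketch, assuming the bundled testdata module is on the path:

    from Crypto.Util import test

    # Run only the DES and AES block-cipher checks, with timing output.
    test.TestBlockModules(['des', 'aes'], verbose=1)

    # Exercise the ARC4 and XOR stream ciphers quietly.
    test.TestStreamModules(['arc4', 'xor'], verbose=0)

Equivalently, the script that follows takes module names and a --quiet flag on the command line, e.g. python test.py --quiet des aes.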
+# + +__revision__ = "$Id: test.py,v 1.7 2002/07/11 14:31:19 akuchling Exp $" + +import os, sys + + +# Add the build directory to the front of sys.path +from distutils.util import get_platform +s = "build/lib.%s-%.3s" % (get_platform(), sys.version) +s = os.path.join(os.getcwd(), s) +sys.path.insert(0, s) +s = os.path.join(os.getcwd(), 'test') +sys.path.insert(0, s) + +from Crypto.Util import test + +args = sys.argv[1:] +quiet = "--quiet" in args +if quiet: args.remove('--quiet') + +if not quiet: + print '\nStream Ciphers:' + print '===============' + +if args: test.TestStreamModules(args, verbose= not quiet) +else: test.TestStreamModules(verbose= not quiet) + +if not quiet: + print '\nBlock Ciphers:' + print '==============' + +if args: test.TestBlockModules(args, verbose= not quiet) +else: test.TestBlockModules(verbose= not quiet) + + diff --git a/gam/gdata/analytics/__init__.py b/gam/gdata/analytics/__init__.py new file mode 100755 index 00000000000..634889b060f --- /dev/null +++ b/gam/gdata/analytics/__init__.py @@ -0,0 +1,835 @@ +#!/usr/bin/python +# +# Copyright (C) 2006 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +"""Contains classes representing Google Data elements. + + Extends Atom classes to add Google Data specific elements. +""" + + +__author__ = 'j.s@google.com (Jeffrey Scudder)' + +import os +import atom +try: + from xml.etree import cElementTree as ElementTree +except ImportError: + try: + import cElementTree as ElementTree + except ImportError: + try: + from xml.etree import ElementTree + except ImportError: + from elementtree import ElementTree + + +# XML namespaces which are often used in GData entities. +GDATA_NAMESPACE = 'http://schemas.google.com/g/2005' +GDATA_TEMPLATE = '{http://schemas.google.com/g/2005}%s' +OPENSEARCH_NAMESPACE = 'http://a9.com/-/spec/opensearchrss/1.0/' +OPENSEARCH_TEMPLATE = '{http://a9.com/-/spec/opensearchrss/1.0/}%s' +BATCH_NAMESPACE = 'http://schemas.google.com/gdata/batch' +GACL_NAMESPACE = 'http://schemas.google.com/acl/2007' +GACL_TEMPLATE = '{http://schemas.google.com/acl/2007}%s' + + +# Labels used in batch request entries to specify the desired CRUD operation. +BATCH_INSERT = 'insert' +BATCH_UPDATE = 'update' +BATCH_DELETE = 'delete' +BATCH_QUERY = 'query' + +class Error(Exception): + pass + + +class MissingRequiredParameters(Error): + pass + + +class MediaSource(object): + """GData Entries can refer to media sources, so this class provides a + place to store references to these objects along with some metadata. + """ + + def __init__(self, file_handle=None, content_type=None, content_length=None, + file_path=None, file_name=None): + """Creates an object of type MediaSource. + + Args: + file_handle: A file handle pointing to the file to be encapsulated in the + MediaSource + content_type: string The MIME type of the file. Required if a file_handle + is given. + content_length: int The size of the file. Required if a file_handle is + given. + file_path: string (optional) A full path name to the file. 
Used in + place of a file_handle. + file_name: string The name of the file without any path information. + Required if a file_handle is given. + """ + self.file_handle = file_handle + self.content_type = content_type + self.content_length = content_length + self.file_name = file_name + + if (file_handle is None and content_type is not None and + file_path is not None): + self.setFile(file_path, content_type) + + def setFile(self, file_name, content_type): + """A helper function which can create a file handle from a given filename + and set the content type and length all at once. + + Args: + file_name: string The path and file name to the file containing the media + content_type: string A MIME type representing the type of the media + """ + + self.file_handle = open(file_name, 'rb') + self.content_type = content_type + self.content_length = os.path.getsize(file_name) + self.file_name = os.path.basename(file_name) + + +class LinkFinder(atom.LinkFinder): + """An "interface" providing methods to find link elements + + GData Entry elements often contain multiple links which differ in the rel + attribute or content type. Often, developers are interested in a specific + type of link so this class provides methods to find specific classes of + links. + + This class is used as a mixin in GData entries. + """ + + def GetSelfLink(self): + """Find the first link with rel set to 'self' + + Returns: + An atom.Link or none if none of the links had rel equal to 'self' + """ + + for a_link in self.link: + if a_link.rel == 'self': + return a_link + return None + + def GetEditLink(self): + for a_link in self.link: + if a_link.rel == 'edit': + return a_link + return None + + def GetEditMediaLink(self): + """The Picasa API mistakenly returns media-edit rather than edit-media, but + this may change soon. + """ + for a_link in self.link: + if a_link.rel == 'edit-media': + return a_link + if a_link.rel == 'media-edit': + return a_link + return None + + def GetHtmlLink(self): + """Find the first link with rel of alternate and type of text/html + + Returns: + An atom.Link or None if no links matched + """ + for a_link in self.link: + if a_link.rel == 'alternate' and a_link.type == 'text/html': + return a_link + return None + + def GetPostLink(self): + """Get a link containing the POST target URL. + + The POST target URL is used to insert new entries. + + Returns: + A link object with a rel matching the POST type. 
+ """ + for a_link in self.link: + if a_link.rel == 'http://schemas.google.com/g/2005#post': + return a_link + return None + + def GetAclLink(self): + for a_link in self.link: + if a_link.rel == 'http://schemas.google.com/acl/2007#accessControlList': + return a_link + return None + + def GetFeedLink(self): + for a_link in self.link: + if a_link.rel == 'http://schemas.google.com/g/2005#feed': + return a_link + return None + + def GetNextLink(self): + for a_link in self.link: + if a_link.rel == 'next': + return a_link + return None + + def GetPrevLink(self): + for a_link in self.link: + if a_link.rel == 'previous': + return a_link + return None + + +class TotalResults(atom.AtomBase): + """opensearch:TotalResults for a GData feed""" + + _tag = 'totalResults' + _namespace = OPENSEARCH_NAMESPACE + _children = atom.AtomBase._children.copy() + _attributes = atom.AtomBase._attributes.copy() + + def __init__(self, extension_elements=None, + extension_attributes=None, text=None): + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + +def TotalResultsFromString(xml_string): + return atom.CreateClassFromXMLString(TotalResults, xml_string) + + +class StartIndex(atom.AtomBase): + """The opensearch:startIndex element in GData feed""" + + _tag = 'startIndex' + _namespace = OPENSEARCH_NAMESPACE + _children = atom.AtomBase._children.copy() + _attributes = atom.AtomBase._attributes.copy() + + def __init__(self, extension_elements=None, + extension_attributes=None, text=None): + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + +def StartIndexFromString(xml_string): + return atom.CreateClassFromXMLString(StartIndex, xml_string) + + +class ItemsPerPage(atom.AtomBase): + """The opensearch:itemsPerPage element in GData feed""" + + _tag = 'itemsPerPage' + _namespace = OPENSEARCH_NAMESPACE + _children = atom.AtomBase._children.copy() + _attributes = atom.AtomBase._attributes.copy() + + def __init__(self, extension_elements=None, + extension_attributes=None, text=None): + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + +def ItemsPerPageFromString(xml_string): + return atom.CreateClassFromXMLString(ItemsPerPage, xml_string) + + +class ExtendedProperty(atom.AtomBase): + """The Google Data extendedProperty element. + + Used to store arbitrary key-value information specific to your + application. The value can either be a text string stored as an XML + attribute (.value), or an XML node (XmlBlob) as a child element. + + This element is used in the Google Calendar data API and the Google + Contacts data API. + """ + + _tag = 'extendedProperty' + _namespace = GDATA_NAMESPACE + _children = atom.AtomBase._children.copy() + _attributes = atom.AtomBase._attributes.copy() + _attributes['name'] = 'name' + _attributes['value'] = 'value' + + def __init__(self, name=None, value=None, extension_elements=None, + extension_attributes=None, text=None): + self.name = name + self.value = value + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + def GetXmlBlobExtensionElement(self): + """Returns the XML blob as an atom.ExtensionElement. + + Returns: + An atom.ExtensionElement representing the blob's XML, or None if no + blob was set. 
+ """ + if len(self.extension_elements) < 1: + return None + else: + return self.extension_elements[0] + + def GetXmlBlobString(self): + """Returns the XML blob as a string. + + Returns: + A string containing the blob's XML, or None if no blob was set. + """ + blob = self.GetXmlBlobExtensionElement() + if blob: + return blob.ToString() + return None + + def SetXmlBlob(self, blob): + """Sets the contents of the extendedProperty to XML as a child node. + + Since the extendedProperty is only allowed one child element as an XML + blob, setting the XML blob will erase any preexisting extension elements + in this object. + + Args: + blob: str, ElementTree Element or atom.ExtensionElement representing + the XML blob stored in the extendedProperty. + """ + # Erase any existing extension_elements, clears the child nodes from the + # extendedProperty. + self.extension_elements = [] + if isinstance(blob, atom.ExtensionElement): + self.extension_elements.append(blob) + elif ElementTree.iselement(blob): + self.extension_elements.append(atom._ExtensionElementFromElementTree( + blob)) + else: + self.extension_elements.append(atom.ExtensionElementFromString(blob)) + + +def ExtendedPropertyFromString(xml_string): + return atom.CreateClassFromXMLString(ExtendedProperty, xml_string) + + +class GDataEntry(atom.Entry, LinkFinder): + """Extends Atom Entry to provide data processing""" + + _tag = atom.Entry._tag + _namespace = atom.Entry._namespace + _children = atom.Entry._children.copy() + _attributes = atom.Entry._attributes.copy() + + def __GetId(self): + return self.__id + + # This method was created to strip the unwanted whitespace from the id's + # text node. + def __SetId(self, id): + self.__id = id + if id is not None and id.text is not None: + self.__id.text = id.text.strip() + + id = property(__GetId, __SetId) + + def IsMedia(self): + """Determines whether or not an entry is a GData Media entry. + """ + if (self.GetEditMediaLink()): + return True + else: + return False + + def GetMediaURL(self): + """Returns the URL to the media content, if the entry is a media entry. + Otherwise returns None. + """ + if not self.IsMedia(): + return None + else: + return self.content.src + + +def GDataEntryFromString(xml_string): + """Creates a new GDataEntry instance given a string of XML.""" + return atom.CreateClassFromXMLString(GDataEntry, xml_string) + + +class GDataFeed(atom.Feed, LinkFinder): + """A Feed from a GData service""" + + _tag = 'feed' + _namespace = atom.ATOM_NAMESPACE + _children = atom.Feed._children.copy() + _attributes = atom.Feed._attributes.copy() + _children['{%s}totalResults' % OPENSEARCH_NAMESPACE] = ('total_results', + TotalResults) + _children['{%s}startIndex' % OPENSEARCH_NAMESPACE] = ('start_index', + StartIndex) + _children['{%s}itemsPerPage' % OPENSEARCH_NAMESPACE] = ('items_per_page', + ItemsPerPage) + # Add a conversion rule for atom:entry to make it into a GData + # Entry. 
+ _children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry', [GDataEntry]) + + def __GetId(self): + return self.__id + + def __SetId(self, id): + self.__id = id + if id is not None and id.text is not None: + self.__id.text = id.text.strip() + + id = property(__GetId, __SetId) + + def __GetGenerator(self): + return self.__generator + + def __SetGenerator(self, generator): + self.__generator = generator + if generator is not None: + self.__generator.text = generator.text.strip() + + generator = property(__GetGenerator, __SetGenerator) + + def __init__(self, author=None, category=None, contributor=None, + generator=None, icon=None, atom_id=None, link=None, logo=None, + rights=None, subtitle=None, title=None, updated=None, entry=None, + total_results=None, start_index=None, items_per_page=None, + extension_elements=None, extension_attributes=None, text=None): + """Constructor for Source + + Args: + author: list (optional) A list of Author instances which belong to this + class. + category: list (optional) A list of Category instances + contributor: list (optional) A list on Contributor instances + generator: Generator (optional) + icon: Icon (optional) + id: Id (optional) The entry's Id element + link: list (optional) A list of Link instances + logo: Logo (optional) + rights: Rights (optional) The entry's Rights element + subtitle: Subtitle (optional) The entry's subtitle element + title: Title (optional) the entry's title element + updated: Updated (optional) the entry's updated element + entry: list (optional) A list of the Entry instances contained in the + feed. + text: String (optional) The text contents of the element. This is the + contents of the Entry's XML text node. + (Example: This is the text) + extension_elements: list (optional) A list of ExtensionElement instances + which are children of this element. + extension_attributes: dict (optional) A dictionary of strings which are + the values for additional XML attributes of this element. 
+ """ + + self.author = author or [] + self.category = category or [] + self.contributor = contributor or [] + self.generator = generator + self.icon = icon + self.id = atom_id + self.link = link or [] + self.logo = logo + self.rights = rights + self.subtitle = subtitle + self.title = title + self.updated = updated + self.entry = entry or [] + self.total_results = total_results + self.start_index = start_index + self.items_per_page = items_per_page + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + +def GDataFeedFromString(xml_string): + return atom.CreateClassFromXMLString(GDataFeed, xml_string) + + +class BatchId(atom.AtomBase): + _tag = 'id' + _namespace = BATCH_NAMESPACE + _children = atom.AtomBase._children.copy() + _attributes = atom.AtomBase._attributes.copy() + + +def BatchIdFromString(xml_string): + return atom.CreateClassFromXMLString(BatchId, xml_string) + + +class BatchOperation(atom.AtomBase): + _tag = 'operation' + _namespace = BATCH_NAMESPACE + _children = atom.AtomBase._children.copy() + _attributes = atom.AtomBase._attributes.copy() + _attributes['type'] = 'type' + + def __init__(self, op_type=None, extension_elements=None, + extension_attributes=None, + text=None): + self.type = op_type + atom.AtomBase.__init__(self, + extension_elements=extension_elements, + extension_attributes=extension_attributes, + text=text) + + +def BatchOperationFromString(xml_string): + return atom.CreateClassFromXMLString(BatchOperation, xml_string) + + +class BatchStatus(atom.AtomBase): + """The batch:status element present in a batch response entry. + + A status element contains the code (HTTP response code) and + reason as elements. In a single request these fields would + be part of the HTTP response, but in a batch request each + Entry operation has a corresponding Entry in the response + feed which includes status information. + + See http://code.google.com/apis/gdata/batch.html#Handling_Errors + """ + + _tag = 'status' + _namespace = BATCH_NAMESPACE + _children = atom.AtomBase._children.copy() + _attributes = atom.AtomBase._attributes.copy() + _attributes['code'] = 'code' + _attributes['reason'] = 'reason' + _attributes['content-type'] = 'content_type' + + def __init__(self, code=None, reason=None, content_type=None, + extension_elements=None, extension_attributes=None, text=None): + self.code = code + self.reason = reason + self.content_type = content_type + atom.AtomBase.__init__(self, extension_elements=extension_elements, + extension_attributes=extension_attributes, + text=text) + + +def BatchStatusFromString(xml_string): + return atom.CreateClassFromXMLString(BatchStatus, xml_string) + + +class BatchEntry(GDataEntry): + """An atom:entry for use in batch requests. + + The BatchEntry contains additional members to specify the operation to be + performed on this entry and a batch ID so that the server can reference + individual operations in the response feed. 
For more information, see: + http://code.google.com/apis/gdata/batch.html + """ + + _tag = GDataEntry._tag + _namespace = GDataEntry._namespace + _children = GDataEntry._children.copy() + _children['{%s}operation' % BATCH_NAMESPACE] = ('batch_operation', BatchOperation) + _children['{%s}id' % BATCH_NAMESPACE] = ('batch_id', BatchId) + _children['{%s}status' % BATCH_NAMESPACE] = ('batch_status', BatchStatus) + _attributes = GDataEntry._attributes.copy() + + def __init__(self, author=None, category=None, content=None, + contributor=None, atom_id=None, link=None, published=None, rights=None, + source=None, summary=None, control=None, title=None, updated=None, + batch_operation=None, batch_id=None, batch_status=None, + extension_elements=None, extension_attributes=None, text=None): + self.batch_operation = batch_operation + self.batch_id = batch_id + self.batch_status = batch_status + GDataEntry.__init__(self, author=author, category=category, + content=content, contributor=contributor, atom_id=atom_id, link=link, + published=published, rights=rights, source=source, summary=summary, + control=control, title=title, updated=updated, + extension_elements=extension_elements, + extension_attributes=extension_attributes, text=text) + + +def BatchEntryFromString(xml_string): + return atom.CreateClassFromXMLString(BatchEntry, xml_string) + + +class BatchInterrupted(atom.AtomBase): + """The batch:interrupted element sent if batch request was interrupted. + + Only appears in a feed if some of the batch entries could not be processed. + See: http://code.google.com/apis/gdata/batch.html#Handling_Errors + """ + + _tag = 'interrupted' + _namespace = BATCH_NAMESPACE + _children = atom.AtomBase._children.copy() + _attributes = atom.AtomBase._attributes.copy() + _attributes['reason'] = 'reason' + _attributes['success'] = 'success' + _attributes['failures'] = 'failures' + _attributes['parsed'] = 'parsed' + + def __init__(self, reason=None, success=None, failures=None, parsed=None, + extension_elements=None, extension_attributes=None, text=None): + self.reason = reason + self.success = success + self.failures = failures + self.parsed = parsed + atom.AtomBase.__init__(self, extension_elements=extension_elements, + extension_attributes=extension_attributes, + text=text) + + +def BatchInterruptedFromString(xml_string): + return atom.CreateClassFromXMLString(BatchInterrupted, xml_string) + + +class BatchFeed(GDataFeed): + """A feed containing a list of batch request entries.""" + + _tag = GDataFeed._tag + _namespace = GDataFeed._namespace + _children = GDataFeed._children.copy() + _attributes = GDataFeed._attributes.copy() + _children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry', [BatchEntry]) + _children['{%s}interrupted' % BATCH_NAMESPACE] = ('interrupted', BatchInterrupted) + + def __init__(self, author=None, category=None, contributor=None, + generator=None, icon=None, atom_id=None, link=None, logo=None, + rights=None, subtitle=None, title=None, updated=None, entry=None, + total_results=None, start_index=None, items_per_page=None, + interrupted=None, + extension_elements=None, extension_attributes=None, text=None): + self.interrupted = interrupted + GDataFeed.__init__(self, author=author, category=category, + contributor=contributor, generator=generator, + icon=icon, atom_id=atom_id, link=link, + logo=logo, rights=rights, subtitle=subtitle, + title=title, updated=updated, entry=entry, + total_results=total_results, start_index=start_index, + items_per_page=items_per_page, + 
extension_elements=extension_elements, + extension_attributes=extension_attributes, + text=text) + + def AddBatchEntry(self, entry=None, id_url_string=None, + batch_id_string=None, operation_string=None): + """Logic for populating members of a BatchEntry and adding to the feed. + + + If the entry is not a BatchEntry, it is converted to a BatchEntry so + that the batch specific members will be present. + + The id_url_string can be used in place of an entry if the batch operation + applies to a URL. For example query and delete operations require just + the URL of an entry, no body is sent in the HTTP request. If an + id_url_string is sent instead of an entry, a BatchEntry is created and + added to the feed. + + This method also assigns the desired batch id to the entry so that it + can be referenced in the server's response. If the batch_id_string is + None, this method will assign a batch_id to be the index at which this + entry will be in the feed's entry list. + + Args: + entry: BatchEntry, atom.Entry, or another Entry flavor (optional) The + entry which will be sent to the server as part of the batch request. + The item must have a valid atom id so that the server knows which + entry this request references. + id_url_string: str (optional) The URL of the entry to be acted on. You + can find this URL in the text member of the atom id for an entry. + If an entry is not sent, this id will be used to construct a new + BatchEntry which will be added to the request feed. + batch_id_string: str (optional) The batch ID to be used to reference + this batch operation in the results feed. If this parameter is None, + the current length of the feed's entry array will be used as a + count. Note that batch_ids should either always be specified or + never, mixing could potentially result in duplicate batch ids. + operation_string: str (optional) The desired batch operation which will + set the batch_operation.type member of the entry. Options are + 'insert', 'update', 'delete', and 'query' + + Raises: + MissingRequiredParameters: Raised if neither an id_ url_string nor an + entry are provided in the request. + + Returns: + The added entry. + """ + if entry is None and id_url_string is None: + raise MissingRequiredParameters('supply either an entry or URL string') + if entry is None and id_url_string is not None: + entry = BatchEntry(atom_id=atom.Id(text=id_url_string)) + # TODO: handle cases in which the entry lacks batch_... members. + #if not isinstance(entry, BatchEntry): + # Convert the entry to a batch entry. + if batch_id_string is not None: + entry.batch_id = BatchId(text=batch_id_string) + elif entry.batch_id is None or entry.batch_id.text is None: + entry.batch_id = BatchId(text=str(len(self.entry))) + if operation_string is not None: + entry.batch_operation = BatchOperation(op_type=operation_string) + self.entry.append(entry) + return entry + + def AddInsert(self, entry, batch_id_string=None): + """Add an insert request to the operations in this batch request feed. + + If the entry doesn't yet have an operation or a batch id, these will + be set to the insert operation and a batch_id specified as a parameter. + + Args: + entry: BatchEntry The entry which will be sent in the batch feed as an + insert request. + batch_id_string: str (optional) The batch ID to be used to reference + this batch operation in the results feed. If this parameter is None, + the current length of the feed's entry array will be used as a + count. 
Note that batch_ids should either always be specified or + never, mixing could potentially result in duplicate batch ids. + """ + entry = self.AddBatchEntry(entry=entry, batch_id_string=batch_id_string, + operation_string=BATCH_INSERT) + + def AddUpdate(self, entry, batch_id_string=None): + """Add an update request to the list of batch operations in this feed. + + Sets the operation type of the entry to insert if it is not already set + and assigns the desired batch id to the entry so that it can be + referenced in the server's response. + + Args: + entry: BatchEntry The entry which will be sent to the server as an + update (HTTP PUT) request. The item must have a valid atom id + so that the server knows which entry to replace. + batch_id_string: str (optional) The batch ID to be used to reference + this batch operation in the results feed. If this parameter is None, + the current length of the feed's entry array will be used as a + count. See also comments for AddInsert. + """ + entry = self.AddBatchEntry(entry=entry, batch_id_string=batch_id_string, + operation_string=BATCH_UPDATE) + + def AddDelete(self, url_string=None, entry=None, batch_id_string=None): + """Adds a delete request to the batch request feed. + + This method takes either the url_string which is the atom id of the item + to be deleted, or the entry itself. The atom id of the entry must be + present so that the server knows which entry should be deleted. + + Args: + url_string: str (optional) The URL of the entry to be deleted. You can + find this URL in the text member of the atom id for an entry. + entry: BatchEntry (optional) The entry to be deleted. + batch_id_string: str (optional) + + Raises: + MissingRequiredParameters: Raised if neither a url_string nor an entry + are provided in the request. + """ + entry = self.AddBatchEntry(entry=entry, id_url_string=url_string, + batch_id_string=batch_id_string, + operation_string=BATCH_DELETE) + + def AddQuery(self, url_string=None, entry=None, batch_id_string=None): + """Adds a query request to the batch request feed. + + This method takes either the url_string which is the query URL + whose results will be added to the result feed. The query URL will + be encapsulated in a BatchEntry, and you may pass in the BatchEntry + with a query URL instead of sending a url_string. + + Args: + url_string: str (optional) + entry: BatchEntry (optional) + batch_id_string: str (optional) + + Raises: + MissingRequiredParameters + """ + entry = self.AddBatchEntry(entry=entry, id_url_string=url_string, + batch_id_string=batch_id_string, + operation_string=BATCH_QUERY) + + def GetBatchLink(self): + for link in self.link: + if link.rel == 'http://schemas.google.com/g/2005#batch': + return link + return None + + +def BatchFeedFromString(xml_string): + return atom.CreateClassFromXMLString(BatchFeed, xml_string) + + +class EntryLink(atom.AtomBase): + """The gd:entryLink element""" + + _tag = 'entryLink' + _namespace = GDATA_NAMESPACE + _children = atom.AtomBase._children.copy() + _attributes = atom.AtomBase._attributes.copy() + # The entry used to be an atom.Entry, now it is a GDataEntry. 
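The batch helpers above (AddInsert, AddUpdate, AddDelete, AddQuery) all funnel through AddBatchEntry. A sketch of assembling a batch request feed, assuming the standard gdata/atom package layout; the URL here is a placeholder, not a real entry id.

    import atom
    import gdata

    request_feed = gdata.BatchFeed()

    # Delete by URL: only the entry's atom id is needed, no body is sent.
    request_feed.AddDelete(url_string='http://example.com/feeds/entries/1')

    # Insert a new entry; the batch id defaults to the entry's position in
    # the feed when batch_id_string is omitted.
    new_entry = gdata.GDataEntry(title=atom.Title(text='new item'))
    request_feed.AddInsert(new_entry, batch_id_string='insert-1')

    assert len(request_feed.entry) == 2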
+ _children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry', GDataEntry) + _attributes['rel'] = 'rel' + _attributes['readOnly'] = 'read_only' + _attributes['href'] = 'href' + + def __init__(self, href=None, read_only=None, rel=None, + entry=None, extension_elements=None, + extension_attributes=None, text=None): + self.href = href + self.read_only = read_only + self.rel = rel + self.entry = entry + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + +def EntryLinkFromString(xml_string): + return atom.CreateClassFromXMLString(EntryLink, xml_string) + + +class FeedLink(atom.AtomBase): + """The gd:feedLink element""" + + _tag = 'feedLink' + _namespace = GDATA_NAMESPACE + _children = atom.AtomBase._children.copy() + _attributes = atom.AtomBase._attributes.copy() + _children['{%s}feed' % atom.ATOM_NAMESPACE] = ('feed', GDataFeed) + _attributes['rel'] = 'rel' + _attributes['readOnly'] = 'read_only' + _attributes['countHint'] = 'count_hint' + _attributes['href'] = 'href' + + def __init__(self, count_hint=None, href=None, read_only=None, rel=None, + feed=None, extension_elements=None, extension_attributes=None, + text=None): + self.count_hint = count_hint + self.href = href + self.read_only = read_only + self.rel = rel + self.feed = feed + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + +def FeedLinkFromString(xml_string): + return atom.CreateClassFromXMLString(FeedLink, xml_string) diff --git a/gam/gdata/analytics/__init__.pyc b/gam/gdata/analytics/__init__.pyc new file mode 100755 index 0000000000000000000000000000000000000000..255d57757343c771eb68a3e5ae870062db474b22 GIT binary patch literal 32064 zcmeHwYiu1^p4Yjyw9nCDkt$wH_}dewCu8PcG-mmcGy|i1$IE}g3wAxAc6KHAOu3d2ZV$WA65v7 z{eZ-W<@fvjPd)C9orj$aAaGL0Ri{p!=l}ixpX1^GesJGQC!&Rh^FMp=`y#IN*x%5Ys9VZam%Bw_Ko(s8%g%M_xHKpZ@F5n zws|=JL_sOQ~w&Al5VYuv?q z-SR#ckGbXjE*^Kw2VA_*Egy98ez*LXix0TvLoPn(meHBV-0~3@A9BkREEchgWY}Flr57iQ7pGi& zN-rMQi^q!>Pq_FAJ}jSh@##v5PZlLU>Eb6<;z<{Os!}2-N_@)2PpL%U;-@Pmo+(N^ z<>E6c@wAJdRf%U@f<9t0pDPNTaq(Fdde+60D)d}l==q}1Sr>m=g(hA6nMzydiW1Me z_`FJd+Qn0q64OPA&$##nl{n|(&sIu&t|)Qd#V@MFl#5?-$r(-B%f;(y7r&y{FSz*C zN(G-UN_^JEUr>q9xp>AUXH~%$i`Or@_<~-)1(O4B3_dP%pJq)9t#wpW8UI-S*461)~=QINEf zb!4Yg`}PIF+^sBW$7wK|b=HedvrZ7jaoCe6J!prFyeDs}rmHOz}G(C%gzFhPLc5(XrG{-XC=_c(oiF%E-UYbsScIt)c z^fQ%8?^T#}KU>zlRZn9~_Vl$VYpkVD-?e1aXmM1prq5M6utkk|IkcUPq9|57>1Ddz z=%Temw=vcix1^;XY$Ux+-OcrSonAp_xf+*nrDI4Ekb#;D7bkJ=CWnw@%&ynl(Rxy^ zXZ!T9-ibF_c(hW;a=J^IUY_};t3wsR6Y$M?z1ePN_4-tIGi0Yf zk1J&>qqXc5e!bmHQ$T2#e6Z2%CGnN0hi0>+m)>u0!@Z~_WS4i91G8@)zS^oC>5+)d-g=8l|7ObHlS{;#=@!Ei>%d`M~caEeFRz)vH2Lzp<%&U#It zY9nd`U00G`0L)uwqd}@x({s29vbCm1>!91~+-S!5Bm(YsTTvt7Cu!E{C00YyZs0BQ zvNa%o(7AR!X=Etg>VOK|Y-Veya6LgIS;Xa;LNyHzi5Ede^rViP4{Pp2Kml%sbc4rX z-QaPLZtw_}Rder;x!#{(G2V~s9-}VqK_gIpgavL7xmsI1eNljx_5cW%dkH`nU2OZ_ z2A!24+w3MmW%@;{f?&3{n$Doh{8?!N9@nCF+)8GGSv>~!7<4;LBYQRv_73;gk^u7) zb<>R&nv8>Ho73d$vs!&xY1+STfWCri*4eWe%y^Gvb!iQ~oPT@H`pbSJgB_a+a)1t+ zD?wE4>{iVMSm4#>jik->;7{IKD`~H0YcuF+rpD6dx3Q@A)eh?1vZDVk3S~ zlbvqX?6jlS^PJX=Rx9A^faJgfmLKBPRCBmxD+ZhBY2VoM;i*wpSTxF+R11~C30^xJ z89-yx`p|5zbb9L%yO&pG^QCm#DIb{Lg-JqtMhve2k+G-BDrb+OK>tEyq!LBvGotfC zr=4UZL1~h`j+M8%h3H9ZC1fKZ8DDr1N%=jeG-LxM(Rv_S>S?sm);ZLZvNCsgyrzFc zCu;|4jbY^E*JYNON0JG*fuF!eVmJ`nZiZY28FEDEinoUUqj~0DV!O{ABT-wNWYy`j zU?#J{TGHx*e64J>8(f^==329{Cc4%TdItsuIfAbAAS*-$hyo@qz2FG=9AGAeNPg}O zx%k4c0mQ5oL|M=QsoRb&+yPqxVb;Oby91VmZ;+md(?ns>-~m+ZL;Tqa+V=@s{944z 
z!*N+t23qHO*A^xTLjoO&ASHn4eJ9l$_Etc(nq)+I10M3YYY{{`OqRh8S0FsXgsU$& z+aWxKhf%?w#3h-)jZXlMj@JxRN)D?&)$4JmQLl%G*yK?r{I+aYcMk;2A!Uq0&i1B2y)(}D>b%LNx2_PjS9(ak8;IF{M# zptkV_puAT%P~paO11)Y`H~Y!4*|YRr!gxD)8dNyxt$^u1?HMuQ1*~+f0|^@ZWd$7# zTIg{hB~!#MdHrUQ4;l_`l7n5azR}8>T>vIOPCfR-%@v9S#-zaATc|6_z<920WMBfl zoJk1Yrh<1^!Rq@LSFK`zImt{hC&;cdi>!NKL97pg*M1vRPH zLdB(0Q~^udAcKg|kzl=f3**3ln+z~g3O%s}Q022+mH;d|+vv3m zfdVTWK?|CCiwlV9ZG*Ctd+WDHXIr1ODCT+ukXfS^7v9&ZN{NmOqK!gjegV1))K6MM z8A(1uT8ZmXoh3@_c_73Q4|^lY7BMJx`xe?U1m1hNcBnR1YwT)v9f&-aWV6(j=+3$&GFgx0}i z)Lq}}u8+CvhAXA9$5Eq%I!hx=sm=<7gsJr`T2uvCvcb@CatTVgOgr_)TC)}Rz#%!EjZSw{ z5_-KHW~nuotgwnRQx@dnhIyu0m)D$Nv*=ayTac zm{Xhp{IfmU7PHNQ8gsTqjcI|~m2flU5{NiM?gGRd-VNgp=Gp3hSaIP~MQn{(aS>i) zR$N5bm=*UBB_ite=^|Jl5uh*fVPFtB7Dxq_Hw&7sIa8zFwPx0fdYi%f$>#YRQEP*? z)WZ8z*imWy+k~$cx-gvIEm)A`oC?^qebu0m2#!}6jNpHaWY5T5ZEDL*r0oFgmbDZX z-pMId@ccO%BT)(7!xkAQ!Q{K^txK)Wwdc*YDw(5iD6+zOiK|fJy&c)l$K?pllsM`I z^a9J|C7VkzVdx1}&xp?Bya<)`jKCoIEiQq%7klq#C2cHrA!#|ODS^?>IvtVyv~HIy22X?j^+_5ixix5*Ye~M0F`WZ zlqdP`P$_N=p=B|8vJ{A07$_tzc+BVuhxr%5Sg<%+E};C>A|K5F`5WxCH!}iIw9?nG zz*=8#l@I2ahn~})CaiX3@eDv z+y=eV-g$5vAcUpxkq6&K$!(|HqvtkLoFngmS#8@1-C`=e8S71SLQ2F^#|zq;MM9D} z2#7qacc;@yWG$!D+MvVM8yycoY1mh5usvQ}Pfg7)lW&|(!3+;$5VJDNMyEb%s{&=!>;YRn!eK*x8s8s5TXS9HX=z+*f^=JI>71Ujn* zQKS#iJt`YQ_jTAa9?ug7r^fKzJk`Q(sbWpUX+goke455shy@j@WG<~P9NTe7tdv&V zgwIVOhgpEE`e2tSbX10zkp^s4#=y7YZTcz-?i4eqtpUV5p?QHgK>^Jj9ETcx6mlFOU|L5mDcGZFXt4G!xvfQWO&H!>=(3k(49Q-2*AoA<^}Zaivt>X_fMJ z0HF0gnmqQxO0{2flB-c5IEMumbytgk$(d|m_i|>jq!po|lr`Vr?2`aF)A?npnuFcD zlrq`#OSsaHkW{5ihCM4}g(ft+SBSR1mqqQi9R5CMoHjP*z~OK40unkuM$Ueh9H8Wx z>?fgP@V12yK)ST>0ZEsXKT;Y*1fzNAth!55g5a|d@|lWTV-4IIyk|6Ou5)k_ML?PBKuUxXb@eTBKj^clm5N_M>2$b4D}(` zR?L1r7_zSL$CwaRBzZ|XOkq*#mPo9q7Yq?)cA|#cz<|i}F@rQrIDlOCL@AdQ6HXK< zgoEB;GPXrDUl9tvf)cMW_;kJ5MnvMxHH3CRY*#D*qtN8~+N4CC+f(IAwd|Z|4x^Ir zX(W=0mwFo}6~7*}Qj2&NYv0X)=TkL{$SptM;RCre#K9H=rZ>JAaEhG=7%-p3$}7VL zKK02$a0(KV$uQRGD$WB<&e5^TTzUZgG*|&;*aW=8DZ2@hUpUSwTIQ1@sQ4D!BUC7y zh+-7JSBym3YwRA__oHTjy5WxrudHtjy-6iVHh zEr66{2MQp4Ied@RY2<$jcjfZ$faqUE1z+Q+j^TzTg#!roqw#lq&-k80L*ruwu^}v8 z#)ZY-hTx~P+7%(V^6!?!hhASXd>^S7<)Sa#@{o$A%o)VVGiJe#?Vee1RAnDZHt_+6 zEH*JDu|j3&$7|pN{qRe1i6>CB$|V5#=4e4EGBydp<1*-Lv7D-}1?GtT+^dqn00D!< z0e9j8*g$eH&jY_v9;$mR&)e(s#$0m90r33i<7#maYuKmIGT`EV-C*a>gi8*)~hE238R#X69p z-i1xHsH$pq6r&JLQH%yF(Nh7}Sl+ubQHpYgrySlx;i2I-Q0Gq+Ah?CrbP)v}Ci85e zzLI%{@8YrKo1!Nr6?rhJaT!(p6o(>RdK-4RjeK$(-&Ut~HEBcf!3Kog6K<^i}^n9g_V0tC+nzS+X)tlDF=t5JCRRpNMHxWkbbE;?}*VR&EiF) zrG`b92Zq%hFI8|@%ep7?M~G)Im7UEWVL5{$jx~YJ%xr0Rges04JnnVfujeI^33VI! zP{_x1gGYpJAqS89b%VzPy20Z?*ZrCNTVy__{DbbpL%PA^Vcp>IsBZ9h#C891{>c&d z;e_k{^W27q&5nif!$bKCxziYlxPawd>qqXojvRy)1TyESmxmnoa@wNZ3YIPTp#*p?yH z7rUd10>Wm30z@jq^bIx9(o8Uqbc=T}-o%xB*`7r&Vc)|WuYCKeS_rMU+h7@baYrv( zoz>1v@D|caJHEnRb9F6CXM#|_EARXI;otW6CEd7|H8bwtS@d_?eV&&b7>xgViIr48 zEre9efXWPfXWQj4aaXl(u5b3L-}-U`Uu$mfHixWY&SYdSKwHhs~nDMR9e@?7mi?*~-jB#B_4* zR#^qJtoN~iPXO06n?VVb$m`xellf%uNQO^X!fim~VpFk}0+b0k9OdLBz*hChtV z15Z9xW#fg<1Yj698v}HLIk3Ocmjh}+{j0t z?9w}L4;bq-{ge;~^gPA8SfB&FR^de^RBkf*HmcH(>3@T{Qvbr3Us3H-Y|X$|1+l;5 zWmQT`l&DE?qPf? 
zA!S>A(t9>I>!uU3+1=}e9DSkhl`7%CDCI+e?j;6$&zA}zUYkNlEw`!=!Wf9Hxrw|M zrV8FRBcbI~jD*mZSc}Qqv>I2SjPWos*j<`kss@bR-4eX3Rs1fR{8t<`!JNikhJEd3 zeOTe{*M}J3C%;^se_vT03c@Pz4m3ac$Q#Jf5A80OaHZ6+cD6_mEfd*u89Pw^N0cOD zE47b{gjOOw%`s{}JcFzT8|TI86l721rqC*2A~b6mgk-bkHxPDpjbq>==Yg~8ZaMREb; ze>9O?-$O(HfxRNx$bohO9*R-S;BJ7I74IH+PokE7*eqfAzg1wEhDDWI%%W_gjPK<# z3V_KX6VMSE&Uqw2Zg#rg_};BTU_hqeq1c097;IV`_>brpcsmpZOaM;5kKR&Jyhb`W z1H%itnTt*EOz1ceNL@i>C19dWyU@eDMOR+PSx88IpLxMGcg8aQ;{uw@Rr5hAF4d0r$T_86$8q-MM==5xeF*o85_OyDw9dS+LqDdtXX%+YF)vWLR_i)}gT7zMbymIP57uL{ zp5{)Qc(p0Zi|*&S38_IC<5ke9Sv(YUstl(AVSp1_ch%Fm}a6$$@eML=eSxO2)>90MKkZkF_~2LPld`g$0*_-j9r${UmzuM)!<-4b7$5Bg#QkCRvf|H6XceSdJ@S?#?{%};HX8#a3RcdAqREkaT zv%-#U4p(hkz}&;EPA)tqn>X%3%EoE%RxBUw;K;+L%EOmtT1Ott=gecB2WXXNRS58_ zM*Tg|J>mdi<$kP)1DwH1%~}lAOd_3}`ysHcjl@KBGABxQAQH)EIV-u^4q{R&gX_uq zHSVv5Y036OZ;GOn#0M{~QLluk!H7Y{mY@g~M)iQ9b~y>2f6b0~!GQ);KWTi2%aK8Y zIuVBFxu7Ok)SEc?h1-pbW}xls{1O!OR9uFH;B|~|od?2Hh!^8l5}us$AjFHrin@np zsb02A6r@eQ$dP_(rJAvf%6#3lohFuV1W#g#_X&`L64rP-03cX6At1uM30?}@CJt$X zNfU4ednWw+FlOpVHdriiBpd9Rx3Tk0q8$a>h42HUrZ4k|1XG&7&AZb`Bvy7euE8FO zjiyqTjmGN;AL%9O#`-$!54x8Ppwp_T(gQSe4=esTCO^;Q7no4_lyEo6=^9kxx9C*( zi!3L_{m=34mzexAlTw8rXk(xoD7#bAeU?;af)(sBef?yO|0YDh2~mRtT*H%0`V!y1 z%7iLgNC7VdNpRtJnUFd@%q<2#i~3Hmvt%QqdwK2zMdoAr2T$rl(0|#)?g=_qy`)Md zJ%5#*$3=R6I4?8LV=8(ZT^#z*;4A)6Zn%Ui{TNA=sy{>-bp$dhNc**qmK}GATpAZ>VSc%mC};U-cc|gV$UMbW(m0x?C`CQyy8~! zTr?IP*2!BGJrPx%N%*vA9W%y4(wli8YPprDM~$J14=jTU!|<61*gy{AF)U!?I0Z|>UrKg0Yr3OXk*QDjubHF5L;?xA{mJDcoSPLqEbWzvU!_ zG$tDj;KHT3X^7=YcV1j2J611V4ok`P1h`L97~j~9um*7$$m29zG0lm{!G zm4ut$W66O_w`&d&^6hG|Z{@{s{^GZ|%@@tuX>KI^IC;_4@EirC-8az}MVt$P3r)ba`AOktGT$^|U1C>hB0HuG0wIFKU|A$B$yXC*Re))FassFvU|MW?%edgdCd-M3j921x^n|^ zv4}cEj)nR_Q9EY_#+m~=2V;Y^)xjP|S!kw$_c|MuRtYXT(N7H%Q~c^E4#SW>w^ipu zqHQThrs`}6V|=Rs8j>K+t%o7ZMLy&Cc}ox}EIbOchC`Ps-MU2Ez82YoQvPWl0_F2$ zh{XAUeVt#Hz`W%g*?Ph#Wpk5vrH)~zMxV06akSKC~A02dZEGq9BvW?NMa6z zLw(gD<5lM7W++ww(5UI#zsAS>Z~zQhz?GjjJd;r4!To;Syc#ZQ^W5o#=~c>I-){YlnKB zJr|s{C=%Woxc$XPLz{J6vjQT@efiQAsrjYu#S1aZaDiTO<9?8mqsRCr)ph*BAp zda5HU{SKOfIv2!6r3W^=f`)m{VMA3J9S@&wU_Ula+_bINIZ|>n2T|F=fJ{K0hQt;A z1d@eGYMT}kN|h-5Stc(d(KeO&*sM@L&s=_Fdls@@_y%*{XYxHJKgVPN$s{E^6I1y% zqvFs^MPPg-G%vvGW>AR0F)kDf%XJ9iT1t&_DUu`S|w?Wx)s$fU<% z_AE{fra9m;E}dRaG0!Xt6|*bE(c5kqQYECjMU6H615}UpNCf?naXBUOY@`^NH(!Gi z7!AqGRan31Moz5bPLj6IX2B+)-`EKb49}hU+7Ia~YQ`k1^e>W12q1{!L>l8{zGLOb zk{=85SfK`TA{JEZewZ-CmAu2o=yOU{m8NXK9H?zEt)M~wq~$EcFuj0~ElG$~`u{1n zi1$OF+=kD86$lXi29gDfz5fAjb%53-h5XFpu&8j!_QB9qP<}+6eFye)nPPxI^ZMHk zze{9gAc;G4e3yv%UrWR^Vqe%&Jy!JFEehl)(X@h?h*2~7yC_y3*IB?n>h;hJPGBmQ zRIe5Ue`pv$NWs03ZwDg1SEE!%A)XXW!=%-!NPlN&xj05ug1$TQ`gtnlV6Ow2wBRSz zVk53xU3xyBUq9O$ z15dRcfZal(~l!F{Dae`!BZrv%T;O68TGjZZLQe; z3~GN8vnKW)2maEzv(s@hxDOjB1XbL2#`8(+WFmj=^ui?FtAoKJasq7?Eb;{wE>!mL zJA9-k)dEQ9V3Y4C`~mO&8k65*@;90M4wD}uDF>sw2is%umr()Yex1I8d&kF<9I8ED zJ61%K4A&+Oj@Ax+@xa7^qZ7v`j%iJ~t{A?F1^4eLW`nzT6w|A(Rd^jLY3U)}%k?0< zcn|R$jY(Xn2J`qsNWFwBWsp*`%gE>IZhJ%(<{R!2PErPzj4BM8Atrlqhls8_|HG+^ z5EBIeG6-I_jwJ{utFguZX@jGWPL{SM62n|!?tpLhlc++asI=61UX)@s_{4Yfk@`9E&T63VH!>I&l# zY*W@E?-c4qTn?LdY_CFx31zsBb?zO4}DvlipVzn(+yqm87^`PhNM3 zifrKFpkje%B`Tg_&pj$~E{Ujpn5YO2yh#5Fk{>N5jtLWqljFF9t9Q%)h@-$C#}!(V z?x&wX;?a`AN*`8ogn4GT%F@hXbOdt^;05}>i)=}dviaB75Njo5)#ICZ5}GtPIlKzo zd>0S*g_}S_!%f>cZ&>-297z#ng$?!#Ig-LdULq-b9R33)1IWkm$a-ACRCdlVb=xl< z9=g6vSR#w5qANokcZaZI%MTG@|2{fM;lpAxON6a?gjJB$_^!gN)CAFeS4Tllpn@D9 zON_l#V(g>@6H-6HYQ1h}8sJZ;NxUHm6x8Be^{FJoDSshyoSmOXS#y59+v#Q6K!QjL zptnCwk0nV-wY}92qg$GL`^sA|XlV0lb`&ow2ig}eZx zf1Sy1Gx=Nmkia^3tC7g3q`&DJGE3o$cATO9o}2z+f|2&~{PqHqFEQaUTj50}lpS=2 
zUAV-%h=~Y4D3}X@dOqam$9eZ%CLc5T9+N-A@1=n+b_? z`1hD_&BNbEGKs5_JPM==c}eE1;J!%BI7dW77j_?JHo yofyHt^QYcBb!dE~xZWHeIsM%Dp3}?7+l%W6t|!MwPCSL*PvG|t@%t36$Nv}QP!Tc! literal 0 HcmV?d00001 diff --git a/gam/gdata/analytics/acl/__init__.py b/gam/gdata/analytics/acl/__init__.py new file mode 100755 index 00000000000..22071f7a11e --- /dev/null +++ b/gam/gdata/analytics/acl/__init__.py @@ -0,0 +1,15 @@ +#!/usr/bin/python +# +# Copyright (C) 2009 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/gam/gdata/analytics/acl/data.py b/gam/gdata/analytics/acl/data.py new file mode 100755 index 00000000000..17d6c15048a --- /dev/null +++ b/gam/gdata/analytics/acl/data.py @@ -0,0 +1,63 @@ +#!/usr/bin/python +# +# Copyright (C) 2009 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Contains the data classes of the Google Access Control List (ACL) Extension""" + + +__author__ = 'j.s@google.com (Jeff Scudder)' + + +import atom.core +import atom.data +import gdata.data +import gdata.opensearch.data + + +GACL_TEMPLATE = '{http://schemas.google.com/acl/2007}%s' + + +class AclRole(atom.core.XmlElement): + """Describes the role of an entry in an access control list.""" + _qname = GACL_TEMPLATE % 'role' + value = 'value' + + +class AclScope(atom.core.XmlElement): + """Describes the scope of an entry in an access control list.""" + _qname = GACL_TEMPLATE % 'scope' + type = 'type' + value = 'value' + + +class AclWithKey(atom.core.XmlElement): + """Describes a key that can be used to access a document.""" + _qname = GACL_TEMPLATE % 'withKey' + key = 'key' + role = AclRole + + +class AclEntry(gdata.data.GDEntry): + """Describes an entry in a feed of an access control list (ACL).""" + scope = AclScope + role = AclRole + with_key = AclWithKey + + +class AclFeed(gdata.data.GDFeed): + """Describes a feed of an access control list (ACL).""" + entry = [AclEntry] + + diff --git a/gam/gdata/analytics/alt/__init__.py b/gam/gdata/analytics/alt/__init__.py new file mode 100755 index 00000000000..742980e819b --- /dev/null +++ b/gam/gdata/analytics/alt/__init__.py @@ -0,0 +1,20 @@ +#!/usr/bin/python +# +# Copyright (C) 2008 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""This package's modules adapt the gdata library to run in other environments + +The first example is the appengine module which contains functions and +classes which modify a GDataService object to run on Google App Engine. +""" diff --git a/gam/gdata/analytics/alt/app_engine.py b/gam/gdata/analytics/alt/app_engine.py new file mode 100755 index 00000000000..afa412d5bca --- /dev/null +++ b/gam/gdata/analytics/alt/app_engine.py @@ -0,0 +1,101 @@ +#!/usr/bin/python +# +# Copyright (C) 2009 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +"""Provides functions to persist serialized auth tokens in the datastore. + +The get_token and set_token functions should be used in conjunction with +gdata.gauth's token_from_blob and token_to_blob to allow auth token objects +to be reused across requests. It is up to your own code to ensure that the +token key's are unique. +""" + +__author__ = 'j.s@google.com (Jeff Scudder)' + + +from google.appengine.ext import db +from google.appengine.api import memcache + + +class Token(db.Model): + """Datastore Model which stores a serialized auth token.""" + t = db.BlobProperty() + + +def get_token(unique_key): + """Searches for a stored token with the desired key. + + Checks memcache and then the datastore if required. + + Args: + unique_key: str which uniquely identifies the desired auth token. + + Returns: + A string encoding the auth token data. Use gdata.gauth.token_from_blob to + convert back into a usable token object. None if the token was not found + in memcache or the datastore. + """ + token_string = memcache.get(unique_key) + if token_string is None: + # The token wasn't in memcache, so look in the datastore. + token = Token.get_by_key_name(unique_key) + if token is None: + return None + return token.t + return token_string + + +def set_token(unique_key, token_str): + """Saves the serialized auth token in the datastore. + + The token is also stored in memcache to speed up retrieval on a cache hit. + + Args: + unique_key: The unique name for this token as a string. It is up to your + code to ensure that this token value is unique in your application. + Previous values will be silently overwitten. + token_str: A serialized auth token as a string. I expect that this string + will be generated by gdata.gauth.token_to_blob. + + Returns: + True if the token was stored sucessfully, False if the token could not be + safely cached (if an old value could not be cleared). If the token was + set in memcache, but not in the datastore, this function will return None. + However, in that situation an exception will likely be raised. 
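+
+  Example (an illustrative sketch; the key naming scheme and the auth_token
+  variable are the caller's choices, not part of this module):
+    import gdata.gauth
+    blob = gdata.gauth.token_to_blob(auth_token)
+    if set_token('analytics-token-alice@example.com', blob):
+      restored = gdata.gauth.token_from_blob(
+          get_token('analytics-token-alice@example.com'))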
+ + Raises: + Datastore exceptions may be raised from the App Engine SDK in the event of + failure. + """ + # First try to save in memcache. + result = memcache.set(unique_key, token_str) + # If memcache fails to save the value, clear the cached value. + if not result: + result = memcache.delete(unique_key) + # If we could not clear the cached value for this token, refuse to save. + if result == 0: + return False + # Save to the datastore. + if Token(key_name=unique_key, t=token_str).put(): + return True + return None + + +def delete_token(unique_key): + # Clear from memcache. + memcache.delete(unique_key) + # Clear from the datastore. + Token(key_name=unique_key).delete() diff --git a/gam/gdata/analytics/alt/appengine.py b/gam/gdata/analytics/alt/appengine.py new file mode 100755 index 00000000000..22516214e15 --- /dev/null +++ b/gam/gdata/analytics/alt/appengine.py @@ -0,0 +1,321 @@ +#!/usr/bin/python +# +# Copyright (C) 2008 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +"""Provides HTTP functions for gdata.service to use on Google App Engine + +AppEngineHttpClient: Provides an HTTP request method which uses App Engine's + urlfetch API. Set the http_client member of a GDataService object to an + instance of an AppEngineHttpClient to allow the gdata library to run on + Google App Engine. + +run_on_appengine: Function which will modify an existing GDataService object + to allow it to run on App Engine. It works by creating a new instance of + the AppEngineHttpClient and replacing the GDataService object's + http_client. +""" + + +__author__ = 'api.jscudder (Jeff Scudder)' + + +import StringIO +import pickle +import atom.http_interface +import atom.token_store +from google.appengine.api import urlfetch +from google.appengine.ext import db +from google.appengine.api import users +from google.appengine.api import memcache + + +def run_on_appengine(gdata_service, store_tokens=True, + single_user_mode=False, deadline=None): + """Modifies a GDataService object to allow it to run on App Engine. + + Args: + gdata_service: An instance of AtomService, GDataService, or any + of their subclasses which has an http_client member and a + token_store member. + store_tokens: Boolean, defaults to True. If True, the gdata_service + will attempt to add each token to it's token_store when + SetClientLoginToken or SetAuthSubToken is called. If False + the tokens will not automatically be added to the + token_store. + single_user_mode: Boolean, defaults to False. If True, the current_token + member of gdata_service will be set when + SetClientLoginToken or SetAuthTubToken is called. If set + to True, the current_token is set in the gdata_service + and anyone who accesses the object will use the same + token. + + Note: If store_tokens is set to False and + single_user_mode is set to False, all tokens will be + ignored, since the library assumes: the tokens should not + be stored in the datastore and they should not be stored + in the gdata_service object. 
This will make it + impossible to make requests which require authorization. + deadline: int (optional) The number of seconds to wait for a response + before timing out on the HTTP request. If no deadline is + specified, the deafault deadline for HTTP requests from App + Engine is used. The maximum is currently 10 (for 10 seconds). + The default deadline for App Engine is 5 seconds. + """ + gdata_service.http_client = AppEngineHttpClient(deadline=deadline) + gdata_service.token_store = AppEngineTokenStore() + gdata_service.auto_store_tokens = store_tokens + gdata_service.auto_set_current_token = single_user_mode + return gdata_service + + +class AppEngineHttpClient(atom.http_interface.GenericHttpClient): + def __init__(self, headers=None, deadline=None): + self.debug = False + self.headers = headers or {} + self.deadline = deadline + + def request(self, operation, url, data=None, headers=None): + """Performs an HTTP call to the server, supports GET, POST, PUT, and + DELETE. + + Usage example, perform and HTTP GET on http://www.google.com/: + import atom.http + client = atom.http.HttpClient() + http_response = client.request('GET', 'http://www.google.com/') + + Args: + operation: str The HTTP operation to be performed. This is usually one + of 'GET', 'POST', 'PUT', or 'DELETE' + data: filestream, list of parts, or other object which can be converted + to a string. Should be set to None when performing a GET or DELETE. + If data is a file-like object which can be read, this method will + read a chunk of 100K bytes at a time and send them. + If the data is a list of parts to be sent, each part will be + evaluated and sent. + url: The full URL to which the request should be sent. Can be a string + or atom.url.Url. + headers: dict of strings. HTTP headers which should be sent + in the request. + """ + all_headers = self.headers.copy() + if headers: + all_headers.update(headers) + + # Construct the full payload. + # Assume that data is None or a string. + data_str = data + if data: + if isinstance(data, list): + # If data is a list of different objects, convert them all to strings + # and join them together. + converted_parts = [_convert_data_part(x) for x in data] + data_str = ''.join(converted_parts) + else: + data_str = _convert_data_part(data) + + # If the list of headers does not include a Content-Length, attempt to + # calculate it based on the data object. + if data and 'Content-Length' not in all_headers: + all_headers['Content-Length'] = str(len(data_str)) + + # Set the content type to the default value if none was set. + if 'Content-Type' not in all_headers: + all_headers['Content-Type'] = 'application/atom+xml' + + # Lookup the urlfetch operation which corresponds to the desired HTTP verb. + if operation == 'GET': + method = urlfetch.GET + elif operation == 'POST': + method = urlfetch.POST + elif operation == 'PUT': + method = urlfetch.PUT + elif operation == 'DELETE': + method = urlfetch.DELETE + else: + method = None + if self.deadline is None: + return HttpResponse(urlfetch.Fetch(url=str(url), payload=data_str, + method=method, headers=all_headers, follow_redirects=False)) + return HttpResponse(urlfetch.Fetch(url=str(url), payload=data_str, + method=method, headers=all_headers, follow_redirects=False, + deadline=self.deadline)) + + +def _convert_data_part(data): + if not data or isinstance(data, str): + return data + elif hasattr(data, 'read'): + # data is a file like object, so read it completely. + return data.read() + # The data object was not a file. 
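+  # (It is also not a plain string; atom and gdata elements fall into this
+  # case, and their str() form is the XML serialization that the server
+  # expects.)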
+  # Try to convert to a string and send the data.
+  return str(data)
+
+
+class HttpResponse(object):
+  """Translates a urlfetch response to look like an httplib response.
+
+  Used to allow the response from HttpRequest to be usable by gdata.service
+  methods.
+  """
+
+  def __init__(self, urlfetch_response):
+    self.body = StringIO.StringIO(urlfetch_response.content)
+    self.headers = urlfetch_response.headers
+    self.status = urlfetch_response.status_code
+    self.reason = ''
+
+  def read(self, length=None):
+    if not length:
+      return self.body.read()
+    else:
+      return self.body.read(length)
+
+  def getheader(self, name):
+    if not self.headers.has_key(name):
+      return self.headers[name.lower()]
+    return self.headers[name]
+
+
+class TokenCollection(db.Model):
+  """Datastore Model which associates auth tokens with the current user."""
+  user = db.UserProperty()
+  pickled_tokens = db.BlobProperty()
+
+
+class AppEngineTokenStore(atom.token_store.TokenStore):
+  """Stores the user's auth tokens in the App Engine datastore.
+
+  Tokens are only written to the datastore if a user is signed in (if
+  users.get_current_user() returns a user object).
+  """
+  def __init__(self):
+    self.user = None
+
+  def add_token(self, token):
+    """Associates the token with the current user and stores it.
+
+    If there is no current user, the token will not be stored.
+
+    Returns:
+      False if the token was not stored.
+    """
+    tokens = load_auth_tokens(self.user)
+    if not hasattr(token, 'scopes') or not token.scopes:
+      return False
+    for scope in token.scopes:
+      tokens[str(scope)] = token
+    key = save_auth_tokens(tokens, self.user)
+    if key:
+      return True
+    return False
+
+  def find_token(self, url):
+    """Searches the current user's collection of tokens for a token which can
+    be used for a request to the url.
+
+    Returns:
+      The stored token which belongs to the current user and is valid for the
+      desired URL. If there is no current user, or there is no valid user
+      token in the datastore, an atom.http_interface.GenericToken is returned.
+    """
+    if url is None:
+      return None
+    if isinstance(url, (str, unicode)):
+      url = atom.url.parse_url(url)
+    tokens = load_auth_tokens(self.user)
+    if url in tokens:
+      token = tokens[url]
+      if token.valid_for_scope(url):
+        return token
+      else:
+        del tokens[url]
+        save_auth_tokens(tokens, self.user)
+    for scope, token in tokens.iteritems():
+      if token.valid_for_scope(url):
+        return token
+    return atom.http_interface.GenericToken()
+
+  def remove_token(self, token):
+    """Removes the token from the current user's collection in the datastore.
+
+    Returns:
+      False if the token was not removed; this could be because the token was
+      not in the datastore, or because there is no current user.
+    """
+    token_found = False
+    scopes_to_delete = []
+    tokens = load_auth_tokens(self.user)
+    for scope, stored_token in tokens.iteritems():
+      if stored_token == token:
+        scopes_to_delete.append(scope)
+        token_found = True
+    for scope in scopes_to_delete:
+      del tokens[scope]
+    if token_found:
+      save_auth_tokens(tokens, self.user)
+    return token_found
+
+  def remove_all_tokens(self):
+    """Removes all of the current user's tokens from the datastore."""
+    save_auth_tokens({}, self.user)
+
+
+def save_auth_tokens(token_dict, user=None):
+  """Associates the tokens with the current user and writes to the datastore.
+
+  If there is no current user, the tokens are not written and this function
+  returns None.
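+
+  Args:
+    token_dict: dict A dictionary of auth token objects keyed by scope
+        string, as built by AppEngineTokenStore.add_token.
+    user: users.User (optional) The user to associate the tokens with. If
+        None, users.get_current_user() is used.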
+ + Returns: + The key of the datastore entity containing the user's tokens, or None if + there was no current user. + """ + if user is None: + user = users.get_current_user() + if user is None: + return None + memcache.set('gdata_pickled_tokens:%s' % user, pickle.dumps(token_dict)) + user_tokens = TokenCollection.all().filter('user =', user).get() + if user_tokens: + user_tokens.pickled_tokens = pickle.dumps(token_dict) + return user_tokens.put() + else: + user_tokens = TokenCollection( + user=user, + pickled_tokens=pickle.dumps(token_dict)) + return user_tokens.put() + + +def load_auth_tokens(user=None): + """Reads a dictionary of the current user's tokens from the datastore. + + If there is no current user (a user is not signed in to the app) or the user + does not have any tokens, an empty dictionary is returned. + """ + if user is None: + user = users.get_current_user() + if user is None: + return {} + pickled_tokens = memcache.get('gdata_pickled_tokens:%s' % user) + if pickled_tokens: + return pickle.loads(pickled_tokens) + user_tokens = TokenCollection.all().filter('user =', user).get() + if user_tokens: + memcache.set('gdata_pickled_tokens:%s' % user, user_tokens.pickled_tokens) + return pickle.loads(user_tokens.pickled_tokens) + return {} + diff --git a/gam/gdata/analytics/analytics/__init__.py b/gam/gdata/analytics/analytics/__init__.py new file mode 100755 index 00000000000..8dfa20ba985 --- /dev/null +++ b/gam/gdata/analytics/analytics/__init__.py @@ -0,0 +1,223 @@ +#!/usr/bin/python +# +# Original Copyright (C) 2006 Google Inc. +# Refactored in 2009 to work for Google Analytics by Sal Uryasev at Juice Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +# Note that this module will not function without specifically adding +# 'analytics': [ #Google Analytics +# 'https://www.google.com/analytics/feeds/'], +# to CLIENT_LOGIN_SCOPES in the gdata/service.py file + +"""Contains extensions to Atom objects used with Google Analytics.""" + +__author__ = 'api.suryasev (Sal Uryasev)' + +import atom +import gdata + +GAN_NAMESPACE = 'http://schemas.google.com/analytics/2009' + +class TableId(gdata.GDataEntry): + """tableId element.""" + _tag = 'tableId' + _namespace = GAN_NAMESPACE + +class Property(gdata.GDataEntry): + _tag = 'property' + _namespace = GAN_NAMESPACE + _children = gdata.GDataEntry._children.copy() + _attributes = gdata.GDataEntry._attributes.copy() + + _attributes['name'] = 'name' + _attributes['value'] = 'value' + + def __init__(self, name=None, value=None, *args, **kwargs): + self.name = name + self.value = value + super(Property, self).__init__(*args, **kwargs) + + def __str__(self): + return self.value + + def __repr__(self): + return self.value + +class AccountListEntry(gdata.GDataEntry): + """The Google Documents version of an Atom Entry""" + + _tag = 'entry' + _namespace = atom.ATOM_NAMESPACE + _children = gdata.GDataEntry._children.copy() + _attributes = gdata.GDataEntry._attributes.copy() + _children['{%s}tableId' % GAN_NAMESPACE] = ('tableId', + [TableId]) + _children['{%s}property' % GAN_NAMESPACE] = ('property', + [Property]) + + def __init__(self, tableId=None, property=None, + *args, **kwargs): + self.tableId = tableId + self.property = property + super(AccountListEntry, self).__init__(*args, **kwargs) + + +def AccountListEntryFromString(xml_string): + """Converts an XML string into an AccountListEntry object. + + Args: + xml_string: string The XML describing a Document List feed entry. + + Returns: + A AccountListEntry object corresponding to the given XML. + """ + return atom.CreateClassFromXMLString(AccountListEntry, xml_string) + + +class AccountListFeed(gdata.GDataFeed): + """A feed containing a list of Google Documents Items""" + + _tag = 'feed' + _namespace = atom.ATOM_NAMESPACE + _children = gdata.GDataFeed._children.copy() + _attributes = gdata.GDataFeed._attributes.copy() + _children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry', + [AccountListEntry]) + + +def AccountListFeedFromString(xml_string): + """Converts an XML string into an AccountListFeed object. + + Args: + xml_string: string The XML describing an AccountList feed. + + Returns: + An AccountListFeed object corresponding to the given XML. + All properties are also linked to with a direct reference + from each entry object for convenience. (e.g. 
entry.AccountName) + """ + feed = atom.CreateClassFromXMLString(AccountListFeed, xml_string) + for entry in feed.entry: + for pro in entry.property: + entry.__dict__[pro.name.replace('ga:','')] = pro + for td in entry.tableId: + td.__dict__['value'] = td.text + return feed + +class Dimension(gdata.GDataEntry): + _tag = 'dimension' + _namespace = GAN_NAMESPACE + _children = gdata.GDataEntry._children.copy() + _attributes = gdata.GDataEntry._attributes.copy() + + _attributes['name'] = 'name' + _attributes['value'] = 'value' + _attributes['type'] = 'type' + _attributes['confidenceInterval'] = 'confidence_interval' + + def __init__(self, name=None, value=None, type=None, + confidence_interval = None, *args, **kwargs): + self.name = name + self.value = value + self.type = type + self.confidence_interval = confidence_interval + super(Dimension, self).__init__(*args, **kwargs) + + def __str__(self): + return self.value + + def __repr__(self): + return self.value + +class Metric(gdata.GDataEntry): + _tag = 'metric' + _namespace = GAN_NAMESPACE + _children = gdata.GDataEntry._children.copy() + _attributes = gdata.GDataEntry._attributes.copy() + + _attributes['name'] = 'name' + _attributes['value'] = 'value' + _attributes['type'] = 'type' + _attributes['confidenceInterval'] = 'confidence_interval' + + def __init__(self, name=None, value=None, type=None, + confidence_interval = None, *args, **kwargs): + self.name = name + self.value = value + self.type = type + self.confidence_interval = confidence_interval + super(Metric, self).__init__(*args, **kwargs) + + def __str__(self): + return self.value + + def __repr__(self): + return self.value + +class AnalyticsDataEntry(gdata.GDataEntry): + """The Google Analytics version of an Atom Entry""" + + _tag = 'entry' + _namespace = atom.ATOM_NAMESPACE + _children = gdata.GDataEntry._children.copy() + _attributes = gdata.GDataEntry._attributes.copy() + + _children['{%s}dimension' % GAN_NAMESPACE] = ('dimension', + [Dimension]) + + _children['{%s}metric' % GAN_NAMESPACE] = ('metric', + [Metric]) + + def __init__(self, dimension=None, metric=None, *args, **kwargs): + self.dimension = dimension + self.metric = metric + + super(AnalyticsDataEntry, self).__init__(*args, **kwargs) + +class AnalyticsDataFeed(gdata.GDataFeed): + """A feed containing a list of Google Analytics Data Feed""" + + _tag = 'feed' + _namespace = atom.ATOM_NAMESPACE + _children = gdata.GDataFeed._children.copy() + _attributes = gdata.GDataFeed._attributes.copy() + _children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry', + [AnalyticsDataEntry]) + + +""" +Data Feed +""" + +def AnalyticsDataFeedFromString(xml_string): + """Converts an XML string into an AccountListFeed object. + + Args: + xml_string: string The XML describing an AccountList feed. + + Returns: + An AccountListFeed object corresponding to the given XML. + Each metric and dimension is also referenced directly from + the entry for easier access. (e.g. 
entry.keyword.value) + """ + feed = atom.CreateClassFromXMLString(AnalyticsDataFeed, xml_string) + if feed.entry: + for entry in feed.entry: + for met in entry.metric: + entry.__dict__[met.name.replace('ga:','')] = met + if entry.dimension is not None: + for dim in entry.dimension: + entry.__dict__[dim.name.replace('ga:','')] = dim + + return feed diff --git a/gam/gdata/analytics/analytics/client.py b/gam/gdata/analytics/analytics/client.py new file mode 100755 index 00000000000..dbfc5f78313 --- /dev/null +++ b/gam/gdata/analytics/analytics/client.py @@ -0,0 +1,313 @@ +#!/usr/bin/python +# +# Copyright 2010 Google Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Streamlines requests to the Google Analytics APIs.""" + +__author__ = 'api.nickm@google.com (Nick Mihailovski)' + + +import atom.data +import gdata.client +import gdata.analytics.data +import gdata.gauth + + +class AnalyticsClient(gdata.client.GDClient): + """Client extension for the Google Analytics API service.""" + + api_version = '2' + auth_service = 'analytics' + auth_scopes = gdata.gauth.AUTH_SCOPES['analytics'] + account_type = 'GOOGLE' + ssl = True + + def __init__(self, auth_token=None, **kwargs): + """Initializes a new client for the Google Analytics Data Export API. + + Args: + auth_token: gdata.gauth.ClientLoginToken, AuthSubToken, or + OAuthToken (optional) Authorizes this client to edit the user's data. + kwargs: The other parameters to pass to gdata.client.GDClient + constructor. + """ + + gdata.client.GDClient.__init__(self, auth_token=auth_token, **kwargs) + + def get_account_feed(self, feed_uri, auth_token=None, **kwargs): + """Makes a request to the Analytics API Account Feed. + + Args: + feed_uri: str or gdata.analytics.AccountFeedQuery The Analytics Account + Feed uri to define what data to retrieve from the API. Can also be + used with a gdata.analytics.AccountFeedQuery object. + """ + + return self.get_feed(feed_uri, + desired_class=gdata.analytics.data.AccountFeed, + auth_token=auth_token, + **kwargs) + + GetAccountFeed = get_account_feed + + def get_data_feed(self, feed_uri, auth_token=None, **kwargs): + """Makes a request to the Analytics API Data Feed. + + Args: + feed_uri: str or gdata.analytics.AccountFeedQuery The Analytics Data + Feed uri to define what data to retrieve from the API. Can also be + used with a gdata.analytics.AccountFeedQuery object. + """ + + return self.get_feed(feed_uri, + desired_class=gdata.analytics.data.DataFeed, + auth_token=auth_token, + **kwargs) + + GetDataFeed = get_data_feed + + def get_management_feed(self, feed_uri, auth_token=None, **kwargs): + """Makes a request to the Google Analytics Management API. + + The Management API provides read-only access to configuration data for + Google Analytics and supercedes the Data Export API Account Feed. + The Management API supports 5 feeds: account, web property, profile, + goal, advanced segment. + + You can access each feed through the respective management query class + below. 
All requests return the same data object. + + Args: + feed_uri: str or AccountQuery, WebPropertyQuery, + ProfileQuery, GoalQuery, MgmtAdvSegFeedQuery + The Management API Feed uri to define which feed to retrieve. + Either use a string or one of the wrapper classes. + """ + + return self.get_feed(feed_uri, + desired_class=gdata.analytics.data.ManagementFeed, + auth_token=auth_token, + **kwargs) + + GetMgmtFeed = GetManagementFeed = get_management_feed + + +class AnalyticsBaseQuery(gdata.client.GDQuery): + """Abstracts common configuration across all query objects. + + Attributes: + scheme: string The default scheme. Should always be https. + host: string The default host. + """ + + scheme = 'https' + host = 'www.google.com' + + +class AccountFeedQuery(AnalyticsBaseQuery): + """Account Feed query class to simplify constructing Account Feed Urls. + + To use this class, you can either pass a dict in the constructor that has + all the data feed query parameters as keys: + queryUrl = AccountFeedQuery({'max-results': '10000'}) + + Alternatively you can add new parameters directly to the query object: + queryUrl = AccountFeedQuery() + queryUrl.query['max-results'] = '10000' + + Args: + query: dict (optional) Contains all the GA Data Feed query parameters + as keys. + """ + + path = '/analytics/feeds/accounts/default' + + def __init__(self, query={}, **kwargs): + self.query = query + gdata.client.GDQuery(self, **kwargs) + + +class DataFeedQuery(AnalyticsBaseQuery): + """Data Feed query class to simplify constructing Data Feed Urls. + + To use this class, you can either pass a dict in the constructor that has + all the data feed query parameters as keys: + queryUrl = DataFeedQuery({'start-date': '2008-10-01'}) + + Alternatively you can add new parameters directly to the query object: + queryUrl = DataFeedQuery() + queryUrl.query['start-date'] = '2008-10-01' + + Args: + query: dict (optional) Contains all the GA Data Feed query parameters + as keys. + """ + + path = '/analytics/feeds/data' + + def __init__(self, query={}, **kwargs): + self.query = query + gdata.client.GDQuery(self, **kwargs) + + +class AccountQuery(AnalyticsBaseQuery): + """Management API Account Feed query class. + + Example Usage: + queryUrl = AccountQuery() + queryUrl = AccountQuery({'max-results': 100}) + + queryUrl2 = AccountQuery() + queryUrl2.query['max-results'] = 100 + + Args: + query: dict (optional) A dictionary of query parameters. + """ + + path = '/analytics/feeds/datasources/ga/accounts' + + def __init__(self, query={}, **kwargs): + self.query = query + gdata.client.GDQuery(self, **kwargs) + +class WebPropertyQuery(AnalyticsBaseQuery): + """Management API Web Property Feed query class. + + Example Usage: + queryUrl = WebPropertyQuery() + queryUrl = WebPropertyQuery('123', {'max-results': 100}) + queryUrl = WebPropertyQuery(acct_id='123', + query={'max-results': 100}) + + queryUrl2 = WebPropertyQuery() + queryUrl2.acct_id = '1234' + queryUrl2.query['max-results'] = 100 + + Args: + acct_id: string (optional) The account ID to filter results. + Default is ~all. + query: dict (optional) A dictionary of query parameters. + """ + + def __init__(self, acct_id='~all', query={}, **kwargs): + self.acct_id = acct_id + self.query = query + gdata.client.GDQuery(self, **kwargs) + + @property + def path(self): + """Wrapper for path attribute.""" + return ('/analytics/feeds/datasources/ga/accounts/%s/webproperties' % + self.acct_id) + + +class ProfileQuery(AnalyticsBaseQuery): + """Management API Profile Feed query class. 
+ + Example Usage: + queryUrl = ProfileQuery() + queryUrl = ProfileQuery('123', 'UA-123-1', {'max-results': 100}) + queryUrl = ProfileQuery(acct_id='123', + web_prop_id='UA-123-1', + query={'max-results': 100}) + + queryUrl2 = ProfileQuery() + queryUrl2.acct_id = '123' + queryUrl2.web_prop_id = 'UA-123-1' + queryUrl2.query['max-results'] = 100 + + Args: + acct_id: string (optional) The account ID to filter results. + Default is ~all. + web_prop_id: string (optional) The web property ID to filter results. + Default is ~all. + query: dict (optional) A dictionary of query parameters. + """ + + def __init__(self, acct_id='~all', web_prop_id='~all', query={}, **kwargs): + self.acct_id = acct_id + self.web_prop_id = web_prop_id + self.query = query + gdata.client.GDQuery(self, **kwargs) + + @property + def path(self): + """Wrapper for path attribute.""" + return ('/analytics/feeds/datasources/ga/accounts/%s/webproperties' + '/%s/profiles' % (self.acct_id, self.web_prop_id)) + + +class GoalQuery(AnalyticsBaseQuery): + """Management API Goal Feed query class. + + Example Usage: + queryUrl = GoalQuery() + queryUrl = GoalQuery('123', 'UA-123-1', '555', + {'max-results': 100}) + queryUrl = GoalQuery(acct_id='123', + web_prop_id='UA-123-1', + profile_id='555', + query={'max-results': 100}) + + queryUrl2 = GoalQuery() + queryUrl2.acct_id = '123' + queryUrl2.web_prop_id = 'UA-123-1' + queryUrl2.query['max-results'] = 100 + + Args: + acct_id: string (optional) The account ID to filter results. + Default is ~all. + web_prop_id: string (optional) The web property ID to filter results. + Default is ~all. + profile_id: string (optional) The profile ID to filter results. + Default is ~all. + query: dict (optional) A dictionary of query parameters. + """ + + def __init__(self, acct_id='~all', web_prop_id='~all', profile_id='~all', + query={}, **kwargs): + self.acct_id = acct_id + self.web_prop_id = web_prop_id + self.profile_id = profile_id + self.query = query or {} + gdata.client.GDQuery(self, **kwargs) + + @property + def path(self): + """Wrapper for path attribute.""" + return ('/analytics/feeds/datasources/ga/accounts/%s/webproperties' + '/%s/profiles/%s/goals' % (self.acct_id, self.web_prop_id, + self.profile_id)) + + +class AdvSegQuery(AnalyticsBaseQuery): + """Management API Goal Feed query class. + + Example Usage: + queryUrl = AdvSegQuery() + queryUrl = AdvSegQuery({'max-results': 100}) + + queryUrl1 = AdvSegQuery() + queryUrl1.query['max-results'] = 100 + + Args: + query: dict (optional) A dictionary of query parameters. + """ + + path = '/analytics/feeds/datasources/ga/segments' + + def __init__(self, query={}, **kwargs): + self.query = query + gdata.client.GDQuery(self, **kwargs) diff --git a/gam/gdata/analytics/analytics/data.py b/gam/gdata/analytics/analytics/data.py new file mode 100755 index 00000000000..6b17ef6e279 --- /dev/null +++ b/gam/gdata/analytics/analytics/data.py @@ -0,0 +1,379 @@ +#!/usr/bin/python +# +# Copyright 2010 Google Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Data model classes for parsing and generating XML for both the
+Google Analytics Data Export and Management APIs. Although both APIs
+operate on different parts of Google Analytics, they share common XML
+elements and are released in the same module.
+
+The Management API supports 5 feeds, all using the same ManagementFeed
+data class.
+"""
+
+__author__ = 'api.nickm@google.com (Nick Mihailovski)'
+
+
+import gdata.data
+import atom.core
+import atom.data
+
+
+# XML Namespace used in Google Analytics API entities.
+DXP_NS = '{http://schemas.google.com/analytics/2009}%s'
+GA_NS = '{http://schemas.google.com/ga/2009}%s'
+GD_NS = '{http://schemas.google.com/g/2005}%s'
+
+
+class GetProperty(object):
+  """Utility class to simplify retrieving Property objects."""
+
+  def get_property(self, name):
+    """Helper method to return a property object by its name attribute.
+
+    Args:
+      name: string The name of the element to retrieve.
+
+    Returns:
+      A property object corresponding to the matching element.
+      If no property is found, None is returned.
+    """
+
+    for prop in self.property:
+      if prop.name == name:
+        return prop
+
+    return None
+
+  GetProperty = get_property
+
+
+class GetMetric(object):
+  """Utility class to simplify retrieving Metric objects."""
+
+  def get_metric(self, name):
+    """Helper method to return a metric object by its name attribute.
+
+    Args:
+      name: string The name of the element to retrieve.
+
+    Returns:
+      A metric object corresponding to the matching element.
+      If no metric is found, None is returned.
+    """
+
+    for met in self.metric:
+      if met.name == name:
+        return met
+
+    return None
+
+  GetMetric = get_metric
+
+
+class GetDimension(object):
+  """Utility class to simplify retrieving Dimension objects."""
+
+  def get_dimension(self, name):
+    """Helper method to return a dimension object by its name attribute.
+
+    Args:
+      name: string The name of the element to retrieve.
+
+    Returns:
+      A dimension object corresponding to the matching element.
+      If no dimension is found, None is returned.
+    """
+
+    for dim in self.dimension:
+      if dim.name == name:
+        return dim
+
+    return None
+
+  GetDimension = get_dimension
+
+
+class GaLinkFinder(object):
+  """Utility class to return specific links in Google Analytics feeds."""
+
+  def get_parent_links(self):
+    """Returns a list of all the parent links in an entry."""
+
+    links = []
+    for link in self.link:
+      if link.rel == link.parent():
+        links.append(link)
+
+    return links
+
+  GetParentLinks = get_parent_links
+
+  def get_child_links(self):
+    """Returns a list of all the child links in an entry."""
+
+    links = []
+    for link in self.link:
+      if link.rel == link.child():
+        links.append(link)
+
+    return links
+
+  GetChildLinks = get_child_links
+
+  def get_child_link(self, target_kind):
+    """Utility method to return one child link.
+
+    Returns:
+      A child link with the given target_kind. None if the target_kind was
+      not found.
+ """ + + for link in self.link: + if link.rel == link.child() and link.target_kind == target_kind: + return link + + return None + + GetChildLink = get_child_link + + +class StartDate(atom.core.XmlElement): + """Analytics Feed """ + _qname = DXP_NS % 'startDate' + + +class EndDate(atom.core.XmlElement): + """Analytics Feed """ + _qname = DXP_NS % 'endDate' + + +class Metric(atom.core.XmlElement): + """Analytics Feed """ + _qname = DXP_NS % 'metric' + name = 'name' + type = 'type' + value = 'value' + confidence_interval = 'confidenceInterval' + + +class Aggregates(atom.core.XmlElement, GetMetric): + """Analytics Data Feed """ + _qname = DXP_NS % 'aggregates' + metric = [Metric] + + +class ContainsSampledData(atom.core.XmlElement): + """Analytics Data Feed """ + _qname = DXP_NS % 'containsSampledData' + + +class TableId(atom.core.XmlElement): + """Analytics Feed """ + _qname = DXP_NS % 'tableId' + + +class TableName(atom.core.XmlElement): + """Analytics Feed """ + _qname = DXP_NS % 'tableName' + + +class Property(atom.core.XmlElement): + """Analytics Feed """ + _qname = DXP_NS % 'property' + name = 'name' + value = 'value' + + +class Definition(atom.core.XmlElement): + """Analytics Feed """ + _qname = DXP_NS % 'definition' + + +class Segment(atom.core.XmlElement): + """Analytics Feed """ + _qname = DXP_NS % 'segment' + id = 'id' + name = 'name' + definition = Definition + + +class Engagement(atom.core.XmlElement): + """Analytics Feed """ + _qname = GA_NS % 'engagement' + type = 'type' + comparison = 'comparison' + threshold_value = 'thresholdValue' + + +class Step(atom.core.XmlElement): + """Analytics Feed """ + _qname = GA_NS % 'step' + number = 'number' + name = 'name' + path = 'path' + + +class Destination(atom.core.XmlElement): + """Analytics Feed """ + _qname = GA_NS % 'destination' + step = [Step] + expression = 'expression' + case_sensitive = 'caseSensitive' + match_type = 'matchType' + step1_required = 'step1Required' + + +class Goal(atom.core.XmlElement): + """Analytics Feed """ + _qname = GA_NS % 'goal' + destination = Destination + engagement = Engagement + number = 'number' + name = 'name' + value = 'value' + active = 'active' + + +class CustomVariable(atom.core.XmlElement): + """Analytics Data Feed """ + _qname = GA_NS % 'customVariable' + index = 'index' + name = 'name' + scope = 'scope' + + +class DataSource(atom.core.XmlElement, GetProperty): + """Analytics Data Feed """ + _qname = DXP_NS % 'dataSource' + table_id = TableId + table_name = TableName + property = [Property] + + +class Dimension(atom.core.XmlElement): + """Analytics Feed """ + _qname = DXP_NS % 'dimension' + name = 'name' + value = 'value' + + +class AnalyticsLink(atom.data.Link): + """Subclass of link """ + target_kind = GD_NS % 'targetKind' + + @classmethod + def parent(cls): + """Parent target_kind""" + return '%s#parent' % GA_NS[1:-3] + + @classmethod + def child(cls): + """Child target_kind""" + return '%s#child' % GA_NS[1:-3] + + +# Account Feed. +class AccountEntry(gdata.data.GDEntry, GetProperty): + """Analytics Account Feed """ + _qname = atom.data.ATOM_TEMPLATE % 'entry' + table_id = TableId + property = [Property] + goal = [Goal] + custom_variable = [CustomVariable] + + +class AccountFeed(gdata.data.GDFeed): + """Analytics Account Feed """ + _qname = atom.data.ATOM_TEMPLATE % 'feed' + segment = [Segment] + entry = [AccountEntry] + + +# Data Feed. 
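+# Illustrative sketch of how the Data Feed classes below are typically read
+# once a feed has been fetched and parsed (for example via
+# gdata.analytics.client.AnalyticsClient); the metric and dimension names
+# are examples only:
+#
+#   for entry in feed.entry:
+#     source = entry.get_object('ga:source')    # a Dimension
+#     visits = entry.get_object('ga:visits')    # a Metric
+#     print source.value, visits.value
+#   total_visits = feed.aggregates.get_metric('ga:visits').value
+#   if feed.has_sampled_data():
+#     print 'Report contains sampled data.'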
+class DataEntry(gdata.data.GDEntry, GetMetric, GetDimension): + """Analytics Data Feed """ + _qname = atom.data.ATOM_TEMPLATE % 'entry' + dimension = [Dimension] + metric = [Metric] + + def get_object(self, name): + """Returns either a Dimension or Metric object with the same name as the + name parameter. + + Args: + name: string The name of the object to retrieve. + + Returns: + Either a Dimension or Object that has the same as the name parameter. + """ + + output = self.GetDimension(name) + if not output: + output = self.GetMetric(name) + + return output + + GetObject = get_object + + +class DataFeed(gdata.data.GDFeed): + """Analytics Data Feed . + + Although there is only one datasource, it is stored in an array to replicate + the design of the Java client library and ensure backwards compatibility if + new data sources are added in the future. + """ + + _qname = atom.data.ATOM_TEMPLATE % 'feed' + start_date = StartDate + end_date = EndDate + aggregates = Aggregates + contains_sampled_data = ContainsSampledData + data_source = [DataSource] + entry = [DataEntry] + segment = Segment + + def has_sampled_data(self): + """Returns whether this feed has sampled data.""" + if (self.contains_sampled_data.text == 'true'): + return True + return False + + HasSampledData = has_sampled_data + + +# Management Feed. +class ManagementEntry(gdata.data.GDEntry, GetProperty, GaLinkFinder): + """Analytics Managememt Entry .""" + + _qname = atom.data.ATOM_TEMPLATE % 'entry' + kind = GD_NS % 'kind' + property = [Property] + goal = Goal + segment = Segment + link = [AnalyticsLink] + + +class ManagementFeed(gdata.data.GDFeed): + """Analytics Management Feed . + + This class holds the data for all 5 Management API feeds: Account, + Web Property, Profile, Goal, and Advanced Segment Feeds. + """ + + _qname = atom.data.ATOM_TEMPLATE % 'feed' + entry = [ManagementEntry] + kind = GD_NS % 'kind' diff --git a/gam/gdata/analytics/analytics/service.py b/gam/gdata/analytics/analytics/service.py new file mode 100755 index 00000000000..0638b48b457 --- /dev/null +++ b/gam/gdata/analytics/analytics/service.py @@ -0,0 +1,331 @@ +#!/usr/bin/python +# +# Copyright (C) 2006 Google Inc. +# Refactored in 2009 to work for Google Analytics by Sal Uryasev at Juice Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" + AccountsService extends the GDataService to streamline Google Analytics + account information operations. + + AnalyticsDataService: Provides methods to query google analytics data feeds. + Extends GDataService. + + DataQuery: Queries a Google Analytics Data list feed. + + AccountQuery: Queries a Google Analytics Account list feed. 
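+
+  Example (an illustrative sketch; the table ID 'ga:12345' and the
+  credentials shown are placeholders, not values defined in this module):
+
+    import gdata.analytics.service
+
+    client = gdata.analytics.service.AnalyticsDataService()
+    client.ClientLogin('user@example.com', 'password', source='my-app')
+    feed = client.GetData(ids='ga:12345',
+                          dimensions='ga:source',
+                          metrics='ga:visits',
+                          start_date='2009-04-01',
+                          end_date='2009-04-30')
+    for entry in feed.entry:
+      print entry.source.value, entry.visits.value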
+""" + + +__author__ = 'api.suryasev (Sal Uryasev)' + + +import urllib +import atom +import gdata.service +import gdata.analytics + + +class AccountsService(gdata.service.GDataService): + + """Client extension for the Google Analytics Account List feed.""" + + def __init__(self, email="", password=None, source=None, + server='www.google.com/analytics', additional_headers=None, + **kwargs): + """Creates a client for the Google Analytics service. + + Args: + email: string (optional) The user's email address, used for + authentication. + password: string (optional) The user's password. + source: string (optional) The name of the user's application. + server: string (optional) The name of the server to which a connection + will be opened. + **kwargs: The other parameters to pass to gdata.service.GDataService + constructor. + """ + + gdata.service.GDataService.__init__( + self, email=email, password=password, service='analytics', + source=source, server=server, additional_headers=additional_headers, + **kwargs) + + def QueryAccountListFeed(self, uri): + """Retrieves an AccountListFeed by retrieving a URI based off the Document + List feed, including any query parameters. An AccountListFeed object + can be used to construct these parameters. + + Args: + uri: string The URI of the feed being retrieved possibly with query + parameters. + + Returns: + An AccountListFeed object representing the feed returned by the server. + """ + return self.Get(uri, converter=gdata.analytics.AccountListFeedFromString) + + def GetAccountListEntry(self, uri): + """Retrieves a particular AccountListEntry by its unique URI. + + Args: + uri: string The unique URI of an entry in an Account List feed. + + Returns: + An AccountLisFeed object representing the retrieved entry. + """ + return self.Get(uri, converter=gdata.analytics.AccountListEntryFromString) + + def GetAccountList(self, max_results=1000, text_query=None, + params=None, categories=None): + """Retrieves a feed containing all of a user's accounts and profiles.""" + q = gdata.analytics.service.AccountQuery(max_results=max_results, + text_query=text_query, + params=params, + categories=categories); + return self.QueryAccountListFeed(q.ToUri()) + + + + +class AnalyticsDataService(gdata.service.GDataService): + + """Client extension for the Google Analytics service Data List feed.""" + + def __init__(self, email=None, password=None, source=None, + server='www.google.com/analytics', additional_headers=None, + **kwargs): + """Creates a client for the Google Analytics service. + + Args: + email: string (optional) The user's email address, used for + authentication. + password: string (optional) The user's password. + source: string (optional) The name of the user's application. + server: string (optional) The name of the server to which a connection + will be opened. Default value: 'docs.google.com'. + **kwargs: The other parameters to pass to gdata.service.GDataService + constructor. + """ + + gdata.service.GDataService.__init__(self, + email=email, password=password, service='analytics', source=source, + server=server, additional_headers=additional_headers, **kwargs) + + def GetData(self, ids='', dimensions='', metrics='', + sort='', filters='', start_date='', + end_date='', start_index='', + max_results=''): + """Retrieves a feed containing a user's data + + ids: comma-separated string of analytics accounts. + dimensions: comma-separated string of dimensions. + metrics: comma-separated string of metrics. 
+ sort: comma-separated string of dimensions and metrics for sorting. + This may be previxed with a minus to sort in reverse order. + (e.g. '-ga:keyword') + If ommited, the first dimension passed in will be used. + filters: comma-separated string of filter parameters. + (e.g. 'ga:keyword==google') + start_date: start date for data pull. + end_date: end date for data pull. + start_index: used in combination with max_results to pull more than 1000 + entries. This defaults to 1. + max_results: maximum results that the pull will return. This defaults + to, and maxes out at 1000. + """ + q = gdata.analytics.service.DataQuery(ids=ids, + dimensions=dimensions, + metrics=metrics, + filters=filters, + sort=sort, + start_date=start_date, + end_date=end_date, + start_index=start_index, + max_results=max_results); + return self.AnalyticsDataFeed(q.ToUri()) + + def AnalyticsDataFeed(self, uri): + """Retrieves an AnalyticsListFeed by retrieving a URI based off the + Document List feed, including any query parameters. An + AnalyticsListFeed object can be used to construct these parameters. + + Args: + uri: string The URI of the feed being retrieved possibly with query + parameters. + + Returns: + An AnalyticsListFeed object representing the feed returned by the + server. + """ + return self.Get(uri, + converter=gdata.analytics.AnalyticsDataFeedFromString) + + """ + Account Fetching + """ + + def QueryAccountListFeed(self, uri): + """Retrieves an Account ListFeed by retrieving a URI based off the Account + List feed, including any query parameters. A AccountQuery object can + be used to construct these parameters. + + Args: + uri: string The URI of the feed being retrieved possibly with query + parameters. + + Returns: + An AccountListFeed object representing the feed returned by the server. + """ + return self.Get(uri, converter=gdata.analytics.AccountListFeedFromString) + + def GetAccountListEntry(self, uri): + """Retrieves a particular AccountListEntry by its unique URI. + + Args: + uri: string The unique URI of an entry in an Account List feed. + + Returns: + An AccountListEntry object representing the retrieved entry. + """ + return self.Get(uri, converter=gdata.analytics.AccountListEntryFromString) + + def GetAccountList(self, username="default", max_results=1000, + start_index=1): + """Retrieves a feed containing all of a user's accounts and profiles. + The username parameter is soon to be deprecated, with 'default' + becoming the only allowed parameter. + """ + if not username: + raise Exception("username is a required parameter") + q = gdata.analytics.service.AccountQuery(username=username, + max_results=max_results, + start_index=start_index); + return self.QueryAccountListFeed(q.ToUri()) + +class DataQuery(gdata.service.Query): + """Object used to construct a URI to a data feed""" + def __init__(self, feed='/feeds/data', text_query=None, + params=None, categories=None, ids="", + dimensions="", metrics="", sort="", filters="", + start_date="", end_date="", start_index="", + max_results=""): + """Constructor for Analytics List Query + + Args: + feed: string (optional) The path for the feed. (e.g. '/feeds/data') + + text_query: string (optional) The contents of the q query parameter. + This string is URL escaped upon conversion to a URI. + params: dict (optional) Parameter value string pairs which become URL + params when translated to a URI. These parameters are added to + the query's items. + categories: list (optional) List of category strings which should be + included as query categories. 
See gdata.service.Query for + additional documentation. + ids: comma-separated string of analytics accounts. + dimensions: comma-separated string of dimensions. + metrics: comma-separated string of metrics. + sort: comma-separated string of dimensions and metrics. + This may be previxed with a minus to sort in reverse order + (e.g. '-ga:keyword'). + If ommited, the first dimension passed in will be used. + filters: comma-separated string of filter parameters + (e.g. 'ga:keyword==google'). + start_date: start date for data pull. + end_date: end date for data pull. + start_index: used in combination with max_results to pull more than 1000 + entries. This defaults to 1. + max_results: maximum results that the pull will return. This defaults + to, and maxes out at 1000. + + Yields: + A DocumentQuery object used to construct a URI based on the Document + List feed. + """ + self.elements = {'ids': ids, + 'dimensions': dimensions, + 'metrics': metrics, + 'sort': sort, + 'filters': filters, + 'start-date': start_date, + 'end-date': end_date, + 'start-index': start_index, + 'max-results': max_results} + + gdata.service.Query.__init__(self, feed, text_query, params, categories) + + def ToUri(self): + """Generates a URI from the query parameters set in the object. + + Returns: + A string containing the URI used to retrieve entries from the Analytics + List feed. + """ + old_feed = self.feed + self.feed = '/'.join([old_feed]) + '?' + \ + urllib.urlencode(dict([(key, value) for key, value in \ + self.elements.iteritems() if value])) + new_feed = gdata.service.Query.ToUri(self) + self.feed = old_feed + return new_feed + + +class AccountQuery(gdata.service.Query): + """Object used to construct a URI to query the Google Account List feed""" + def __init__(self, feed='/feeds/accounts', start_index=1, + max_results=1000, username='default', text_query=None, + params=None, categories=None): + """Constructor for Account List Query + + Args: + feed: string (optional) The path for the feed. (e.g. '/feeds/documents') + visibility: string (optional) The visibility chosen for the current + feed. + projection: string (optional) The projection chosen for the current + feed. + text_query: string (optional) The contents of the q query parameter. + This string is URL escaped upon conversion to a URI. + params: dict (optional) Parameter value string pairs which become URL + params when translated to a URI. These parameters are added to + the query's items. + categories: list (optional) List of category strings which should be + included as query categories. See gdata.service.Query for + additional documentation. + username: string (deprecated) This value should now always be passed as + 'default'. + + Yields: + A DocumentQuery object used to construct a URI based on the Document + List feed. + """ + self.max_results = max_results + self.start_index = start_index + self.username = username + gdata.service.Query.__init__(self, feed, text_query, params, categories) + + def ToUri(self): + """Generates a URI from the query parameters set in the object. + + Returns: + A string containing the URI used to retrieve entries from the Account + List feed. + """ + old_feed = self.feed + self.feed = '/'.join([old_feed, self.username]) + '?' 
+ \ + '&'.join(['max-results=' + str(self.max_results), + 'start-index=' + str(self.start_index)]) + new_feed = self.feed + self.feed = old_feed + return new_feed diff --git a/gam/gdata/analytics/apps/__init__.py b/gam/gdata/analytics/apps/__init__.py new file mode 100755 index 00000000000..ebdf98ec9af --- /dev/null +++ b/gam/gdata/analytics/apps/__init__.py @@ -0,0 +1,526 @@ +#!/usr/bin/python +# +# Copyright (C) 2007 SIOS Technology, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Contains objects used with Google Apps.""" + +__author__ = 'tmatsuo@sios.com (Takashi MATSUO)' + + +import atom +import gdata + + +# XML namespaces which are often used in Google Apps entity. +APPS_NAMESPACE = 'http://schemas.google.com/apps/2006' +APPS_TEMPLATE = '{http://schemas.google.com/apps/2006}%s' + + +class EmailList(atom.AtomBase): + """The Google Apps EmailList element""" + + _tag = 'emailList' + _namespace = APPS_NAMESPACE + _children = atom.AtomBase._children.copy() + _attributes = atom.AtomBase._attributes.copy() + _attributes['name'] = 'name' + + def __init__(self, name=None, extension_elements=None, + extension_attributes=None, text=None): + self.name = name + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + +def EmailListFromString(xml_string): + return atom.CreateClassFromXMLString(EmailList, xml_string) + + +class Who(atom.AtomBase): + """The Google Apps Who element""" + + _tag = 'who' + _namespace = gdata.GDATA_NAMESPACE + _children = atom.AtomBase._children.copy() + _attributes = atom.AtomBase._attributes.copy() + _attributes['rel'] = 'rel' + _attributes['email'] = 'email' + + def __init__(self, rel=None, email=None, extension_elements=None, + extension_attributes=None, text=None): + self.rel = rel + self.email = email + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + +def WhoFromString(xml_string): + return atom.CreateClassFromXMLString(Who, xml_string) + + +class Login(atom.AtomBase): + """The Google Apps Login element""" + + _tag = 'login' + _namespace = APPS_NAMESPACE + _children = atom.AtomBase._children.copy() + _attributes = atom.AtomBase._attributes.copy() + _attributes['userName'] = 'user_name' + _attributes['password'] = 'password' + _attributes['suspended'] = 'suspended' + _attributes['admin'] = 'admin' + _attributes['changePasswordAtNextLogin'] = 'change_password' + _attributes['agreedToTerms'] = 'agreed_to_terms' + _attributes['ipWhitelisted'] = 'ip_whitelisted' + _attributes['hashFunctionName'] = 'hash_function_name' + + def __init__(self, user_name=None, password=None, suspended=None, + ip_whitelisted=None, hash_function_name=None, + admin=None, change_password=None, agreed_to_terms=None, + extension_elements=None, extension_attributes=None, + text=None): + self.user_name = user_name + self.password = password + self.suspended = suspended + self.admin = admin + self.change_password = change_password + 
self.agreed_to_terms = agreed_to_terms + self.ip_whitelisted = ip_whitelisted + self.hash_function_name = hash_function_name + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + +def LoginFromString(xml_string): + return atom.CreateClassFromXMLString(Login, xml_string) + + +class Quota(atom.AtomBase): + """The Google Apps Quota element""" + + _tag = 'quota' + _namespace = APPS_NAMESPACE + _children = atom.AtomBase._children.copy() + _attributes = atom.AtomBase._attributes.copy() + _attributes['limit'] = 'limit' + + def __init__(self, limit=None, extension_elements=None, + extension_attributes=None, text=None): + self.limit = limit + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + +def QuotaFromString(xml_string): + return atom.CreateClassFromXMLString(Quota, xml_string) + + +class Name(atom.AtomBase): + """The Google Apps Name element""" + + _tag = 'name' + _namespace = APPS_NAMESPACE + _children = atom.AtomBase._children.copy() + _attributes = atom.AtomBase._attributes.copy() + _attributes['familyName'] = 'family_name' + _attributes['givenName'] = 'given_name' + + def __init__(self, family_name=None, given_name=None, + extension_elements=None, extension_attributes=None, text=None): + self.family_name = family_name + self.given_name = given_name + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + +def NameFromString(xml_string): + return atom.CreateClassFromXMLString(Name, xml_string) + + +class Nickname(atom.AtomBase): + """The Google Apps Nickname element""" + + _tag = 'nickname' + _namespace = APPS_NAMESPACE + _children = atom.AtomBase._children.copy() + _attributes = atom.AtomBase._attributes.copy() + _attributes['name'] = 'name' + + def __init__(self, name=None, + extension_elements=None, extension_attributes=None, text=None): + self.name = name + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + +def NicknameFromString(xml_string): + return atom.CreateClassFromXMLString(Nickname, xml_string) + + +class NicknameEntry(gdata.GDataEntry): + """A Google Apps flavor of an Atom Entry for Nickname""" + + _tag = 'entry' + _namespace = atom.ATOM_NAMESPACE + _children = gdata.GDataEntry._children.copy() + _attributes = gdata.GDataEntry._attributes.copy() + _children['{%s}login' % APPS_NAMESPACE] = ('login', Login) + _children['{%s}nickname' % APPS_NAMESPACE] = ('nickname', Nickname) + + def __init__(self, author=None, category=None, content=None, + atom_id=None, link=None, published=None, + title=None, updated=None, + login=None, nickname=None, + extended_property=None, + extension_elements=None, extension_attributes=None, text=None): + + gdata.GDataEntry.__init__(self, author=author, category=category, + content=content, + atom_id=atom_id, link=link, published=published, + title=title, updated=updated) + self.login = login + self.nickname = nickname + self.extended_property = extended_property or [] + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + +def NicknameEntryFromString(xml_string): + return atom.CreateClassFromXMLString(NicknameEntry, xml_string) + + +class NicknameFeed(gdata.GDataFeed, gdata.LinkFinder): + """A Google Apps Nickname feed flavor of an Atom Feed""" + + _tag = 'feed' + 
_namespace = atom.ATOM_NAMESPACE + _children = gdata.GDataFeed._children.copy() + _attributes = gdata.GDataFeed._attributes.copy() + _children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry', [NicknameEntry]) + + def __init__(self, author=None, category=None, contributor=None, + generator=None, icon=None, atom_id=None, link=None, logo=None, + rights=None, subtitle=None, title=None, updated=None, + entry=None, total_results=None, start_index=None, + items_per_page=None, extension_elements=None, + extension_attributes=None, text=None): + gdata.GDataFeed.__init__(self, author=author, category=category, + contributor=contributor, generator=generator, + icon=icon, atom_id=atom_id, link=link, + logo=logo, rights=rights, subtitle=subtitle, + title=title, updated=updated, entry=entry, + total_results=total_results, + start_index=start_index, + items_per_page=items_per_page, + extension_elements=extension_elements, + extension_attributes=extension_attributes, + text=text) + + +def NicknameFeedFromString(xml_string): + return atom.CreateClassFromXMLString(NicknameFeed, xml_string) + + +class UserEntry(gdata.GDataEntry): + """A Google Apps flavor of an Atom Entry""" + + _tag = 'entry' + _namespace = atom.ATOM_NAMESPACE + _children = gdata.GDataEntry._children.copy() + _attributes = gdata.GDataEntry._attributes.copy() + _children['{%s}login' % APPS_NAMESPACE] = ('login', Login) + _children['{%s}name' % APPS_NAMESPACE] = ('name', Name) + _children['{%s}quota' % APPS_NAMESPACE] = ('quota', Quota) + # This child may already be defined in GDataEntry, confirm before removing. + _children['{%s}feedLink' % gdata.GDATA_NAMESPACE] = ('feed_link', + [gdata.FeedLink]) + _children['{%s}who' % gdata.GDATA_NAMESPACE] = ('who', Who) + + def __init__(self, author=None, category=None, content=None, + atom_id=None, link=None, published=None, + title=None, updated=None, + login=None, name=None, quota=None, who=None, feed_link=None, + extended_property=None, + extension_elements=None, extension_attributes=None, text=None): + + gdata.GDataEntry.__init__(self, author=author, category=category, + content=content, + atom_id=atom_id, link=link, published=published, + title=title, updated=updated) + self.login = login + self.name = name + self.quota = quota + self.who = who + self.feed_link = feed_link or [] + self.extended_property = extended_property or [] + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + +def UserEntryFromString(xml_string): + return atom.CreateClassFromXMLString(UserEntry, xml_string) + + +class UserFeed(gdata.GDataFeed, gdata.LinkFinder): + """A Google Apps User feed flavor of an Atom Feed""" + + _tag = 'feed' + _namespace = atom.ATOM_NAMESPACE + _children = gdata.GDataFeed._children.copy() + _attributes = gdata.GDataFeed._attributes.copy() + _children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry', [UserEntry]) + + def __init__(self, author=None, category=None, contributor=None, + generator=None, icon=None, atom_id=None, link=None, logo=None, + rights=None, subtitle=None, title=None, updated=None, + entry=None, total_results=None, start_index=None, + items_per_page=None, extension_elements=None, + extension_attributes=None, text=None): + gdata.GDataFeed.__init__(self, author=author, category=category, + contributor=contributor, generator=generator, + icon=icon, atom_id=atom_id, link=link, + logo=logo, rights=rights, subtitle=subtitle, + title=title, updated=updated, entry=entry, + total_results=total_results, + 
start_index=start_index, + items_per_page=items_per_page, + extension_elements=extension_elements, + extension_attributes=extension_attributes, + text=text) + + +def UserFeedFromString(xml_string): + return atom.CreateClassFromXMLString(UserFeed, xml_string) + + +class EmailListEntry(gdata.GDataEntry): + """A Google Apps EmailList flavor of an Atom Entry""" + + _tag = 'entry' + _namespace = atom.ATOM_NAMESPACE + _children = gdata.GDataEntry._children.copy() + _attributes = gdata.GDataEntry._attributes.copy() + _children['{%s}emailList' % APPS_NAMESPACE] = ('email_list', EmailList) + # Might be able to remove this _children entry. + _children['{%s}feedLink' % gdata.GDATA_NAMESPACE] = ('feed_link', + [gdata.FeedLink]) + + def __init__(self, author=None, category=None, content=None, + atom_id=None, link=None, published=None, + title=None, updated=None, + email_list=None, feed_link=None, + extended_property=None, + extension_elements=None, extension_attributes=None, text=None): + + gdata.GDataEntry.__init__(self, author=author, category=category, + content=content, + atom_id=atom_id, link=link, published=published, + title=title, updated=updated) + self.email_list = email_list + self.feed_link = feed_link or [] + self.extended_property = extended_property or [] + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + +def EmailListEntryFromString(xml_string): + return atom.CreateClassFromXMLString(EmailListEntry, xml_string) + + +class EmailListFeed(gdata.GDataFeed, gdata.LinkFinder): + """A Google Apps EmailList feed flavor of an Atom Feed""" + + _tag = 'feed' + _namespace = atom.ATOM_NAMESPACE + _children = gdata.GDataFeed._children.copy() + _attributes = gdata.GDataFeed._attributes.copy() + _children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry', [EmailListEntry]) + + def __init__(self, author=None, category=None, contributor=None, + generator=None, icon=None, atom_id=None, link=None, logo=None, + rights=None, subtitle=None, title=None, updated=None, + entry=None, total_results=None, start_index=None, + items_per_page=None, extension_elements=None, + extension_attributes=None, text=None): + gdata.GDataFeed.__init__(self, author=author, category=category, + contributor=contributor, generator=generator, + icon=icon, atom_id=atom_id, link=link, + logo=logo, rights=rights, subtitle=subtitle, + title=title, updated=updated, entry=entry, + total_results=total_results, + start_index=start_index, + items_per_page=items_per_page, + extension_elements=extension_elements, + extension_attributes=extension_attributes, + text=text) + + +def EmailListFeedFromString(xml_string): + return atom.CreateClassFromXMLString(EmailListFeed, xml_string) + + +class EmailListRecipientEntry(gdata.GDataEntry): + """A Google Apps EmailListRecipient flavor of an Atom Entry""" + + _tag = 'entry' + _namespace = atom.ATOM_NAMESPACE + _children = gdata.GDataEntry._children.copy() + _attributes = gdata.GDataEntry._attributes.copy() + _children['{%s}who' % gdata.GDATA_NAMESPACE] = ('who', Who) + + def __init__(self, author=None, category=None, content=None, + atom_id=None, link=None, published=None, + title=None, updated=None, + who=None, + extended_property=None, + extension_elements=None, extension_attributes=None, text=None): + + gdata.GDataEntry.__init__(self, author=author, category=category, + content=content, + atom_id=atom_id, link=link, published=published, + title=title, updated=updated) + self.who = who + self.extended_property = 
extended_property or [] + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + +def EmailListRecipientEntryFromString(xml_string): + return atom.CreateClassFromXMLString(EmailListRecipientEntry, xml_string) + + +class EmailListRecipientFeed(gdata.GDataFeed, gdata.LinkFinder): + """A Google Apps EmailListRecipient feed flavor of an Atom Feed""" + + _tag = 'feed' + _namespace = atom.ATOM_NAMESPACE + _children = gdata.GDataFeed._children.copy() + _attributes = gdata.GDataFeed._attributes.copy() + _children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry', + [EmailListRecipientEntry]) + + def __init__(self, author=None, category=None, contributor=None, + generator=None, icon=None, atom_id=None, link=None, logo=None, + rights=None, subtitle=None, title=None, updated=None, + entry=None, total_results=None, start_index=None, + items_per_page=None, extension_elements=None, + extension_attributes=None, text=None): + gdata.GDataFeed.__init__(self, author=author, category=category, + contributor=contributor, generator=generator, + icon=icon, atom_id=atom_id, link=link, + logo=logo, rights=rights, subtitle=subtitle, + title=title, updated=updated, entry=entry, + total_results=total_results, + start_index=start_index, + items_per_page=items_per_page, + extension_elements=extension_elements, + extension_attributes=extension_attributes, + text=text) + + +def EmailListRecipientFeedFromString(xml_string): + return atom.CreateClassFromXMLString(EmailListRecipientFeed, xml_string) + + +class Property(atom.AtomBase): + """The Google Apps Property element""" + + _tag = 'property' + _namespace = APPS_NAMESPACE + _children = atom.AtomBase._children.copy() + _attributes = atom.AtomBase._attributes.copy() + _attributes['name'] = 'name' + _attributes['value'] = 'value' + + def __init__(self, name=None, value=None, extension_elements=None, + extension_attributes=None, text=None): + self.name = name + self.value = value + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + +def PropertyFromString(xml_string): + return atom.CreateClassFromXMLString(Property, xml_string) + + +class PropertyEntry(gdata.GDataEntry): + """A Google Apps Property flavor of an Atom Entry""" + + _tag = 'entry' + _namespace = atom.ATOM_NAMESPACE + _children = gdata.GDataEntry._children.copy() + _attributes = gdata.GDataEntry._attributes.copy() + _children['{%s}property' % APPS_NAMESPACE] = ('property', [Property]) + + def __init__(self, author=None, category=None, content=None, + atom_id=None, link=None, published=None, + title=None, updated=None, + property=None, + extended_property=None, + extension_elements=None, extension_attributes=None, text=None): + + gdata.GDataEntry.__init__(self, author=author, category=category, + content=content, + atom_id=atom_id, link=link, published=published, + title=title, updated=updated) + self.property = property + self.extended_property = extended_property or [] + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + +def PropertyEntryFromString(xml_string): + return atom.CreateClassFromXMLString(PropertyEntry, xml_string) + +class PropertyFeed(gdata.GDataFeed, gdata.LinkFinder): + """A Google Apps Property feed flavor of an Atom Feed""" + + _tag = 'feed' + _namespace = atom.ATOM_NAMESPACE + _children = gdata.GDataFeed._children.copy() + _attributes = 
gdata.GDataFeed._attributes.copy() + _children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry', [PropertyEntry]) + + def __init__(self, author=None, category=None, contributor=None, + generator=None, icon=None, atom_id=None, link=None, logo=None, + rights=None, subtitle=None, title=None, updated=None, + entry=None, total_results=None, start_index=None, + items_per_page=None, extension_elements=None, + extension_attributes=None, text=None): + gdata.GDataFeed.__init__(self, author=author, category=category, + contributor=contributor, generator=generator, + icon=icon, atom_id=atom_id, link=link, + logo=logo, rights=rights, subtitle=subtitle, + title=title, updated=updated, entry=entry, + total_results=total_results, + start_index=start_index, + items_per_page=items_per_page, + extension_elements=extension_elements, + extension_attributes=extension_attributes, + text=text) + +def PropertyFeedFromString(xml_string): + return atom.CreateClassFromXMLString(PropertyFeed, xml_string) diff --git a/gam/gdata/analytics/apps/__init__.pyc b/gam/gdata/analytics/apps/__init__.pyc new file mode 100755 index 0000000000000000000000000000000000000000..160f66f5268d2857c0b59cb3c0f725f1c49f0d70 GIT binary patch literal 18739 zcmeHP+ix6K89(cFocMlAb92&cZtbRRoHV_MP~x~wC?sxr;@;epVX`w`kFz)SjGJ0@ zB}AjD4~Pd;2_(uBmnV>T1SFn$LIQ~=B>n`1fJYwS_xon&vg?iO%WPCbZO^x7&V2Ko z%bf3be&;*0&i-q#@9BN^Y(X`CviQ7&!+g@El%v!c_e~#R?TOX z)2-&am9tCD?@~^Wa=X-0k6M8KZgo$o50!drez$V=@MM0Ea(WXQeUV15a{5K1PdNh# zjloExUpYgfF`%5`gvLmuF{qqT(HK(BSVChw(im3GUeOp)&O-@}eUZkfa`ubHm~sv% zH>;M$r7Z^|jlIe_BpMGXXF@dgiQKOi&?$!_y#vZQB6*+`%ubgK@ zwr`fJmyB06lZ9$Im!Grm*k;koU7wn}`PwU|z5n26)|`OuqVLx(oIPs_i*DIAlSQ$` zo6g#>clP|bbI(fo?{7fygHy)uh0U3=?Uk;1#*Z9r7jiG-Fz-OPa1OW(BnA=OEvsC( z)h^}ssHNR%X^&d!r7OXK8aaJo4jiUSxgsw*@WbdYtM0+XfYLYfEJ}J|ZqZG4O)jcc z&MmoRx8h4((dwYCem8Ec*k!js3p|)Hjl(<*v6@k-S>Qd3}^UsD|kc|t5F~0?`_JqeZTJAUiMuhjgq^K zTUroXTCB2~It!7<5eRbzsp$*1f+u-~Pw`CIuGmGleyix%zI}^Xw=B!6c)n##)>iyI zxFbBLA;S#S)tl+f3}y+U2sL!_N!&FJ%z+&+qm*jtiqyKa_V?SG*_sij{US7_lR7W ztyWyk!sFIUYV%#7VMHCpVMZW&GCf^5GGqAdL#n{%$b&sCBZ6AKQcD@-A@kMSw_Nb=b1~G?J(7ap1IT~&WWil$mv&&986kpPLUUZ zT})-wMa-2=f8FXH>RoqBK1aOxY=O7);1HR{VZICz zyQw@E3vRkA;h-l&f6da5Yfpv#3LuI7Rh;S|jOjxZtU>H;Qftj}b*#cF=u~`BhBF&t z=wpEo242V^rRkWO5UY7v(9!cPIwZWcx->0Rc?%JJ}bd%e98~`mkjMCB-}LTWzNP3 z;UxchgtE%wlKjgYhG?BhS-WtU2WvfI`GQ^cN-M$qL&6rEQ&(7j(R>6q-(BFpIKH!PrJnD9j^@|y7V-2{CRbb#MG@X*G zkOOg$#Gdj4*16#+?8RBHaHm3cgS7lS=!qbQQr9?0qf()h2&=3#GR0#$GZV01TE}@j z6Ufz+hxkuX;^%WFcpf$T5o@=0Cdh7UfkwkA4R88hIDq3J{tMQ+F~o@$p~g(budhVo zoDAtQH9v)DJ1cB1mNCFEw~KH+q8Wo~pDf*m!aT$)B`-Nste?g_GNk5582XTTgrG3h z6rx-x+3!~Cx#~jBuH+aI%Si)s3&;$cEYt~+vHFUZfE8zs_fMG*WaXw`VDrhRnH9QM zfgQtxojZ-gd>0~!UoAkiL}O^SN4<*euqxR;^yGxpjY~6++mX0hwg5M!!h$6^cs~ zQxqJDn!T-GqTs`l0`wK8QU;yh!lq%cMD3oC$mO6cz=k2Y^pc2AyBU_VNf4@?d*ym- z*od8_XDA35iLen@HNoUe&rdUo^d;E)6%NDDTQZ8YH?vm)(#Xqf53E>3N?^snROpv7 zmSV*E0~^smEc#=pb_F4h2(b9T1h9ULL@Iz)%!K$PyDGptz*b8LcH^fA){d+Bp(t3p z7O}!+iX>TR1Te?y3C!^!1W{hLs1>%p&b(_B z*C{Sh2!PEp^$NueiWtgRSN$r5rnpIwGM@bfHiKYN&qQWG;>ct0{P1@+F93W$MAKJU zC0SMohRyMGghGIj)1W2{q={TF!``28m;(?=RaKy0n zH@2VwTO2>HA?_>47GV}UPcVzBsbJOyNEYJb!y&LCLN3G$+r!v_8c{Li1$$DXguFiG zj)gZMd=5p)1^{gKZ$RA}Rr7m^fpk7|3SvX7P0+3#M`QQO0zo($7Haw|bJ*R?2?At1 zBW7CUHxZ*_&NXCc4l={7Pu_w3u>v1UcnFUE5*gdU(fd+7l0J|-fGQFocVKhn1i2eh zwbk$xToLFBHDL0vs(ljX1Dr=x?I$4z1Vl$1FoxUp`Ba{hs^DC}P}mm%$71(Nk4h)$ zH+U+POOP?bsl1>78Rtfe{0#GvW#NjP&$hZtC0NZ3XJJ9jj4s%e4|yH;!`g-zIaFIw zaD66so_9<_$P7hPkb!oX+2lECV&&_w_YWNAI7AXF!ScZKvYA7f{SCG^yiK;pqEbT5 zahQtmFow*1d{7*82*Pthq6)h&LQGD&mvNXK!tyu>nh}moT94uBui=&*&hB(ac#vRD 
z%t44!`5i;TPH<$)j$Wro7`-!1V3~)#L#%bDNEi!92~Ifwgjf-y$+HPGA&5t>^E9(C z(qF-ppCQp976#HZlwel~cp+z)>}vfuSMq8Q^BRp}USvAW*b+6c&FWFwEVv>u8+q>s zgvj`Y+@91&viPc@%=2q_(%Q(^_il(2?y7&)4FRg5@qB@l;h73uA062l$L`vMKv zSsS!rd%s{Z=(n~JogK|&@D+^?GZ{K-noP!my?0paPB9r4kP^CZdDw&+F}SQGz@e*O2lEulX2>lz9!kS3 zu(dom-A&^#A3}7Doge@ngl<4nmO?rb?9Q!(bOhXO^Pw1-sAZ8OQ?Z0%IBeU7UD)2% z5cK*xXR)c2NytYWob3B%5)x1l2^VjNf=JMW{2)zOuwn0mcCZk}OS0RHm#~18VBr`{ zMX(ULNDKS;Asf>LO+-())LeVRj;0|O(WK7?jW77f=69HOG#~jnM}0>lEA-k5JctWpm*)kvfJUWN3k$nq!2%hCg#M~qhgil zYC1b3zvqMF>$J(QM{(uab|*ykiR7v<4+(^{Yz3zn7yxBezhnJKD}um4{0`~%z6C|o zDY*wL7Y?S_A^w=bPomPzlWo|9^?CUM?u$Z)p+6NkTzLEs zl5;jVC2}5{O4-Z*8hTMj$T^)Uh!3Xd{2q+{nbmGxv!_MQSwKnh#AHp zIALS4pMQcqL>4DodzKZv_ib6hm(?YjMDUS!BIW-`%*|ZCacydDCio&dE-B5Df|pZ{ zFeR@gpJ9qjKtD-Azt_)EFn*^mP)L+Y-v9e5Pv;<{5`5w13g5G=>z8@qHHz0M_}x~` zy%Q!4#vY`kNFs#cc{MAhbgvv&G9I{&(Clbkg2>}Yh%NlP2d-K!2LFZQ z#ZjcqECl{_MRp)F&<*mLJvh?aeQ*rNaU7#K#v7kUdb^JvM*39a^JwF90-v}hdK10b HqZ9uHFJaZT literal 0 HcmV?d00001 diff --git a/gam/gdata/analytics/apps/adminsettings/__init__.py b/gam/gdata/analytics/apps/adminsettings/__init__.py new file mode 100755 index 00000000000..d284c7cee33 --- /dev/null +++ b/gam/gdata/analytics/apps/adminsettings/__init__.py @@ -0,0 +1,16 @@ +#!/usr/bin/python +# +# Copyright (C) 2008 Google +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + diff --git a/gam/gdata/analytics/apps/adminsettings/__init__.pyc b/gam/gdata/analytics/apps/adminsettings/__init__.pyc new file mode 100755 index 0000000000000000000000000000000000000000..efe8665db3224632e8a29e19871bc3d5aa48a83d GIT binary patch literal 146 zcmZSn%*$mWmgtwv00oRd+5w1*S%5?e14FO|NW@PANHCxg#nwPEXRDa>{QUHsRNcga zf@0m=#Jt4x)S{U5l*E$67?4m*VoGjiUU6zkNoHPpaZG%CW?p7Ve7s&kWeEpRzfEp_ PN@-529mtkqAZ7pnhIb&d literal 0 HcmV?d00001 diff --git a/gam/gdata/analytics/apps/adminsettings/service.py b/gam/gdata/analytics/apps/adminsettings/service.py new file mode 100755 index 00000000000..c69fa36bc90 --- /dev/null +++ b/gam/gdata/analytics/apps/adminsettings/service.py @@ -0,0 +1,471 @@ +#!/usr/bin/python +# +# Copyright (C) 2008 Google, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Allow Google Apps domain administrators to set domain admin settings. 
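+
+  A minimal usage sketch (hypothetical admin credentials and domain; assumes
+  the usual gdata.apps.service.PropertyService constructor arguments and the
+  ProgrammaticLogin method inherited from gdata.service.GDataService):
+
+    service = AdminSettingsService(email='admin@example.com',
+                                   password='secret', domain='example.com')
+    service.ProgrammaticLogin()
+    print service.GetOrganizationName()
+    print service.GetDefaultLanguage()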
+ + AdminSettingsService: Set admin settings.""" + +__author__ = 'jlee@pbu.edu' + + +import gdata.apps +import gdata.apps.service +import gdata.service + + +API_VER='2.0' + +class AdminSettingsService(gdata.apps.service.PropertyService): + """Client for the Google Apps Admin Settings service.""" + + def _serviceUrl(self, setting_id, domain=None): + if domain is None: + domain = self.domain + return '/a/feeds/domain/%s/%s/%s' % (API_VER, domain, setting_id) + + def genericGet(self, location): + """Generic HTTP Get Wrapper + + Args: + location: relative uri to Get + + Returns: + A dict containing the result of the get operation.""" + + uri = self._serviceUrl(location) + try: + return self._GetProperties(uri) + except gdata.service.RequestError, e: + raise AppsForYourDomainException(e.args[0]) + + def GetDefaultLanguage(self): + """Gets Domain Default Language + + Args: + None + + Returns: + Default Language as a string. All possible values are listed at: + http://code.google.com/apis/apps/email_settings/developers_guide_protocol.html#GA_email_language_tags""" + + result = self.genericGet('general/defaultLanguage') + return result['defaultLanguage'] + + def UpdateDefaultLanguage(self, defaultLanguage): + """Updates Domain Default Language + + Args: + defaultLanguage: Domain Language to set + possible values are at: + http://code.google.com/apis/apps/email_settings/developers_guide_protocol.html#GA_email_language_tags + + Returns: + A dict containing the result of the put operation""" + + uri = self._serviceUrl('general/defaultLanguage') + properties = {'defaultLanguage': defaultLanguage} + return self._PutProperties(uri, properties) + + def GetOrganizationName(self): + """Gets Domain Default Language + + Args: + None + + Returns: + Organization Name as a string.""" + + result = self.genericGet('general/organizationName') + return result['organizationName'] + + + def UpdateOrganizationName(self, organizationName): + """Updates Organization Name + + Args: + organizationName: Name of organization + + Returns: + A dict containing the result of the put operation""" + + uri = self._serviceUrl('general/organizationName') + properties = {'organizationName': organizationName} + return self._PutProperties(uri, properties) + + def GetMaximumNumberOfUsers(self): + """Gets Maximum Number of Users Allowed + + Args: + None + + Returns: An integer, the maximum number of users""" + + result = self.genericGet('general/maximumNumberOfUsers') + return int(result['maximumNumberOfUsers']) + + def GetCurrentNumberOfUsers(self): + """Gets Current Number of Users + + Args: + None + + Returns: An integer, the current number of users""" + + result = self.genericGet('general/currentNumberOfUsers') + return int(result['currentNumberOfUsers']) + + def IsDomainVerified(self): + """Is the domain verified + + Args: + None + + Returns: Boolean, is domain verified""" + + result = self.genericGet('accountInformation/isVerified') + if result['isVerified'] == 'true': + return True + else: + return False + + def GetSupportPIN(self): + """Gets Support PIN + + Args: + None + + Returns: A string, the Support PIN""" + + result = self.genericGet('accountInformation/supportPIN') + return result['supportPIN'] + + def GetEdition(self): + """Gets Google Apps Domain Edition + + Args: + None + + Returns: A string, the domain's edition (premier, education, partner)""" + + result = self.genericGet('accountInformation/edition') + return result['edition'] + + def GetCustomerPIN(self): + """Gets Customer PIN + + Args: + None + + Returns: A 
string, the customer PIN"""
+
+    result = self.genericGet('accountInformation/customerPIN')
+    return result['customerPIN']
+
+  def GetCreationTime(self):
+    """Gets Domain Creation Time
+
+    Args:
+      None
+
+    Returns: A string, the domain's creation time"""
+
+    result = self.genericGet('accountInformation/creationTime')
+    return result['creationTime']
+
+  def GetCountryCode(self):
+    """Gets Domain Country Code
+
+    Args:
+      None
+
+    Returns: A string, the domain's country code. Possible values at:
+      http://www.iso.org/iso/country_codes/iso_3166_code_lists/english_country_names_and_code_elements.htm"""
+
+    result = self.genericGet('accountInformation/countryCode')
+    return result['countryCode']
+
+  def GetAdminSecondaryEmail(self):
+    """Gets Domain Admin Secondary Email Address
+
+    Args:
+      None
+
+    Returns: A string, the secondary email address for domain admin"""
+
+    result = self.genericGet('accountInformation/adminSecondaryEmail')
+    return result['adminSecondaryEmail']
+
+  def UpdateAdminSecondaryEmail(self, adminSecondaryEmail):
+    """Updates Domain Admin Secondary Email Address
+
+    Args:
+      adminSecondaryEmail: string, secondary email address of admin
+
+    Returns: A dict containing the result of the put operation"""
+
+    uri = self._serviceUrl('accountInformation/adminSecondaryEmail')
+    properties = {'adminSecondaryEmail': adminSecondaryEmail}
+    return self._PutProperties(uri, properties)
+
+  def GetDomainLogo(self):
+    """Gets Domain Logo
+
+    This function does not make use of the Google Apps Admin Settings API,
+    it does an HTTP GET of a URL specific to the Google Apps domain. It is
+    included for completeness' sake.
+
+    Args:
+      None
+
+    Returns: binary image file"""
+
+    import urllib
+    url = 'http://www.google.com/a/cpanel/'+self.domain+'/images/logo.gif'
+    response = urllib.urlopen(url)
+    return response.read()
+
+  def UpdateDomainLogo(self, logoImage):
+    """Update Domain's Custom Logo
+
+    Args:
+      logoImage: binary image data
+
+    Returns: A dict containing the result of the put operation"""
+
+    from base64 import b64encode
+    uri = self._serviceUrl('appearance/customLogo')
+    properties = {'logoImage': b64encode(logoImage)}
+    return self._PutProperties(uri, properties)
+
+  def GetCNAMEVerificationStatus(self):
+    """Gets Domain CNAME Verification Status
+
+    Args:
+      None
+
+    Returns: A dict {recordName, verified, verifiedMethod}"""
+
+    return self.genericGet('verification/cname')
+
+  def UpdateCNAMEVerificationStatus(self, verified):
+    """Updates CNAME Verification Status
+
+    Args:
+      verified: boolean, True will retry verification process
+
+    Returns: A dict containing the result of the put operation"""
+
+    uri = self._serviceUrl('verification/cname')
+    properties = self.GetCNAMEVerificationStatus()
+    properties['verified'] = verified
+    return self._PutProperties(uri, properties)
+
+  def GetMXVerificationStatus(self):
+    """Gets Domain MX Verification Status
+
+    Args:
+      None
+
+    Returns: A dict {verified, verifiedMethod}"""
+
+    return self.genericGet('verification/mx')
+
+  def UpdateMXVerificationStatus(self, verified):
+    """Updates MX Verification Status
+
+    Args:
+      verified: boolean, True will retry verification process
+
+    Returns: A dict containing the result of the put operation"""
+
+    uri = self._serviceUrl('verification/mx')
+    properties = self.GetMXVerificationStatus()
+    properties['verified'] = verified
+    return self._PutProperties(uri, properties)
+
+  def GetSSOSettings(self):
+    """Gets Domain Single Sign-On Settings
+
+    Args:
+      None
+
+    Returns: A dict {samlSignonUri, samlLogoutUri,
changePasswordUri, enableSSO, ssoWhitelist, useDomainSpecificIssuer}""" + + return self.genericGet('sso/general') + + def UpdateSSOSettings(self, enableSSO=None, samlSignonUri=None, + samlLogoutUri=None, changePasswordUri=None, + ssoWhitelist=None, useDomainSpecificIssuer=None): + """Update SSO Settings. + + Args: + enableSSO: boolean, SSO Master on/off switch + samlSignonUri: string, SSO Login Page + samlLogoutUri: string, SSO Logout Page + samlPasswordUri: string, SSO Password Change Page + ssoWhitelist: string, Range of IP Addresses which will see SSO + useDomainSpecificIssuer: boolean, Include Google Apps Domain in Issuer + + Returns: + A dict containing the result of the update operation. + """ + uri = self._serviceUrl('sso/general') + + #Get current settings, replace Nones with '' + properties = self.GetSSOSettings() + if properties['samlSignonUri'] == None: + properties['samlSignonUri'] = '' + if properties['samlLogoutUri'] == None: + properties['samlLogoutUri'] = '' + if properties['changePasswordUri'] == None: + properties['changePasswordUri'] = '' + if properties['ssoWhitelist'] == None: + properties['ssoWhitelist'] = '' + + #update only the values we were passed + if enableSSO != None: + properties['enableSSO'] = gdata.apps.service._bool2str(enableSSO) + if samlSignonUri != None: + properties['samlSignonUri'] = samlSignonUri + if samlLogoutUri != None: + properties['samlLogoutUri'] = samlLogoutUri + if changePasswordUri != None: + properties['changePasswordUri'] = changePasswordUri + if ssoWhitelist != None: + properties['ssoWhitelist'] = ssoWhitelist + if useDomainSpecificIssuer != None: + properties['useDomainSpecificIssuer'] = gdata.apps.service._bool2str(useDomainSpecificIssuer) + + return self._PutProperties(uri, properties) + + def GetSSOKey(self): + """Gets Domain Single Sign-On Signing Key + + Args: + None + + Returns: A dict {modulus, exponent, algorithm, format}""" + + return self.genericGet('sso/signingkey') + + def UpdateSSOKey(self, signingKey): + """Update SSO Settings. + + Args: + signingKey: string, public key to be uploaded + + Returns: + A dict containing the result of the update operation.""" + + uri = self._serviceUrl('sso/signingkey') + properties = {'signingKey': signingKey} + return self._PutProperties(uri, properties) + + def IsUserMigrationEnabled(self): + """Is User Migration Enabled + + Args: + None + + Returns: + boolean, is user migration enabled""" + + result = self.genericGet('email/migration') + if result['enableUserMigration'] == 'true': + return True + else: + return False + + def UpdateUserMigrationStatus(self, enableUserMigration): + """Update User Migration Status + + Args: + enableUserMigration: boolean, user migration enable/disable + + Returns: + A dict containing the result of the update operation.""" + + uri = self._serviceUrl('email/migration') + properties = {'enableUserMigration': enableUserMigration} + return self._PutProperties(uri, properties) + + def GetOutboundGatewaySettings(self): + """Get Outbound Gateway Settings + + Args: + None + + Returns: + A dict {smartHost, smtpMode}""" + + uri = self._serviceUrl('email/gateway') + try: + return self._GetProperties(uri) + except gdata.service.RequestError, e: + raise AppsForYourDomainException(e.args[0]) + except TypeError: + #if no outbound gateway is set, we get a TypeError, + #catch it and return nothing... 
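+      #the placeholder dict below keeps the same keys a successful call
+      #would return, so callers can index it safely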
+ return {'smartHost': None, 'smtpMode': None} + + def UpdateOutboundGatewaySettings(self, smartHost=None, smtpMode=None): + """Update Outbound Gateway Settings + + Args: + smartHost: string, ip address or hostname of outbound gateway + smtpMode: string, SMTP or SMTP_TLS + + Returns: + A dict containing the result of the update operation.""" + + uri = self._serviceUrl('email/gateway') + + #Get current settings, replace Nones with '' + properties = GetOutboundGatewaySettings() + if properties['smartHost'] == None: + properties['smartHost'] = '' + if properties['smtpMode'] == None: + properties['smtpMode'] = '' + + #If we were passed new values for smartHost or smtpMode, update them + if smartHost != None: + properties['smartHost'] = smartHost + if smtpMode != None: + properties['smtpMode'] = smtpMode + + return self._PutProperties(uri, properties) + + def AddEmailRoute(self, routeDestination, routeRewriteTo, routeEnabled, bounceNotifications, accountHandling): + """Adds Domain Email Route + + Args: + routeDestination: string, destination ip address or hostname + routeRewriteTo: boolean, rewrite smtp envelop To: + routeEnabled: boolean, enable disable email routing + bounceNotifications: boolean, send bound notificiations to sender + accountHandling: string, which to route, "allAccounts", "provisionedAccounts", "unknownAccounts" + + Returns: + A dict containing the result of the update operation.""" + + uri = self._serviceUrl('emailrouting') + properties = {} + properties['routeDestination'] = routeDestination + properties['routeRewriteTo'] = gdata.apps.service._bool2str(routeRewriteTo) + properties['routeEnabled'] = gdata.apps.service._bool2str(routeEnabled) + properties['bounceNotifications'] = gdata.apps.service._bool2str(bounceNotifications) + properties['accountHandling'] = accountHandling + return self._PostProperties(uri, properties) diff --git a/gam/gdata/analytics/apps/adminsettings/service.pyc b/gam/gdata/analytics/apps/adminsettings/service.pyc new file mode 100755 index 0000000000000000000000000000000000000000..ed2ab8514906ce1c9c919dad7f5cec536ef1ae0e GIT binary patch literal 15462 zcmcgz&vO)4et#ngA%tXsvB4N@KjYe1F3Xx-XHB*gvOyLYYi$Xo2W(78OGmK&jg(YwC7Q1rsWmRMCXGJz3PJ zD)l2$N6WMdjwmy!?jBWlXVl$eY6o+t)f1(@R_d$Uvs|UIc$BY5QLIK$vZt>lNjoxn zwcE8iNIHHPYd`3OacHyD&yv*YEYa3v1D$Nh!nkdhXJ@ru<@iS4(lF`$uw_;>8r(|# z!FCR>y&D;Gxx3w4HbF1^U;Np$Jo$&q-_Bm)u0||Yf{1(n7(aWtrj!AQa6puRk0z|1 z09;c95srNO2#?DOz%nJ3ql6DzW^%CHKBj`B(gN7bNaeT+j!9)s1+!9_SHW?qyrhCT zsVt~qUMeS4@RC$cs$fAXFRS2$R8Fbjq*Pu}!OK#4RRyP{a#{tiNac(QUX{vgDmX2b zvnn_vmDlA&I8^oDYwF3gx_wRsXJyQJ6}&E$H&k#=DsQUbyj0#&!5dO3r@yJ5fPUUq z!CTUPL3xeEw+YD)QLIIwi8H;Eq&nL*10*5nAT9;nK&4=JxkZ%mH^u4r+4oR90wR>U zH>1+m)FWWJrXEkISP?bZ1bZLV)XtR1zr&_GLj3t6_D*r$?!)XjrZ@BFn`v|bkCwYqwK@E4@nziY5&Q%o4O9pO zlr{CWfa2lF94r6@S{h%*OKJ*`xeK11Quk(6x~Xc9Csi#wq8_0Qlv`70G4e+>71z`) zP!ooyV^s%XE7Pqc&L9!M=|UK(vArnM$&Ng=@yZ16lckqK>_}~jbnyt5 z>?IUUtiP3VP!<}CQz<2&798fV%qSE*^G7`lxSFO(n!Sz&%A-$`^sA(oUJ^43@VNUfJc%*jkEwPp3Twt-xz?!tafMASImx22!HO!?VjH@Pot8|FcM?KQ?+lk zuOY;Nyk(F>6zOhaZMY3#f8UP)ZSAKUI~>|cXO1Svhvt{DCQy=tv)Ix_v|5synqBDgIw?&M zy@bjDq9;^X;QAsM+6hAnz)AIW$;3-I)6FhK4{&i{0vLkJN-?w8X|Aqw&etQ6{=%_6 zl41`cW4gVHjQJ*-@lFB-jx_qxXRB9{K7DZ^V?D|z(HJ1tTtJPFieAOy4z)vsB;+D$ z@?BiY$af)Li8nm-oStX`3Zxuke{@Fa&_6oFd_X7 z|3TR4b@XPhvu#ouBsXDASZcXs&je57=4rL5S7RN~T$d9xp8* zosV&^y}lza6|ZGfhrn-Fa-H$f_N)m>zg2+j~O_UKzvk3AL8CQZ1}Ljox@^aZ+j!bU0Q_iqkD+ZbBhqp z>b(h-zC*%qz$a(~Qy_a<%=QZu>+r+jnmHFp--qG96CRMzKTeX!`0v^FduCJo5HZNaAise&vF|_eBWn%~^5mv<>x`vh zi$5ZaW>6?l_=(!oL`mf(*vTKG!JbQ&O$-pp(&%-&a6$Ce`sTrCIybSM45`fC546_3 
z0P8zy)mmztp?f3j9LTZO zs{_1UQe~@HD8oYbExcv~^%aBoGKp|h8(siB=|U5p`Chq=zkg1E{+nB}4^V22^r2ou zpx}A9T(l3?k${8V8V*vFk%>GmvA4Ik9NJ_VMm6+Qf;fe2qsi%(EzRHj&c%!J*kp7A z@rt;O$}XDR$QUN6ZTfNG2ARloVA5EIEB0j$HEOAMHr?Bb{ zF1Viz;j#$xBH$PJz{ORDco5hR5MHs*fLv>el_d6qM1ZU$A!f%&E;nF*0~;B^J`#m( z6pP4iGP-^v+FtDhpp1W1FM`e*d?g%sgW1F5vGpDix*GxxeaqZI;YwB#BAla*3( z?S}}sOft{mwut^h!<6O=%WB_E?EoJXvxTR;Yu5(LA;O}41={!@| zj=5(_43?b4z#oFQh}puCLi`qHIt|I1Is^Os4AL7g-|tFp18yoKeF)nl#-|F~y$0R{ z>6EzqJZUEm<~Md>kMH#27Of;0_6P#TNv1pgJw)fNDWdFE%SsN=PsSh#vLtc7kl8r~ zbmM+9fCxsEKhm~qT5uLx3=9rucN{A&>-7xIfm^eLncIQ$8V3ME`<@gR3%2NT6Z{ z%oD)tDW`J_y}QOVqizv(A#^HuF<|Vcr0s}B8An9dRYD+Jjg5&qwn__F$- z04<>QqN#oaPCU|ah19=~#}`HFlva+^Mvtk9ki<9Ba0&KUC!!ARW%AhCMS9(A`PS}1Gf0z(ePsI^jq4a}lP`C}%+MiT zqIv05TO+p(*RAcD^s!~!2Se+5zTU(KZf`M+{yj0&rRh?Pd8kd5oi~flDOGeHDLd1m^GMk_U3UI?(K+op zVW8oClPdiO+M2k-bN2{u?LbpugM8^xHxAaF8}}<~q*R9^sY{I`X~XvXJXos}1aBy4cH%+Y3Ml$l>3NGbI_ZwEqO)IzG_A zM@gR}?ER9cDw2a!Hhf$pK`Jh;Cp$aZ?uA)vH@}21K)8L^l(RsOAXwob9{%)qjo$uXm;mIAH)mq$;FY%1uU66%)uG0(gFBqXi#NyDL^vbpLj}! zh$&x{9b&MTB#=?z?E{C+#Tj%u>-{oopfKgJ&0XNaMIw*1*x-5L6y zu(ryAHnw+##Z?y9SR8`wLohf-0HzDM^bz)Mm(in6EKJTJ$24F4q2+vS4sC=U_8?BhJ!gzSE1(X&O*ev0)ECd%>?V;g-T(CuwUh?M{ejF)U9 z6w{3o9|4i^G3p42yHub<0JgJ#knD3D?l8!Ht^^v$9y^GL7djri-dMS@QBv|VC9-$0 z9unC%aj!yegl%_oR9_V{*hLP7hXg#OBmpnUY=V3YLFHdynVc<#;J;t^VFaiu z%55av>p^H~bB>W&Mj+DL#98*|5%wMikMsk{1Bi3r`_mB&L(`QpE2=K};$pyjt*QQX zaIqJ09sIKq*TE;0TnEQ3aaee;aA8^UI=IHrqjG1wqiVHU2oqDjc(};_5H)PvLZz`t z9<~7rh+KWWmu(}56zFTXbGqk0EbY95sFgw6~*2uCqxb5V3Xcj?jM7Xv};0*`j7M~B3ta1t}&#>1|WE8=2IIG2&E;jm3g zS;TKV!QDKLRe&sqSYZ+T712%2*}wjMx;8&4pG6SmrPuP8co;zYBro`Ujer)cdihr6 zEbv5^L}g!XhTSs7hrGcq$bykZ8q+Y@9nIwwk&{tD0R1Hz8~BU?llg!1#^;SO z$`G1e17zv_9$LJQS@7oQrpp@$72aSYu$zY<`Y~sj!qJBzTlyil0uMt3_cIb4)dnu0 zPaZ4GPne!!c22aocdCt&@Sfmdfbgajn2}C1JX3rLHK8tn5RrFqAD_4{QrIy$)G;ak zBRW7x12d3{a7)26r7Mada7Fn}JirM)>ikQxwfz;gk8E<829u1g_COpXt`4MZzJkv| zkVkT#t+>w^mNYW_MWY@~ayfa)5}w(Et!r*16}6SRHj!D{#Pa2W#)#^?ToqSlIaP?< zsAxAWw50hCnC1CQ5_!vPCK+>WvTa*gz#2$h#|KFAaY*RKxa<0HfX{ZE1Dq$rKgIQg z2&?z`!#ScbO66=z`hp)ttM2Nay?{r=rSFFpxkM9Gn|txSIN6KKrtz_rIEMm?91?OT zTy|GMo_q%+jU_r{qBP}@7Rzh|0OdoFA%KWRb8aQ+-TRb%HP@A=HfeO6usDN zQf@Yz;t({O;=%EeOw?=!Nvqj(!bxNm7xQ>6!lPvHcqiPWmExUe!FygFSN6We;$0T+ zv7qhleZZp5;@d2k=k``uNT%U;Sd&2P@3MB8#Sd6~#$toTbrxGJL}cD%?GI2a;#Vo2 z^Rqh&T*bt(y@F!8Ha#(S-Z}BL2`@|@8xaB@9dDw81 e{QUHsRNcga zf@0m=#Jt4x)S{U5l*E$67?4m*VrfcdNlbivW?p7Ve7s&kWeEpRpG|IlN@-529mw=z HAZ7pnTnimY literal 0 HcmV?d00001 diff --git a/gam/gdata/analytics/apps/audit/service.py b/gam/gdata/analytics/apps/audit/service.py new file mode 100755 index 00000000000..d8cf72cfcad --- /dev/null +++ b/gam/gdata/analytics/apps/audit/service.py @@ -0,0 +1,277 @@ +# Copyright (C) 2008 Google, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Allow Google Apps domain administrators to audit user data. 
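+
+  A minimal usage sketch (hypothetical admin credentials, domain, users and
+  date; assumes the usual gdata.apps.service.PropertyService constructor
+  arguments and the ProgrammaticLogin method from gdata.service.GDataService):
+
+    service = AuditService(email='admin@example.com', password='secret',
+                           domain='example.com')
+    service.ProgrammaticLogin()
+    service.createEmailMonitor('audited.user', 'auditor',
+                               end_date='2013-12-31 23:59')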
+ + AuditService: Set auditing.""" + +__author__ = 'jlee@pbu.edu' + +from base64 import b64encode + +import gdata.apps +import gdata.apps.service +import gdata.service + +class AuditService(gdata.apps.service.PropertyService): + """Client for the Google Apps Audit service.""" + + def _serviceUrl(self, setting_id, domain=None, user=None): + if domain is None: + domain = self.domain + if user is None: + return '/a/feeds/compliance/audit/%s/%s' % (setting_id, domain) + else: + return '/a/feeds/compliance/audit/%s/%s/%s' % (setting_id, domain, user) + + def updatePGPKey(self, pgpkey): + """Updates Public PGP Key Google uses to encrypt audit data + + Args: + pgpkey: string, ASCII text of PGP Public Key to be used + + Returns: + A dict containing the result of the POST operation.""" + + uri = self._serviceUrl('publickey') + b64pgpkey = b64encode(pgpkey) + properties = {} + properties['publicKey'] = b64pgpkey + return self._PostProperties(uri, properties) + + def createEmailMonitor(self, source_user, destination_user, end_date, + begin_date=None, incoming_headers_only=False, + outgoing_headers_only=False, drafts=False, + drafts_headers_only=False, chats=False, + chats_headers_only=False): + """Creates a email monitor, forwarding the source_users emails/chats + + Args: + source_user: string, the user whose email will be audited + destination_user: string, the user to receive the audited email + end_date: string, the date the audit will end in + "yyyy-MM-dd HH:mm" format, required + begin_date: string, the date the audit will start in + "yyyy-MM-dd HH:mm" format, leave blank to use current time + incoming_headers_only: boolean, whether to audit only the headers of + mail delivered to source user + outgoing_headers_only: boolean, whether to audit only the headers of + mail sent from the source user + drafts: boolean, whether to audit draft messages of the source user + drafts_headers_only: boolean, whether to audit only the headers of + mail drafts saved by the user + chats: boolean, whether to audit archived chats of the source user + chats_headers_only: boolean, whether to audit only the headers of + archived chats of the source user + + Returns: + A dict containing the result of the POST operation.""" + + uri = self._serviceUrl('mail/monitor', user=source_user) + properties = {} + properties['destUserName'] = destination_user + if begin_date is not None: + properties['beginDate'] = begin_date + properties['endDate'] = end_date + if incoming_headers_only: + properties['incomingEmailMonitorLevel'] = 'HEADER_ONLY' + else: + properties['incomingEmailMonitorLevel'] = 'FULL_MESSAGE' + if outgoing_headers_only: + properties['outgoingEmailMonitorLevel'] = 'HEADER_ONLY' + else: + properties['outgoingEmailMonitorLevel'] = 'FULL_MESSAGE' + if drafts: + if drafts_headers_only: + properties['draftMonitorLevel'] = 'HEADER_ONLY' + else: + properties['draftMonitorLevel'] = 'FULL_MESSAGE' + if chats: + if chats_headers_only: + properties['chatMonitorLevel'] = 'HEADER_ONLY' + else: + properties['chatMonitorLevel'] = 'FULL_MESSAGE' + return self._PostProperties(uri, properties) + + def getEmailMonitors(self, user): + """"Gets the email monitors for the given user + + Args: + user: string, the user to retrieve email monitors for + + Returns: + list results of the POST operation + + """ + uri = self._serviceUrl('mail/monitor', user=user) + return self._GetPropertiesList(uri) + + def deleteEmailMonitor(self, source_user, destination_user): + """Deletes the email monitor for the given user + + Args: + source_user: 
string, the user who is being monitored + destination_user: string, theuser who recieves the monitored emails + + Returns: + Nothing + """ + + uri = self._serviceUrl('mail/monitor', user=source_user+'/'+destination_user) + try: + return self._DeleteProperties(uri) + except gdata.service.RequestError, e: + raise AppsForYourDomainException(e.args[0]) + + def createAccountInformationRequest(self, user): + """Creates a request for account auditing details + + Args: + user: string, the user to request account information for + + Returns: + A dict containing the result of the post operation.""" + + uri = self._serviceUrl('account', user=user) + properties = {} + #XML Body is left empty + try: + return self._PostProperties(uri, properties) + except gdata.service.RequestError, e: + raise AppsForYourDomainException(e.args[0]) + + def getAccountInformationRequestStatus(self, user, request_id): + """Gets the status of an account auditing request + + Args: + user: string, the user whose account auditing details were requested + request_id: string, the request_id + + Returns: + A dict containing the result of the get operation.""" + + uri = self._serviceUrl('account', user=user+'/'+request_id) + try: + return self._GetProperties(uri) + except gdata.service.RequestError, e: + raise AppsForYourDomainException(e.args[0]) + + def getAllAccountInformationRequestsStatus(self): + """Gets the status of all account auditing requests for the domain + + Args: + None + + Returns: + list results of the POST operation + """ + + uri = self._serviceUrl('account') + return self._GetPropertiesList(uri) + + + def deleteAccountInformationRequest(self, user, request_id): + """Deletes the request for account auditing information + + Args: + user: string, the user whose account auditing details were requested + request_id: string, the request_id + + Returns: + Nothing + """ + + uri = self._serviceUrl('account', user=user+'/'+request_id) + try: + return self._DeleteProperties(uri) + except gdata.service.RequestError, e: + raise AppsForYourDomainException(e.args[0]) + + def createMailboxExportRequest(self, user, begin_date=None, end_date=None, include_deleted=False, search_query=None, headers_only=False): + """Creates a mailbox export request + + Args: + user: string, the user whose mailbox export is being requested + begin_date: string, date of earliest emails to export, optional, defaults to date of account creation + format is 'yyyy-MM-dd HH:mm' + end_date: string, date of latest emails to export, optional, defaults to current date + format is 'yyyy-MM-dd HH:mm' + include_deleted: boolean, whether to include deleted emails in export, mutually exclusive with search_query + search_query: string, gmail style search query, matched emails will be exported, mutually exclusive with include_deleted + + Returns: + A dict containing the result of the post operation.""" + + uri = self._serviceUrl('mail/export', user=user) + properties = {} + if begin_date is not None: + properties['beginDate'] = begin_date + if end_date is not None: + properties['endDate'] = end_date + if include_deleted is not None: + properties['includeDeleted'] = gdata.apps.service._bool2str(include_deleted) + if search_query is not None: + properties['searchQuery'] = search_query + if headers_only is True: + properties['packageContent'] = 'HEADER_ONLY' + else: + properties['packageContent'] = 'FULL_MESSAGE' + return self._PostProperties(uri, properties) + + def getMailboxExportRequestStatus(self, user, request_id): + """Gets the status of an mailbox export request 
+ + Args: + user: string, the user whose mailbox were requested + request_id: string, the request_id + + Returns: + A dict containing the result of the get operation.""" + + uri = self._serviceUrl('mail/export', user=user+'/'+request_id) + try: + return self._GetProperties(uri) + except gdata.service.RequestError, e: + raise AppsForYourDomainException(e.args[0]) + + def getAllMailboxExportRequestsStatus(self): + """Gets the status of all mailbox export requests for the domain + + Args: + None + + Returns: + list results of the POST operation + """ + + uri = self._serviceUrl('mail/export') + return self._GetPropertiesList(uri) + + + def deleteMailboxExportRequest(self, user, request_id): + """Deletes the request for mailbox export + + Args: + user: string, the user whose mailbox were requested + request_id: string, the request_id + + Returns: + Nothing + """ + + uri = self._serviceUrl('mail/export', user=user+'/'+request_id) + try: + return self._DeleteProperties(uri) + except gdata.service.RequestError, e: + raise AppsForYourDomainException(e.args[0]) diff --git a/gam/gdata/analytics/apps/audit/service.pyc b/gam/gdata/analytics/apps/audit/service.pyc new file mode 100755 index 0000000000000000000000000000000000000000..3fca4d7d3cb49222395a2d9edca23edb448a7f22 GIT binary patch literal 9785 zcmc&)&2t<_74O~E*RCYXhJ1oU2161BWC@H(6~+~!*ovLPR?H|ah%mJ^+G(vunw{Cq z^u|(^a>2fEf#M2(0I5__TsUyeF$eM=KovLcaNq{N-|Ly3*$+7)vP-hI-P7IEAMgF% zd%u3G+P~%-m!I+0TPpjh;r}=AN!j8nwTZH#HY>`nD!-<}s_N9#W=&Q2JfXq~wK*w$ zs85yZ(^8*QdQ$DqsNK5SZK!R`GNm3W^+2ijHfQ<%#^N*=!zyYu4CDLGjW}+H+F9*( zQ^$`xUJyB+-wC21HHl~9By~*ecs)NbPA}Do<9o(iZZsTcm2De3`5Cc2p3x zm(w{+zZ+`(Mt7^Xto>f_U;Hdq&}!-^wqCoeqgL$eb;FV6nfh{>LC2f;q<=)AAs3tl zC4_KBekRc%gC6gVJdaz^Q?;a^DbAtW=+qjT7d@+YNI&m^;;PPrS43*7drD6WNpj*PP%Cyv?C1EQ8Q zI}j+vx7?b*==^hyuirp%h{GziH=~m0)FA{~Q6E-Sg!WqAeu-E)#BWt?SJg8c5Qcri zknI{96f|DLTca_&_fT+kl|vy>{7$F`)l8Dd&?6_(97!tvE8UV@xNoOLW=`Wj2EF&O00LI&oLSxPv%a7JKZi-7t95HM9~R9XbdJU-l^AmHz)^bugqPIL|Xuc@nqz?|Bv ztK@a6{}8Pc>Z-LqK1^cBq)L8HKf>o}bi$X+*Y>798Rm`BXtPN{(U~A zp{~CFQT{$2xDposOdU2<@<~2w7Nb7PN8y2^EJ$a$uz-~3P5+D&UrV%r)N?fadgyfG zD1b*_qC>jxC4LT&H0~uW-K1Hjb|@Ugj%U(Qw3dbzkR`16vU7hYPIb1}{U8hpR) zmAH2tU#D;f0zFx8%p|~gqFXxnKnr@a_wB6NEIRU=w7vu=f6 zv_}JnB%M|-NfsJs*l{#!(2ju@%R7KU#w9HS~Z$lSgazi#W0u z;7-QutB{E!81Z!o$>aQ-$ZA`3o4p$MOgnz^yGSK$CUHj&FuDyt@wQF+sME^`r=!yp zfh46XB}$K)=}8IIic~i1q|mbOY+0SqTr!c0r9J8>UeeluPJBm39(@$)enKbt{5{BN zP9BehI#O&anYVHoSA-_gcie^C*1e9lp@p`29ae7`qhRV%r^~;&wt9WdZN9U9>pk;4 zo^#__Bxlh|>6aVVOVN`& z#AWzfq;Iz2lG`kKQ^Priy`v!s&YhjmslpwZr_q<6Ye4JLlOJKQN2r>>(R`WfNp)}t z1G*r|d?~LD3~(8C-7_doPT^z)2~uTAe;;$F-$J3zKP^#wt}>5UK2@2o7A2qN(PsY_ zrbpJ%kMm@*W8yrhoDgYnI#51KjnhA2?1dZJfV06z48+Q`Pu|-w!N|HofqI$4(V7Qi z5nJd%6AfAippBVzEQ1Si?ZQhWY<;IS$k^;_LQZA$#Pgh!04S~gv&@c~`A@zWCJqxuwOFC9l2j_B4)kIT2jh57e zsjA(Y+MQ5);6(RS1!TQqCP~kBaYdU@&qJrbtf;7>eg=NFu}HY5e?xIyhnjSKM8C&U zZ^=x@Xc?r(2~toU(y7d8_36W~MGw3$AmQuaI|PZ{Uw;aVnT&)DdDivV>|i}X;&?3J zUW5A{ZZx>1$(`Tgf)1={#Zf>5GK2=fpFHuTz%Fm4;u~GiA^6?3B#9IA6%1pd;H@}$ z51jG3qzl&eTe{1;gxu{sTy2U=flKG>@-=~)>`9*-aP%7>Do|+|bNgtu{D1a^O09CP z0^sF0z&`M{FlY#P1YA}4KVbJ%4m)@XAP<+mQRMw%7t0rmaY5|jf>*vEGPY)Gh|?H` zlms@HN_AglLC@`eo5d1}5*BEUMf7klvKOC|T>_caXh{fJwK9FX5JQIDUtx&Yh^ix= zRBM%|h>X*8N&m)<`tb(02GbK~?;-Uz42%plqc^c5fMKs-BhzH~a_)oL%M}Mm>^rG! 
z4diGB+*3Xd?Ozy72n!`FxhpuhjXW=-HjE@gRxuc~j*BZHN5wfT%pMQI$Ru!#BjL3n z`uiP5S?AN}J0E&rIv{=|>YYcW?|hhYLhi}Zk&2}w9GU(W#W-}}K0Tt>?8pU8v5B}* zxG+;UR*T1A9K0in>I#10eiNItkHRKM3Qum#1O-JMhDS;}wc`F5BQtujaJshq8Fo^p z7nw%JyZMrj7G;!3{v#ps z7YrHZ0Ssbwdre zAo7K_qo?{jbyvtKEFGynW(+w2zNxU|e$6^NrfkQQLMolv{y#9A$c`CwAg5T*AGl%k zNT;OwlwEB^wxQ-T3lQbOiE!s=) z^#{0wT&Qe5T=ukth(NyRg-hT)+n(HYa97G-%$=Nsb&SjMB&AFhjSI;-((YG=GkLFM z!}DwN=*B`OMUTI&JQv69jcnx@8^Ikt?D@KBQHu~eE@hVuf;`A-{aj|qC0ac$Ih~&A zc_C7IdLQ$pOpo3V%nrUnVs54hE|?r-+bcH}$8S@q_?qDW-}c!Nj=Y4aJ=5AL7R@ua zc4h4!z4p-2#>ST~Ksi&wn!8=GTd<6QHy4-8UuZ(MwDr8cd>Ld9^%DxhWp+cxb-mUe zGJw}`JwZ;+yi1$P@Ev65bDQc;)iy^Qp|oAk8J;n0sr4#)26 zJRiS`LcSjv5{cZQ%KDIoatgO##3zrRyl7c6eA{|~H8D8X-aI}BTJ%7>9%@+$x9I?fRmGMMkw|Gs3qc&8FOko6Sz__d-50 z*fpDe+-f#$%#-N1fL6)~7 z_67f(!^N=_qp1813iA5Zg_-(U=Jye)F4gDj=kV`r{Y?Gbb1(A$MY&U~;f3s|UeD~r z369D%)0UU&*DkxL*>1^gF7xk5Y_O-iinirE;mL1jY;)G){s1kO5l~iH#`IvrCP-z^ dkLUREP0v-UuGh(QBw;mGovNO#%Ad1T{|DeQ)j{QUHsRNcga zf@0m=#Jt4x)S{U5l*E$67?4m*YHng?PH}2UNoHPpaZG%CW?p7Ve7s&kWeEpRzfEp_ PN@-529mtkqAZ7pnhF>7E literal 0 HcmV?d00001 diff --git a/gam/gdata/analytics/apps/emailsettings/client.py b/gam/gdata/analytics/apps/emailsettings/client.py new file mode 100755 index 00000000000..97294380b96 --- /dev/null +++ b/gam/gdata/analytics/apps/emailsettings/client.py @@ -0,0 +1,557 @@ +#!/usr/bin/python2.4 +# +# Copyright 2010 Google Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""EmailSettingsClient simplifies Email Settings API calls. + +EmailSettingsClient extends gdata.client.GDClient to ease interaction with +the Google Apps Email Settings API. These interactions include the ability +to create labels, filters, aliases, and update web-clip, forwarding, POP, +IMAP, vacation-responder, signature, language, and general settings, and +retrieve labels, send-as, forwarding, pop, imap, vacation and signature +settings. +""" + + +__author__ = 'Claudio Cherubino ' + + +import gdata.apps.emailsettings.data +import gdata.client + + +# Email Settings URI template +# The strings in this template are eventually replaced with the API version, +# Google Apps domain name, username, and settingID, respectively. 
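+# For example (hypothetical domain and user), a label request would use
+# '/a/feeds/emailsettings/2.0/example.com/liz/label'.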
+EMAIL_SETTINGS_URI_TEMPLATE = '/a/feeds/emailsettings/%s/%s/%s/%s' + + +# The settingID value for the label requests +SETTING_ID_LABEL = 'label' +# The settingID value for the filter requests +SETTING_ID_FILTER = 'filter' +# The settingID value for the send-as requests +SETTING_ID_SENDAS = 'sendas' +# The settingID value for the webclip requests +SETTING_ID_WEBCLIP = 'webclip' +# The settingID value for the forwarding requests +SETTING_ID_FORWARDING = 'forwarding' +# The settingID value for the POP requests +SETTING_ID_POP = 'pop' +# The settingID value for the IMAP requests +SETTING_ID_IMAP = 'imap' +# The settingID value for the vacation responder requests +SETTING_ID_VACATION_RESPONDER = 'vacation' +# The settingID value for the signature requests +SETTING_ID_SIGNATURE = 'signature' +# The settingID value for the language requests +SETTING_ID_LANGUAGE = 'language' +# The settingID value for the general requests +SETTING_ID_GENERAL = 'general' +# The settingID value for the delegation requests +SETTING_ID_DELEGATION = 'delegation' + +# The KEEP action for the email settings +ACTION_KEEP = 'KEEP' +# The ARCHIVE action for the email settings +ACTION_ARCHIVE = 'ARCHIVE' +# The DELETE action for the email settings +ACTION_DELETE = 'DELETE' + +# The ALL_MAIL setting for POP enable_for property +POP_ENABLE_FOR_ALL_MAIL = 'ALL_MAIL' +# The MAIL_FROM_NOW_ON setting for POP enable_for property +POP_ENABLE_FOR_MAIL_FROM_NOW_ON = 'MAIL_FROM_NOW_ON' + + +class EmailSettingsClient(gdata.client.GDClient): + """Client extension for the Google Email Settings API service. + + Attributes: + host: string The hostname for the Email Settings API service. + api_version: string The version of the Email Settings API. + """ + + host = 'apps-apis.google.com' + api_version = '2.0' + auth_service = 'apps' + auth_scopes = gdata.gauth.AUTH_SCOPES['apps'] + ssl = True + + def __init__(self, domain, auth_token=None, **kwargs): + """Constructs a new client for the Email Settings API. + + Args: + domain: string The Google Apps domain with Email Settings. + auth_token: (optional) gdata.gauth.ClientLoginToken, AuthSubToken, or + OAuthToken which authorizes this client to edit the email settings. + kwargs: The other parameters to pass to the gdata.client.GDClient + constructor. + """ + gdata.client.GDClient.__init__(self, auth_token=auth_token, **kwargs) + self.domain = domain + + def make_email_settings_uri(self, username, setting_id): + """Creates the URI for the Email Settings API call. + + Using this client's Google Apps domain, create the URI to setup + email settings for the given user in that domain. If params are provided, + append them as GET params. + + Args: + username: string The name of the user affected by this setting. + setting_id: string The key of the setting to be configured. + + Returns: + A string giving the URI for Email Settings API calls for this client's + Google Apps domain. + """ + if '@' in username: + username, domain = username.split('@', 1) + else: + domain = self.domain + uri = EMAIL_SETTINGS_URI_TEMPLATE % (self.api_version, domain, + username, setting_id) + return uri + + MakeEmailSettingsUri = make_email_settings_uri + + def create_label(self, username, name, **kwargs): + """Creates a label with the given properties. + + Args: + username: string The name of the user. + name: string The name of the label. + kwargs: The other parameters to pass to gdata.client.GDClient.post(). + + Returns: + gdata.apps.emailsettings.data.EmailSettingsLabel of the new resource. 
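+
+    Example (illustrative sketch; the domain, username, label name and
+    auth token are assumed values):
+      client = EmailSettingsClient(domain='example.com', auth_token=token)
+      label_entry = client.CreateLabel('liz', 'Projects')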
+ """ + uri = self.MakeEmailSettingsUri(username=username, + setting_id=SETTING_ID_LABEL) + new_label = gdata.apps.emailsettings.data.EmailSettingsLabel( + uri=uri, name=name) + return self.post(new_label, uri, **kwargs) + + CreateLabel = create_label + + def retrieve_labels(self, username, **kwargs): + """Retrieves email labels for the specified username + + Args: + username: string The name of the user to get the labels for + + Returns: + A gdata.data.GDFeed of the user's email labels + """ + uri = self.MakeEmailSettingsUri(username=username, + setting_id=SETTING_ID_LABEL) + return self.GetFeed(uri, auth_token=None, query=None, **kwargs) + + RetrieveLabels = retrieve_labels + + def create_filter(self, username, from_address=None, + to_address=None, subject=None, has_the_word=None, + does_not_have_the_word=None, has_attachments=None, + label=None, mark_as_read=None, archive=None, **kwargs): + """Creates a filter with the given properties. + + Args: + username: string The name of the user. + from_address: string The source email address for the filter. + to_address: string (optional) The destination email address for + the filter. + subject: string (optional) The value the email must have in its + subject to be filtered. + has_the_word: string (optional) The value the email must have + in its subject or body to be filtered. + does_not_have_the_word: string (optional) The value the email + cannot have in its subject or body to be filtered. + has_attachments: string (optional) A boolean string representing + whether the email must have an attachment to be filtered. + label: string (optional) The name of the label to apply to + messages matching the filter criteria. + mark_as_read: Boolean (optional) Whether or not to mark + messages matching the filter criteria as read. + archive: Boolean (optional) Whether or not to move messages + matching to Archived state. + kwargs: The other parameters to pass to gdata.client.GDClient.post(). + + Returns: + gdata.apps.emailsettings.data.EmailSettingsFilter of the new resource. + """ + uri = self.MakeEmailSettingsUri(username=username, + setting_id=SETTING_ID_FILTER) + new_filter = gdata.apps.emailsettings.data.EmailSettingsFilter( + uri=uri, from_address=from_address, + to_address=to_address, subject=subject, + has_the_word=has_the_word, + does_not_have_the_word=does_not_have_the_word, + has_attachments=has_attachments, label=label, + mark_as_read=mark_as_read, archive=archive) + return self.post(new_filter, uri, **kwargs) + + CreateFilter = create_filter + + def create_send_as(self, username, name, address, reply_to=None, + make_default=None, **kwargs): + """Creates a send-as alias with the given properties. + + Args: + username: string The name of the user. + name: string The name that will appear in the "From" field. + address: string The email address that appears as the + origination address for emails sent by this user. + reply_to: string (optional) The address to be used as the reply-to + address in email sent using the alias. + make_default: Boolean (optional) Whether or not this alias should + become the default alias for this user. + kwargs: The other parameters to pass to gdata.client.GDClient.post(). + + Returns: + gdata.apps.emailsettings.data.EmailSettingsSendAsAlias of the + new resource. 
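+
+    Example (illustrative sketch; all argument values are assumed):
+      client.CreateSendAs('liz', 'Liz (Support)', 'support@example.com',
+                          reply_to='help@example.com', make_default=True)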
+ """ + uri = self.MakeEmailSettingsUri(username=username, + setting_id=SETTING_ID_SENDAS) + new_alias = gdata.apps.emailsettings.data.EmailSettingsSendAsAlias( + uri=uri, name=name, address=address, + reply_to=reply_to, make_default=make_default) + return self.post(new_alias, uri, **kwargs) + + CreateSendAs = create_send_as + + def retrieve_send_as(self, username, **kwargs): + """Retrieves send-as aliases for the specified username + + Args: + username: string The name of the user to get the send-as for + + Returns: + A gdata.data.GDFeed of the user's send-as alias settings + """ + uri = self.MakeEmailSettingsUri(username=username, + setting_id=SETTING_ID_SENDAS) + return self.GetFeed(uri, auth_token=None, query=None, **kwargs) + + RetrieveSendAs = retrieve_send_as + + def update_webclip(self, username, enable, **kwargs): + """Enable/Disable Google Mail web clip. + + Args: + username: string The name of the user. + enable: Boolean Whether to enable showing Web clips. + kwargs: The other parameters to pass to the update method. + + Returns: + gdata.apps.emailsettings.data.EmailSettingsWebClip of the + updated resource. + """ + uri = self.MakeEmailSettingsUri(username=username, + setting_id=SETTING_ID_WEBCLIP) + new_webclip = gdata.apps.emailsettings.data.EmailSettingsWebClip( + uri=uri, enable=enable) + return self.update(new_webclip, **kwargs) + + UpdateWebclip = update_webclip + + def update_forwarding(self, username, enable, forward_to=None, + action=None, **kwargs): + """Update Google Mail Forwarding settings. + + Args: + username: string The name of the user. + enable: Boolean Whether to enable incoming email forwarding. + forward_to: (optional) string The address email will be forwarded to. + action: string (optional) The action to perform after forwarding + an email (ACTION_KEEP, ACTION_ARCHIVE, ACTION_DELETE). + kwargs: The other parameters to pass to the update method. + + Returns: + gdata.apps.emailsettings.data.EmailSettingsForwarding of the + updated resource + """ + uri = self.MakeEmailSettingsUri(username=username, + setting_id=SETTING_ID_FORWARDING) + new_forwarding = gdata.apps.emailsettings.data.EmailSettingsForwarding( + uri=uri, enable=enable, forward_to=forward_to, action=action) + return self.update(new_forwarding, **kwargs) + + UpdateForwarding = update_forwarding + + def retrieve_forwarding(self, username, **kwargs): + """Retrieves forwarding settings for the specified username + + Args: + username: string The name of the user to get the forwarding settings for + + Returns: + A gdata.data.GDEntry of the user's email forwarding settings + """ + uri = self.MakeEmailSettingsUri(username=username, + setting_id=SETTING_ID_FORWARDING) + return self.GetEntry(uri, auth_token=None, query=None, **kwargs) + + RetrieveForwarding = retrieve_forwarding + + def update_pop(self, username, enable, enable_for=None, action=None, + **kwargs): + """Update Google Mail POP settings. + + Args: + username: string The name of the user. + enable: Boolean Whether to enable incoming POP3 access. + enable_for: string (optional) Whether to enable POP3 for all mail + (POP_ENABLE_FOR_ALL_MAIL), or mail from now on + (POP_ENABLE_FOR_MAIL_FROM_NOW_ON). + action: string (optional) What Google Mail should do with its copy + of the email after it is retrieved using POP (ACTION_KEEP, + ACTION_ARCHIVE, ACTION_DELETE). + kwargs: The other parameters to pass to the update method. + + Returns: + gdata.apps.emailsettings.data.EmailSettingsPop of the updated resource. 
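+
+    Example (illustrative sketch; the username is an assumed value):
+      client.UpdatePop('liz', enable=True,
+                       enable_for=POP_ENABLE_FOR_MAIL_FROM_NOW_ON,
+                       action=ACTION_ARCHIVE)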
+ """ + uri = self.MakeEmailSettingsUri(username=username, + setting_id=SETTING_ID_POP) + new_pop = gdata.apps.emailsettings.data.EmailSettingsPop( + uri=uri, enable=enable, + enable_for=enable_for, action=action) + return self.update(new_pop, **kwargs) + + UpdatePop = update_pop + + def retrieve_pop(self, username, **kwargs): + """Retrieves POP settings for the specified username + + Args: + username: string The name of the user to get the POP settings for + + Returns: + A gdata.data.GDEntry of the user's POP settings + """ + uri = self.MakeEmailSettingsUri(username=username, + setting_id=SETTING_ID_POP) + return self.GetEntry(uri, auth_token=None, query=None, **kwargs) + + RetrievePop = retrieve_pop + + def update_imap(self, username, enable, **kwargs): + """Update Google Mail IMAP settings. + + Args: + username: string The name of the user. + enable: Boolean Whether to enable IMAP access.language + kwargs: The other parameters to pass to the update method. + + Returns: + gdata.apps.emailsettings.data.EmailSettingsImap of the updated resource. + """ + uri = self.MakeEmailSettingsUri(username=username, + setting_id=SETTING_ID_IMAP) + new_imap = gdata.apps.emailsettings.data.EmailSettingsImap( + uri=uri, enable=enable) + return self.update(new_imap, **kwargs) + + UpdateImap = update_imap + + def retrieve_imap(self, username, **kwargs): + """Retrieves imap settings for the specified username + + Args: + username: string The name of the user to get the imap settings for + + Returns: + A gdata.data.GDEntry of the user's IMAP settings + """ + uri = self.MakeEmailSettingsUri(username=username, + setting_id=SETTING_ID_IMAP) + return self.GetEntry(uri, auth_token=None, query=None, **kwargs) + + RetrieveImap = retrieve_imap + + def update_vacation(self, username, enable, subject=None, message=None, + contacts_only=None, **kwargs): + """Update Google Mail vacation-responder settings. + + Args: + username: string The name of the user. + enable: Boolean Whether to enable the vacation responder. + subject: string (optional) The subject line of the vacation responder + autoresponse. + message: string (optional) The message body of the vacation responder + autoresponse. + contacts_only: Boolean (optional) Whether to only send autoresponses + to known contacts. + kwargs: The other parameters to pass to the update method. + + Returns: + gdata.apps.emailsettings.data.EmailSettingsVacationResponder of the + updated resource. + """ + uri = self.MakeEmailSettingsUri(username=username, + setting_id=SETTING_ID_VACATION_RESPONDER) + new_vacation = gdata.apps.emailsettings.data.EmailSettingsVacationResponder( + uri=uri, enable=enable, subject=subject, + message=message, contacts_only=contacts_only) + return self.update(new_vacation, **kwargs) + + UpdateVacation = update_vacation + + def retrieve_vacation(self, username, **kwargs): + """Retrieves vacation settings for the specified username + + Args: + username: string The name of the user to get the vacation settings for + + Returns: + A gdata.data.GDEntry of the user's vacation auto-responder settings + """ + uri = self.MakeEmailSettingsUri(username=username, + setting_id=SETTING_ID_VACATION_RESPONDER) + return self.GetEntry(uri, auth_token=None, query=None, **kwargs) + + RetrieveVacation = retrieve_vacation + + def update_signature(self, username, signature, **kwargs): + """Update Google Mail signature. + + Args: + username: string The name of the user. + signature: string The signature to be appended to outgoing messages. 
+ kwargs: The other parameters to pass to the update method. + + Returns: + gdata.apps.emailsettings.data.EmailSettingsSignature of the + updated resource. + """ + uri = self.MakeEmailSettingsUri(username=username, + setting_id=SETTING_ID_SIGNATURE) + new_signature = gdata.apps.emailsettings.data.EmailSettingsSignature( + uri=uri, signature=signature) + return self.update(new_signature, **kwargs) + + UpdateSignature = update_signature + + def retrieve_signature(self, username, **kwargs): + """Retrieves signature settings for the specified username + + Args: + username: string The name of the user to get the signature settings for + + Returns: + A gdata.data.GDEntry of the user's signature settings + """ + uri = self.MakeEmailSettingsUri(username=username, + setting_id=SETTING_ID_SIGNATURE) + return self.GetEntry(uri, auth_token=None, query=None, **kwargs) + + RetrieveSignature = retrieve_signature + + def update_language(self, username, language, **kwargs): + """Update Google Mail language settings. + + Args: + username: string The name of the user. + language: string The language tag for Google Mail's display language. + kwargs: The other parameters to pass to the update method. + + Returns: + gdata.apps.emailsettings.data.EmailSettingsLanguage of the + updated resource. + """ + uri = self.MakeEmailSettingsUri(username=username, + setting_id=SETTING_ID_LANGUAGE) + new_language = gdata.apps.emailsettings.data.EmailSettingsLanguage( + uri=uri, language=language) + return self.update(new_language, **kwargs) + + UpdateLanguage = update_language + + def update_general_settings(self, username, page_size=None, shortcuts=None, + arrows=None, snippets=None, use_unicode=None, + **kwargs): + """Update Google Mail general settings. + + Args: + username: string The name of the user. + page_size: int (optional) The number of conversations to be shown per + page. + shortcuts: Boolean (optional) Whether to enable keyboard shortcuts. + arrows: Boolean (optional) Whether to display arrow-shaped personal + indicators next to email sent specifically to the user. + snippets: Boolean (optional) Whether to display snippets of the messages + in the inbox and when searching. + use_unicode: Boolean (optional) Whether to use UTF-8 (unicode) encoding + for all outgoing messages. + kwargs: The other parameters to pass to the update method. + + Returns: + gdata.apps.emailsettings.data.EmailSettingsGeneral of the + updated resource. 
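+
+    Example (illustrative sketch; the username is an assumed value):
+      client.UpdateGeneralSettings('liz', page_size=50, shortcuts=True,
+                                   snippets=False)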
+ """ + uri = self.MakeEmailSettingsUri(username=username, + setting_id=SETTING_ID_GENERAL) + new_general = gdata.apps.emailsettings.data.EmailSettingsGeneral( + uri=uri, page_size=page_size, shortcuts=shortcuts, + arrows=arrows, snippets=snippets, use_unicode=use_unicode) + return self.update(new_general, **kwargs) + + UpdateGeneralSettings = update_general_settings + + def add_email_delegate(self, username, address, **kwargs): + """Add an email delegate to the mail account + + Args: + username: string The name of the user + address: string The email address of the delegated account + """ + uri = self.MakeEmailSettingsUri(username=username, + setting_id=SETTING_ID_DELEGATION) + new_delegation = gdata.apps.emailsettings.data.EmailSettingsDelegation( + uri=uri, address=address) + return self.post(new_delegation, uri, **kwargs) + + AddEmailDelegate = add_email_delegate + + def retrieve_email_delegates(self, username, **kwargs): + """Retrieve a feed of the email delegates for the specified username + + Args: + username: string The name of the user to get the email delegates for + + Returns: + A gdata.data.GDFeed of the user's email delegates + """ + uri = self.MakeEmailSettingsUri(username=username, + setting_id=SETTING_ID_DELEGATION) + return self.GetFeed(uri, auth_token=None, query=None, **kwargs) + + RetrieveEmailDelegates = retrieve_email_delegates + + def delete_email_delegate(self, username, address, **kwargs): + """Delete an email delegate from the specified account + + Args: + username: string The name of the user + address: string The email address of the delegated account + """ + uri = self.MakeEmailSettingsUri(username=username, + setting_id=SETTING_ID_DELEGATION) + uri = uri + '/' + address + return self.delete(uri, **kwargs) + + DeleteEmailDelegate = delete_email_delegate diff --git a/gam/gdata/analytics/apps/emailsettings/data.py b/gam/gdata/analytics/apps/emailsettings/data.py new file mode 100755 index 00000000000..ab908de71ca --- /dev/null +++ b/gam/gdata/analytics/apps/emailsettings/data.py @@ -0,0 +1,1174 @@ +#!/usr/bin/python +# +# Copyright 2010 Google Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
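+
+# Illustrative sketch (the names below are assumed values): the entry classes
+# in this module store their fields as apps:property name/value pairs, so a
+# typical round trip looks roughly like
+#   label = EmailSettingsLabel(uri=label_uri, name='Projects')
+#   label.name                    # reads the 'label' apps:property
+#   label.name = 'Projects-2013'  # rewrites the same apps:property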
+
+"""Data model classes for the Email Settings API."""
+
+
+__author__ = 'Claudio Cherubino '
+
+
+import atom.data
+import gdata.apps
+import gdata.apps_property
+import gdata.data
+
+
+# This is required to work around a naming conflict between the Google
+# Spreadsheets API and Python's built-in property function
+pyproperty = property
+
+
+# The apps:property label of the label property
+LABEL_NAME = 'label'
+
+# The apps:property from of the filter property
+FILTER_FROM_NAME = 'from'
+# The apps:property to of the filter property
+FILTER_TO_NAME = 'to'
+# The apps:property subject of the filter property
+FILTER_SUBJECT_NAME = 'subject'
+# The apps:property hasTheWord of the filter property
+FILTER_HAS_THE_WORD_NAME = 'hasTheWord'
+# The apps:property doesNotHaveTheWord of the filter property
+FILTER_DOES_NOT_HAVE_THE_WORD_NAME = 'doesNotHaveTheWord'
+# The apps:property hasAttachment of the filter property
+FILTER_HAS_ATTACHMENTS_NAME = 'hasAttachment'
+# The apps:property label of the filter action property
+FILTER_LABEL = 'label'
+# The apps:property shouldMarkAsRead of the filter action property
+FILTER_MARK_AS_READ = 'shouldMarkAsRead'
+# The apps:property shouldArchive of the filter action property
+FILTER_ARCHIVE = 'shouldArchive'
+
+# The apps:property name of the send-as alias property
+SENDAS_ALIAS_NAME = 'name'
+# The apps:property address of the send-as alias property
+SENDAS_ALIAS_ADDRESS = 'address'
+# The apps:property replyTo of the send-as alias property
+SENDAS_ALIAS_REPLY_TO = 'replyTo'
+# The apps:property makeDefault of the send-as alias property
+SENDAS_ALIAS_MAKE_DEFAULT = 'makeDefault'
+
+# The apps:property enable of the webclip property
+WEBCLIP_ENABLE = 'enable'
+
+# The apps:property enable of the forwarding property
+FORWARDING_ENABLE = 'enable'
+# The apps:property forwardTo of the forwarding property
+FORWARDING_TO = 'forwardTo'
+# The apps:property action of the forwarding property
+FORWARDING_ACTION = 'action'
+
+# The apps:property enable of the POP property
+POP_ENABLE = 'enable'
+# The apps:property enableFor of the POP property
+POP_ENABLE_FOR = 'enableFor'
+# The apps:property action of the POP property
+POP_ACTION = 'action'
+
+# The apps:property enable of the IMAP property
+IMAP_ENABLE = 'enable'
+
+# The apps:property enable of the vacation responder property
+VACATION_RESPONDER_ENABLE = 'enable'
+# The apps:property subject of the vacation responder property
+VACATION_RESPONDER_SUBJECT = 'subject'
+# The apps:property message of the vacation responder property
+VACATION_RESPONDER_MESSAGE = 'message'
+# The apps:property contactsOnly of the vacation responder property
+VACATION_RESPONDER_CONTACTS_ONLY = 'contactsOnly'
+
+# The apps:property signature of the signature property
+SIGNATURE_VALUE = 'signature'
+
+# The apps:property language of the language property
+LANGUAGE_TAG = 'language'
+
+# The apps:property pageSize of the general settings property
+GENERAL_PAGE_SIZE = 'pageSize'
+# The apps:property shortcuts of the general settings property
+GENERAL_SHORTCUTS = 'shortcuts'
+# The apps:property arrows of the general settings property
+GENERAL_ARROWS = 'arrows'
+# The apps:property snippets of the general settings property
+GENERAL_SNIPPETS = 'snippets'
+# The apps:property unicode of the general settings property
+GENERAL_UNICODE = 'unicode'
+
+# The apps:property delegationId of the email delegation property
+DELEGATION_ID = "delegationId"
+# The apps:property address of the email delegation 
property +DELEGATION_ADDRESS = 'address' +# The apps:property delegate of the email delegation property +DELEGATION_DELEGATE = "delegate" +# The apps:property status of the email delegation property +DELEGATION_STATUS = "status" + +class EmailSettingsEntry(gdata.data.GDEntry): + """Represents an Email Settings entry in object form.""" + + property = [gdata.apps_property.AppsProperty] + + def _GetProperty(self, name): + """Get the apps:property value with the given name. + + Args: + name: string Name of the apps:property value to get. + + Returns: + The apps:property value with the given name, or None if the name was + invalid. + """ + + value = None + for p in self.property: + if p.name == name: + value = p.value + break + return value + + def _SetProperty(self, name, value): + """Set the apps:property value with the given name to the given value. + + Args: + name: string Name of the apps:property value to set. + value: string Value to give the apps:property value with the given name. + """ + found = False + for i in range(len(self.property)): + if self.property[i].name == name: + self.property[i].value = value + found = True + break + if not found: + self.property.append(gdata.apps_property.AppsProperty(name=name, value=value)) + + def find_edit_link(self): + return self.uri + + +class EmailSettingsLabel(EmailSettingsEntry): + """Represents a Label in object form.""" + + def GetName(self): + """Get the name of the Label object. + + Returns: + The name of this Label object as a string or None. + """ + + return self._GetProperty(LABEL_NAME) + + def SetName(self, value): + """Set the name of this Label object. + + Args: + value: string The new label name to give this object. + """ + + self._SetProperty(LABEL_NAME, value) + + name = pyproperty(GetName, SetName) + + def __init__(self, uri=None, name=None, *args, **kwargs): + """Constructs a new EmailSettingsLabel object with the given arguments. + + Args: + uri: string (optional) The uri of of this object for HTTP requests. + name: string (optional) The name to give this new object. + args: The other parameters to pass to gdata.entry.GDEntry constructor. + kwargs: The other parameters to pass to gdata.entry.GDEntry constructor. + """ + super(EmailSettingsLabel, self).__init__(*args, **kwargs) + if uri: + self.uri = uri + if name: + self.name = name + + +class EmailSettingsFilter(EmailSettingsEntry): + """Represents an Email Settings Filter in object form.""" + + def GetFrom(self): + """Get the From value of the Filter object. + + Returns: + The From value of this Filter object as a string or None. + """ + + return self._GetProperty(FILTER_FROM_NAME) + + def SetFrom(self, value): + """Set the From value of this Filter object. + + Args: + value: string The new From value to give this object. + """ + + self._SetProperty(FILTER_FROM_NAME, value) + + from_address = pyproperty(GetFrom, SetFrom) + + def GetTo(self): + """Get the To value of the Filter object. + + Returns: + The To value of this Filter object as a string or None. + """ + + return self._GetProperty(FILTER_TO_NAME) + + def SetTo(self, value): + """Set the To value of this Filter object. + + Args: + value: string The new To value to give this object. + """ + + self._SetProperty(FILTER_TO_NAME, value) + + to_address = pyproperty(GetTo, SetTo) + + def GetSubject(self): + """Get the Subject value of the Filter object. + + Returns: + The Subject value of this Filter object as a string or None. 
+ """ + + return self._GetProperty(FILTER_SUBJECT_NAME) + + def SetSubject(self, value): + """Set the Subject value of this Filter object. + + Args: + value: string The new Subject value to give this object. + """ + + self._SetProperty(FILTER_SUBJECT_NAME, value) + + subject = pyproperty(GetSubject, SetSubject) + + def GetHasTheWord(self): + """Get the HasTheWord value of the Filter object. + + Returns: + The HasTheWord value of this Filter object as a string or None. + """ + + return self._GetProperty(FILTER_HAS_THE_WORD_NAME) + + def SetHasTheWord(self, value): + """Set the HasTheWord value of this Filter object. + + Args: + value: string The new HasTheWord value to give this object. + """ + + self._SetProperty(FILTER_HAS_THE_WORD_NAME, value) + + has_the_word = pyproperty(GetHasTheWord, SetHasTheWord) + + def GetDoesNotHaveTheWord(self): + """Get the DoesNotHaveTheWord value of the Filter object. + + Returns: + The DoesNotHaveTheWord value of this Filter object as a string or None. + """ + + return self._GetProperty(FILTER_DOES_NOT_HAVE_THE_WORD_NAME) + + def SetDoesNotHaveTheWord(self, value): + """Set the DoesNotHaveTheWord value of this Filter object. + + Args: + value: string The new DoesNotHaveTheWord value to give this object. + """ + + self._SetProperty(FILTER_DOES_NOT_HAVE_THE_WORD_NAME, value) + + does_not_have_the_word = pyproperty(GetDoesNotHaveTheWord, + SetDoesNotHaveTheWord) + + def GetHasAttachments(self): + """Get the HasAttachments value of the Filter object. + + Returns: + The HasAttachments value of this Filter object as a string or None. + """ + + return self._GetProperty(FILTER_HAS_ATTACHMENTS_NAME) + + def SetHasAttachments(self, value): + """Set the HasAttachments value of this Filter object. + + Args: + value: string The new HasAttachments value to give this object. + """ + + self._SetProperty(FILTER_HAS_ATTACHMENTS_NAME, value) + + has_attachments = pyproperty(GetHasAttachments, + SetHasAttachments) + + def GetLabel(self): + """Get the Label value of the Filter object. + + Returns: + The Label value of this Filter object as a string or None. + """ + + return self._GetProperty(FILTER_LABEL) + + def SetLabel(self, value): + """Set the Label value of this Filter object. + + Args: + value: string The new Label value to give this object. + """ + + self._SetProperty(FILTER_LABEL, value) + + label = pyproperty(GetLabel, SetLabel) + + def GetMarkAsRead(self): + """Get the MarkAsRead value of the Filter object. + + Returns: + The MarkAsRead value of this Filter object as a string or None. + """ + + return self._GetProperty(FILTER_MARK_AS_READ) + + def SetMarkAsRead(self, value): + """Set the MarkAsRead value of this Filter object. + + Args: + value: string The new MarkAsRead value to give this object. + """ + + self._SetProperty(FILTER_MARK_AS_READ, value) + + mark_as_read = pyproperty(GetMarkAsRead, SetMarkAsRead) + + def GetArchive(self): + """Get the Archive value of the Filter object. + + Returns: + The Archive value of this Filter object as a string or None. + """ + + return self._GetProperty(FILTER_ARCHIVE) + + def SetArchive(self, value): + """Set the Archive value of this Filter object. + + Args: + value: string The new Archive value to give this object. 
+ """ + + self._SetProperty(FILTER_ARCHIVE, value) + + archive = pyproperty(GetArchive, SetArchive) + + def __init__(self, uri=None, from_address=None, to_address=None, + subject=None, has_the_word=None, does_not_have_the_word=None, + has_attachments=None, label=None, mark_as_read=None, + archive=None, *args, **kwargs): + """Constructs a new EmailSettingsFilter object with the given arguments. + + Args: + uri: string (optional) The uri of of this object for HTTP requests. + from_address: string (optional) The source email address for the filter. + to_address: string (optional) The destination email address for + the filter. + subject: string (optional) The value the email must have in its + subject to be filtered. + has_the_word: string (optional) The value the email must have in its + subject or body to be filtered. + does_not_have_the_word: string (optional) The value the email cannot + have in its subject or body to be filtered. + has_attachments: Boolean (optional) Whether or not the email must + have an attachment to be filtered. + label: string (optional) The name of the label to apply to + messages matching the filter criteria. + mark_as_read: Boolean (optional) Whether or not to mark messages + matching the filter criteria as read. + archive: Boolean (optional) Whether or not to move messages + matching to Archived state. + args: The other parameters to pass to gdata.entry.GDEntry constructor. + kwargs: The other parameters to pass to gdata.entry.GDEntry constructor. + """ + super(EmailSettingsFilter, self).__init__(*args, **kwargs) + if uri: + self.uri = uri + if from_address: + self.from_address = from_address + if to_address: + self.to_address = to_address + if subject: + self.subject = subject + if has_the_word: + self.has_the_word = has_the_word + if does_not_have_the_word: + self.does_not_have_the_word = does_not_have_the_word + if has_attachments is not None: + self.has_attachments = str(has_attachments) + if label: + self.label = label + if mark_as_read is not None: + self.mark_as_read = str(mark_as_read) + if archive is not None: + self.archive = str(archive) + + +class EmailSettingsSendAsAlias(EmailSettingsEntry): + """Represents an Email Settings send-as Alias in object form.""" + + def GetName(self): + """Get the Name of the send-as Alias object. + + Returns: + The Name of this send-as Alias object as a string or None. + """ + + return self._GetProperty(SENDAS_ALIAS_NAME) + + def SetName(self, value): + """Set the Name of this send-as Alias object. + + Args: + value: string The new Name to give this object. + """ + + self._SetProperty(SENDAS_ALIAS_NAME, value) + + name = pyproperty(GetName, SetName) + + def GetAddress(self): + """Get the Address of the send-as Alias object. + + Returns: + The Address of this send-as Alias object as a string or None. + """ + + return self._GetProperty(SENDAS_ALIAS_ADDRESS) + + def SetAddress(self, value): + """Set the Address of this send-as Alias object. + + Args: + value: string The new Address to give this object. + """ + + self._SetProperty(SENDAS_ALIAS_ADDRESS, value) + + address = pyproperty(GetAddress, SetAddress) + + def GetReplyTo(self): + """Get the ReplyTo address of the send-as Alias object. + + Returns: + The ReplyTo address of this send-as Alias object as a string or None. + """ + + return self._GetProperty(SENDAS_ALIAS_REPLY_TO) + + def SetReplyTo(self, value): + """Set the ReplyTo address of this send-as Alias object. + + Args: + value: string The new ReplyTo address to give this object. 
+ """ + + self._SetProperty(SENDAS_ALIAS_REPLY_TO, value) + + reply_to = pyproperty(GetReplyTo, SetReplyTo) + + def GetMakeDefault(self): + """Get the MakeDefault value of the send-as Alias object. + + Returns: + The MakeDefault value of this send-as Alias object as a string or None. + """ + + return self._GetProperty(SENDAS_ALIAS_MAKE_DEFAULT) + + def SetMakeDefault(self, value): + """Set the MakeDefault value of this send-as Alias object. + + Args: + value: string The new MakeDefault valueto give this object.WebClip + """ + + self._SetProperty(SENDAS_ALIAS_MAKE_DEFAULT, value) + + make_default = pyproperty(GetMakeDefault, SetMakeDefault) + + def __init__(self, uri=None, name=None, address=None, reply_to=None, + make_default=None, *args, **kwargs): + """Constructs a new EmailSettingsSendAsAlias object with the given + arguments. + + Args: + uri: string (optional) The uri of of this object for HTTP requests. + name: string (optional) The name that will appear in the "From" field + for this user. + address: string (optional) The email address that appears as the + origination address for emails sent by this user. + reply_to: string (optional) The address to be used as the reply-to + address in email sent using the alias. + make_default: Boolean (optional) Whether or not this alias should + become the default alias for this user. + args: The other parameters to pass to gdata.entry.GDEntry constructor. + kwargs: The other parameters to pass to gdata.entry.GDEntry constructor. + """ + super(EmailSettingsSendAsAlias, self).__init__(*args, **kwargs) + if uri: + self.uri = uri + if name: + self.name = name + if address: + self.address = address + if reply_to: + self.reply_to = reply_to + if make_default is not None: + self.make_default = str(make_default) + + +class EmailSettingsWebClip(EmailSettingsEntry): + """Represents a WebClip in object form.""" + + def GetEnable(self): + """Get the Enable value of the WebClip object. + + Returns: + The Enable value of this WebClip object as a string or None. + """ + + return self._GetProperty(WEBCLIP_ENABLE) + + def SetEnable(self, value): + """Set the Enable value of this WebClip object. + + Args: + value: string The new Enable value to give this object. + """ + + self._SetProperty(WEBCLIP_ENABLE, value) + + enable = pyproperty(GetEnable, SetEnable) + + def __init__(self, uri=None, enable=None, *args, **kwargs): + """Constructs a new EmailSettingsWebClip object with the given arguments. + + Args: + uri: string (optional) The uri of of this object for HTTP requests. + enable: Boolean (optional) Whether to enable showing Web clips. + args: The other parameters to pass to gdata.entry.GDEntry constructor. + kwargs: The other parameters to pass to gdata.entry.GDEntry constructor. + """ + super(EmailSettingsWebClip, self).__init__(*args, **kwargs) + if uri: + self.uri = uri + if enable is not None: + self.enable = str(enable) + + +class EmailSettingsForwarding(EmailSettingsEntry): + """Represents Forwarding settings in object form.""" + + def GetEnable(self): + """Get the Enable value of the Forwarding object. + + Returns: + The Enable value of this Forwarding object as a string or None. + """ + + return self._GetProperty(FORWARDING_ENABLE) + + def SetEnable(self, value): + """Set the Enable value of this Forwarding object. + + Args: + value: string The new Enable value to give this object. 
+ """ + + self._SetProperty(FORWARDING_ENABLE, value) + + enable = pyproperty(GetEnable, SetEnable) + + def GetForwardTo(self): + """Get the ForwardTo value of the Forwarding object. + + Returns: + The ForwardTo value of this Forwarding object as a string or None. + """ + + return self._GetProperty(FORWARDING_TO) + + def SetForwardTo(self, value): + """Set the ForwardTo value of this Forwarding object. + + Args: + value: string The new ForwardTo value to give this object. + """ + + self._SetProperty(FORWARDING_TO, value) + + forward_to = pyproperty(GetForwardTo, SetForwardTo) + + def GetAction(self): + """Get the Action value of the Forwarding object. + + Returns: + The Action value of this Forwarding object as a string or None. + """ + + return self._GetProperty(FORWARDING_ACTION) + + def SetAction(self, value): + """Set the Action value of this Forwarding object. + + Args: + value: string The new Action value to give this object. + """ + + self._SetProperty(FORWARDING_ACTION, value) + + action = pyproperty(GetAction, SetAction) + + def __init__(self, uri=None, enable=None, forward_to=None, action=None, + *args, **kwargs): + """Constructs a new EmailSettingsForwarding object with the given arguments. + + Args: + uri: string (optional) The uri of of this object for HTTP requests. + enable: Boolean (optional) Whether to enable incoming email forwarding. + forward_to: string (optional) The address email will be forwarded to. + action: string (optional) The action to perform after forwarding an + email ("KEEP", "ARCHIVE", "DELETE"). + args: The other parameters to pass to gdata.entry.GDEntry constructor. + kwargs: The other parameters to pass to gdata.entry.GDEntry constructor. + """ + super(EmailSettingsForwarding, self).__init__(*args, **kwargs) + if uri: + self.uri = uri + if enable is not None: + self.enable = str(enable) + if forward_to: + self.forward_to = forward_to + if action: + self.action = action + + +class EmailSettingsPop(EmailSettingsEntry): + """Represents POP settings in object form.""" + + def GetEnable(self): + """Get the Enable value of the POP object. + + Returns: + The Enable value of this POP object as a string or None. + """ + + return self._GetProperty(POP_ENABLE) + + def SetEnable(self, value): + """Set the Enable value of this POP object. + + Args: + value: string The new Enable value to give this object. + """ + + self._SetProperty(POP_ENABLE, value) + + enable = pyproperty(GetEnable, SetEnable) + + def GetEnableFor(self): + """Get the EnableFor value of the POP object. + + Returns: + The EnableFor value of this POP object as a string or None. + """ + + return self._GetProperty(POP_ENABLE_FOR) + + def SetEnableFor(self, value): + """Set the EnableFor value of this POP object. + + Args: + value: string The new EnableFor value to give this object. + """ + + self._SetProperty(POP_ENABLE_FOR, value) + + enable_for = pyproperty(GetEnableFor, SetEnableFor) + + def GetPopAction(self): + """Get the Action value of the POP object. + + Returns: + The Action value of this POP object as a string or None. + """ + + return self._GetProperty(POP_ACTION) + + def SetPopAction(self, value): + """Set the Action value of this POP object. + + Args: + value: string The new Action value to give this object. + """ + + self._SetProperty(POP_ACTION, value) + + action = pyproperty(GetPopAction, SetPopAction) + + def __init__(self, uri=None, enable=None, enable_for=None, + action=None, *args, **kwargs): + """Constructs a new EmailSettingsPOP object with the given arguments. 
+ + Args: + uri: string (optional) The uri of of this object for HTTP requests. + enable: Boolean (optional) Whether to enable incoming POP3 access. + enable_for: string (optional) Whether to enable POP3 for all mail + ("ALL_MAIL"), or mail from now on ("MAIL_FROM_NOW_ON"). + action: string (optional) What Google Mail should do with its copy + of the email after it is retrieved using POP + ("KEEP", "ARCHIVE", or "DELETE"). + args: The other parameters to pass to gdata.entry.GDEntry constructor. + kwargs: The other parameters to pass to gdata.entry.GDEntry constructor. + """ + super(EmailSettingsPop, self).__init__(*args, **kwargs) + if uri: + self.uri = uri + if enable is not None: + self.enable = str(enable) + if enable_for: + self.enable_for = enable_for + if action: + self.action = action + + +class EmailSettingsImap(EmailSettingsEntry): + """Represents IMAP settings in object form.""" + + def GetEnable(self): + """Get the Enable value of the IMAP object. + + Returns: + The Enable value of this IMAP object as a string or None. + """ + + return self._GetProperty(IMAP_ENABLE) + + def SetEnable(self, value): + """Set the Enable value of this IMAP object. + + Args: + value: string The new Enable value to give this object. + """ + + self._SetProperty(IMAP_ENABLE, value) + + enable = pyproperty(GetEnable, SetEnable) + + def __init__(self, uri=None, enable=None, *args, **kwargs): + """Constructs a new EmailSettingsImap object with the given arguments. + + Args: + uri: string (optional) The uri of of this object for HTTP requests. + enable: Boolean (optional) Whether to enable IMAP access. + args: The other parameters to pass to gdata.entry.GDEntry constructor. + kwargs: The other parameters to pass to gdata.entry.GDEntry constructor. + """ + super(EmailSettingsImap, self).__init__(*args, **kwargs) + if uri: + self.uri = uri + if enable is not None: + self.enable = str(enable) + + +class EmailSettingsVacationResponder(EmailSettingsEntry): + """Represents Vacation Responder settings in object form.""" + + def GetEnable(self): + """Get the Enable value of the Vacation Responder object. + + Returns: + The Enable value of this Vacation Responder object as a string or None. + """ + + return self._GetProperty(VACATION_RESPONDER_ENABLE) + + def SetEnable(self, value): + """Set the Enable value of this Vacation Responder object. + + Args: + value: string The new Enable value to give this object. + """ + + self._SetProperty(VACATION_RESPONDER_ENABLE, value) + + enable = pyproperty(GetEnable, SetEnable) + + def GetSubject(self): + """Get the Subject value of the Vacation Responder object. + + Returns: + The Subject value of this Vacation Responder object as a string or None. + """ + + return self._GetProperty(VACATION_RESPONDER_SUBJECT) + + def SetSubject(self, value): + """Set the Subject value of this Vacation Responder object. + + Args: + value: string The new Subject value to give this object. + """ + + self._SetProperty(VACATION_RESPONDER_SUBJECT, value) + + subject = pyproperty(GetSubject, SetSubject) + + def GetMessage(self): + """Get the Message value of the Vacation Responder object. + + Returns: + The Message value of this Vacation Responder object as a string or None. + """ + + return self._GetProperty(VACATION_RESPONDER_MESSAGE) + + def SetMessage(self, value): + """Set the Message value of this Vacation Responder object. + + Args: + value: string The new Message value to give this object. 
+ """ + + self._SetProperty(VACATION_RESPONDER_MESSAGE, value) + + message = pyproperty(GetMessage, SetMessage) + + def GetContactsOnly(self): + """Get the ContactsOnly value of the Vacation Responder object. + + Returns: + The ContactsOnly value of this Vacation Responder object as a + string or None. + """ + + return self._GetProperty(VACATION_RESPONDER_CONTACTS_ONLY) + + def SetContactsOnly(self, value): + """Set the ContactsOnly value of this Vacation Responder object. + + Args: + value: string The new ContactsOnly value to give this object. + """ + + self._SetProperty(VACATION_RESPONDER_CONTACTS_ONLY, value) + + contacts_only = pyproperty(GetContactsOnly, SetContactsOnly) + + def __init__(self, uri=None, enable=None, subject=None, + message=None, contacts_only=None, *args, **kwargs): + """Constructs a new EmailSettingsVacationResponder object with the + given arguments. + + Args: + uri: string (optional) The uri of of this object for HTTP requests. + enable: Boolean (optional) Whether to enable the vacation responder. + subject: string (optional) The subject line of the vacation responder + autoresponse. + message: string (optional) The message body of the vacation responder + autoresponse. + contacts_only: Boolean (optional) Whether to only send autoresponses + to known contacts. + args: The other parameters to pass to gdata.entry.GDEntry constructor. + kwargs: The other parameters to pass to gdata.entry.GDEntry constructor. + """ + super(EmailSettingsVacationResponder, self).__init__(*args, **kwargs) + if uri: + self.uri = uri + if enable is not None: + self.enable = str(enable) + if subject: + self.subject = subject + if message: + self.message = message + if contacts_only is not None: + self.contacts_only = str(contacts_only) + + +class EmailSettingsSignature(EmailSettingsEntry): + """Represents a Signature in object form.""" + + def GetValue(self): + """Get the value of the Signature object. + + Returns: + The value of this Signature object as a string or None. + """ + + value = self._GetProperty(SIGNATURE_VALUE) + if value == ' ': # hack to support empty signature + return '' + else: + return value + + def SetValue(self, value): + """Set the name of this Signature object. + + Args: + value: string The new signature value to give this object. + """ + + if value == '': # hack to support empty signature + value = ' ' + self._SetProperty(SIGNATURE_VALUE, value) + + signature_value = pyproperty(GetValue, SetValue) + + def __init__(self, uri=None, signature=None, *args, **kwargs): + """Constructs a new EmailSettingsSignature object with the given arguments. + + Args: + uri: string (optional) The uri of of this object for HTTP requests. + signature: string (optional) The signature to be appended to outgoing + messages. + args: The other parameters to pass to gdata.entry.GDEntry constructor. + kwargs: The other parameters to pass to gdata.entry.GDEntry constructor. + """ + super(EmailSettingsSignature, self).__init__(*args, **kwargs) + if uri: + self.uri = uri + if signature is not None: + self.signature_value = signature + + +class EmailSettingsLanguage(EmailSettingsEntry): + """Represents Language Settings in object form.""" + + def GetLanguage(self): + """Get the tag of the Language object. + + Returns: + The tag of this Language object as a string or None. + """ + + return self._GetProperty(LANGUAGE_TAG) + + def SetLanguage(self, value): + """Set the tag of this Language object. + + Args: + value: string The new tag value to give this object. 
+ """ + + self._SetProperty(LANGUAGE_TAG, value) + + language_tag = pyproperty(GetLanguage, SetLanguage) + + def __init__(self, uri=None, language=None, *args, **kwargs): + """Constructs a new EmailSettingsLanguage object with the given arguments. + + Args: + uri: string (optional) The uri of of this object for HTTP requests. + language: string (optional) The language tag for Google Mail's display + language. + args: The other parameters to pass to gdata.entry.GDEntry constructor. + kwargs: The other parameters to pass to gdata.entry.GDEntry constructor. + """ + super(EmailSettingsLanguage, self).__init__(*args, **kwargs) + if uri: + self.uri = uri + if language: + self.language_tag = language + + +class EmailSettingsGeneral(EmailSettingsEntry): + """Represents General Settings in object form.""" + + def GetPageSize(self): + """Get the Page Size value of the General Settings object. + + Returns: + The Page Size value of this General Settings object as a string or None. + """ + + return self._GetProperty(GENERAL_PAGE_SIZE) + + def SetPageSize(self, value): + """Set the Page Size value of this General Settings object. + + Args: + value: string The new Page Size value to give this object. + """ + + self._SetProperty(GENERAL_PAGE_SIZE, value) + + page_size = pyproperty(GetPageSize, SetPageSize) + + def GetShortcuts(self): + """Get the Shortcuts value of the General Settings object. + + Returns: + The Shortcuts value of this General Settings object as a string or None. + """ + + return self._GetProperty(GENERAL_SHORTCUTS) + + def SetShortcuts(self, value): + """Set the Shortcuts value of this General Settings object. + + Args: + value: string The new Shortcuts value to give this object. + """ + + self._SetProperty(GENERAL_SHORTCUTS, value) + + shortcuts = pyproperty(GetShortcuts, SetShortcuts) + + def GetArrows(self): + """Get the Arrows value of the General Settings object. + + Returns: + The Arrows value of this General Settings object as a string or None. + """ + + return self._GetProperty(GENERAL_ARROWS) + + def SetArrows(self, value): + """Set the Arrows value of this General Settings object. + + Args: + value: string The new Arrows value to give this object. + """ + + self._SetProperty(GENERAL_ARROWS, value) + + arrows = pyproperty(GetArrows, SetArrows) + + def GetSnippets(self): + """Get the Snippets value of the General Settings object. + + Returns: + The Snippets value of this General Settings object as a string or None. + """ + + return self._GetProperty(GENERAL_SNIPPETS) + + def SetSnippets(self, value): + """Set the Snippets value of this General Settings object. + + Args: + value: string The new Snippets value to give this object. + """ + + self._SetProperty(GENERAL_SNIPPETS, value) + + snippets = pyproperty(GetSnippets, SetSnippets) + + def GetUnicode(self): + """Get the Unicode value of the General Settings object. + + Returns: + The Unicode value of this General Settings object as a string or None. + """ + + return self._GetProperty(GENERAL_UNICODE) + + def SetUnicode(self, value): + """Set the Unicode value of this General Settings object. + + Args: + value: string The new Unicode value to give this object. + """ + + self._SetProperty(GENERAL_UNICODE, value) + + use_unicode = pyproperty(GetUnicode, SetUnicode) + + def __init__(self, uri=None, page_size=None, shortcuts=None, + arrows=None, snippets=None, use_unicode=None, *args, **kwargs): + """Constructs a new EmailSettingsGeneral object with the given arguments. 
+ + Args: + uri: string (optional) The uri of of this object for HTTP requests. + page_size: int (optional) The number of conversations to be shown per page. + shortcuts: Boolean (optional) Whether to enable keyboard shortcuts. + arrows: Boolean (optional) Whether to display arrow-shaped personal + indicators next to email sent specifically to the user. + snippets: Boolean (optional) Whether to display snippets of the messages + in the inbox and when searching. + use_unicode: Boolean (optional) Whether to use UTF-8 (unicode) encoding + for all outgoing messages. + args: The other parameters to pass to gdata.entry.GDEntry constructor. + kwargs: The other parameters to pass to gdata.entry.GDEntry constructor. + """ + super(EmailSettingsGeneral, self).__init__(*args, **kwargs) + if uri: + self.uri = uri + if page_size is not None: + self.page_size = str(page_size) + if shortcuts is not None: + self.shortcuts = str(shortcuts) + if arrows is not None: + self.arrows = str(arrows) + if snippets is not None: + self.snippets = str(snippets) + if use_unicode is not None: + self.use_unicode = str(use_unicode) + +class EmailSettingsDelegation(EmailSettingsEntry): + """Represents an Email Settings delegation entry in object form.""" + + def GetAddress(self): + """Get the email address of the delegated user. + + Returns: + The email address of the delegated user as a string or None. + """ + return self._GetProperty(DELEGATION_ADDRESS) + + def SetAddress(self, value): + """Set the email address of of the delegated user. + + Args: + value: string The email address of another user on the same domain + """ + self._SetProperty(DELEGATION_ADDRESS, value) + + address = pyproperty(GetAddress, SetAddress) + + def __init__(self, uri=None, address=None, *args, **kwargs): + """Constructs a new EmailSettingsDelegation object with the given + arguments. + + Args: + uri: string (optional) The uri of of this object for HTTP requests. + address: string The email address of the delegated user. + """ + super(EmailSettingsDelegation, self).__init__(*args, **kwargs) + if uri: + self.uri = uri + if address: + self.address = address + \ No newline at end of file diff --git a/gam/gdata/analytics/apps/emailsettings/service.py b/gam/gdata/analytics/apps/emailsettings/service.py new file mode 100755 index 00000000000..cab61eac583 --- /dev/null +++ b/gam/gdata/analytics/apps/emailsettings/service.py @@ -0,0 +1,264 @@ +#!/usr/bin/python +# +# Copyright (C) 2008 Google, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Allow Google Apps domain administrators to set users' email settings. + + EmailSettingsService: Set various email settings. 
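+
+  Example (illustrative sketch; the credentials and domain are assumed values,
+  and the constructor arguments are those inherited from
+  gdata.apps.service.PropertyService):
+    service = EmailSettingsService(email='admin@example.com',
+                                   password='secret', domain='example.com')
+    service.ProgrammaticLogin()
+    service.UpdateSignature('liz', 'Regards, Liz')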
+""" + +__author__ = 'google-apps-apis@googlegroups.com' + + +import gdata.apps +import gdata.apps.service +import gdata.service + + +API_VER='2.0' +# Forwarding and POP3 options +KEEP='KEEP' +ARCHIVE='ARCHIVE' +DELETE='DELETE' +ALL_MAIL='ALL_MAIL' +MAIL_FROM_NOW_ON='MAIL_FROM_NOW_ON' + + +class EmailSettingsService(gdata.apps.service.PropertyService): + """Client for the Google Apps Email Settings service.""" + + def _serviceUrl(self, setting_id, username, domain=None): + if domain is None: + domain = self.domain + return '/a/feeds/emailsettings/%s/%s/%s/%s' % (API_VER, domain, username, + setting_id) + + def CreateLabel(self, username, label): + """Create a label. + + Args: + username: User to create label for. + label: Label to create. + + Returns: + A dict containing the result of the create operation. + """ + uri = self._serviceUrl('label', username) + properties = {'label': label} + return self._PostProperties(uri, properties) + + def CreateFilter(self, username, from_=None, to=None, subject=None, + has_the_word=None, does_not_have_the_word=None, + has_attachment=None, label=None, should_mark_as_read=None, + should_archive=None): + """Create a filter. + + Args: + username: User to create filter for. + from_: Filter from string. + to: Filter to string. + subject: Filter subject. + has_the_word: Words to filter in. + does_not_have_the_word: Words to filter out. + has_attachment: Boolean for message having attachment. + label: Label to apply. + should_mark_as_read: Boolean for marking message as read. + should_archive: Boolean for archiving message. + + Returns: + A dict containing the result of the create operation. + """ + uri = self._serviceUrl('filter', username) + properties = {} + properties['from'] = from_ + properties['to'] = to + properties['subject'] = subject + properties['hasTheWord'] = has_the_word + properties['doesNotHaveTheWord'] = does_not_have_the_word + properties['hasAttachment'] = gdata.apps.service._bool2str(has_attachment) + properties['label'] = label + properties['shouldMarkAsRead'] = gdata.apps.service._bool2str(should_mark_as_read) + properties['shouldArchive'] = gdata.apps.service._bool2str(should_archive) + return self._PostProperties(uri, properties) + + def CreateSendAsAlias(self, username, name, address, reply_to=None, + make_default=None): + """Create alias to send mail as. + + Args: + username: User to create alias for. + name: Name of alias. + address: Email address to send from. + reply_to: Email address to reply to. + make_default: Boolean for whether this is the new default sending alias. + + Returns: + A dict containing the result of the create operation. + """ + uri = self._serviceUrl('sendas', username) + properties = {} + properties['name'] = name + properties['address'] = address + properties['replyTo'] = reply_to + properties['makeDefault'] = gdata.apps.service._bool2str(make_default) + return self._PostProperties(uri, properties) + + def UpdateWebClipSettings(self, username, enable): + """Update WebClip Settings + + Args: + username: User to update forwarding for. + enable: Boolean whether to enable Web Clip. + Returns: + A dict containing the result of the update operation. + """ + uri = self._serviceUrl('webclip', username) + properties = {} + properties['enable'] = gdata.apps.service._bool2str(enable) + return self._PutProperties(uri, properties) + + def UpdateForwarding(self, username, enable, forward_to=None, action=None): + """Update forwarding settings. + + Args: + username: User to update forwarding for. 
+ enable: Boolean whether to enable this forwarding rule. + forward_to: Email address to forward to. + action: Action to take after forwarding. + + Returns: + A dict containing the result of the update operation. + """ + uri = self._serviceUrl('forwarding', username) + properties = {} + properties['enable'] = gdata.apps.service._bool2str(enable) + if enable is True: + properties['forwardTo'] = forward_to + properties['action'] = action + return self._PutProperties(uri, properties) + + def UpdatePop(self, username, enable, enable_for=None, action=None): + """Update POP3 settings. + + Args: + username: User to update POP3 settings for. + enable: Boolean whether to enable POP3. + enable_for: Which messages to make available via POP3. + action: Action to take after user retrieves email via POP3. + + Returns: + A dict containing the result of the update operation. + """ + uri = self._serviceUrl('pop', username) + properties = {} + properties['enable'] = gdata.apps.service._bool2str(enable) + if enable is True: + properties['enableFor'] = enable_for + properties['action'] = action + return self._PutProperties(uri, properties) + + def UpdateImap(self, username, enable): + """Update IMAP settings. + + Args: + username: User to update IMAP settings for. + enable: Boolean whether to enable IMAP. + + Returns: + A dict containing the result of the update operation. + """ + uri = self._serviceUrl('imap', username) + properties = {'enable': gdata.apps.service._bool2str(enable)} + return self._PutProperties(uri, properties) + + def UpdateVacation(self, username, enable, subject=None, message=None, + contacts_only=None): + """Update vacation settings. + + Args: + username: User to update vacation settings for. + enable: Boolean whether to enable vacation responses. + subject: Vacation message subject. + message: Vacation message body. + contacts_only: Boolean whether to send message only to contacts. + + Returns: + A dict containing the result of the update operation. + """ + uri = self._serviceUrl('vacation', username) + properties = {} + properties['enable'] = gdata.apps.service._bool2str(enable) + if enable is True: + properties['subject'] = subject + properties['message'] = message + properties['contactsOnly'] = gdata.apps.service._bool2str(contacts_only) + return self._PutProperties(uri, properties) + + def UpdateSignature(self, username, signature): + """Update signature. + + Args: + username: User to update signature for. + signature: Signature string. + + Returns: + A dict containing the result of the update operation. + """ + uri = self._serviceUrl('signature', username) + properties = {'signature': signature} + return self._PutProperties(uri, properties) + + def UpdateLanguage(self, username, language): + """Update user interface language. + + Args: + username: User to update language for. + language: Language code. + + Returns: + A dict containing the result of the update operation. + """ + uri = self._serviceUrl('language', username) + properties = {'language': language} + return self._PutProperties(uri, properties) + + def UpdateGeneral(self, username, page_size=None, shortcuts=None, arrows=None, + snippets=None, unicode=None): + """Update general settings. + + Args: + username: User to update general settings for. + page_size: Number of messages to show. + shortcuts: Boolean whether shortcuts are enabled. + arrows: Boolean whether arrows are enabled. + snippets: Boolean whether snippets are enabled. + unicode: Wheter unicode is enabled. 
+ + Returns: + A dict containing the result of the update operation. + """ + uri = self._serviceUrl('general', username) + properties = {} + if page_size != None: + properties['pageSize'] = str(page_size) + if shortcuts != None: + properties['shortcuts'] = gdata.apps.service._bool2str(shortcuts) + if arrows != None: + properties['arrows'] = gdata.apps.service._bool2str(arrows) + if snippets != None: + properties['snippets'] = gdata.apps.service._bool2str(snippets) + if unicode != None: + properties['unicode'] = gdata.apps.service._bool2str(unicode) + return self._PutProperties(uri, properties) diff --git a/gam/gdata/analytics/apps/emailsettings/service.pyc b/gam/gdata/analytics/apps/emailsettings/service.pyc new file mode 100755 index 0000000000000000000000000000000000000000..59ce7bf3a65fad5cd502954647a2bd914d217888 GIT binary patch literal 9019 zcmc&(O>Z1Y8Lsiz&o1j5u-R$cYm_0G{Wq?w+2>tm9;DVY|!K)o*>h?^Ewr z)&FyL;r)xjT1VwSb^O19#~z>vmD)sEQ=2svPN}f2x>IViUeu>6^%<$7rJ+L9XI0ox zn{z6hRh#oFoKu?%Dx6oFXH>YLOkM3Qs=c#nZ%J)q^@2_R&(KuVO?!5QpB7c?IPLDr3#HBUQ~ww z$+Ge`noHcvH59kHk%=?CouoS3H6ub7hEQ4}P)Jtfx6&cI`OL689{WQS&wx**p3JHA zZS_p4y_$MnSFyl2o8s4JC~InaT3y^|PNBn^{T|Az!PRYJLVH!vQ-J5{JGT65QpzIY zwIntoR)?e#nU!&R@z^$3wQ-R+Fh%E$7fP~=aL#zD_`#7Gm5D{Kq3Tdl`c$n}Ca z*fHtjoiNCP$3)@dk<~peIMIKaok4d$fB!7){s6C*a8tEK`R#D^WEB4kv#z2-1_^7x zkrz|sR8%8_fTW*3pU(M5o=x+!#-C!FE&Ul* z#S_|bU8DoWypmH&LrdS~KtsBGNY7-0G%iPN>oDqMx|75i)BujRU?nwn(9LwREpPce zN#8&_L`m%C$+&l!N`&rF$7IVW{Po0U>nX{z$k-<7ym=O6sdpAdMxG7QNW|G6lA`tG zgy%RaTzC)TTB;4TaISW?*5N|9j#6}A;ALENXH=xr^F@I-&ft`~PPw60M-8GxZ5p*1 zsm-9)kXl1s&uVIKO6@@_gKSp400KZf3z9jh&5e=F7bNpz9~O!a3+M}EVXiYAL7>A; z*vmL@C3LoP?~QVRsp~$(3K_SS6YdYZ+5V?XF6F) zkiQpQcLVD~75N8A8n*N!lwxV}MMOEUG;ETwew<|fZm@5T=#dPv>d--!1)bd(?(C6*B z96hQ!k2^m(lfxaj!|aaVhg{9uDAa{>wMK2B{Jn(QCH!{CQW~XFk@q47FU5p$0`ZqO@j2@rdtIC?04xs506tPHKZ%u~r>)BS2nU#yHgCD+bO>Jf;P6 z15_*+&3Q|&p`;Cx&&6p9!VuQUwwwnFzg1q#*$Ik*G&MNIKJ}IJcPg~<37rbkUhu^D zq1g^#_C`S@tUEwR?EPhqNJ~>!b2Au}Rz8pQ?(?A3Dxf4PzgCbPf#|P(y&l3^A0&&3Il^PJS%1c<7 z5iB$&VFBS972=eFO<|1lKq4Bn=E@N@D96~12^|O*tl`ukXSO$>26MPnWL`u&$8yEK zB4k7{Rpw3yT~nyFym_(?%s*DNV9=p^-_mXQPo0`UpXgv4UL_r=9MW&D<-P=+4C;o% zdqkD^0C^j98%>T|U6aBVFJh8f6D|259T<-;zoQ+79x`xcGVsNFpTnJkEvIMTfN{HI z+#%YWK3jC?U4$J+IP!nS@Y)c=>-X1xJeufvl?l&9I?}NkPrO>^v1+L>;Ps^cyDvfROO}`}>jX<&o1l83cZ-w=lG*n>WY-f{bIfE#y}+U3Xxri&SYGFNhW zLWuDk1mhwuLW~K{RvvMb2DT@%K6dmmw7g>h$mc_?wI6gOG4)0$d8za3A*Jk&sHC67 z)*uLwe4Q?nArFh?6$yf|v`F6E(k~HiC1II{kQ7}fvwjkHpAKV`iVw*^q0pC_RJ7nl zw-ZM*V~1k%qUT%`PV<~^NCJmq{`*+ZvDMyKTq3}yPw4#ug=7|rVTBu~aVQvexIVZ%qOQO-9^3{TW&w?KOuqFXMRSZsEX}LQZHZST2o9R z6T=>3V7WMBQhZWMPa+11cBSC`kjz+$)4RiQe#zof7I#@tTb^(po5^}-#Xfcj{Wluy zH&Cc$2EL26xA0sZdDfaVt-=W3XP(pd%EuUXLKfi>{U&I%Dll^)g=<|IBgAC^--?rS2tng+;L@gIG!?uT-!* LM-5Dp-}?UmPhrQi literal 0 HcmV?d00001 diff --git a/gam/gdata/analytics/apps/groups/__init__.py b/gam/gdata/analytics/apps/groups/__init__.py new file mode 100755 index 00000000000..e69de29bb2d diff --git a/gam/gdata/analytics/apps/groups/__init__.pyc b/gam/gdata/analytics/apps/groups/__init__.pyc new file mode 100755 index 0000000000000000000000000000000000000000..af390ac415b429d3ae442a74cf95c07d4e549301 GIT binary patch literal 139 zcmZSn%*$mWmgtwv00oRd+5w1*S%5?e14FO|NW@PANHCxg#U?;8XRDa>{QUHsRNcga zf@0m=#Jt4x)S{U5l*E$67?4m*dQpBUkcyAb%*!l^kJl@xEa3oZw8_m+Da}c>0~uco G#0&t05FOG0 literal 0 HcmV?d00001 diff --git a/gam/gdata/analytics/apps/groups/service.py b/gam/gdata/analytics/apps/groups/service.py new file mode 100755 index 00000000000..80df41773b7 --- /dev/null +++ b/gam/gdata/analytics/apps/groups/service.py @@ -0,0 +1,387 @@ 
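A minimal usage sketch for the EmailSettingsService defined earlier in this patch. It is not part of the patch itself; it assumes the module is importable under the stock gdata path (gdata.apps.emailsettings.service) rather than the vendored gam/gdata/analytics/... location, that ClientLogin credentials for a domain administrator are available, and that admin@example.com, example.com and jdoe are placeholders.

    import gdata.apps.emailsettings.service

    # Placeholder admin credentials and domain; authenticate via ClientLogin
    # as gdata 2.x services do.
    client = gdata.apps.emailsettings.service.EmailSettingsService(
        email='admin@example.com', password='secret', domain='example.com',
        source='emailsettings-sketch')
    client.ProgrammaticLogin()

    # Enable IMAP for a user, then forward new mail while keeping a copy
    # in the mailbox (KEEP is the module-level action constant).
    client.UpdateImap('jdoe', enable=True)
    client.UpdateForwarding('jdoe', enable=True,
                            forward_to='archive@example.com',
                            action=gdata.apps.emailsettings.service.KEEP)

Each call returns the dict of properties echoed back by the Email Settings API, so the result can be inspected or logged directly.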
+#!/usr/bin/python +# +# Copyright (C) 2008 Google, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Allow Google Apps domain administrators to manage groups, group members and group owners. + + GroupsService: Provides methods to manage groups, members and owners. +""" + +__author__ = 'google-apps-apis@googlegroups.com' + + +import urllib +import gdata.apps +import gdata.apps.service +import gdata.service + + +API_VER = '2.0' +BASE_URL = '/a/feeds/group/' + API_VER + '/%s' +GROUP_MEMBER_URL = BASE_URL + '?member=%s' +GROUP_MEMBER_DIRECT_URL = GROUP_MEMBER_URL + '&directOnly=%s' +GROUP_ID_URL = BASE_URL + '/%s' +MEMBER_URL = BASE_URL + '/%s/member' +MEMBER_WITH_SUSPENDED_URL = MEMBER_URL + '?includeSuspendedUsers=%s' +MEMBER_ID_URL = MEMBER_URL + '/%s' +OWNER_URL = BASE_URL + '/%s/owner' +OWNER_WITH_SUSPENDED_URL = OWNER_URL + '?includeSuspendedUsers=%s' +OWNER_ID_URL = OWNER_URL + '/%s' + +PERMISSION_OWNER = 'Owner' +PERMISSION_MEMBER = 'Member' +PERMISSION_DOMAIN = 'Domain' +PERMISSION_ANYONE = 'Anyone' + + +class GroupsService(gdata.apps.service.PropertyService): + """Client for the Google Apps Groups service.""" + + def _ServiceUrl(self, service_type, is_existed, group_id, member_id, owner_email, + direct_only=False, domain=None, suspended_users=False): + if domain is None: + domain = self.domain + + if service_type == 'group': + if group_id != '' and is_existed: + return GROUP_ID_URL % (domain, group_id) + elif member_id != '': + if direct_only: + return GROUP_MEMBER_DIRECT_URL % (domain, urllib.quote_plus(member_id), + self._Bool2Str(direct_only)) + else: + return GROUP_MEMBER_URL % (domain, urllib.quote_plus(member_id)) + else: + return BASE_URL % (domain) + + if service_type == 'member': + if member_id != '' and is_existed: + return MEMBER_ID_URL % (domain, group_id, urllib.quote_plus(member_id)) + elif suspended_users: + return MEMBER_WITH_SUSPENDED_URL % (domain, group_id, + self._Bool2Str(suspended_users)) + else: + return MEMBER_URL % (domain, group_id) + + if service_type == 'owner': + if owner_email != '' and is_existed: + return OWNER_ID_URL % (domain, group_id, urllib.quote_plus(owner_email)) + elif suspended_users: + return OWNER_WITH_SUSPENDED_URL % (domain, group_id, + self._Bool2Str(suspended_users)) + else: + return OWNER_URL % (domain, group_id) + + def _Bool2Str(self, b): + if b is None: + return None + return str(b is True).lower() + + def _IsExisted(self, uri): + try: + self._GetProperties(uri) + return True + except gdata.apps.service.AppsForYourDomainException, e: + if e.error_code == gdata.apps.service.ENTITY_DOES_NOT_EXIST: + return False + else: + raise e + + def CreateGroup(self, group_id, group_name, description, email_permission): + """Create a group. + + Args: + group_id: The ID of the group (e.g. us-sales). + group_name: The name of the group. + description: A description of the group + email_permission: The subscription permission of the group. + + Returns: + A dict containing the result of the create operation. 
+ """ + uri = self._ServiceUrl('group', False, group_id, '', '') + properties = {} + properties['groupId'] = group_id + properties['groupName'] = group_name + properties['description'] = description + properties['emailPermission'] = email_permission + return self._PostProperties(uri, properties) + + def UpdateGroup(self, group_id, group_name, description, email_permission): + """Update a group's name, description and/or permission. + + Args: + group_id: The ID of the group (e.g. us-sales). + group_name: The name of the group. + description: A description of the group + email_permission: The subscription permission of the group. + + Returns: + A dict containing the result of the update operation. + """ + uri = self._ServiceUrl('group', True, group_id, '', '') + properties = {} + properties['groupId'] = group_id + properties['groupName'] = group_name + properties['description'] = description + properties['emailPermission'] = email_permission + return self._PutProperties(uri, properties) + + def RetrieveGroup(self, group_id): + """Retrieve a group based on its ID. + + Args: + group_id: The ID of the group (e.g. us-sales). + + Returns: + A dict containing the result of the retrieve operation. + """ + uri = self._ServiceUrl('group', True, group_id, '', '') + return self._GetProperties(uri) + + def RetrieveAllGroups(self): + """Retrieve all groups in the domain. + + Args: + None + + Returns: + A list containing the result of the retrieve operation. + """ + uri = self._ServiceUrl('group', True, '', '', '') + return self._GetPropertiesList(uri) + + def RetrievePageOfGroups(self, start_group=None): + """Retrieve one page of groups in the domain. + + Args: + start_group: The key to continue for pagination through all groups. + + Returns: + A feed object containing the result of the retrieve operation. + """ + uri = self._ServiceUrl('group', True, '', '', '') + if start_group is not None: + uri += "?start="+start_group + property_feed = self._GetPropertyFeed(uri) + return property_feed + + def RetrieveGroups(self, member_id, direct_only=False): + """Retrieve all groups that belong to the given member_id. + + Args: + member_id: The member's email address (e.g. member@example.com). + direct_only: Boolean whether only return groups that this member directly belongs to. + + Returns: + A list containing the result of the retrieve operation. + """ + uri = self._ServiceUrl('group', True, '', member_id, '', direct_only=direct_only) + return self._GetPropertiesList(uri) + + def DeleteGroup(self, group_id): + """Delete a group based on its ID. + + Args: + group_id: The ID of the group (e.g. us-sales). + + Returns: + A dict containing the result of the delete operation. + """ + uri = self._ServiceUrl('group', True, group_id, '', '') + return self._DeleteProperties(uri) + + def AddMemberToGroup(self, member_id, group_id): + """Add a member to a group. + + Args: + member_id: The member's email address (e.g. member@example.com). + group_id: The ID of the group (e.g. us-sales). + + Returns: + A dict containing the result of the add operation. + """ + uri = self._ServiceUrl('member', False, group_id, member_id, '') + properties = {} + properties['memberId'] = member_id + return self._PostProperties(uri, properties) + + def IsMember(self, member_id, group_id): + """Check whether the given member already exists in the given group. + + Args: + member_id: The member's email address (e.g. member@example.com). + group_id: The ID of the group (e.g. us-sales). + + Returns: + True if the member exists in the group. 
False otherwise. + """ + uri = self._ServiceUrl('member', True, group_id, member_id, '') + return self._IsExisted(uri) + + def RetrieveMember(self, member_id, group_id): + """Retrieve the given member in the given group. + + Args: + member_id: The member's email address (e.g. member@example.com). + group_id: The ID of the group (e.g. us-sales). + + Returns: + A dict containing the result of the retrieve operation. + """ + uri = self._ServiceUrl('member', True, group_id, member_id, '') + return self._GetProperties(uri) + + def RetrieveAllMembers(self, group_id, suspended_users=False): + """Retrieve all members in the given group. + + Args: + group_id: The ID of the group (e.g. us-sales). + suspended_users: A boolean; should we include any suspended users in + the membership list returned? + + Returns: + A list containing the result of the retrieve operation. + """ + uri = self._ServiceUrl('member', True, group_id, '', '', + suspended_users=suspended_users) + return self._GetPropertiesList(uri) + + def RetrievePageOfMembers(self, group_id, suspended_users=False, start=None): + """Retrieve one page of members of a given group. + + Args: + group_id: The ID of the group (e.g. us-sales). + suspended_users: A boolean; should we include any suspended users in + the membership list returned? + start: The key to continue for pagination through all members. + + Returns: + A feed object containing the result of the retrieve operation. + """ + + uri = self._ServiceUrl('member', True, group_id, '', '', + suspended_users=suspended_users) + if start is not None: + if suspended_users: + uri += "&start="+start + else: + uri += "?start="+start + property_feed = self._GetPropertyFeed(uri) + return property_feed + + def RemoveMemberFromGroup(self, member_id, group_id): + """Remove the given member from the given group. + + Args: + member_id: The member's email address (e.g. member@example.com). + group_id: The ID of the group (e.g. us-sales). + + Returns: + A dict containing the result of the remove operation. + """ + uri = self._ServiceUrl('member', True, group_id, member_id, '') + return self._DeleteProperties(uri) + + def AddOwnerToGroup(self, owner_email, group_id): + """Add an owner to a group. + + Args: + owner_email: The email address of a group owner. + group_id: The ID of the group (e.g. us-sales). + + Returns: + A dict containing the result of the add operation. + """ + uri = self._ServiceUrl('owner', False, group_id, '', owner_email) + properties = {} + properties['email'] = owner_email + return self._PostProperties(uri, properties) + + def IsOwner(self, owner_email, group_id): + """Check whether the given member an owner of the given group. + + Args: + owner_email: The email address of a group owner. + group_id: The ID of the group (e.g. us-sales). + + Returns: + True if the member is an owner of the given group. False otherwise. + """ + uri = self._ServiceUrl('owner', True, group_id, '', owner_email) + return self._IsExisted(uri) + + def RetrieveOwner(self, owner_email, group_id): + """Retrieve the given owner in the given group. + + Args: + owner_email: The email address of a group owner. + group_id: The ID of the group (e.g. us-sales). + + Returns: + A dict containing the result of the retrieve operation. + """ + uri = self._ServiceUrl('owner', True, group_id, '', owner_email) + return self._GetProperties(uri) + + def RetrieveAllOwners(self, group_id, suspended_users=False): + """Retrieve all owners of the given group. + + Args: + group_id: The ID of the group (e.g. us-sales). 
+ suspended_users: A boolean; should we include any suspended users in + the ownership list returned? + + Returns: + A list containing the result of the retrieve operation. + """ + uri = self._ServiceUrl('owner', True, group_id, '', '', + suspended_users=suspended_users) + return self._GetPropertiesList(uri) + + def RetrievePageOfOwners(self, group_id, suspended_users=False, start=None): + """Retrieve one page of owners of the given group. + + Args: + group_id: The ID of the group (e.g. us-sales). + suspended_users: A boolean; should we include any suspended users in + the ownership list returned? + start: The key to continue for pagination through all owners. + + Returns: + A feed object containing the result of the retrieve operation. + """ + uri = self._ServiceUrl('owner', True, group_id, '', '', + suspended_users=suspended_users) + if start is not None: + if suspended_users: + uri += "&start="+start + else: + uri += "?start="+start + property_feed = self._GetPropertyFeed(uri) + return property_feed + + def RemoveOwnerFromGroup(self, owner_email, group_id): + """Remove the given owner from the given group. + + Args: + owner_email: The email address of a group owner. + group_id: The ID of the group (e.g. us-sales). + + Returns: + A dict containing the result of the remove operation. + """ + uri = self._ServiceUrl('owner', True, group_id, '', owner_email) + return self._DeleteProperties(uri) diff --git a/gam/gdata/analytics/apps/groups/service.pyc b/gam/gdata/analytics/apps/groups/service.pyc new file mode 100755 index 0000000000000000000000000000000000000000..4c1154bb89b65c728cd65fc437b0aaa83952cdd5 GIT binary patch literal 13387 zcmeHOO>^7E8D5YQElQMSIkxQBNsDAA4x34&Bu$glabnAs?69>Z6_nyw;|_)*APF`_ zFaT&<9eLVmeCef^-g?NTGd=dunf`(PfF5({^w2{u?X923_Icg~K#-zh%VJ_@rq*D& z*zbMc=lxo&^ZytheQT<=P*?H4Jbo|W3IBq`QEC;bp;is$|>$d!~^QqCxHey7q|VSUahll3G2goN=}KjB*aE)nm$;P^-t4gGNK@)|9$+Lfty4*0GOC z^+2glmAbL|EO%5bALVv`g=DtT@b8*ee1D_inzOA|XgdB@&1;%9XUl7PVHDINKM2jp zH@9ld+J#T&>CL0&y#JpdMmiAC^swLrcy1N;q12DlrR3}9D~%qWq_He(zFSbPAK19=#; zDEV=Mj13MeXG}7HT}d)W2sBnrO8djo{%E`&;O+s01TZ`zZGc2U9Km>0N~Vi!|sL^)_`sM|Ree5Q6lf}FaYSHYaxLCKtoa_orm^_-fj z?ug8@{ugSepn`1`{@qPvtWQ2u(Fkj%s_`oFvWi1S zXO_=p6`!l97_TBbkqo|r{rp=+qj5bqhRH`dlaIwUGG{!|kWADvb_6!u&Uu5|J#R#% z?umxefjE(E0IQlDyC*u_GZBbgEmH~>L>5H$BD^YrzLqGd5qT+7P_a8Sr$|s{v;xA4 zARX4?%934~vsbLE(TTL++WfVP^OikVvF0x=OC9^QgGR$!14)$nr0qwp-DTynj#{GnZ4sV>bg%+2dnak{vcV{z4s z*B7w5xHoaE3;J~C;&dhYmiZ>62;IiIj1(i=j<#E_Y|IO7_a3ye>&U8wxVGoWE;VWS zHim(Oy0!~l-jH{#;A|fn&f=OQ5%cD>be z?1hIaU8MO8GL%{^&Cp0d8YCP=kX8sd1n*X!X0BR>+^dB$VPy$bIRl?vM(GGsGc1O-Dj11Cdm9~Z45yTalPte|?m`GFe3 zez22UqFru}s$fMK_j8J}3MubY9uj%JN_|pA-A@hGG}Ol=OH2USJMEi&#f@NoTQEzW z8(J@-NJ}SqDO4dpp|P-P&AjLo@^lC;`@s#r9cc44f3NPgBF}FM8r>l91H0}!Zgd>2 z=NFbM%Qx(~#rdkeu()i`e_W|9%QTm3jnFM~|C-XwwF6Hm?jD@tvKYG(&TG-QiP6QX zDn>-MpOobXeUVR-RJ#p;Un|v99MpV&;a_ zIoJi}hLIbRTmd-@ga$ANFabHRK#2JcKy@i_YmsZ#w1Xs`k!j8b8{t{_Nc@+G@mX^j z7Opa9`s-rrq}(jK(;L%fJA5syHQex9(_NjKwJms7?8aPI$D|=Vu6p1J*Up-=X6Ci4 zc|5FW3>!e%^1={|)`N%bwR8}(Q`t2@yfMp-+Cekj8z%JXky!_1;8uFg4cS@XhV4d_ zbgydy5uP=IORwA_zbrCKa4E#7XjLcq1#DQWw$27ctH~CY(j92rnH=m@4o2~`m;5jz z5W*{grgfT4s30{{NL7mpOiV{Faje&p)~!^y1^{R3Zw;C&^q|-As_|Sp4OKZB|2+{r z{}6ayX+dkGcz!t)=)KwnEQA9yaM(IX`9JC*qTQ%(YclSMdh2!Y-Fkxw0cg=AXRs_f zTy*UiXu!u%UqQ06Jz3eqHtQwqN0-s@5S=5O z830r0zb2#-w)`9gAw3~gJZ;qJrW3|X!a~nQAF<&Hh!@J`)9T5Pr3>l}nivAzLH_Ui6)EF+1vzxW(;Y&R%< zLX(~_PUGZvDACGf^o33&nJSb0o73N5z&W?!cAU7U$Cx^LU5{!?BdT9pZ>AIAeo3~K z8bePdjQM@a7g|N@752GskhI2jpa2xswF zNsL}b1{t`C3=Yb@rTGBquZike_J!qtKv~F@sZkw@r3yP``z~?pW_!1)CTPObP}P?< 
z-TLiR7t)W)YGo*JgvideDTjZltEHO=x4t~3JpeH9#Le{dnMSP1dp1H3W!g05oXGTP z5AJ%Q8=nKDn8?6Dto#h9*Q<(C7?2MK3~B~0ouwS(ku*3@b)|FtidiGxe5nMTs3(zPe{P-pCwEUy_d zX(@O_LEfhX(y=$qo}+!9{Hpe5v^xF-`ybGb!A2sp$cDvI9XN~-%>orBOb1=d) zUh<~bGR3#juA1wdAAC%g3cU4A5WE)^=UCQ3g#d!>2IYV;a7-L91aul0;eSx75A?@Q z;%h}ck^U4Ln`rP5Y{1;(Lt{ZAyOsDuBdx!bBfc9_pP(28I7H4f;2X*WeDaYIr_wlK zj?b2Vl0{WE;7ibj*blyfc=xf9ATxIbeNQ{SxM%>Hq3OPZ(wG!`MF|IwN4v2qLFpMC zls+^-wLSo3$}}OJqh~?9d^3?jv_o@&?eHAXobek?0&E?J#@H?ZNFm)FHBM~v`y9T; zWy8?}*txu*bj!!d-7YJ)9{5|C(io(>0IHdoI;8q_|KaC8asIe&ZQ#mgAKA*$vSjqx zbL7$>t(;~%*QD8f8AQKAhwFK=oqCMU>?6f5ICen5vY+_nj~HFzm!>ISq^^F-3R;gFY-)Nm!kH#mpL@U9$b;Sl%Zp!ATQNq%=_btbVTm zB?Uo-_5A{rZq^S#qtp0QLSg!McL{Uip-{ib)s8Oj1U_p@MGRNdH zlPgREEt9S(HD!u#nIw1 z{FREO(nM(-zZ1pr;&^Fd;skz^{8X7&F|Ch~#Pq4P@&6|R(1ad?zO2T3oYTC+X`Nvi z&#>g4u9QtDcRJZ#7bhj8@a$5>{%GE^#Mn>-tr8P{?XrZE&oC#)V!VTGO)+6qDR<(Q z=B;a$YPGVsV9Qlz`68Do)R&`sOXn7^%~lqAOJ^5uEaL9;%bbAZmb-fKXT9yJzL+YM o91vFoeCIUWKHyg6e;gEYg*w$`T6NNsk(^; z1;x6#iFt|XsYNmADTyVCF(9Fs+|rzq%#{4x#LT>y`1s7c%#!$cy@JXT4xnzE-29Z% NoK!oI9mPP*004KgAW8rL literal 0 HcmV?d00001 diff --git a/gam/gdata/analytics/apps/multidomain/client.py b/gam/gdata/analytics/apps/multidomain/client.py new file mode 100755 index 00000000000..316aa30f161 --- /dev/null +++ b/gam/gdata/analytics/apps/multidomain/client.py @@ -0,0 +1,336 @@ +#!/usr/bin/python2.4 +# +# Copyright 2011 Google Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""MultiDomainProvisioningClient simplifies Multidomain Provisioning API calls. + +MultiDomainProvisioningClient extends gdata.client.GDClient to ease interaction +with the Google Multidomain Provisioning API. These interactions include the +ability to create, retrieve, update and delete users and aliases in multiple +domains. +""" + + +__author__ = 'Claudio Cherubino ' + + +import urllib +import gdata.apps.multidomain.data +import gdata.client + + +# Multidomain URI templates +# The strings in this template are eventually replaced with the feed type +# (user/alias), API version and Google Apps domain name, respectively. +MULTIDOMAIN_URI_TEMPLATE = '/a/feeds/%s/%s/%s' +# The strings in this template are eventually replaced with the API version, +# Google Apps domain name and old email address, respectively. +MULTIDOMAIN_USER_RENAME_URI_TEMPLATE = '/a/feeds/user/userEmail/%s/%s/%s' + +# The value for user requests +MULTIDOMAIN_USER_FEED = 'user' +# The value for alias requests +MULTIDOMAIN_ALIAS_FEED = 'alias' + + +class MultiDomainProvisioningClient(gdata.client.GDClient): + """Client extension for the Google MultiDomain Provisioning API service. + + Attributes: + host: string The hostname for the MultiDomain Provisioning API service. + api_version: string The version of the MultiDomain Provisioning API. + """ + + host = 'apps-apis.google.com' + api_version = '2.0' + auth_service = 'apps' + auth_scopes = gdata.gauth.AUTH_SCOPES['apps'] + ssl = True + + def __init__(self, domain, auth_token=None, **kwargs): + """Constructs a new client for the MultiDomain Provisioning API. 
+ + Args: + domain: string The Google Apps domain with MultiDomain Provisioning. + auth_token: (optional) gdata.gauth.ClientLoginToken, AuthSubToken, or + OAuthToken which authorizes this client to edit the email settings. + kwargs: The other parameters to pass to the gdata.client.GDClient + constructor. + """ + gdata.client.GDClient.__init__(self, auth_token=auth_token, **kwargs) + self.domain = domain + + def make_multidomain_provisioning_uri( + self, feed_type, email=None, params=None): + + """Creates a resource feed URI for the MultiDomain Provisioning API. + + Using this client's Google Apps domain, create a feed URI for multidomain + provisioning in that domain. If an email address is provided, return a + URI for that specific resource. If params are provided, append them as GET + params. + + Args: + feed_type: string The type of feed (user/alias) + email: string (optional) The email address of multidomain resource for + which to make a feed URI. + params: dict (optional) key -> value params to append as GET vars to the + URI. Example: params={'start': 'my-resource-id'} + + Returns: + A string giving the URI for multidomain provisioning for this client's + Google Apps domain. + """ + uri = MULTIDOMAIN_URI_TEMPLATE % (feed_type, self.api_version, self.domain) + if email: + uri += '/' + email + if params: + uri += '?' + urllib.urlencode(params) + return uri + + MakeMultidomainProvisioningUri = make_multidomain_provisioning_uri + + def make_multidomain_user_provisioning_uri(self, email=None, params=None): + """Creates a resource feed URI for the MultiDomain User Provisioning API. + + Using this client's Google Apps domain, create a feed URI for multidomain + user provisioning in that domain. If an email address is provided, return a + URI for that specific resource. If params are provided, append them as GET + params. + + Args: + email: string (optional) The email address of multidomain user for which + to make a feed URI. + params: dict (optional) key -> value params to append as GET vars to the + URI. Example: params={'start': 'my-resource-id'} + + Returns: + A string giving the URI for multidomain user provisioning for thisis that + client's Google Apps domain. + """ + return self.make_multidomain_provisioning_uri( + MULTIDOMAIN_USER_FEED, email, params) + + MakeMultidomainUserProvisioningUri = make_multidomain_user_provisioning_uri + + def make_multidomain_alias_provisioning_uri(self, email=None, params=None): + """Creates a resource feed URI for the MultiDomain Alias Provisioning API. + + Using this client's Google Apps domain, create a feed URI for multidomain + alias provisioning in that domain. If an email address is provided, return a + URI for that specific resource. If params are provided, append them as GET + params. + + Args: + email: string (optional) The email address of multidomain alias for which + to make a feed URI. + params: dict (optional) key -> value params to append as GET vars to the + URI. Example: params={'start': 'my-resource-id'} + + Returns: + A string giving the URI for multidomain alias provisioning for this + client's Google Apps domain. + """ + return self.make_multidomain_provisioning_uri( + MULTIDOMAIN_ALIAS_FEED, email, params) + + MakeMultidomainAliasProvisioningUri = make_multidomain_alias_provisioning_uri + + def retrieve_all_users(self, **kwargs): + """Retrieves all users in all domains. 
+ + Args: + kwargs: The other parameters to pass to gdata.client.GDClient.GetFeed() + + Returns: + A gdata.data.GDFeed of the domain users + """ + uri = self.MakeMultidomainUserProvisioningUri() + return self.GetFeed( + uri, + desired_class=gdata.apps.multidomain.data.UserFeed, + **kwargs) + + RetrieveAllUsers = retrieve_all_users + + def retrieve_user(self, email, **kwargs): + """Retrieves a single user in the domain. + + Args: + email: string The email address of the user to be retrieved + kwargs: The other parameters to pass to gdata.client.GDClient.GetEntry() + + Returns: + A gdata.apps.multidomain.data.UserEntry representing the user + """ + uri = self.MakeMultidomainUserProvisioningUri(email=email) + return self.GetEntry( + uri, + desired_class=gdata.apps.multidomain.data.UserEntry, + **kwargs) + + RetrieveUser = retrieve_user + + def create_user(self, email, first_name, last_name, password, is_admin, + hash_function=None, suspended=None, change_password=None, + ip_whitelisted=None, quota=None, **kwargs): + """Creates an user in the domain with the given properties. + + Args: + email: string The email address of the user. + first_name: string The first name of the user. + last_name: string The last name of the user. + password: string The password of the user. + is_admin: Boolean Whether or not the user has administrator privileges. + hash_function: string (optional) The name of the function used to hash the + password. + suspended: Boolean (optional) Whether or not the user is suspended. + change_password: Boolean (optional) Whether or not the user must change + password at first login. + ip_whitelisted: Boolean (optional) Whether or not the user's ip is + whitelisted. + quota: string (optional) The value (in GB) of the user's quota. + kwargs: The other parameters to pass to gdata.client.GDClient.post(). + + Returns: + A gdata.apps.multidomain.data.UserEntry of the new user + """ + new_user = gdata.apps.multidomain.data.UserEntry( + email=email, first_name=first_name, last_name=last_name, + password=password, is_admin=is_admin, hash_function=hash_function, + suspended=suspended, change_password=change_password, + ip_whitelisted=ip_whitelisted, quota=quota) + return self.post(new_user, self.MakeMultidomainUserProvisioningUri(), + **kwargs) + + CreateUser = create_user + + def update_user(self, email, user_entry, **kwargs): + """Deletes the user with the given email address. + + Args: + email: string The email address of the user to be updated. + user_entry: UserEntry The user entry with updated values. + kwargs: The other parameters to pass to gdata.client.GDClient.put() + + Returns: + A gdata.apps.multidomain.data.UserEntry representing the user + """ + return self.update(user_entry, + uri=self.MakeMultidomainUserProvisioningUri(email), + **kwargs) + + UpdateUser = update_user + + def delete_user(self, email, **kwargs): + """Deletes the user with the given email address. + + Args: + email: string The email address of the user to delete. + kwargs: The other parameters to pass to gdata.client.GDClient.delete() + + Returns: + An HTTP response object. See gdata.client.request(). + """ + return self.delete(self.MakeMultidomainUserProvisioningUri(email), **kwargs) + + DeleteUser = delete_user + + def rename_user(self, old_email, new_email, **kwargs): + """Renames an user's account to a different domain. + + Args: + old_email: string The old email address of the user to rename. + new_email: string The new email address for the user to be renamed. 
+ kwargs: The other parameters to pass to gdata.client.GDClient.put() + + Returns: + A gdata.apps.multidomain.data.UserRenameRequest representing the request. + """ + rename_uri = MULTIDOMAIN_USER_RENAME_URI_TEMPLATE % (self.api_version, + self.domain, + old_email) + entry = gdata.apps.multidomain.data.UserRenameRequest(new_email) + return self.update(entry, uri=rename_uri, **kwargs) + + RenameUser = rename_user + + def retrieve_all_aliases(self, **kwargs): + """Retrieves all aliases in the domain. + + Args: + kwargs: The other parameters to pass to gdata.client.GDClient.GetFeed() + + Returns: + A gdata.data.GDFeed of the domain aliases + """ + uri = self.MakeMultidomainAliasProvisioningUri() + return self.GetFeed( + uri, + desired_class=gdata.apps.multidomain.data.AliasFeed, + **kwargs) + + RetrieveAllAliases = retrieve_all_aliases + + def retrieve_alias(self, email, **kwargs): + """Retrieves a single alias in the domain. + + Args: + email: string The email address of the alias to be retrieved + kwargs: The other parameters to pass to gdata.client.GDClient.GetEntry() + + Returns: + A gdata.apps.multidomain.data.AliasEntry representing the alias + """ + uri = self.MakeMultidomainAliasProvisioningUri(email=email) + return self.GetEntry( + uri, + desired_class=gdata.apps.multidomain.data.AliasEntry, + **kwargs) + + RetrieveAlias = retrieve_alias + + def create_alias(self, user_email, alias_email, **kwargs): + """Creates an alias in the domain with the given properties. + + Args: + user_email: string The email address of the user. + alias_email: string The first name of the user. + kwargs: The other parameters to pass to gdata.client.GDClient.post(). + + Returns: + A gdata.apps.multidomain.data.AliasEntry of the new alias + """ + new_alias = gdata.apps.multidomain.data.AliasEntry( + user_email=user_email, alias_email=alias_email) + return self.post(new_alias, self.MakeMultidomainAliasProvisioningUri(), + **kwargs) + + CreateAlias = create_alias + + def delete_alias(self, email, **kwargs): + """Deletes the alias with the given email address. + + Args: + email: string The email address of the alias to delete. + kwargs: The other parameters to pass to gdata.client.GDClient.delete() + + Returns: + An HTTP response object. See gdata.client.request(). + """ + return self.delete(self.MakeMultidomainAliasProvisioningUri(email), + **kwargs) + + DeleteAlias = delete_alias diff --git a/gam/gdata/analytics/apps/multidomain/data.py b/gam/gdata/analytics/apps/multidomain/data.py new file mode 100755 index 00000000000..c75c5fa2b68 --- /dev/null +++ b/gam/gdata/analytics/apps/multidomain/data.py @@ -0,0 +1,453 @@ +#!/usr/bin/python2.4 +# +# Copyright 2011 Google Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""Data model classes for the Multidomain Provisioning API.""" + + +__author__ = 'Claudio Cherubino ' + + +import gdata.apps +import gdata.apps_property +import gdata.data + + +# This is required to work around a naming conflict between the Google +# Spreadsheets API and Python's built-in property function +pyproperty = property + + +# The apps:property firstName of a user entry +USER_FIRST_NAME = 'firstName' +# The apps:property lastName of a user entry +USER_LAST_NAME = 'lastName' +# The apps:property userEmail of a user entry +USER_EMAIL = 'userEmail' +# The apps:property password of a user entry +USER_PASSWORD = 'password' +# The apps:property hashFunction of a user entry +USER_HASH_FUNCTION = 'hashFunction' +# The apps:property isChangePasswordAtNextLogin of a user entry +USER_CHANGE_PASSWORD = 'isChangePasswordAtNextLogin' +# The apps:property agreedToTerms of a user entry +USER_AGREED_TO_TERMS = 'agreedToTerms' +# The apps:property isSuspended of a user entry +USER_SUSPENDED = 'isSuspended' +# The apps:property isAdmin of a user entry +USER_ADMIN = 'isAdmin' +# The apps:property ipWhitelisted of a user entry +USER_IP_WHITELISTED = 'ipWhitelisted' +# The apps:property quotaInGb of a user entry +USER_QUOTA = 'quotaInGb' + +# The apps:property newEmail of a user rename request entry +USER_NEW_EMAIL = 'newEmail' + +# The apps:property aliasEmail of an alias entry +ALIAS_EMAIL = 'aliasEmail' + + +class MultidomainProvisioningEntry(gdata.data.GDEntry): + """Represents a Multidomain Provisioning entry in object form.""" + + property = [gdata.apps_property.AppsProperty] + + def _GetProperty(self, name): + """Get the apps:property value with the given name. + + Args: + name: string Name of the apps:property value to get. + + Returns: + The apps:property value with the given name, or None if the name was + invalid. + """ + value = None + for p in self.property: + if p.name == name: + value = p.value + break + return value + + def _SetProperty(self, name, value): + """Set the apps:property value with the given name to the given value. + + Args: + name: string Name of the apps:property value to set. + value: string Value to give the apps:property value with the given name. + """ + found = False + for i in range(len(self.property)): + if self.property[i].name == name: + self.property[i].value = value + found = True + break + if not found: + self.property.append( + gdata.apps_property.AppsProperty(name=name, value=value)) + + +class UserEntry(MultidomainProvisioningEntry): + """Represents an User in object form.""" + + def GetFirstName(self): + """Get the first name of the User object. + + Returns: + The first name of this User object as a string or None. + """ + return self._GetProperty(USER_FIRST_NAME) + + def SetFirstName(self, value): + """Set the first name of this User object. + + Args: + value: string The new first name to give this object. + """ + self._SetProperty(USER_FIRST_NAME, value) + + first_name = pyproperty(GetFirstName, SetFirstName) + + def GetLastName(self): + """Get the last name of the User object. + + Returns: + The last name of this User object as a string or None. + """ + return self._GetProperty(USER_LAST_NAME) + + def SetLastName(self, value): + """Set the last name of this User object. + + Args: + value: string The new last name to give this object. + """ + self._SetProperty(USER_LAST_NAME, value) + + last_name = pyproperty(GetLastName, SetLastName) + + def GetEmail(self): + """Get the email address of the User object. 
+ + Returns: + The email address of this User object as a string or None. + """ + return self._GetProperty(USER_EMAIL) + + def SetEmail(self, value): + """Set the email address of this User object. + + Args: + value: string The new email address to give this object. + """ + self._SetProperty(USER_EMAIL, value) + + email = pyproperty(GetEmail, SetEmail) + + def GetPassword(self): + """Get the password of the User object. + + Returns: + The password of this User object as a string or None. + """ + return self._GetProperty(USER_PASSWORD) + + def SetPassword(self, value): + """Set the password of this User object. + + Args: + value: string The new password to give this object. + """ + self._SetProperty(USER_PASSWORD, value) + + password = pyproperty(GetPassword, SetPassword) + + def GetHashFunction(self): + """Get the hash function of the User object. + + Returns: + The hash function of this User object as a string or None. + """ + return self._GetProperty(USER_HASH_FUNCTION) + + def SetHashFunction(self, value): + """Set the hash function of this User object. + + Args: + value: string The new hash function to give this object. + """ + self._SetProperty(USER_HASH_FUNCTION, value) + + hash_function = pyproperty(GetHashFunction, SetHashFunction) + + def GetChangePasswordAtNextLogin(self): + """Get the change password at next login flag of the User object. + + Returns: + The change password at next login flag of this User object as a string or + None. + """ + return self._GetProperty(USER_CHANGE_PASSWORD) + + def SetChangePasswordAtNextLogin(self, value): + """Set the change password at next login flag of this User object. + + Args: + value: string The new change password at next login flag to give this + object. + """ + self._SetProperty(USER_CHANGE_PASSWORD, value) + + change_password_at_next_login = pyproperty(GetChangePasswordAtNextLogin, + SetChangePasswordAtNextLogin) + + def GetAgreedToTerms(self): + """Get the agreed to terms flag of the User object. + + Returns: + The agreed to terms flag of this User object as a string or None. + """ + return self._GetProperty(USER_AGREED_TO_TERMS) + + agreed_to_terms = pyproperty(GetAgreedToTerms) + + def GetSuspended(self): + """Get the suspended flag of the User object. + + Returns: + The suspended flag of this User object as a string or None. + """ + return self._GetProperty(USER_SUSPENDED) + + def SetSuspended(self, value): + """Set the suspended flag of this User object. + + Args: + value: string The new suspended flag to give this object. + """ + self._SetProperty(USER_SUSPENDED, value) + + suspended = pyproperty(GetSuspended, SetSuspended) + + def GetIsAdmin(self): + """Get the isAdmin flag of the User object. + + Returns: + The isAdmin flag of this User object as a string or None. + """ + return self._GetProperty(USER_ADMIN) + + def SetIsAdmin(self, value): + """Set the isAdmin flag of this User object. + + Args: + value: string The new isAdmin flag to give this object. + """ + self._SetProperty(USER_ADMIN, value) + + is_admin = pyproperty(GetIsAdmin, SetIsAdmin) + + def GetIpWhitelisted(self): + """Get the ipWhitelisted flag of the User object. + + Returns: + The ipWhitelisted flag of this User object as a string or None. + """ + return self._GetProperty(USER_IP_WHITELISTED) + + def SetIpWhitelisted(self, value): + """Set the ipWhitelisted flag of this User object. + + Args: + value: string The new ipWhitelisted flag to give this object. 
+ """ + self._SetProperty(USER_IP_WHITELISTED, value) + + ip_whitelisted = pyproperty(GetIpWhitelisted, SetIpWhitelisted) + + def GetQuota(self): + """Get the quota of the User object. + + Returns: + The quota of this User object as a string or None. + """ + return self._GetProperty(USER_QUOTA) + + def SetQuota(self, value): + """Set the quota of this User object. + + Args: + value: string The new quota to give this object. + """ + self._SetProperty(USER_QUOTA, value) + + quota = pyproperty(GetQuota, GetQuota) + + def __init__(self, uri=None, email=None, first_name=None, last_name=None, + password=None, hash_function=None, change_password=None, + agreed_to_terms=None, suspended=None, is_admin=None, + ip_whitelisted=None, quota=None, *args, **kwargs): + """Constructs a new UserEntry object with the given arguments. + + Args: + uri: string (optional) The uri of of this object for HTTP requests. + email: string (optional) The email address of the user. + first_name: string (optional) The first name of the user. + last_name: string (optional) The last name of the user. + password: string (optional) The password of the user. + hash_function: string (optional) The name of the function used to hash the + password. + change_password: Boolean (optional) Whether or not the user must change + password at first login. + agreed_to_terms: Boolean (optional) Whether or not the user has agreed to + the Terms of Service. + suspended: Boolean (optional) Whether or not the user is suspended. + is_admin: Boolean (optional) Whether or not the user has administrator + privileges. + ip_whitelisted: Boolean (optional) Whether or not the user's ip is + whitelisted. + quota: string (optional) The value (in GB) of the user's quota. + args: The other parameters to pass to gdata.entry.GDEntry constructor. + kwargs: The other parameters to pass to gdata.entry.GDEntry constructor. + """ + super(UserEntry, self).__init__(*args, **kwargs) + if uri: + self.uri = uri + if email: + self.email = email + if first_name: + self.first_name = first_name + if last_name: + self.last_name = last_name + if password: + self.password = password + if hash_function: + self.hash_function = hash_function + if change_password is not None: + self.change_password = str(change_password) + if agreed_to_terms is not None: + self.agreed_to_terms = str(agreed_to_terms) + if suspended is not None: + self.suspended = str(suspended) + if is_admin is not None: + self.is_admin = str(is_admin) + if ip_whitelisted is not None: + self.ip_whitelisted = str(ip_whitelisted) + if quota: + self.quota = quota + + +class UserFeed(gdata.data.GDFeed): + """Represents a feed of UserEntry objects.""" + + # Override entry so that this feed knows how to type its list of entries. + entry = [UserEntry] + + +class UserRenameRequest(MultidomainProvisioningEntry): + """Represents an User rename request in object form.""" + + def GetNewEmail(self): + """Get the new email address for the User object. + + Returns: + The new email address for the User object as a string or None. + """ + return self._GetProperty(USER_NEW_EMAIL) + + def SetNewEmail(self, value): + """Set the new email address for the User object. + + Args: + value: string The new email address to give this object. + """ + self._SetProperty(USER_NEW_EMAIL, value) + + new_email = pyproperty(GetNewEmail, SetNewEmail) + + def __init__(self, new_email=None, *args, **kwargs): + """Constructs a new UserRenameRequest object with the given arguments. 
+ + Args: + new_email: string (optional) The new email address for the target user. + args: The other parameters to pass to gdata.entry.GDEntry constructor. + kwargs: The other parameters to pass to gdata.entry.GDEntry constructor. + """ + super(UserRenameRequest, self).__init__(*args, **kwargs) + if new_email: + self.new_email = new_email + + +class AliasEntry(MultidomainProvisioningEntry): + """Represents an Alias in object form.""" + + def GetUserEmail(self): + """Get the user email address of the Alias object. + + Returns: + The user email address of this Alias object as a string or None. + """ + return self._GetProperty(USER_EMAIL) + + def SetUserEmail(self, value): + """Set the user email address of this Alias object. + + Args: + value: string The new user email address to give this object. + """ + self._SetProperty(USER_EMAIL, value) + + user_email = pyproperty(GetUserEmail, SetUserEmail) + + def GetAliasEmail(self): + """Get the alias email address of the Alias object. + + Returns: + The alias email address of this Alias object as a string or None. + """ + return self._GetProperty(ALIAS_EMAIL) + + def SetAliasEmail(self, value): + """Set the alias email address of this Alias object. + + Args: + value: string The new alias email address to give this object. + """ + self._SetProperty(ALIAS_EMAIL, value) + + alias_email = pyproperty(GetAliasEmail, SetAliasEmail) + + def __init__(self, user_email=None, alias_email=None, *args, **kwargs): + """Constructs a new AliasEntry object with the given arguments. + + Args: + user_email: string (optional) The user email address for the object. + alias_email: string (optional) The alias email address for the object. + args: The other parameters to pass to gdata.entry.GDEntry constructor. + kwargs: The other parameters to pass to gdata.entry.GDEntry constructor. + """ + super(AliasEntry, self).__init__(*args, **kwargs) + if user_email: + self.user_email = user_email + if alias_email: + self.alias_email = alias_email + + +class AliasFeed(gdata.data.GDFeed): + """Represents a feed of AliasEntry objects.""" + + # Override entry so that this feed knows how to type its list of entries. + entry = [AliasEntry] diff --git a/gam/gdata/analytics/apps/multidomain/service.py b/gam/gdata/analytics/apps/multidomain/service.py new file mode 100755 index 00000000000..f404160147e --- /dev/null +++ b/gam/gdata/analytics/apps/multidomain/service.py @@ -0,0 +1,105 @@ +#!/usr/bin/python +# +# Copyright (C) 2008 Google, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Extended Multi Domain Support. 
+ + MultiDomainService: Multi Domain Support.""" + +__author__ = 'jlee@pbu.edu' + + +import gdata.apps +import gdata.apps.service +import gdata.service + + +API_VER='2.0' + +class MultiDomainService(gdata.apps.service.PropertyService): + """Extended functions for Google Apps Multi-Domain Support.""" + + def _serviceUrl(self, setting_id, domain=None): + if domain is None: + domain = self.domain + return '/a/feeds/%s/%s/%s' % (setting_id, API_VER, domain) + + def CreateUser(self, user_email, password, first_name, last_name, user_domain=None, is_admin=False): + + uri = self._serviceUrl('user', user_domain) + properties = {} + properties['userEmail'] = user_email + properties['password'] = password + properties['firstName'] = first_name + properties['lastName'] = last_name + properties['isAdmin'] = is_admin + return self._PostProperties(uri, properties) + + def UpdateUser(self, user_email, user_domain=None, password=None, first_name=None, last_name=None, is_admin=None): + + uri = self._serviceUrl('user', user_domain) + properties = RetrieveUser(user_domain, user_email) + if password != None: + properties['password'] = password + if first_name != None: + properties['firstName'] = first_name + if last_name != None: + properties['lastName'] = last_name + if is_admin != None: + properties['isAdmin'] = gdata.apps.service._bool2str(is_admin) + return self._PutProperties(uri, properties) + + def RenameUser(self, old_email, new_email): + + old_domain = old_email[old_email.find('@')+1:] + uri = self._serviceUrl('user/userEmail', old_domain+'/'+old_email) + properties = {} + properties['newEmail'] = new_email + return self._PutProperties(uri, properties) + + def CreateAlias(self, user_email, alias_email): + + if alias_email.find('@') > 0: + domain = alias_email[alias_email.find('@')+1:] + else: + domain = self.domain + uri = self._serviceUrl('alias', domain) + properties = {} + properties['userEmail'] = user_email + properties['aliasEmail'] = alias_email + return self._PostProperties(uri, properties) + + def RetrieveAlias(self, alias_email): + + alias_domain = alias_email[alias_email.find('@')+1:] + uri = self._serviceUrl('alias', alias_domain+'/'+alias_email) + return self._GetProperties(uri) + + def RetrieveAllAliases(self): + + uri = self._serviceUrl('alias', self.domain) + return self._GetPropertiesList(uri) + + def DeleteAlias(self, alias_email): + + alias_domain = alias_email[alias_email.find('@')+1:] + uri = self._serviceUrl('alias', alias_domain+'/'+alias_email) + return self._DeleteProperties(uri) + + def GetUserAliases(self, user_email): + + user_domain = user_email[user_email.find('@')+1:] + uri = self._serviceUrl('alias', user_domain+'?userEmail='+user_email) + return self._GetPropertiesList(uri) \ No newline at end of file diff --git a/gam/gdata/analytics/apps/multidomain/service.pyc b/gam/gdata/analytics/apps/multidomain/service.pyc new file mode 100755 index 0000000000000000000000000000000000000000..6cc2ab3a8cbf2a07a8c875dabba7c7e03f0ed5cd GIT binary patch literal 3885 zcmc&%?QR=I6rJ6*H+Gs(D6}c1P^+TCd?cyLA5@jn(3XM#D~u^pqe!c*cO2H;c&(jr zOQqyj^9VcwufUV>0C3Kot?icn!$6X~x%1(fx#ygFcO3n(zMB0W_6Dl{iSYjx9{T_$ zQ)&-3P!J9o;%G#_{R)5)Z)+}7%9#}D}azNsGP19Sb=lkFPT8x_Xf znmn9tnQWT>iJzWjNY}Qmx-`W%sgRuD$Vgp9Yr-A1-9{VB85u#A)QA<_l)e%4n)N!cBLhVtY zT*wK-NS*6gkhlds%pk#^NN8MDF>D+~$3)ezxl3M$utTGuI=9P85^w7vq zk=6|!I=4I7VLlcnGy`&{VUpd_y4|XrVC~%4F4x!OLLlZ&iQ!h71JWm3BxCNuKJMXL z1WrCsv&X~bHc7dwBV5xemlY=S?a;dn&M1f5l}Vksj}w1@mVKQ|hi8H~Se^a;oxI;L ziI98p!kRn>+Q3&JO|Xc{1eHgsx=z1l6fy^a^+2C~O)v+L%l;DueGY(c4j{Mvz^@9J 
zj2ddxR3oq-t7l#-Py^@TOwhnw1|+Pf$)hl#8L=kfn$@XGtUal0YU8ThJQgr?fDYG$ z5$TJPD1S*VfW^WmL?#c*vbbhlXS!fPw=CMBqHKd8PW*O9P(!2$;tZZbQ z_a{hN3FaDF_8g1?v!{`c>kH4tK#CNRYT%LN`~eP=FsLa8$jHWxOUIy5G7T9?dK+p3 zb2gZ}4ar@Q1_gjdW+!H%0fHsF`&}UeBEc&yr?muy(%+`>smcCr$-=RDB3<(mHq6JF zu%+L@ELqOy922T6GM{q73O-!7hy=%ccKtg@Ys0)|4&B0TmRqWDBe)=0G(&e*`W%J< z(@3(%L;I+qs%^n;u|M*VkJ~lsHT-yc0Mtm*Yz8~ns$us8)I||I%?V&0^z^&1IUXe> zh{n?*PpxJ)NTrdc2cMGA9U8tYJo@YK=ID#GrTojs>bfVhmo95&t}j?SJn`3P))nh) za3O35&+FEJ+w7 zl@`|Mv+(pqn)hi)qyCs?lV+RdD$JZ-@&3^zTVp+S36JGnAr9hjt$F&?`PbU5_F6k` zH`}p%W6_^tv<@eox}VFcj!v#JTfD~T4NhSu&HJ{1mRxICKzw)Iqgg5Rd78Ijw$`T6NNsk(^; z1;x6#iFt|XsYNmADTyVCF(9Fs{G#;YnE3e2yv&mLc)fzk5)Pm?o80`A(wtN~km1EZ G%m4s&ue0{TulK?Ku}92~t)%8M&f%x!7Io$35rXvrGTmxrX^? zWuqg-pO5c*NY)^WM7EH7vgJusk;s>BMYd4L%f4(?2jyC+T-S0{wx%SiiK)uLv>eRH zK|^*iPF+q!UW)u~>jFpIY)x^xHDtfZ_S#AOr?!jJq`BGe_0r5OHyX`mxna{}&*F~x zc&u{R+sNwZ|V|NI_05e*cm8hWw zynaCo7bTk2!n_2Vt&3dJFOjW3cP5EU)ZFbS9ooq@chju-S(@&3P4j-QXJ2iX4h_Im zMOs6$?;<WE0QSUN1nI}JM8+hu-U4h_lA7~`AU0b*O3Aq{ z9ws#asQ^fiJ>5Et;mJ!6bj-KfMC)*?oxXMHp~+T2p_ReIgQ&$Z9kB^NGwxxQ_Dtqt zV|9pt7ICyqKa0=3xgTJhz_=_n&&*n}8Y}>{mh(uZ?m4bzrBg#bjl^o*=Q9)tt~wny zymSoW0@|7^9M>flIiBL)kHBAFR)IK6RawP&2YB%5!*JS{p2vnMH287=pRUP4U5=_W z8XKnZE@r`4sGX9R6_|1cQ*S8PBcs_xR1Pf*5c^5&)UemBvM6aE8U-^lwv)v@vRoT% zd)EYHjw26y?aU+&k>L^Ef7o{W?hgNlK>=K(lc=O-VtOfk=QV@G5W!JOHzHD`48RGHqrMa(v0YWcdg| zu_mjCIV2il5gHCzWqXs1lkr>=Y7=9v>&3LFp%_8?cBa7(8vGnCmMV8n) zCyT%ESD*e{9Jq@e!bUh=|rS%K*9wYKo}tc#=U^-3@$SxDU|oSags;kJfc19ml>|3 zRoRuqOuJF2Zpw$m{81m2_Q4g@1WW8lFVH21E)5T{$bg$wVh2~*fy1#w3*YDF^`jn? z;S{eanf3<92d^)Cm%!{~t!@Xgdf&1LT`@2UqNMHmg3lP%C&r3Y{3a{kH zrSHijr3<3aAXF-wT6Ad*jU|TA-5rBiKPd%B^7X!h`t0I>Ey@h>9J+TEKaSEkL89Ncr*{ zamhy@_qce}xl(I&huBS@PE?;7x38bD^GFAxcyI6T#qQ(i{w zsyTMGlFLCD3YXK;z~%H%eXu*p0iL{TUYeUD$Q@<8Fl0$qkr# z;Bt2m&4*#XgzT%F@I8zljKI_`43?9wMnr9Y=l_K0W`E}tqWhe2;COGp3Q?|;Nu%c{ zC`=+c!cs4hqqD+U(X;F}BNP4)Ug4*vYn&+^*0rZ*dUzW;%(>lz%9_tM zX~#ZVEYTapuhYWwcQj6#4gm|13TJm7OME?+d_>7XjUG$zg>zSeYYhYiUZdx9w4-~; zUuq>)x~iS@7(Z~4T$OV9nbJ7+*D?7a8w>ei42zNEG#HTe;!MwD3+aIoR!a~s}w$`T6NNsk(^; z1;x6#iFt|XsYNmADTyVCF(9FsqSWH}jBKmE-$LmMk76<@3&dk!p?U;4J5vQcvfGh%RVp$4n%lmvnCn08eck`c$g`H27#6v*oYXRVHx8q+ zX}}@7Xr(dVpf`9jIhCXzMo9YMEXt!(r6;FJWTOdBO|VypjVCpn`Gx%m3&U#sQfH4a zG`uM}9eiRBy&W{JUcq~Y4r7$syYwg}x@<86OQgV|qopf77d@0S(d!239|?W-H|Pko zERn?0@hkV)(JUQ^i{&0qUAZV}-hF7>ur*6B5_@lLI4EYbA|FOGWj|e;K2|2y>D;D8 zZn+BU)xTWKRhOXzOmN`9*gG)cNnz}XF6K(xRGBU#4){6CG^dHM6w6*=&YLl7$=W6y z&LX(wMGKwEnddhFG5ev3dVmlEk!w&@Of5C8E5odi+i_Y>8=sD|FHP!|&%`(eXi|i$ z{2^fnNZr9DSeLp*_O2WdF9mCvpT6{kFZ(gZ;d8al6vq~(0?tyvPuxaTKICA^>Quc^ z+i(xzGfX(S_nnx}$fsQa`~rgwfY4$E&RGK&e1hlJurYs|*nY-MSw0{jZwuNZY*a(T zO}&lIJchr5clSN|`vdL`c#^*@9kTjxYnCGWo+?dN>RZY3RV~X(eJfcOwJg2&%aZSh zp%56bLIWNLq8Ywjw*@9ubFx^knw7rFs$n$+tkXc=4ZI*=S>8q^v20$>aRt6pN8iC? z?!#~$v21G5@b?D^XS=qz)WTzVxvpI@|C4kWaJkoZX&u9}63(#8g0-BmQ0}xaluIiN z@nc|-DGWJFVVD$g7|K;yUYd6?8}MZte8%R0&F3&T$&%iGej^2lMI6v@9kiU5+wtyn z_Pjms-oBhUU3W856j{r#>q)M{ hDrFa=@;88-@3z1S|3@X`KX92C&PvM#Xq02e{TCKoN;3ce literal 0 HcmV?d00001 diff --git a/gam/gdata/analytics/apps/service.py b/gam/gdata/analytics/apps/service.py new file mode 100755 index 00000000000..bc974849899 --- /dev/null +++ b/gam/gdata/analytics/apps/service.py @@ -0,0 +1,552 @@ +#!/usr/bin/python +# +# Copyright (C) 2007 SIOS Technology, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +__author__ = 'tmatsuo@sios.com (Takashi MATSUO)' + +try: + from xml.etree import cElementTree as ElementTree +except ImportError: + try: + import cElementTree as ElementTree + except ImportError: + try: + from xml.etree import ElementTree + except ImportError: + from elementtree import ElementTree +import urllib +import gdata +import atom.service +import gdata.service +import gdata.apps +import atom + +API_VER="2.0" +HTTP_OK=200 + +UNKOWN_ERROR=1000 +USER_DELETED_RECENTLY=1100 +USER_SUSPENDED=1101 +DOMAIN_USER_LIMIT_EXCEEDED=1200 +DOMAIN_ALIAS_LIMIT_EXCEEDED=1201 +DOMAIN_SUSPENDED=1202 +DOMAIN_FEATURE_UNAVAILABLE=1203 +ENTITY_EXISTS=1300 +ENTITY_DOES_NOT_EXIST=1301 +ENTITY_NAME_IS_RESERVED=1302 +ENTITY_NAME_NOT_VALID=1303 +INVALID_GIVEN_NAME=1400 +INVALID_FAMILY_NAME=1401 +INVALID_PASSWORD=1402 +INVALID_USERNAME=1403 +INVALID_HASH_FUNCTION_NAME=1404 +INVALID_HASH_DIGGEST_LENGTH=1405 +INVALID_EMAIL_ADDRESS=1406 +INVALID_QUERY_PARAMETER_VALUE=1407 +TOO_MANY_RECIPIENTS_ON_EMAIL_LIST=1500 + +DEFAULT_QUOTA_LIMIT='2048' + + +class Error(Exception): + pass + + +class AppsForYourDomainException(Error): + + def __init__(self, response): + + Error.__init__(self, response) + try: + self.element_tree = ElementTree.fromstring(response['body']) + self.error_code = int(self.element_tree[0].attrib['errorCode']) + self.reason = self.element_tree[0].attrib['reason'] + self.invalidInput = self.element_tree[0].attrib['invalidInput'] + except: + self.error_code = UNKOWN_ERROR + + +class AppsService(gdata.service.GDataService): + """Client for the Google Apps Provisioning service.""" + + def __init__(self, email=None, password=None, domain=None, source=None, + server='apps-apis.google.com', additional_headers=None, + **kwargs): + """Creates a client for the Google Apps Provisioning service. + + Args: + email: string (optional) The user's email address, used for + authentication. + password: string (optional) The user's password. + domain: string (optional) The Google Apps domain name. + source: string (optional) The name of the user's application. + server: string (optional) The name of the server to which a connection + will be opened. Default value: 'apps-apis.google.com'. + **kwargs: The other parameters to pass to gdata.service.GDataService + constructor. 
+ """ + gdata.service.GDataService.__init__( + self, email=email, password=password, service='apps', source=source, + server=server, additional_headers=additional_headers, **kwargs) + self.ssl = True + self.port = 443 + self.domain = domain + + def _baseURL(self): + return "/a/feeds/%s" % self.domain + + def AddAllElementsFromAllPages(self, link_finder, func): + """retrieve all pages and add all elements""" + next = link_finder.GetNextLink() + while next is not None: + next_feed = self.Get(next.href, converter=func) + for a_entry in next_feed.entry: + link_finder.entry.append(a_entry) + next = next_feed.GetNextLink() + return link_finder + + def RetrievePageOfEmailLists(self, start_email_list_name=None, + num_retries=gdata.service.DEFAULT_NUM_RETRIES, + delay=gdata.service.DEFAULT_DELAY, + backoff=gdata.service.DEFAULT_BACKOFF): + """Retrieve one page of email list""" + uri = "%s/emailList/%s" % (self._baseURL(), API_VER) + if start_email_list_name is not None: + uri += "?startEmailListName=%s" % start_email_list_name + try: + return gdata.apps.EmailListFeedFromString(str(self.GetWithRetries( + uri, num_retries=num_retries, delay=delay, backoff=backoff))) + except gdata.service.RequestError, e: + raise AppsForYourDomainException(e.args[0]) + + def GetGeneratorForAllEmailLists( + self, num_retries=gdata.service.DEFAULT_NUM_RETRIES, + delay=gdata.service.DEFAULT_DELAY, backoff=gdata.service.DEFAULT_BACKOFF): + """Retrieve a generator for all emaillists in this domain.""" + first_page = self.RetrievePageOfEmailLists(num_retries=num_retries, + delay=delay, + backoff=backoff) + return self.GetGeneratorFromLinkFinder( + first_page, gdata.apps.EmailListRecipientFeedFromString, + num_retries=num_retries, delay=delay, backoff=backoff) + + def RetrieveAllEmailLists(self): + """Retrieve all email list of a domain.""" + + ret = self.RetrievePageOfEmailLists() + # pagination + return self.AddAllElementsFromAllPages( + ret, gdata.apps.EmailListFeedFromString) + + def RetrieveEmailList(self, list_name): + """Retreive a single email list by the list's name.""" + + uri = "%s/emailList/%s/%s" % ( + self._baseURL(), API_VER, list_name) + try: + return self.Get(uri, converter=gdata.apps.EmailListEntryFromString) + except gdata.service.RequestError, e: + raise AppsForYourDomainException(e.args[0]) + + def RetrieveEmailLists(self, recipient): + """Retrieve All Email List Subscriptions for an Email Address.""" + + uri = "%s/emailList/%s?recipient=%s" % ( + self._baseURL(), API_VER, recipient) + try: + ret = gdata.apps.EmailListFeedFromString(str(self.Get(uri))) + except gdata.service.RequestError, e: + raise AppsForYourDomainException(e.args[0]) + + # pagination + return self.AddAllElementsFromAllPages( + ret, gdata.apps.EmailListFeedFromString) + + def RemoveRecipientFromEmailList(self, recipient, list_name): + """Remove recipient from email list.""" + + uri = "%s/emailList/%s/%s/recipient/%s" % ( + self._baseURL(), API_VER, list_name, recipient) + try: + self.Delete(uri) + except gdata.service.RequestError, e: + raise AppsForYourDomainException(e.args[0]) + + def RetrievePageOfRecipients(self, list_name, start_recipient=None, + num_retries=gdata.service.DEFAULT_NUM_RETRIES, + delay=gdata.service.DEFAULT_DELAY, + backoff=gdata.service.DEFAULT_BACKOFF): + """Retrieve one page of recipient of an email list. 
""" + + uri = "%s/emailList/%s/%s/recipient" % ( + self._baseURL(), API_VER, list_name) + + if start_recipient is not None: + uri += "?startRecipient=%s" % start_recipient + try: + return gdata.apps.EmailListRecipientFeedFromString(str( + self.GetWithRetries( + uri, num_retries=num_retries, delay=delay, backoff=backoff))) + except gdata.service.RequestError, e: + raise AppsForYourDomainException(e.args[0]) + + def GetGeneratorForAllRecipients( + self, list_name, num_retries=gdata.service.DEFAULT_NUM_RETRIES, + delay=gdata.service.DEFAULT_DELAY, backoff=gdata.service.DEFAULT_BACKOFF): + """Retrieve a generator for all recipients of a particular emaillist.""" + first_page = self.RetrievePageOfRecipients(list_name, + num_retries=num_retries, + delay=delay, + backoff=backoff) + return self.GetGeneratorFromLinkFinder( + first_page, gdata.apps.EmailListRecipientFeedFromString, + num_retries=num_retries, delay=delay, backoff=backoff) + + def RetrieveAllRecipients(self, list_name): + """Retrieve all recipient of an email list.""" + + ret = self.RetrievePageOfRecipients(list_name) + # pagination + return self.AddAllElementsFromAllPages( + ret, gdata.apps.EmailListRecipientFeedFromString) + + def AddRecipientToEmailList(self, recipient, list_name): + """Add a recipient to a email list.""" + + uri = "%s/emailList/%s/%s/recipient" % ( + self._baseURL(), API_VER, list_name) + recipient_entry = gdata.apps.EmailListRecipientEntry() + recipient_entry.who = gdata.apps.Who(email=recipient) + + try: + return gdata.apps.EmailListRecipientEntryFromString( + str(self.Post(recipient_entry, uri))) + except gdata.service.RequestError, e: + raise AppsForYourDomainException(e.args[0]) + + def DeleteEmailList(self, list_name): + """Delete a email list""" + + uri = "%s/emailList/%s/%s" % (self._baseURL(), API_VER, list_name) + try: + self.Delete(uri) + except gdata.service.RequestError, e: + raise AppsForYourDomainException(e.args[0]) + + def CreateEmailList(self, list_name): + """Create a email list. 
""" + + uri = "%s/emailList/%s" % (self._baseURL(), API_VER) + email_list_entry = gdata.apps.EmailListEntry() + email_list_entry.email_list = gdata.apps.EmailList(name=list_name) + try: + return gdata.apps.EmailListEntryFromString( + str(self.Post(email_list_entry, uri))) + except gdata.service.RequestError, e: + raise AppsForYourDomainException(e.args[0]) + + def DeleteNickname(self, nickname): + """Delete a nickname""" + + uri = "%s/nickname/%s/%s" % (self._baseURL(), API_VER, nickname) + try: + self.Delete(uri) + except gdata.service.RequestError, e: + raise AppsForYourDomainException(e.args[0]) + + def RetrievePageOfNicknames(self, start_nickname=None, + num_retries=gdata.service.DEFAULT_NUM_RETRIES, + delay=gdata.service.DEFAULT_DELAY, + backoff=gdata.service.DEFAULT_BACKOFF): + """Retrieve one page of nicknames in the domain""" + + uri = "%s/nickname/%s" % (self._baseURL(), API_VER) + if start_nickname is not None: + uri += "?startNickname=%s" % start_nickname + try: + return gdata.apps.NicknameFeedFromString(str(self.GetWithRetries( + uri, num_retries=num_retries, delay=delay, backoff=backoff))) + except gdata.service.RequestError, e: + raise AppsForYourDomainException(e.args[0]) + + def GetGeneratorForAllNicknames( + self, num_retries=gdata.service.DEFAULT_NUM_RETRIES, + delay=gdata.service.DEFAULT_DELAY, backoff=gdata.service.DEFAULT_BACKOFF): + """Retrieve a generator for all nicknames in this domain.""" + first_page = self.RetrievePageOfNicknames(num_retries=num_retries, + delay=delay, + backoff=backoff) + return self.GetGeneratorFromLinkFinder( + first_page, gdata.apps.NicknameFeedFromString, num_retries=num_retries, + delay=delay, backoff=backoff) + + def RetrieveAllNicknames(self): + """Retrieve all nicknames in the domain""" + + ret = self.RetrievePageOfNicknames() + # pagination + return self.AddAllElementsFromAllPages( + ret, gdata.apps.NicknameFeedFromString) + + def GetGeneratorForAllNicknamesOfAUser( + self, user_name, num_retries=gdata.service.DEFAULT_NUM_RETRIES, + delay=gdata.service.DEFAULT_DELAY, backoff=gdata.service.DEFAULT_BACKOFF): + """Retrieve a generator for all nicknames of a particular user.""" + uri = "%s/nickname/%s?username=%s" % (self._baseURL(), API_VER, user_name) + try: + first_page = gdata.apps.NicknameFeedFromString(str(self.GetWithRetries( + uri, num_retries=num_retries, delay=delay, backoff=backoff))) + except gdata.service.RequestError, e: + raise AppsForYourDomainException(e.args[0]) + return self.GetGeneratorFromLinkFinder( + first_page, gdata.apps.NicknameFeedFromString, num_retries=num_retries, + delay=delay, backoff=backoff) + + def RetrieveNicknames(self, user_name): + """Retrieve nicknames of the user""" + + uri = "%s/nickname/%s?username=%s" % (self._baseURL(), API_VER, user_name) + try: + ret = gdata.apps.NicknameFeedFromString(str(self.Get(uri))) + except gdata.service.RequestError, e: + raise AppsForYourDomainException(e.args[0]) + + # pagination + return self.AddAllElementsFromAllPages( + ret, gdata.apps.NicknameFeedFromString) + + def RetrieveNickname(self, nickname): + """Retrieve a nickname. 
+ + Args: + nickname: string The nickname to retrieve + + Returns: + gdata.apps.NicknameEntry + """ + + uri = "%s/nickname/%s/%s" % (self._baseURL(), API_VER, nickname) + try: + return gdata.apps.NicknameEntryFromString(str(self.Get(uri))) + except gdata.service.RequestError, e: + raise AppsForYourDomainException(e.args[0]) + + def CreateNickname(self, user_name, nickname): + """Create a nickname""" + + uri = "%s/nickname/%s" % (self._baseURL(), API_VER) + nickname_entry = gdata.apps.NicknameEntry() + nickname_entry.login = gdata.apps.Login(user_name=user_name) + nickname_entry.nickname = gdata.apps.Nickname(name=nickname) + + try: + return gdata.apps.NicknameEntryFromString( + str(self.Post(nickname_entry, uri))) + except gdata.service.RequestError, e: + raise AppsForYourDomainException(e.args[0]) + + def DeleteUser(self, user_name): + """Delete a user account""" + + uri = "%s/user/%s/%s" % (self._baseURL(), API_VER, user_name) + try: + return self.Delete(uri) + except gdata.service.RequestError, e: + raise AppsForYourDomainException(e.args[0]) + + def UpdateUser(self, user_name, user_entry): + """Update a user account.""" + + uri = "%s/user/%s/%s" % (self._baseURL(), API_VER, user_name) + try: + return gdata.apps.UserEntryFromString(str(self.Put(user_entry, uri))) + except gdata.service.RequestError, e: + raise AppsForYourDomainException(e.args[0]) + + def CreateUser(self, user_name, family_name, given_name, password, + suspended='false', quota_limit=None, + password_hash_function=None, + change_password=None): + """Create a user account. """ + + uri = "%s/user/%s" % (self._baseURL(), API_VER) + user_entry = gdata.apps.UserEntry() + user_entry.login = gdata.apps.Login( + user_name=user_name, password=password, suspended=suspended, + hash_function_name=password_hash_function, + change_password=change_password) + user_entry.name = gdata.apps.Name(family_name=family_name, + given_name=given_name) + if quota_limit is not None: + user_entry.quota = gdata.apps.Quota(limit=str(quota_limit)) + + try: + return gdata.apps.UserEntryFromString(str(self.Post(user_entry, uri))) + except gdata.service.RequestError, e: + raise AppsForYourDomainException(e.args[0]) + + def SuspendUser(self, user_name): + user_entry = self.RetrieveUser(user_name) + if user_entry.login.suspended != 'true': + user_entry.login.suspended = 'true' + user_entry = self.UpdateUser(user_name, user_entry) + return user_entry + + def RestoreUser(self, user_name): + user_entry = self.RetrieveUser(user_name) + if user_entry.login.suspended != 'false': + user_entry.login.suspended = 'false' + user_entry = self.UpdateUser(user_name, user_entry) + return user_entry + + def RetrieveUser(self, user_name): + """Retrieve an user account. 
+ + Args: + user_name: string The user name to retrieve + + Returns: + gdata.apps.UserEntry + """ + + uri = "%s/user/%s/%s" % (self._baseURL(), API_VER, user_name) + try: + return gdata.apps.UserEntryFromString(str(self.Get(uri))) + except gdata.service.RequestError, e: + raise AppsForYourDomainException(e.args[0]) + + def RetrievePageOfUsers(self, start_username=None, + num_retries=gdata.service.DEFAULT_NUM_RETRIES, + delay=gdata.service.DEFAULT_DELAY, + backoff=gdata.service.DEFAULT_BACKOFF): + """Retrieve one page of users in this domain.""" + + uri = "%s/user/%s" % (self._baseURL(), API_VER) + if start_username is not None: + uri += "?startUsername=%s" % start_username + try: + return gdata.apps.UserFeedFromString(str(self.GetWithRetries( + uri, num_retries=num_retries, delay=delay, backoff=backoff))) + except gdata.service.RequestError, e: + raise AppsForYourDomainException(e.args[0]) + + def GetGeneratorForAllUsers(self, + num_retries=gdata.service.DEFAULT_NUM_RETRIES, + delay=gdata.service.DEFAULT_DELAY, + backoff=gdata.service.DEFAULT_BACKOFF): + """Retrieve a generator for all users in this domain.""" + first_page = self.RetrievePageOfUsers(num_retries=num_retries, delay=delay, + backoff=backoff) + return self.GetGeneratorFromLinkFinder( + first_page, gdata.apps.UserFeedFromString, num_retries=num_retries, + delay=delay, backoff=backoff) + + def RetrieveAllUsers(self): + """Retrieve all users in this domain. OBSOLETE""" + + ret = self.RetrievePageOfUsers() + # pagination + return self.AddAllElementsFromAllPages( + ret, gdata.apps.UserFeedFromString) + + +class PropertyService(gdata.service.GDataService): + """Client for the Google Apps Property service.""" + + def __init__(self, email=None, password=None, domain=None, source=None, + server='apps-apis.google.com', additional_headers=None): + gdata.service.GDataService.__init__(self, email=email, password=password, + service='apps', source=source, + server=server, + additional_headers=additional_headers) + self.ssl = True + self.port = 443 + self.domain = domain + + def AddAllElementsFromAllPages(self, link_finder, func): + """retrieve all pages and add all elements""" + next = link_finder.GetNextLink() + while next is not None: + next_feed = self.Get(next.href, converter=func) + for a_entry in next_feed.entry: + link_finder.entry.append(a_entry) + next = next_feed.GetNextLink() + return link_finder + + def _GetPropertyEntry(self, properties): + property_entry = gdata.apps.PropertyEntry() + property = [] + for name, value in properties.iteritems(): + if name is not None and value is not None: + property.append(gdata.apps.Property(name=name, value=value)) + property_entry.property = property + return property_entry + + def _PropertyEntry2Dict(self, property_entry): + properties = {} + for i, property in enumerate(property_entry.property): + properties[property.name] = property.value + return properties + + def _GetPropertyFeed(self, uri): + try: + return gdata.apps.PropertyFeedFromString(str(self.Get(uri))) + except gdata.service.RequestError, e: + raise gdata.apps.service.AppsForYourDomainException(e.args[0]) + + def _GetPropertiesList(self, uri): + property_feed = self._GetPropertyFeed(uri) + # pagination + property_feed = self.AddAllElementsFromAllPages( + property_feed, gdata.apps.PropertyFeedFromString) + properties_list = [] + for property_entry in property_feed.entry: + properties_list.append(self._PropertyEntry2Dict(property_entry)) + return properties_list + + def _GetProperties(self, uri): + try: + return 
self._PropertyEntry2Dict(gdata.apps.PropertyEntryFromString( + str(self.Get(uri)))) + except gdata.service.RequestError, e: + raise gdata.apps.service.AppsForYourDomainException(e.args[0]) + + def _PostProperties(self, uri, properties): + property_entry = self._GetPropertyEntry(properties) + try: + return self._PropertyEntry2Dict(gdata.apps.PropertyEntryFromString( + str(self.Post(property_entry, uri)))) + except gdata.service.RequestError, e: + raise gdata.apps.service.AppsForYourDomainException(e.args[0]) + + def _PutProperties(self, uri, properties): + property_entry = self._GetPropertyEntry(properties) + try: + return self._PropertyEntry2Dict(gdata.apps.PropertyEntryFromString( + str(self.Put(property_entry, uri)))) + except gdata.service.RequestError, e: + raise gdata.apps.service.AppsForYourDomainException(e.args[0]) + + def _DeleteProperties(self, uri): + try: + self.Delete(uri) + except gdata.service.RequestError, e: + raise gdata.apps.service.AppsForYourDomainException(e.args[0]) + + +def _bool2str(b): + if b is None: + return None + return str(b is True).lower() diff --git a/gam/gdata/analytics/apps/service.pyc b/gam/gdata/analytics/apps/service.pyc new file mode 100755 index 0000000000000000000000000000000000000000..a8a1de5e2aa207402370d77548e52d4223068050 GIT binary patch literal 21350 zcmdU1TWlQHc|ODCZ7E8kWr?;dT3%U}L_0R+Di^1&9h(wYGGl5f&ytq4vAbRE45g)( zyVTCm5>q*aODAZ0K@%WNkS0jera;h_plKh2zUL(m1^QB;DA2dO6-5iQeJIlJ`_5c; zSG$yC(yl9Mj%LoBIdjf`{r-Q>+04K19sA5=rBF4|pCSDJDvsb6jxnAwC1W-Z8qYD_ zfT=rXZNQW=#<|>BGlTx1xiMy1UpJ29Y)+_6#?;l|a$`QZ|5e?$eGICPOw`A*q|N(m z!3}zjrWKt9W~ygru2mI4x3WWc#oOVG2!W{Qm9Kk3G zAMq>K@HP>9L z)EfEQRewFKH5+#i`60BoL-ve#H^8xTMpgF6gReM;BltrUp)sqDxns;7$7~H4XVW*~ zfLYD(8ai`$OE=4d26RHJ)j{g<;Z-P6Cb>D%b!X5VM$__WSM`vs&YH`O392hd$7Umj zI*QR2r-w11f;e-v>220MWJIfPJ#q)Dw2s{cxeMpwmHy|`S(n_Z5>WjA_w zx$yepEAV0Vq zFdx8HLTWOS#vxkh51>*IpyRSTqh>W5pNz#PdyF?^J{Y8_rQPb+X4rfHOD{vrK4pt} zB6G|WS!ABTC~yC|J$l`NZC^L8*U6@{PnmU|$g=Z9#+@g!?>vEt-~PP^^xnhUzV~6h zR~~|W9@)0>AvKl@VPkm_p2(5#M81S4awj~IMt#% zG@!@1*l-31@eoZZOl&QfvF4VZsY+QL#jzj9>o|fdD6G+2wRR3D1vU=SI$-WVgfo;- zNZ%cl!v^jHUj7bFM$8{Mu*ZY4S*VqDE8_-CrK}hbKyjJ|A}ByD&cRBBevqr=st-7q zv9TQf%(kutFX}9p^I`MrFXpW6%1t+A`YZLPb4#Gj254|9ux)Y`&x7d+o@K2^@Z(|8wyf*4%vN_UU+5lUG0EZ> zi^D7kY1|_yG(1bV)OUS2N4bRV0*=^Bj5|l22`7ucRl0s2^k;Hx^k~#}02yN@i}Kus zfVHP9r&oO63r?R1gqAd3d&Y_EV0;#hT&)EDvb%5teF@qbr<%bzUG5+bjgB{Ah%mG* zW8TY{+i%gjz%Jd$n%0U&4I7mqI^!e;%$DQ4H)w7jqp<=qn|o){0P3;}T;pU2EsS&X zWpkMg;%;y5$F&1v#Q_ATP_%p?9{*iGSAhS>rgaLeMnT)Q5`*$S85H2rk-2b zXjFC5K=*n*Ps-$!WWQ2|MQd&DggXM*o;O>sN8w0t9yk$~mq>>ONwg1z85no=JC8!L z4&ta1bW!y@1yn#G0xIw;L0D;pdAcH27hv07J`sp( zjg7Uk6`?>(_WXKfQ~bGFsorR=tO&^$rm3IqaTK~YduhJ>CIIwF)=sf_l(%SmlDC`( zqsfkLvN1ah@i_tgO*2X+CAmv&@+* zpmTqXZO*XxGz!H98?73+38cFdG2`4~(o?b)SCUBvw{g#+{Gmx_+<6H2{(y7bIpkDn z8YpTEJ|t%6abm4k7ifW;Y?%zK0q?>%AYc=`VZesVHh9C91DWD9`wm_}5f3X@$z6l* zYgOQO1ifetL>~!8uGWB0t_3-3;0fpJPa_p`W^1G6H{ zQMCMUCuESbj?6>H{J)951L~5=j1LSu$|S_1shLy3yR08XZY2W)IAAs(7bPsb9!TvS zyjN<=V7Fi-Vaq(&G9(D$HR}~fRr?a2_&Fjqu;fMPe+CqzF5?FK=8_qajiOM=M!BEi8NFjl^N?Dch((^RC$&y8LGI`I07>QkItnS7 zHEU6L|nFpfafi2V{I zFQ?h$7Qnh(apP)GZPgU>1lBAyqWiNp+L;Na&`B3tUunhW1YSzKP2G=WEgzE0ktVsH z0WAyghY}TxElUhKJ^lcop4lUE+Rz2(cnku)%u5sr6<7BM>0@EQm=!p zzlDl!whC~p3_=$(X7iy~7vTkf4HGOOz(_8BDdHj+jyfdBk?u>m2yKi!hw!|=)`a#Y zl*p~X(zog0j1Wf`XP|JWlP7QyDc&eXo+_Ro;v9fy=v(#c!IYFA?gc)aXR#}aCDG8_ z+*pdJ=LtuCiZ>-Xb{IPL5Os`N&sijx@CUf)W=3QcrA`qn{2cQOHv0frINTo=D(MOh zA}q8y3IhE8u<*NJ<%oraP}2UeP<}O5zjj!dut_ed(azaiFQw~Xt&FWjjR_T@i(RlG z;(h`Rt!|zK~-W^G~8`dRr4SvQkFfnC@ 
zi(@F#tr~6J^G4SWvQ#!7TrBzSe{et{lFAGm1t=bNMkJESTB$+Yp%w-=Pyp$##(Stf zXV-Nm6q_IQ=)e4nWFio zeI3OiqC&V*(S+qe1a(cTTy9?$v5^{uA$@1wyp8B;P+Qv|q(+&7CzWeR&XHEfnn)|= z$R;2rLra8cSRiW#5X?C@pEUS)8R=F;X!y+m#c3G~prF`Taj>>}w#jLd2aRor&4W_x zYuEsXV=b_RCS|nXLF0InOTk9Iq?eC9V%%BeyP6-Trb)KJoNu%4Pf%#HRZR_dGJSgCa>v}y%+;#l zzmWhGL4)Z}VK-lr3E=c4m;5ft&pff!L{1N)ik(!T0c6SkL$>#`*oKFCtY92gj;Vwk z6MjM?dJfeRGi=j?Jho@;X*-o70DZw?(kF#FN}I&P?gh-H)H0!;=Y!NJyVr!$Y~j(a zWD+OPUAy>jEdQAjFZzPQd34Clmry}!Nn)ZjiUeN@#*8118|h7U*PO22ZnjWCYky`ox5Q>~4SC9+q}yZ^VnT^322%TAh(K>qAU zJ)pq-cF8JVN|B^yNS}%DB9YWuNbZ9T9lEhgL=tlhmGj7 z1M0Ml)vkKxr`>H;U3lj^7)}?7-5;{qEA%itIcNp41QVU1j$bGXb|)aI@62<1Wn6j{z^=< zh2J$JD!IK@pF!fy_g%KNzobPr%&+kmKeWYOUwxt?mK0 zfAd7&q*x(8-0^cL8bZ1@MJY?AU0z#Sx>R0#-K7kLhfpjR^KKa_6y%KNkX|{LFDxxw z5t`PW;&Sm)zA%@c(^mAk#f!7^g|hA}%wL>eD(Bxim(TO|5%d+^o?V!qEtb3P69Yx} zd-i!gKfAQ-=F7{4**9nB7iP~c(XSS~Db2)Jc;o!GX8 z*^Bw|d=az3+}_04IM3)Q-Q!blVw7`A+s+qsUcNB@X1-8n1Ernf>hrS~=NGQndQKv) zzcgDcUS4$Pv?xBVBxTeCBlfuRwb|lp<@3vhb4&A!(F;hej@xtd7cS(BOXY=p;lk2u z8gYC-51KEOXXoZHpJK>hKfd$Ea^AgCzBKD%%u8S+=CPdDo|>h_#q!13!WD9K{?a@q zUo7L@wyOowt39*!#`5CQtYw&lHTh9I{RW&hWHN5bG#tbTTq?BtV-%?-eK^yg@a`r1 yqkmtZR0bs!!}zv%(Af)zIiZs0Q_!&FFA(1_kNf#MFr1kjoZL6{EA#%;(*FXj$L2Ty literal 0 HcmV?d00001 diff --git a/gam/gdata/analytics/apps_property.py b/gam/gdata/analytics/apps_property.py new file mode 100755 index 00000000000..5afa1f38b94 --- /dev/null +++ b/gam/gdata/analytics/apps_property.py @@ -0,0 +1,39 @@ +#!/usr/bin/env python +# +# Copyright (C) 2010 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +# This module is used for version 2 of the Google Data APIs. + + +"""Provides a base class to represent property elements in feeds. + +This module is used for version 2 of the Google Data APIs. The primary class +in this module is AppsProperty. +""" + + +__author__ = 'Vic Fryzel ' + + +import atom.core +import gdata.apps + + +class AppsProperty(atom.core.XmlElement): + """Represents an element in a feed.""" + _qname = gdata.apps.APPS_TEMPLATE % 'property' + name = 'name' + value = 'value' diff --git a/gam/gdata/analytics/auth.py b/gam/gdata/analytics/auth.py new file mode 100755 index 00000000000..139c6cd016d --- /dev/null +++ b/gam/gdata/analytics/auth.py @@ -0,0 +1,952 @@ +#!/usr/bin/python +# +# Copyright (C) 2007 - 2009 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ + +import cgi +import math +import random +import re +import time +import types +import urllib +import atom.http_interface +import atom.token_store +import atom.url +import gdata.oauth as oauth +import gdata.oauth.rsa as oauth_rsa +import gdata.tlslite.utils.keyfactory as keyfactory +import gdata.tlslite.utils.cryptomath as cryptomath + +import gdata.gauth + +__author__ = 'api.jscudder (Jeff Scudder)' + + +PROGRAMMATIC_AUTH_LABEL = 'GoogleLogin auth=' +AUTHSUB_AUTH_LABEL = 'AuthSub token=' + + +"""This module provides functions and objects used with Google authentication. + +Details on Google authorization mechanisms used with the Google Data APIs can +be found here: +http://code.google.com/apis/gdata/auth.html +http://code.google.com/apis/accounts/ + +The essential functions are the following. +Related to ClientLogin: + generate_client_login_request_body: Constructs the body of an HTTP request to + obtain a ClientLogin token for a specific + service. + extract_client_login_token: Creates a ClientLoginToken with the token from a + success response to a ClientLogin request. + get_captcha_challenge: If the server responded to the ClientLogin request + with a CAPTCHA challenge, this method extracts the + CAPTCHA URL and identifying CAPTCHA token. + +Related to AuthSub: + generate_auth_sub_url: Constructs a full URL for a AuthSub request. The + user's browser must be sent to this Google Accounts + URL and redirected back to the app to obtain the + AuthSub token. + extract_auth_sub_token_from_url: Once the user's browser has been + redirected back to the web app, use this + function to create an AuthSubToken with + the correct authorization token and scope. + token_from_http_body: Extracts the AuthSubToken value string from the + server's response to an AuthSub session token upgrade + request. +""" + +def generate_client_login_request_body(email, password, service, source, + account_type='HOSTED_OR_GOOGLE', captcha_token=None, + captcha_response=None): + """Creates the body of the autentication request + + See http://code.google.com/apis/accounts/AuthForInstalledApps.html#Request + for more details. + + Args: + email: str + password: str + service: str + source: str + account_type: str (optional) Defaul is 'HOSTED_OR_GOOGLE', other valid + values are 'GOOGLE' and 'HOSTED' + captcha_token: str (optional) + captcha_response: str (optional) + + Returns: + The HTTP body to send in a request for a client login token. + """ + return gdata.gauth.generate_client_login_request_body(email, password, + service, source, account_type, captcha_token, captcha_response) + + +GenerateClientLoginRequestBody = generate_client_login_request_body + + +def GenerateClientLoginAuthToken(http_body): + """Returns the token value to use in Authorization headers. + + Reads the token from the server's response to a Client Login request and + creates header value to use in requests. + + Args: + http_body: str The body of the server's HTTP response to a Client Login + request + + Returns: + The value half of an Authorization header. + """ + token = get_client_login_token(http_body) + if token: + return 'GoogleLogin auth=%s' % token + return None + + +def get_client_login_token(http_body): + """Returns the token value for a ClientLoginToken. + + Reads the token from the server's response to a Client Login request and + creates the token value string to use in requests. + + Args: + http_body: str The body of the server's HTTP response to a Client Login + request + + Returns: + The token value string for a ClientLoginToken. 
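+
+  Example (editor's illustrative sketch; the response body below is an
+  abbreviated, made-up ClientLogin response):
+
+    body = 'SID=DQAAAGgA...\nLSID=DQAAAGsA...\nAuth=DQAAAGgAdk3fA5N'
+    token = get_client_login_token(body)   # returns 'DQAAAGgAdk3fA5N'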
+ """ + return gdata.gauth.get_client_login_token_string(http_body) + + +def extract_client_login_token(http_body, scopes): + """Parses the server's response and returns a ClientLoginToken. + + Args: + http_body: str The body of the server's HTTP response to a Client Login + request. It is assumed that the login request was successful. + scopes: list containing atom.url.Urls or strs. The scopes list contains + all of the partial URLs under which the client login token is + valid. For example, if scopes contains ['http://example.com/foo'] + then the client login token would be valid for + http://example.com/foo/bar/baz + + Returns: + A ClientLoginToken which is valid for the specified scopes. + """ + token_string = get_client_login_token(http_body) + token = ClientLoginToken(scopes=scopes) + token.set_token_string(token_string) + return token + + +def get_captcha_challenge(http_body, + captcha_base_url='http://www.google.com/accounts/'): + """Returns the URL and token for a CAPTCHA challenge issued by the server. + + Args: + http_body: str The body of the HTTP response from the server which + contains the CAPTCHA challenge. + captcha_base_url: str This function returns a full URL for viewing the + challenge image which is built from the server's response. This + base_url is used as the beginning of the URL because the server + only provides the end of the URL. For example the server provides + 'Captcha?ctoken=Hi...N' and the URL for the image is + 'http://www.google.com/accounts/Captcha?ctoken=Hi...N' + + Returns: + A dictionary containing the information needed to repond to the CAPTCHA + challenge, the image URL and the ID token of the challenge. The + dictionary is in the form: + {'token': string identifying the CAPTCHA image, + 'url': string containing the URL of the image} + Returns None if there was no CAPTCHA challenge in the response. + """ + return gdata.gauth.get_captcha_challenge(http_body, captcha_base_url) + + +GetCaptchaChallenge = get_captcha_challenge + + +def GenerateOAuthRequestTokenUrl( + oauth_input_params, scopes, + request_token_url='https://www.google.com/accounts/OAuthGetRequestToken', + extra_parameters=None): + """Generate a URL at which a request for OAuth request token is to be sent. + + Args: + oauth_input_params: OAuthInputParams OAuth input parameters. + scopes: list of strings The URLs of the services to be accessed. + request_token_url: string The beginning of the request token URL. This is + normally 'https://www.google.com/accounts/OAuthGetRequestToken' or + '/accounts/OAuthGetRequestToken' + extra_parameters: dict (optional) key-value pairs as any additional + parameters to be included in the URL and signature while making a + request for fetching an OAuth request token. All the OAuth parameters + are added by default. But if provided through this argument, any + default parameters will be overwritten. For e.g. a default parameter + oauth_version 1.0 can be overwritten if + extra_parameters = {'oauth_version': '2.0'} + + Returns: + atom.url.Url OAuth request token URL. 
+ """ + scopes_string = ' '.join([str(scope) for scope in scopes]) + parameters = {'scope': scopes_string} + if extra_parameters: + parameters.update(extra_parameters) + oauth_request = oauth.OAuthRequest.from_consumer_and_token( + oauth_input_params.GetConsumer(), http_url=request_token_url, + parameters=parameters) + oauth_request.sign_request(oauth_input_params.GetSignatureMethod(), + oauth_input_params.GetConsumer(), None) + return atom.url.parse_url(oauth_request.to_url()) + + +def GenerateOAuthAuthorizationUrl( + request_token, + authorization_url='https://www.google.com/accounts/OAuthAuthorizeToken', + callback_url=None, extra_params=None, + include_scopes_in_callback=False, scopes_param_prefix='oauth_token_scope'): + """Generates URL at which user will login to authorize the request token. + + Args: + request_token: gdata.auth.OAuthToken OAuth request token. + authorization_url: string The beginning of the authorization URL. This is + normally 'https://www.google.com/accounts/OAuthAuthorizeToken' or + '/accounts/OAuthAuthorizeToken' + callback_url: string (optional) The URL user will be sent to after + logging in and granting access. + extra_params: dict (optional) Additional parameters to be sent. + include_scopes_in_callback: Boolean (default=False) if set to True, and + if 'callback_url' is present, the 'callback_url' will be modified to + include the scope(s) from the request token as a URL parameter. The + key for the 'callback' URL's scope parameter will be + OAUTH_SCOPE_URL_PARAM_NAME. The benefit of including the scope URL as + a parameter to the 'callback' URL, is that the page which receives + the OAuth token will be able to tell which URLs the token grants + access to. + scopes_param_prefix: string (default='oauth_token_scope') The URL + parameter key which maps to the list of valid scopes for the token. + This URL parameter will be included in the callback URL along with + the scopes of the token as value if include_scopes_in_callback=True. + + Returns: + atom.url.Url OAuth authorization URL. + """ + scopes = request_token.scopes + if isinstance(scopes, list): + scopes = ' '.join(scopes) + if include_scopes_in_callback and callback_url: + if callback_url.find('?') > -1: + callback_url += '&' + else: + callback_url += '?' + callback_url += urllib.urlencode({scopes_param_prefix:scopes}) + oauth_token = oauth.OAuthToken(request_token.key, request_token.secret) + oauth_request = oauth.OAuthRequest.from_token_and_callback( + token=oauth_token, callback=callback_url, + http_url=authorization_url, parameters=extra_params) + return atom.url.parse_url(oauth_request.to_url()) + + +def GenerateOAuthAccessTokenUrl( + authorized_request_token, + oauth_input_params, + access_token_url='https://www.google.com/accounts/OAuthGetAccessToken', + oauth_version='1.0', + oauth_verifier=None): + """Generates URL at which user will login to authorize the request token. + + Args: + authorized_request_token: gdata.auth.OAuthToken OAuth authorized request + token. + oauth_input_params: OAuthInputParams OAuth input parameters. + access_token_url: string The beginning of the authorization URL. This is + normally 'https://www.google.com/accounts/OAuthGetAccessToken' or + '/accounts/OAuthGetAccessToken' + oauth_version: str (default='1.0') oauth_version parameter. + oauth_verifier: str (optional) If present, it is assumed that the client + will use the OAuth v1.0a protocol which includes passing the + oauth_verifier (as returned by the SP) in the access token step. 
+ + Returns: + atom.url.Url OAuth access token URL. + """ + oauth_token = oauth.OAuthToken(authorized_request_token.key, + authorized_request_token.secret) + parameters = {'oauth_version': oauth_version} + if oauth_verifier is not None: + parameters['oauth_verifier'] = oauth_verifier + oauth_request = oauth.OAuthRequest.from_consumer_and_token( + oauth_input_params.GetConsumer(), token=oauth_token, + http_url=access_token_url, parameters=parameters) + oauth_request.sign_request(oauth_input_params.GetSignatureMethod(), + oauth_input_params.GetConsumer(), oauth_token) + return atom.url.parse_url(oauth_request.to_url()) + + +def GenerateAuthSubUrl(next, scope, secure=False, session=True, + request_url='https://www.google.com/accounts/AuthSubRequest', + domain='default'): + """Generate a URL at which the user will login and be redirected back. + + Users enter their credentials on a Google login page and a token is sent + to the URL specified in next. See documentation for AuthSub login at: + http://code.google.com/apis/accounts/AuthForWebApps.html + + Args: + request_url: str The beginning of the request URL. This is normally + 'http://www.google.com/accounts/AuthSubRequest' or + '/accounts/AuthSubRequest' + next: string The URL user will be sent to after logging in. + scope: string The URL of the service to be accessed. + secure: boolean (optional) Determines whether or not the issued token + is a secure token. + session: boolean (optional) Determines whether or not the issued token + can be upgraded to a session token. + domain: str (optional) The Google Apps domain for this account. If this + is not a Google Apps account, use 'default' which is the default + value. + """ + # Translate True/False values for parameters into numeric values acceoted + # by the AuthSub service. + if secure: + secure = 1 + else: + secure = 0 + + if session: + session = 1 + else: + session = 0 + + request_params = urllib.urlencode({'next': next, 'scope': scope, + 'secure': secure, 'session': session, + 'hd': domain}) + if request_url.find('?') == -1: + return '%s?%s' % (request_url, request_params) + else: + # The request URL already contained url parameters so we should add + # the parameters using the & seperator + return '%s&%s' % (request_url, request_params) + + +def generate_auth_sub_url(next, scopes, secure=False, session=True, + request_url='https://www.google.com/accounts/AuthSubRequest', + domain='default', scopes_param_prefix='auth_sub_scopes'): + """Constructs a URL string for requesting a multiscope AuthSub token. + + The generated token will contain a URL parameter to pass along the + requested scopes to the next URL. When the Google Accounts page + redirects the broswser to the 'next' URL, it appends the single use + AuthSub token value to the URL as a URL parameter with the key 'token'. + However, the information about which scopes were requested is not + included by Google Accounts. This method adds the scopes to the next + URL before making the request so that the redirect will be sent to + a page, and both the token value and the list of scopes can be + extracted from the request URL. + + Args: + next: atom.url.URL or string The URL user will be sent to after + authorizing this web application to access their data. + scopes: list containint strings The URLs of the services to be accessed. + secure: boolean (optional) Determines whether or not the issued token + is a secure token. + session: boolean (optional) Determines whether or not the issued token + can be upgraded to a session token. 
+ request_url: atom.url.Url or str The beginning of the request URL. This + is normally 'http://www.google.com/accounts/AuthSubRequest' or + '/accounts/AuthSubRequest' + domain: The domain which the account is part of. This is used for Google + Apps accounts, the default value is 'default' which means that the + requested account is a Google Account (@gmail.com for example) + scopes_param_prefix: str (optional) The requested scopes are added as a + URL parameter to the next URL so that the page at the 'next' URL can + extract the token value and the valid scopes from the URL. The key + for the URL parameter defaults to 'auth_sub_scopes' + + Returns: + An atom.url.Url which the user's browser should be directed to in order + to authorize this application to access their information. + """ + if isinstance(next, (str, unicode)): + next = atom.url.parse_url(next) + scopes_string = ' '.join([str(scope) for scope in scopes]) + next.params[scopes_param_prefix] = scopes_string + + if isinstance(request_url, (str, unicode)): + request_url = atom.url.parse_url(request_url) + request_url.params['next'] = str(next) + request_url.params['scope'] = scopes_string + if session: + request_url.params['session'] = 1 + else: + request_url.params['session'] = 0 + if secure: + request_url.params['secure'] = 1 + else: + request_url.params['secure'] = 0 + request_url.params['hd'] = domain + return request_url + + +def AuthSubTokenFromUrl(url): + """Extracts the AuthSub token from the URL. + + Used after the AuthSub redirect has sent the user to the 'next' page and + appended the token to the URL. This function returns the value to be used + in the Authorization header. + + Args: + url: str The URL of the current page which contains the AuthSub token as + a URL parameter. + """ + token = TokenFromUrl(url) + if token: + return 'AuthSub token=%s' % token + return None + + +def TokenFromUrl(url): + """Extracts the AuthSub token from the URL. + + Returns the raw token value. + + Args: + url: str The URL or the query portion of the URL string (after the ?) of + the current page which contains the AuthSub token as a URL parameter. + """ + if url.find('?') > -1: + query_params = url.split('?')[1] + else: + query_params = url + for pair in query_params.split('&'): + if pair.startswith('token='): + return pair[6:] + return None + + +def extract_auth_sub_token_from_url(url, + scopes_param_prefix='auth_sub_scopes', rsa_key=None): + """Creates an AuthSubToken and sets the token value and scopes from the URL. + + After the Google Accounts AuthSub pages redirect the user's broswer back to + the web application (using the 'next' URL from the request) the web app must + extract the token from the current page's URL. The token is provided as a + URL parameter named 'token' and if generate_auth_sub_url was used to create + the request, the token's valid scopes are included in a URL parameter whose + name is specified in scopes_param_prefix. + + Args: + url: atom.url.Url or str representing the current URL. The token value + and valid scopes should be included as URL parameters. + scopes_param_prefix: str (optional) The URL parameter key which maps to + the list of valid scopes for the token. + + Returns: + An AuthSubToken with the token value from the URL and set to be valid for + the scopes passed in on the URL. If no scopes were included in the URL, + the AuthSubToken defaults to being valid for no scopes. If there was no + 'token' parameter in the URL, this function returns None. 
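+
+  Example (editor's illustrative sketch; the token value and scope in the
+  URL are placeholders):
+
+    url = ('http://www.example.com/welcome?token=CKF50YzIHxCT85KMAg'
+           '&auth_sub_scopes=http%3A%2F%2Fwww.google.com%2Fcalendar%2Ffeeds%2F')
+    token = extract_auth_sub_token_from_url(url)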
+ """ + if isinstance(url, (str, unicode)): + url = atom.url.parse_url(url) + if 'token' not in url.params: + return None + scopes = [] + if scopes_param_prefix in url.params: + scopes = url.params[scopes_param_prefix].split(' ') + token_value = url.params['token'] + if rsa_key: + token = SecureAuthSubToken(rsa_key, scopes=scopes) + else: + token = AuthSubToken(scopes=scopes) + token.set_token_string(token_value) + return token + + +def AuthSubTokenFromHttpBody(http_body): + """Extracts the AuthSub token from an HTTP body string. + + Used to find the new session token after making a request to upgrade a + single use AuthSub token. + + Args: + http_body: str The repsonse from the server which contains the AuthSub + key. For example, this function would find the new session token + from the server's response to an upgrade token request. + + Returns: + The header value to use for Authorization which contains the AuthSub + token. + """ + token_value = token_from_http_body(http_body) + if token_value: + return '%s%s' % (AUTHSUB_AUTH_LABEL, token_value) + return None + + +def token_from_http_body(http_body): + """Extracts the AuthSub token from an HTTP body string. + + Used to find the new session token after making a request to upgrade a + single use AuthSub token. + + Args: + http_body: str The repsonse from the server which contains the AuthSub + key. For example, this function would find the new session token + from the server's response to an upgrade token request. + + Returns: + The raw token value to use in an AuthSubToken object. + """ + for response_line in http_body.splitlines(): + if response_line.startswith('Token='): + # Strip off Token= and return the token value string. + return response_line[6:] + return None + + +TokenFromHttpBody = token_from_http_body + + +def OAuthTokenFromUrl(url, scopes_param_prefix='oauth_token_scope'): + """Creates an OAuthToken and sets token key and scopes (if present) from URL. + + After the Google Accounts OAuth pages redirect the user's broswer back to + the web application (using the 'callback' URL from the request) the web app + can extract the token from the current page's URL. The token is same as the + request token, but it is either authorized (if user grants access) or + unauthorized (if user denies access). The token is provided as a + URL parameter named 'oauth_token' and if it was chosen to use + GenerateOAuthAuthorizationUrl with include_scopes_in_param=True, the token's + valid scopes are included in a URL parameter whose name is specified in + scopes_param_prefix. + + Args: + url: atom.url.Url or str representing the current URL. The token value + and valid scopes should be included as URL parameters. + scopes_param_prefix: str (optional) The URL parameter key which maps to + the list of valid scopes for the token. + + Returns: + An OAuthToken with the token key from the URL and set to be valid for + the scopes passed in on the URL. If no scopes were included in the URL, + the OAuthToken defaults to being valid for no scopes. If there was no + 'oauth_token' parameter in the URL, this function returns None. 
+ """ + if isinstance(url, (str, unicode)): + url = atom.url.parse_url(url) + if 'oauth_token' not in url.params: + return None + scopes = [] + if scopes_param_prefix in url.params: + scopes = url.params[scopes_param_prefix].split(' ') + token_key = url.params['oauth_token'] + token = OAuthToken(key=token_key, scopes=scopes) + return token + + +def OAuthTokenFromHttpBody(http_body): + """Parses the HTTP response body and returns an OAuth token. + + The returned OAuth token will just have key and secret parameters set. + It won't have any knowledge about the scopes or oauth_input_params. It is + your responsibility to make it aware of the remaining parameters. + + Returns: + OAuthToken OAuth token. + """ + token = oauth.OAuthToken.from_string(http_body) + oauth_token = OAuthToken(key=token.key, secret=token.secret) + return oauth_token + + +class OAuthSignatureMethod(object): + """Holds valid OAuth signature methods. + + RSA_SHA1: Class to build signature according to RSA-SHA1 algorithm. + HMAC_SHA1: Class to build signature according to HMAC-SHA1 algorithm. + """ + + HMAC_SHA1 = oauth.OAuthSignatureMethod_HMAC_SHA1 + + class RSA_SHA1(oauth_rsa.OAuthSignatureMethod_RSA_SHA1): + """Provides implementation for abstract methods to return RSA certs.""" + + def __init__(self, private_key, public_cert): + self.private_key = private_key + self.public_cert = public_cert + + def _fetch_public_cert(self, unused_oauth_request): + return self.public_cert + + def _fetch_private_cert(self, unused_oauth_request): + return self.private_key + + +class OAuthInputParams(object): + """Stores OAuth input parameters. + + This class is a store for OAuth input parameters viz. consumer key and secret, + signature method and RSA key. + """ + + def __init__(self, signature_method, consumer_key, consumer_secret=None, + rsa_key=None, requestor_id=None): + """Initializes object with parameters required for using OAuth mechanism. + + NOTE: Though consumer_secret and rsa_key are optional, either of the two + is required depending on the value of the signature_method. + + Args: + signature_method: class which provides implementation for strategy class + oauth.oauth.OAuthSignatureMethod. Signature method to be used for + signing each request. Valid implementations are provided as the + constants defined by gdata.auth.OAuthSignatureMethod. Currently + they are gdata.auth.OAuthSignatureMethod.RSA_SHA1 and + gdata.auth.OAuthSignatureMethod.HMAC_SHA1. Instead of passing in + the strategy class, you may pass in a string for 'RSA_SHA1' or + 'HMAC_SHA1'. If you plan to use OAuth on App Engine (or another + WSGI environment) I recommend specifying signature method using a + string (the only options are 'RSA_SHA1' and 'HMAC_SHA1'). In these + environments there are sometimes issues with pickling an object in + which a member references a class or function. Storing a string to + refer to the signature method mitigates complications when + pickling. + consumer_key: string Domain identifying third_party web application. + consumer_secret: string (optional) Secret generated during registration. + Required only for HMAC_SHA1 signature method. + rsa_key: string (optional) Private key required for RSA_SHA1 signature + method. + requestor_id: string (optional) User email adress to make requests on + their behalf. This parameter should only be set when performing + 2 legged OAuth requests. 
+ """ + if (signature_method == OAuthSignatureMethod.RSA_SHA1 + or signature_method == 'RSA_SHA1'): + self.__signature_strategy = 'RSA_SHA1' + elif (signature_method == OAuthSignatureMethod.HMAC_SHA1 + or signature_method == 'HMAC_SHA1'): + self.__signature_strategy = 'HMAC_SHA1' + else: + self.__signature_strategy = signature_method + self.rsa_key = rsa_key + self._consumer = oauth.OAuthConsumer(consumer_key, consumer_secret) + self.requestor_id = requestor_id + + def __get_signature_method(self): + if self.__signature_strategy == 'RSA_SHA1': + return OAuthSignatureMethod.RSA_SHA1(self.rsa_key, None) + elif self.__signature_strategy == 'HMAC_SHA1': + return OAuthSignatureMethod.HMAC_SHA1() + else: + return self.__signature_strategy() + + def __set_signature_method(self, signature_method): + if (signature_method == OAuthSignatureMethod.RSA_SHA1 + or signature_method == 'RSA_SHA1'): + self.__signature_strategy = 'RSA_SHA1' + elif (signature_method == OAuthSignatureMethod.HMAC_SHA1 + or signature_method == 'HMAC_SHA1'): + self.__signature_strategy = 'HMAC_SHA1' + else: + self.__signature_strategy = signature_method + + _signature_method = property(__get_signature_method, __set_signature_method, + doc="""Returns object capable of signing the request using RSA of HMAC. + + Replaces the _signature_method member to avoid pickle errors.""") + + def GetSignatureMethod(self): + """Gets the OAuth signature method. + + Returns: + object of supertype + """ + return self._signature_method + + def GetConsumer(self): + """Gets the OAuth consumer. + + Returns: + object of type + """ + return self._consumer + + +class ClientLoginToken(atom.http_interface.GenericToken): + """Stores the Authorization header in auth_header and adds to requests. + + This token will add it's Authorization header to an HTTP request + as it is made. Ths token class is simple but + some Token classes must calculate portions of the Authorization header + based on the request being made, which is why the token is responsible + for making requests via an http_client parameter. + + Args: + auth_header: str The value for the Authorization header. + scopes: list of str or atom.url.Url specifying the beginnings of URLs + for which this token can be used. For example, if scopes contains + 'http://example.com/foo', then this token can be used for a request to + 'http://example.com/foo/bar' but it cannot be used for a request to + 'http://example.com/baz' + """ + def __init__(self, auth_header=None, scopes=None): + self.auth_header = auth_header + self.scopes = scopes or [] + + def __str__(self): + return self.auth_header + + def perform_request(self, http_client, operation, url, data=None, + headers=None): + """Sets the Authorization header and makes the HTTP request.""" + if headers is None: + headers = {'Authorization':self.auth_header} + else: + headers['Authorization'] = self.auth_header + return http_client.request(operation, url, data=data, headers=headers) + + def get_token_string(self): + """Removes PROGRAMMATIC_AUTH_LABEL to give just the token value.""" + return self.auth_header[len(PROGRAMMATIC_AUTH_LABEL):] + + def set_token_string(self, token_string): + self.auth_header = '%s%s' % (PROGRAMMATIC_AUTH_LABEL, token_string) + + def valid_for_scope(self, url): + """Tells the caller if the token authorizes access to the desired URL. 
+ """ + if isinstance(url, (str, unicode)): + url = atom.url.parse_url(url) + for scope in self.scopes: + if scope == atom.token_store.SCOPE_ALL: + return True + if isinstance(scope, (str, unicode)): + scope = atom.url.parse_url(scope) + if scope == url: + return True + # Check the host and the path, but ignore the port and protocol. + elif scope.host == url.host and not scope.path: + return True + elif scope.host == url.host and scope.path and not url.path: + continue + elif scope.host == url.host and url.path.startswith(scope.path): + return True + return False + + +class AuthSubToken(ClientLoginToken): + def get_token_string(self): + """Removes AUTHSUB_AUTH_LABEL to give just the token value.""" + return self.auth_header[len(AUTHSUB_AUTH_LABEL):] + + def set_token_string(self, token_string): + self.auth_header = '%s%s' % (AUTHSUB_AUTH_LABEL, token_string) + + +class OAuthToken(atom.http_interface.GenericToken): + """Stores the token key, token secret and scopes for which token is valid. + + This token adds the authorization header to each request made. It + re-calculates authorization header for every request since the OAuth + signature to be added to the authorization header is dependent on the + request parameters. + + Attributes: + key: str The value for the OAuth token i.e. token key. + secret: str The value for the OAuth token secret. + scopes: list of str or atom.url.Url specifying the beginnings of URLs + for which this token can be used. For example, if scopes contains + 'http://example.com/foo', then this token can be used for a request to + 'http://example.com/foo/bar' but it cannot be used for a request to + 'http://example.com/baz' + oauth_input_params: OAuthInputParams OAuth input parameters. + """ + + def __init__(self, key=None, secret=None, scopes=None, + oauth_input_params=None): + self.key = key + self.secret = secret + self.scopes = scopes or [] + self.oauth_input_params = oauth_input_params + + def __str__(self): + return self.get_token_string() + + def get_token_string(self): + """Returns the token string. + + The token string returned is of format + oauth_token=[0]&oauth_token_secret=[1], where [0] and [1] are some strings. + + Returns: + A token string of format oauth_token=[0]&oauth_token_secret=[1], + where [0] and [1] are some strings. If self.secret is absent, it just + returns oauth_token=[0]. If self.key is absent, it just returns + oauth_token_secret=[1]. If both are absent, it returns None. + """ + if self.key and self.secret: + return urllib.urlencode({'oauth_token': self.key, + 'oauth_token_secret': self.secret}) + elif self.key: + return 'oauth_token=%s' % self.key + elif self.secret: + return 'oauth_token_secret=%s' % self.secret + else: + return None + + def set_token_string(self, token_string): + """Sets the token key and secret from the token string. + + Args: + token_string: str Token string of form + oauth_token=[0]&oauth_token_secret=[1]. If oauth_token is not present, + self.key will be None. If oauth_token_secret is not present, + self.secret will be None. + """ + token_params = cgi.parse_qs(token_string, keep_blank_values=False) + if 'oauth_token' in token_params: + self.key = token_params['oauth_token'][0] + if 'oauth_token_secret' in token_params: + self.secret = token_params['oauth_token_secret'][0] + + def GetAuthHeader(self, http_method, http_url, realm=''): + """Get the authentication header. + + Args: + http_method: string HTTP method i.e. operation e.g. GET, POST, PUT, etc. 
+ http_url: string or atom.url.Url HTTP URL to which request is made. + realm: string (default='') realm parameter to be included in the + authorization header. + + Returns: + dict Header to be sent with every subsequent request after + authentication. + """ + if isinstance(http_url, types.StringTypes): + http_url = atom.url.parse_url(http_url) + header = None + token = None + if self.key or self.secret: + token = oauth.OAuthToken(self.key, self.secret) + oauth_request = oauth.OAuthRequest.from_consumer_and_token( + self.oauth_input_params.GetConsumer(), token=token, + http_url=str(http_url), http_method=http_method, + parameters=http_url.params) + oauth_request.sign_request(self.oauth_input_params.GetSignatureMethod(), + self.oauth_input_params.GetConsumer(), token) + header = oauth_request.to_header(realm=realm) + header['Authorization'] = header['Authorization'].replace('+', '%2B') + return header + + def perform_request(self, http_client, operation, url, data=None, + headers=None): + """Sets the Authorization header and makes the HTTP request.""" + if not headers: + headers = {} + if self.oauth_input_params.requestor_id: + url.params['xoauth_requestor_id'] = self.oauth_input_params.requestor_id + headers.update(self.GetAuthHeader(operation, url)) + return http_client.request(operation, url, data=data, headers=headers) + + def valid_for_scope(self, url): + if isinstance(url, (str, unicode)): + url = atom.url.parse_url(url) + for scope in self.scopes: + if scope == atom.token_store.SCOPE_ALL: + return True + if isinstance(scope, (str, unicode)): + scope = atom.url.parse_url(scope) + if scope == url: + return True + # Check the host and the path, but ignore the port and protocol. + elif scope.host == url.host and not scope.path: + return True + elif scope.host == url.host and scope.path and not url.path: + continue + elif scope.host == url.host and url.path.startswith(scope.path): + return True + return False + + +class SecureAuthSubToken(AuthSubToken): + """Stores the rsa private key, token, and scopes for the secure AuthSub token. + + This token adds the authorization header to each request made. It + re-calculates authorization header for every request since the secure AuthSub + signature to be added to the authorization header is dependent on the + request parameters. + + Attributes: + rsa_key: string The RSA private key in PEM format that the token will + use to sign requests + token_string: string (optional) The value for the AuthSub token. + scopes: list of str or atom.url.Url specifying the beginnings of URLs + for which this token can be used. For example, if scopes contains + 'http://example.com/foo', then this token can be used for a request to + 'http://example.com/foo/bar' but it cannot be used for a request to + 'http://example.com/baz' + """ + + def __init__(self, rsa_key, token_string=None, scopes=None): + self.rsa_key = keyfactory.parsePEMKey(rsa_key) + self.token_string = token_string or '' + self.scopes = scopes or [] + + def __str__(self): + return self.get_token_string() + + def get_token_string(self): + return str(self.token_string) + + def set_token_string(self, token_string): + self.token_string = token_string + + def GetAuthHeader(self, http_method, http_url): + """Generates the Authorization header. 
+ + The form of the secure AuthSub Authorization header is + Authorization: AuthSub token="token" sigalg="sigalg" data="data" sig="sig" + and data represents a string in the form + data = http_method http_url timestamp nonce + + Args: + http_method: string HTTP method i.e. operation e.g. GET, POST, PUT, etc. + http_url: string or atom.url.Url HTTP URL to which request is made. + + Returns: + dict Header to be sent with every subsequent request after authentication. + """ + timestamp = int(math.floor(time.time())) + nonce = '%lu' % random.randrange(1, 2**64) + data = '%s %s %d %s' % (http_method, str(http_url), timestamp, nonce) + sig = cryptomath.bytesToBase64(self.rsa_key.hashAndSign(data)) + header = {'Authorization': '%s"%s" data="%s" sig="%s" sigalg="rsa-sha1"' % + (AUTHSUB_AUTH_LABEL, self.token_string, data, sig)} + return header + + def perform_request(self, http_client, operation, url, data=None, + headers=None): + """Sets the Authorization header and makes the HTTP request.""" + if not headers: + headers = {} + headers.update(self.GetAuthHeader(operation, url)) + return http_client.request(operation, url, data=data, headers=headers) diff --git a/gam/gdata/analytics/auth.pyc b/gam/gdata/analytics/auth.pyc new file mode 100755 index 0000000000000000000000000000000000000000..942dfdc33de028e345a70b4c87d1a9cc9e2de5b7 GIT binary patch literal 37399 zcmeHwU2q)NnOzS+f&>K$5+#zNL|H9L6hMm_{hG45rezSKMcNX`ZGe_dNuC|dG{7MT zGthK{5M?P_6)6wiWH<5JyV=bqTb14HLtIHEaZ;&Ns#2-SOCGXGUb3}SNhJ@d#*f8Q&InhUMS{hPr5zm8AV z8;hbg{%TP-imrt7T6Cr6ua8An#{BtsbYYKN_V+qbrX`?LE<_==B$(_FkPl5w$0E5=ZTQ zQTx8=Q;frtQTk+byJ*0O% z6QMvl6SW_X+GyvY=u(52) z$8}PV(t32|d9~pSYVZqD`w888F>1$A`ckxl$DibtPon6%S6+_V$Nbf&!qr!zHk!FN zy7r~$+9^G8JluOVYNHpg>gr6m`sJvNZj_%m5$=5@YJV|GzpN*o4OdUIBa5>qL3f@1 zi+>B*V>oTDck0)&)<(OX4&vEwrYkG)qJ2A&J&5~n^!uyb^v(Wirx!Oj@~daE`*C#+ zXNw!lao)e4_Ri!F;L`bvi%aKTZ(K|oZ(O|i#+&C_dvKATz4*-IlO4k$kD_a}=-OEH zQ51bti$221c=XY(=%d}yM-$N}d(gld%l>y9=LcysPqR3`n#Rlh_RYAzqH{DO?d6?T zGw<|!@gRMFBhB)C`{H;pP2;P1zJB`o=Ue@DT3=P$>#hFU^XO*we6!W+Z}jr)c{cwW z{lSG^mN&cIv^}@Jp4G4BYu%?3S6dvf^at@;e~`xQG;emgS)Ff~8?0uh@&7nZ*KqN4 zoaF<3U2kUDW`EEwu4L)pMyHjQF7!7B+Djqdm?^4U1@G~<4zXO z3=izg$+(Xp8N@f5-A-F?XM^w!;}AE|;u)KPnYh_&`{!p=u+?18TUVP6&6}E5dTFJ^ zAkEhMy=+U%I0{LcZwz{FAeJyY=a-f)*%amdI7{(XQj4D&4P`TKbvu~0xGPdp1L~-2 zcAQkP8;8|)GjG}%2`wkNtfswm03vAE!wo&$a8uqOQOuIW^ps{KC$;&>$a&ArLr$5S zX$tZwoR&vF-;WEGp%F#0lhMW|INozUIwCcCC!4)268M3D4%m|N!Q)cVX3{lgq+afY0#k<$+i${^9*de~! 
z?5<=ZT15;(O>6u2w6pOMg`{ZqAx+Fm0nPgo) zc(a+s*+#3CX4%R{my8eAZ}r#H>~!4iWO>}`_n_x{q`+q0U#o8ny7kM0ZWe&-#lX?Ekw$(vcc(IddxyxM79RqM8DLv*52lhk`X22P37 z51MQ1-SlMKS@A7!&CTL>W*p>kg#>q2`u&;rDz$;xdbfJE`iJi%URu=c)6Bd9 z_SagP6y%q>c3q$gp^#qzhO#8|d1jXFk?5o)rlJDVR|bwioqG zukSlu=0j_zSEA$ulWzrFI0!BmTyR!e=)Cl@gj&Tg@$|X_ah4F*{w}48u*0G36`WHJ zWH6p@MKBkMK;KC7#Ej%6V$l}mEwL!=t9g8~zk>r%#x?XsHva4H9f&^OMeG4PA4ubS zW6}HjW$WyYI=Be$&{%Zp;|ao%!MErJpnG6kpri+J2#~TXx(5FMTnBIwpmHD=xWwQz zz}MQ?$9tlW_C(ikM1j35P?X#qeKH=^@_o@Yxw%*lua>2?I4g_rAC4DTJ8o*Kfgemy zS1nleKHv!xx}ZPMJ2WSZ-kej4zXJI5X$3Yqz4eW}0Wi>91J+P27kCK}3s>v{s2Epb z6-skiYy=Y(fC36M(3lB{fGB}Y%p*dn4qtbAT;c|AT0qJU^|x`g0% z(BD`kf&}F?2dhB5z5FEm8Y*(nmS`#7?BMMn(mu@m%|R!J`^>4J`f43SzvYoonNctv z%jjx*vHn8bf+=0C3Jnb9hi5826MuiEQVE3Y%uDqbW++4P=Y*(9?lSHkHH_p=(lkU# z6veW!#UXO~2*iN8!R+(^fwx2}3$p}#vemEkJ3UAfh^TxZl)14Ems6^G)JB0t%}`z+A2*9;S%OLYAbkLdSZAsk|V5()}{#-2bmN4fpytl;VRpK zR;hR{$^$7?ae2`F=Y03WI7EkM0g@+b57Z`W2k@E3S@trnZ%YSxCs(R^bB=&DUEJ1C z?^+b^997c8JU-cfz=5hEilA($j&OwSI~EP#?}x&ITA@PWUFaPstHCc*(by3>2xf6QC$)D(W^UwcXJvJf&h7WhkJ%YQ&_D`}le&FkY92 zLaYQz3V{-8IH)wd59Of@qg@~G0Ab8R~M2#)^;f+AxtGrTA`jk2m915AbKNg-f1 zG2MI4O=$SFDy)n%3pG|DuG6s`oOM=E*A#0GMzO1@AO9wS#DMm1seUTO#qh%D_n>~} zd{@=b+iIVoq8JGHI-%*7o2~1Wrk6bRRHD=w#UML4K>gBOfihP$tO}zKLjr&l&T-)5 z)j<=ulsZNF+J>vdvp@^OdTuT#w6c7!LXa5sOQjhfWB?!y--*-l*?zy9LWa*ep?~HZ z&2E;Spq7Ct%{DI$HsEP6M=x8A`c~o@Ujv6>h6d6)kgSvs)!NW)uW;7-Z8Pulei#JT zIB7#M>TGtRFwrWMC^fFZ4f0%OK&T69KbSwE2{U{GMxH7!hRJKe@bJaC%S-1Qi}M#R zoonEU#-+Jr?ybhc+*{|&)>wuIcBPXOV_BOyRqU))%sFos9-C(AO&G+=pr}P~-q4zK z&Z+gnP93DJv~wdZs^YWaEQi7TXf~GtLs)kTPBw&!Mwp2z!DkdzRAh98M~t-8?#ysh z1Jk$C`5@RxfgGH+E(y|^6CQ7dp$pSN;N_aK)?63BZ$KD$zN>y8nNL;kQ zt8?sU%F}UxW}XIByxHwzJ~lh~)zGXkIRO~kumBAjI0PZznm5jn@SU%_6yF*FO18=< zG$DBeQdr33HHBAo`i$I7!Y97*2^~pNxwF(;ro*YDI$4MD$X+WII^Y;4M_DH=td&l$ zZJ1@y?RJ(07r=?IzYyq5=xY-0DFm{B1!l+GX(=K$c@nc&fHXG6#5193$59}8hKCs* zhyZ4(Tm%j(YCt--G7c0B!a%;ZaG@;Hji6H-B_ghpo==`*`|!ETpj=M3 zypOMF#Jt>>jsi^i&nS>_o}woRO%BwiY6olk$M)48uRVfudib^vR)7)bB;c25FJ}ax z0MadnJc;f25j^=@I5FQJL5d>efF#5RW+x-_qh@w8pvXjYeKHz+O`&t7dyGY2rKybP z1UuI(W_09c`Y4O1C#!b0q|ac3|CSD@G=i-qzid>eyV;nZE|fFHci7H4>JO4}Yn z8=k#AkOO$k5 zBvU0iGox(`oH1uU*Fx-rBq8W4MGB2p9z#4&oj!0C6e$ZAXs0?%0JTX2bvPtrL60y$ zQf%IbY8vkb+5uHM=;!@b-)lK1FMv5vqtn8nlF($eDxQU~hpa<>f|=6P35%D2Z_ND` zR3az??v!+Wdjenl>(nu45^paVOm0YXTAJ17Nc*UKQoP33c(>%~Blz>j2 zMEp5ozY9!3df3XYsg#}nEx-y|nkf|1~1!cAVs#pE0hXL+CjxMOL`hrK{BXlwX`(P2u?{rb@8n>M4bjyrcU`sM)Pk#3YYtDJJk!B<&%{7xk~k5BeP9B8#6 zDGa6@j5CQx8fI`WB3TR<$Nj}|CvZFRE<#wIqQSKb|C6{pg-0N1Pf>*Kkr_uXC7hAC z3r8c0PDa;dAR=FlNlX*b%2-GoMNFyWm>epm>Nz+S@z3Ea=5N>pESY{LONDW^R8V#2 za%NDGF|YULW@!sP$Ypv6RA9(XBd`%z?^r+XjaJY*D3iX8%;y6)I zgBDn=(S~awT*z?xeU@+X&cls$n7%Gn#8>M@<#)IH$gaby;ixNnc}WV6s!2Vu6?29K zyJ3MlXM=N6{bAsapCl8Q2J_gfC z7UpGBa6yCWve48?_?sZ@+Q|!iibuzJ!DF+>&qd zE4_Hhw|ICH2l*jAHYWz>n{8yIko+y#Y}roWviUJB5zp}gr_9{k%`*m*u<{b%QV!&kRhK`;Ff6f-u$GH!y1EN1 z9)gC@3dt+9sY&!8ZTCKC9cREhML`;8+v@S#KBvSprAur;Wr)J73e#N1NXvtMwn)bA26Qaw|%d2WJPk616y*a~&~D-c`XNVytJ210lC88@eZ4IGEgezQ36Q zthwNGDR$jl?jzD-J{8y7O-6HzF@P|D_&^;Fj4GpIsNW9YBk%w}KchLWGfHOhy=JFT z4O_mPVqQaJiNo&R7iZMT&Q;{qe{hRXZ?iVkLa)=RdF!ANT&HcqZJem`S+V!XxX!sK z4r)Man9_n-4rvm;5t`$wc4p&Gm-3^~2~ehmCB2vEHfj*>^ZzIqn5SxR9;3UNE|Ubh zKCc@cG|k8M1K%(^xTHJb9Id8p<*pfb86ExqLZ7-BH5Ycpp=GnDsoP?}R_LITC?jSl z$AE8>G~l>5G3elIzKZh;A5Z8 zL5O_&Ar!C%Z#ntcxG_}a1_$pcHKYI+CJE!&udi~Q0CVyLQ(TVT2~}gfxYZv!40(!I zIqofD#gcuYO_V7Wp-#^C;<_SJ%p5RaScqZbvrw5643ZwBBnk!$xq_}mL3T)HaK0dU zLsisXY)Z9#Zk#c;{di^wC(KanRAR$(FlkC@nNC)-R-$gW`=MQ=T(YY^n=x2TcmeDg ztj=(;7>X7j6hgy5LAMh;okCAg{>aZ;05JhT0WqOK!)0$lpDS>iyo|Gi{zjp|jb2Co 
z+97lPDDM-tIgCa)U~iCzI6%E*fj2JlFvo*{Aai_og6b^U$Sto3!w&%YJp#KJwMNez z1dv0Vc8vdj$l|qQirV71{~nhzoHNB^D$)rFr{b&BL{hk{Y&-_jL`c4PP>ol?2&NVt zL3nNJh})aLsLEgl_?EO|gbjLdVEp|nvWQwca#*omZ>(bwb0MqN7-)-lk(v_vHM4Wu&wzRTfcd1Z3= z-;et{AtO5M-7Cx0|2Bz;9;)cR(h`MneCKB#pKJvOf^?yJI#b3JSbtU*@fE4BIF3h0 zuqFmh92CXL2X#5CaYL~E_4gj9j|ZTeJ{dT#v36!o&hClmiN*KlqPO9=SrjW~{{!#a zF42|5k4R(C+;p@eDiF#pd=^{h*2okK(~vT`-bWfZJTh*=Icp9u%_=;s7rX}FYmr7n za=Tl?02bUr5QUv-V(LYXI|YpqM~9~ivvi%w#drVte~uXaE|$+uDMPe?8#!D#Ye?GQ}KLvRjG)mnTd8yUQN{}^BH%3LIcAy~2} z8k`}`EUto#5EPN2#~TyT;JPRd6jqClAUng-Jm@4e`h@=wH((&rwEIzn&>Ovjr~|>7 zS`ERO-ZX=6?g|!Gz`R2gMm|8$V+~&xXKC+di|EF@TRP=dB*CjGwoyU=0`SK4njR-` zh~P(QS@1jl=7LNdvPFW`M8QIrxM@QXTcI&CSUGYkQeL7u65cQnpWX02_qYTVf{b?P z~gxzG@D%2hKkl%&HjjGl1p*t(V z=xsE;+X1W@GbNV-n|*|r6e(Y>r>^M+1g$2&LZO7Tg24xAKg44|A$^&sYz>iU#er&d zRPfy>!BK%V4v!;)+CnKV+7YC|&^SpNp*QXyX{Cb83NRFUfx)PZBv$G1=@E?`>3^#{ zyEXh@w8gi{|DUAgt;$I>tjX+_$vU%bd%VmM8ws+wrY8qSN z=9V505CxH;#fCJta2+Cj+ZB-yAdH;T_9$ja zA)&Tc-n3;25QPfZ4zgwgh`$KaYcd;}oE@*YVOE2)LJEeqiYZ2D&HfAO4F&2C6QfSo zb|I&5x^_fZ_23v6;i1ac}_PA?GDm#&O{f^Au-rWJsPU2bODUjddwc zAxSc><1SbP<`R5h&VU+)H3JW0Mk`BIC@9N>XzRvq-MLUvkoxxS(h3~PWQ?Bgt7g{^ z!d6+p4vkuKOc6plEkO9Kk+g{cpBbQa*~c^Qbk83Kfs{xr45&DS31f?w&o-1e_U7E# zb8nVVG$VXT1BV@8Acb+ujP?!C_j$zA&SEbA8);ln8cWM2;9lQK(J$e(9~f}fHWPxy zB4%v1U_g4XpR^5Z16aHtuy`Dxgs3X}SAXvGP38VGC2+pb{(UYfe8|`!ivYe*Q4NE} z{_-`%g=P8?B`=Yx&p1?1ac3}|fWFJHVDepD8zNbk+GPv>8!Wa1`c0as_)i0!-v3Z- zdJO2D!7c({ek6Gro8LKl2aYCshq-rW^p3cH1-&EEKj|PktN;M`PognGdPZM3blIO& zvAGf%G@c$#&`-rc3P{E-nOA=nu+w2Rm&QsM0RqF{t--k^yL_h5+^5|spi)TtQScm1 zV*tSE))JTsa{7+wJR{%*lmrGsZSEyDLMP*8u3AwBPzvJ_x~A#9ZS9!jMf=0$syZJ( ztY6}@jozq|NNec8gt2JAi5-~}N+?L_wNOMA_&9E_nnnob>A@wgm#Ajzte2%OMP|;3}hY}~# zl~JO0a&n;fpS9soYSz7&4Hd%Pk@27eFvjsM8Oup#NN@tUoqp%Fj8lW>< zj3)zurpgW!jM9yZAtO8jH$DgjW2=dd45R;phLWfRgZ82K3)&~_KV|42pdSbcDh0{5 zfJZ!mYZ2N(Zbi5E&ROWTOU0HL)fMsD#D>gbd54fxei5Mh?UVtrYe-B~SDZpoWd0f) zCfKd!h5{!}<-k`_UJ?q!L8|@&>$)M^66? 
zPoD30ksn~{!N{RlacUU^VRRSg8jI)WUObI`HJGJ=3F5{Ea0~jCg^EEA+Hw~4<79E} zIhIE{$0{tM{OTG9|NL8X^LJGZ9~e=O7Bk=#sw)f%_`?R}#L%E%EYN>$Td0BHo`vJO zvYVp;&=qBKI9&3(C3Lva30Jii$%f5knAFHoh~2Po0F0B^)wq?yOI??%m{Wu4G_c7j zH>3!w42Tb&;D)Bkw$pI?J8@0H`k-@zn~Xsrh!WQ~mXU_gV8u$sl6yN#yDLc-rIR%r zc0z-D@mvF|u{wF9@r$H7_CCVmM*(HFo1-|7PsWT^i;eik@|L}s1rwm#NTj;aqieHa zOzv>lP8!D~uZET}H%i^V%x3b1(e@k3F|xoC8=0*0D?WDE_ReWq3E~^%=7$sVouWko z?jpxVgAUk6L-tOifn{eKU4G>-HX2A@Z8VZT&Xf0f82HBQpjpXvJiNd*a5g7u6JwLR zCdZY)m1L|O3MNW6c%XF4xw+dYY0?PnsHHsBZ*DTPrdFzqeB3jj+EesHXWJg*1hKd1 z96Lk#H?bR?8B@NvURm0tAh0=g=~;X-T9ReihkR)F2Y$yMpI~$2cSu9y_lYRo6Q#Q; z8QZvvSlw8SyOUy1P{;=uXSN&YK8gimhU_?ib-O(hAlR6hg3Se+kc>EKBDlt+ zD!PPBtPXFj5Z~y0Sm#D2TF_nCb6O)s&u$@BeI^%lJ0%Uh1-P%AbBF(;By>Igw$f*+Z36Xza?61)Ah}Xa z9C<7dM34|B13Wd#K^$I=9UAtQ(h%$Myk!`4i+mi#`QXLC%kK2FmouE@QcLdiSdeFc znrJh2p5&bRGTECmKd=v++Au$zmRldETZo2t2F*h?h5bw)uJ*0SCO z7{}DmfoAj#LPHu@e6EM3*eO!2;GIBXC%hD;Hod+0#sw^{xX~H(d&D9q;tQBLByr&j zV3&3+({@6ELLD~2dpOm_CLy3s%ix`5KeaKG@rTWIsV`)sQV-zgxS+6<;)_O=2&fiq zlJ5mG$XU$#Ai~ZXxLsNPNT#HvyWVMCheHp|L5NG9Li!L(-82SiqU~$T@W^0AFrrde ze@ZE80u`g9QS6IcPiRuy)sNG*+>P zys0agqctUO8STK`lH>vaQ;N;H;ca0nlzPL0J8W;@%8f!LL^2<2e`AC3+ zwv5DYawP6YwH%3pAhf_ArcL&j`EIuxs}u9B=DIeEfX?N(QU*{eJ7RZAiij2kD6+h8 za@?AvP@CFL3=eH-k5?BE_01c72GOBe~3iRA^A zB~h^@Ctbb@jK|HZww0|lOV*_GUw3IN5iq&E`uy0y;%~C6%I0tzF9#4a6hkYGbCE=2 zRw-+~)fAZoHK)5!K#V5PEovXay@dW4^Wj|!^vIxCgn@~ z*OVay1oGf|g?CG(w_sX^ue~RI<9wQ+tVZMC1PNUWJ}J69HCE?w0{0MnZP)ikgD12d zoq}HwTBn$`jD#S4XQ3Jvq8e(He+j-j_?_TW!o5$%#$5U`gEEU=ZI5DO@K{^1Bh&0I zN+$VKSDs1bA$isHn;&emi8jCXS*nF&W!Z%$<4X9P>Zruc0`^oB3L01h@~@ys@~b!$ znqu2ZLdf13F@7(af5*Y5#E!B4*|myEXaDmDU(6$_+nFqV|?sT=8PNccYN{G^?nZc*30kr=HX z>TbQ?h2;J?&{~1RL&p!}0niXrdn|tvqu^N*&qWJj6sFl(VBkl%mWUO2SHXc)M%arvvXE{%RCFLKMmXvGi5kN+L3_O`1_*e8KzF)&;Az-$Ba{K;-C9qgvYVebh_y5S< zAK(NJvEUy?@4v#o;i_;&CGZ1YRww9h@QP|h>9@gwjuYdS$j zPOg*nCf1%X)S#6q+NrQ4!k^)rkKnKavJr=avpVet)U0Ll{1;MDjQXN+rV~7XJ!*F0 z{|8}o?HW6Z^W#XroWaknEX>j{y(Jt@euZ~`$it6$VA6E*J2({W)Y-Phkud2$W$Vb9 z6AvK889}G<+SFbKpOW`*v4xNRQ6QbgM_JJUu@Z!0(Tb170(+viDA+22k#8fxzKjRF z1lyVjyItXx{5q>y#^Ihslm_;Hb41vW=ra&WY|>{blKwO_u4W6Yis)byIh)TXlCu@u6yl~grQkAw4_wkt-a}MVcqi~KKw7j1 z#)=b(BWp!pEVjk9oo3ajIRZn7YxwP{U0>&NLAab+H!%Xid3<~t^Y{?;(!(znV%q^>-YjCPk3d}LO2T%;q}M7 zB_!z10!X=`OP314t??v)x83F_=e0eYC1L@ml=q@5?&7*>t?lb7I)olTgAFUoZ|#2d zjxFJ59v``O4J&_Aw32rv+V4#{Q2!og6Cfh-%An>rlzMp!C-(Be%Q^v2dYOnFIe;Au zAnYOm+EYA(X^G#b0G4vj^1&Ojj8x{3uHor-XlPk!?Ei*Lb$(>9WPgZWdH-yIuwcN! 
zEo;mtmR|~h-)=!1xm^q)OW;d@oWc*SsHhQ_`2Ng0FTD3m>AMTcpLyrS_vkX=Dl?Rj zVTMZ~Sm+{nZtb5cbxkz+gWPp5wk>=a8sI4VjxSf8@phWP-MW}uRd*T!-9^S1ye&0i>of%M*x($l5EQYG3!r^%RVQG8i!kU9hmow75SQK45yIh3cRDYy;Ohz?3cNl)}+ z`JGKm6$X-%s}-m~BtmK~J@01wPSrTluhgVhDPnOwP4UB-$Yi)~D=IUQV22Z%{7oE; zkQ5MVt#-_7uAh>4KTG}w3kAVnvYsBs4TUsqBn3%!vOS#4n-@rFgn?YYKUI4epPb+H z>p--`>9Co{C;Oi`;1+1c!jyz8QO(vSwt zhF|{CR>`1A-m>5O!Cn1i!Y2~v^mO69`1u6Ah5juNy9k)Srcm3(?LCpW`=F}Dg9jKR z{=`Y7(8Dwe)G5Q$1tlX>&#gJ0c&mo-GZ`!(AwdsqpT@1rxtHF%kLZEKn@bYgkREOP0rP!la zK8n(aYhPTs9@M3TkycP`6g{h5ypO+hc43y%UG5K+V|*@CHH(r%OZ+~(_FB%(z>=li zfMeG6Bo8NeIL5={JS_3>6c111AlC_g9mYn=c}NwpwM`<0=6;W*xiUKWeLli;MJsRJ zeGcCX^K;wiPF`Hl@EoSh=Haz*Fg3GWqx}cqDnUjp+$N7HS%KT8Q|}z8Jvzcay@E%; zPT4fK&!j)vLoxua!HeQL(?|M|QhKOa0!fhITHFxVS}G)lyO{=pYZT&I28WtXMuDTjl&&)aX*AL@XD9Kji0+h@1rYzlq=M2$o(<~$vAD1XUBQa z#LVza<`A=yli%dwuk-L*Jp48f-{pZ?wxnD2>=9nW=Wgedp28K*XUvD?fV`b6^0twA zb4C6b^_Uk@v`B)vH-993f&XU=O?((96AvP9yf!s4IX-!4a(e1<_$4QkcX6YIkN%Mc z37WSbMIV;~YGnM}0_^Zg$qnVB=vZJ<_C6uY4viK>P7Y^OP^}Cg`bX5H+#70KV9Ra2MdAO;5oekial-yMOszGJ~ zD+ZZxM}zF0c`RV+iX(IHeuCd9HG*~fPiRDNIcf@tfL%I+-mm5ebm}Ya-tANnUHw+6 z5jd^>rYr#xM#4-+8jG~MsMPU?o!UTgh!+9?!n`x%>kutR07Be=O($~?$PiZA%C6`T z&?ZoG_#r-;k+AEmGW~8J>^6XSjypoBs0F|4fX86jowDD~aVwM_MbG_q0TxRgg)5y^ zN7+RmURc~x)jOk}gx5fVvvTirRaKoiCZTzZ(iH1G&m6P=AJcxxXO1zyLicpzn5tuz zhHgXnI==>7Zq6l0Sx&Xcrm8e+z|VM=%pyC-W&^R-9OU?IdiY2XGudaY9jv-2hO;+6 zkLIgd`k7gmiYyi8yxEmzBX&3JCj!VMov-7N5wIQ4V*a&p%%0WvW5+W;XFS6!p3x;g ztx&_yWmlUo9+MT?rRUK9dmz%!i^%_jUpQ6d9IoQ4vPf6Dee6F;xyi|r9f+SD#BR#6 zB6*6>YASIeVcZ(rT*pSce4g@W`6hx=OZ~G*&pY*s#Iae)&Ft!2ue}Hru4t~MZ|ubb zCZ2zfXEZ;}SmoLj(utZRT9M_XCZmVR-$lutU}fUj-=W~RA1BfDW3YRU!up&TX>q=e zBEuFZ>`5N$4|36MS{qP!v!1W>1#vMpl;m%nZe!a-88GOiAD3<}Y!}M-g4*c)%cy0C<_>$k{;UDnuQyk!m z<`3K^|A-gTSiy zG)j~%*1>clQT#vofG_1dS%JKpVYh^|zL9sjS-pf{=G{0`4UBl!`o8RyaLPe?^-o!3Y6-3|oOn|-WuODuhkdx<6NVZ!y1d*m{Z&d67(9olQBY)um2L)hQK5881qC5H; element identifies an author-or more generally, an entity + responsible for creating the volume in question. Examples of a creator + include a person, an organization, or a service. In the case of + anthologies, proceedings, or other edited works, this field may be used to + indicate editors or other entities responsible for collecting the volume's + contents. + + This element appears as a child of . If there are multiple authors or + contributors to the book, there may be multiple elements in the + volume entry (one for each creator or contributor). + """ + + _tag = 'creator' + _namespace = DC_NAMESPACE + + +class Date(_AtomFromString): #iso 8601 / W3CDTF profile + """ + The element indicates the publication date of the specific volume + in question. If the book is a reprint, this is the reprint date, not the + original publication date. The date is encoded according to the ISO-8601 + standard (and more specifically, the W3CDTF profile). + + The element can appear only as a child of . + + Usually only the year or the year and the month are given. + + YYYY-MM-DDThh:mm:ssTZD TZD = -hh:mm or +hh:mm + """ + + _tag = 'date' + _namespace = DC_NAMESPACE + + +class Description(_AtomFromString): + """ + The element includes text that describes a book or book + result. In a search result feed, this may be a search result "snippet" that + contains the words around the user's search term. For a single volume feed, + this element may contain a synopsis of the book. + + The element can appear only as a child of + """ + + _tag = 'description' + _namespace = DC_NAMESPACE + + +class Format(_AtomFromString): + """ + The element describes the physical properties of the volume. 
+ Currently, it indicates the number of pages in the book, but more + information may be added to this field in the future. + + This element can appear only as a child of . + """ + + _tag = 'format' + _namespace = DC_NAMESPACE + + +class Identifier(_AtomFromString): + """ + The element provides an unambiguous reference to a + particular book. + * Every contains at least one child. + * The first identifier is always the unique string Book Search has assigned + to the volume (such as s1gVAAAAYAAJ). This is the ID that appears in the + book's URL in the Book Search GUI, as well as in the URL of that book's + single item feed. + * Many books contain additional elements. These provide + alternate, external identifiers to the volume. Such identifiers may + include the ISBNs, ISSNs, Library of Congress Control Numbers (LCCNs), + and OCLC numbers; they are prepended with a corresponding namespace + prefix (such as "ISBN:"). + * Any can be passed to the Dynamic Links, used to + instantiate an Embedded Viewer, or even used to construct static links to + Book Search. + The element can appear only as a child of . + """ + + _tag = 'identifier' + _namespace = DC_NAMESPACE + + +class Publisher(_AtomFromString): + """ + The element contains the name of the entity responsible for + producing and distributing the volume (usually the specific edition of this + book). Examples of a publisher include a person, an organization, or a + service. + + This element can appear only as a child of . If there is more than + one publisher, multiple elements may appear. + """ + + _tag = 'publisher' + _namespace = DC_NAMESPACE + + +class Subject(_AtomFromString): + """ + The element identifies the topic of the book. Usually this is + a Library of Congress Subject Heading (LCSH) or Book Industry Standards + and Communications Subject Heading (BISAC). + + The element can appear only as a child of . There may + be multiple elements per entry. + """ + + _tag = 'subject' + _namespace = DC_NAMESPACE + + +class Title(_AtomFromString): + """ + The element contains the title of a book as it was published. If + a book has a subtitle, it appears as a second element in the book + result's . + """ + + _tag = 'title' + _namespace = DC_NAMESPACE + + +class Viewability(_AtomFromString): + """ + Google Book Search respects the user's local copyright restrictions. As a + result, previews or full views of some books are not available in all + locations. The element indicates whether a book is fully + viewable, can be previewed, or only has "about the book" information. These + three "viewability modes" are the same ones returned by the Dynamic Links + API. + + The element can appear only as a child of . 
+ + The value attribute will take the form of the following URIs to represent + the relevant viewing capability: + + Full View: http://schemas.google.com/books/2008#view_all_pages + Limited Preview: http://schemas.google.com/books/2008#view_partial + Snippet View/No Preview: http://schemas.google.com/books/2008#view_no_pages + Unknown view: http://schemas.google.com/books/2008#view_unknown + """ + + _tag = 'viewability' + _namespace = BOOK_SEARCH_NAMESPACE + _attributes = atom.AtomBase._attributes.copy() + _attributes['value'] = 'value' + + def __init__(self, value=None, text=None, + extension_elements=None, extension_attributes=None): + self.value = value + _AtomFromString.__init__(self, extension_elements=extension_elements, + extension_attributes=extension_attributes, text=text) + + +class Embeddability(_AtomFromString): + """ + Many of the books found on Google Book Search can be embedded on third-party + sites using the Embedded Viewer. The element indicates + whether a particular book result is available for embedding. By definition, + a book that cannot be previewed on Book Search cannot be embedded on third- + party sites. + + The element can appear only as a child of . + + The value attribute will take on one of the following URIs: + embeddable: http://schemas.google.com/books/2008#embeddable + not embeddable: http://schemas.google.com/books/2008#not_embeddable + """ + + _tag = 'embeddability' + _namespace = BOOK_SEARCH_NAMESPACE + _attributes = atom.AtomBase._attributes.copy() + _attributes['value'] = 'value' + + def __init__(self, value=None, text=None, extension_elements=None, + extension_attributes=None): + self.value = value + _AtomFromString.__init__(self, extension_elements=extension_elements, + extension_attributes=extension_attributes, text=text) + + +class Review(_AtomFromString): + """ + When present, the element contains a user-generated review for + a given book. This element currently appears only in the user library and + user annotation feeds, as a child of . + + type: text, html, xhtml + xml:lang: id of the language, a guess, (always two letters?) + """ + + _tag = 'review' + _namespace = BOOK_SEARCH_NAMESPACE + _attributes = atom.AtomBase._attributes.copy() + _attributes['type'] = 'type' + _attributes['{http://www.w3.org/XML/1998/namespace}lang'] = 'lang' + + def __init__(self, type=None, lang=None, text=None, + extension_elements=None, extension_attributes=None): + self.type = type + self.lang = lang + _AtomFromString.__init__(self, extension_elements=extension_elements, + extension_attributes=extension_attributes, text=text) + + +class Rating(_AtomFromString): + """All attributes must take an integral string between 1 and 5. + The min, max, and average attributes represent 'community' ratings. The + value attribute is the user's (of the feed from which the item is fetched, + not necessarily the authenticated user) rating of the book. 
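+  For example (hypothetical values), a book whose community ratings run from 1 to
+  5 with an average of 3.5, and which the owner of the fetched feed rated 4, would
+  be represented as <gd:rating min='1' max='5' average='3.5' value='4'/>.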
+ """ + + _tag = 'rating' + _namespace = gdata.GDATA_NAMESPACE + _attributes = atom.AtomBase._attributes.copy() + _attributes['min'] = 'min' + _attributes['max'] = 'max' + _attributes['average'] = 'average' + _attributes['value'] = 'value' + + def __init__(self, min=None, max=None, average=None, value=None, text=None, + extension_elements=None, extension_attributes=None): + self.min = min + self.max = max + self.average = average + self.value = value + _AtomFromString.__init__(self, extension_elements=extension_elements, + extension_attributes=extension_attributes, text=text) + + +class Book(_AtomFromString, gdata.GDataEntry): + """ + Represents an from either a search, annotation, library, or single + item feed. Note that dc_title attribute is the proper title of the volume, + title is an atom element and may not represent the full title. + """ + + _tag = 'entry' + _namespace = atom.ATOM_NAMESPACE + _children = gdata.GDataEntry._children.copy() + for i in (Creator, Identifier, Publisher, Subject,): + _children['{%s}%s' % (i._namespace, i._tag)] = (i._tag, [i]) + for i in (Date, Description, Format, Viewability, Embeddability, + Review, Rating): # Review, Rating maybe only in anno/lib entrys + _children['{%s}%s' % (i._namespace, i._tag)] = (i._tag, i) + # there is an atom title as well, should we clobber that? + del(i) + _children['{%s}%s' % (Title._namespace, Title._tag)] = ('dc_title', [Title]) + + def to_dict(self): + """Returns a dictionary of the book's available metadata. If the data + cannot be discovered, it is not included as a key in the returned dict. + The possible keys are: authors, embeddability, date, description, + format, identifiers, publishers, rating, review, subjects, title, and + viewability. + + Notes: + * Plural keys will be lists + * Singular keys will be strings + * Title, despite usually being a list, joins the title and subtitle + with a space as a single string. + * embeddability and viewability only return the portion of the URI + after # + * identifiers is a list of tuples, where the first item of each tuple + is the type of identifier and the second item is the identifying + string. Note that while doing dict() on this tuple may be possible, + some items may have multiple of the same identifier and converting + to a dict may resulted in collisions/dropped data. + * Rating returns only the user's rating. See Rating class for precise + definition. + """ + d = {} + if self.GetAnnotationLink(): + d['annotation'] = self.GetAnnotationLink().href + if self.creator: + d['authors'] = [x.text for x in self.creator] + if self.embeddability: + d['embeddability'] = self.embeddability.value.split('#')[-1] + if self.date: + d['date'] = self.date.text + if self.description: + d['description'] = self.description.text + if self.format: + d['format'] = self.format.text + if self.identifier: + d['identifiers'] = [('google_id', self.identifier[0].text)] + for x in self.identifier[1:]: + l = x.text.split(':') # should we lower the case of the ids? 
+ d['identifiers'].append((l[0], ':'.join(l[1:]))) + if self.GetInfoLink(): + d['info'] = self.GetInfoLink().href + if self.GetPreviewLink(): + d['preview'] = self.GetPreviewLink().href + if self.publisher: + d['publishers'] = [x.text for x in self.publisher] + if self.rating: + d['rating'] = self.rating.value + if self.review: + d['review'] = self.review.text + if self.subject: + d['subjects'] = [x.text for x in self.subject] + if self.GetThumbnailLink(): + d['thumbnail'] = self.GetThumbnailLink().href + if self.dc_title: + d['title'] = ' '.join([x.text for x in self.dc_title]) + if self.viewability: + d['viewability'] = self.viewability.value.split('#')[-1] + return d + + def __init__(self, creator=None, date=None, + description=None, format=None, author=None, identifier=None, + publisher=None, subject=None, dc_title=None, viewability=None, + embeddability=None, review=None, rating=None, category=None, + content=None, contributor=None, atom_id=None, link=None, + published=None, rights=None, source=None, summary=None, + title=None, control=None, updated=None, text=None, + extension_elements=None, extension_attributes=None): + self.creator = creator + self.date = date + self.description = description + self.format = format + self.identifier = identifier + self.publisher = publisher + self.subject = subject + self.dc_title = dc_title or [] + self.viewability = viewability + self.embeddability = embeddability + self.review = review + self.rating = rating + gdata.GDataEntry.__init__(self, author=author, category=category, + content=content, contributor=contributor, atom_id=atom_id, + link=link, published=published, rights=rights, source=source, + summary=summary, title=title, control=control, updated=updated, + text=text, extension_elements=extension_elements, + extension_attributes=extension_attributes) + + def GetThumbnailLink(self): + """Returns the atom.Link object representing the thumbnail URI.""" + for i in self.link: + if i.rel == THUMBNAIL_REL: + return i + + def GetInfoLink(self): + """ + Returns the atom.Link object representing the human-readable info URI. + """ + for i in self.link: + if i.rel == INFO_REL: + return i + + def GetPreviewLink(self): + """Returns the atom.Link object representing the preview URI.""" + for i in self.link: + if i.rel == PREVIEW_REL: + return i + + def GetAnnotationLink(self): + """ + Returns the atom.Link object representing the Annotation URI. + Note that the use of www.books in the href of this link seems to make + this information useless. Using books.service.ANNOTATION_FEED and + BOOK_SERVER to construct your URI seems to work better. + """ + for i in self.link: + if i.rel == ANNOTATION_REL: + return i + + def set_rating(self, value): + """Set user's rating. 
Must be an integral string between 1 nad 5""" + assert (value in ('1','2','3','4','5')) + if not isinstance(self.rating, Rating): + self.rating = Rating() + self.rating.value = value + + def set_review(self, text, type='text', lang='en'): + """Set user's review text""" + self.review = Review(text=text, type=type, lang=lang) + + def get_label(self): + """Get users label for the item as a string""" + for i in self.category: + if i.scheme == LABEL_SCHEME: + return i.term + + def set_label(self, term): + """Clear pre-existing label for the item and set term as the label.""" + self.remove_label() + self.category.append(atom.Category(term=term, scheme=LABEL_SCHEME)) + + def remove_label(self): + """Clear the user's label for the item""" + ln = len(self.category) + for i, j in enumerate(self.category[::-1]): + if j.scheme == LABEL_SCHEME: + del(self.category[ln-1-i]) + + def clean_annotations(self): + """Clear all annotations from an item. Useful for taking an item from + another user's library/annotation feed and adding it to the + authenticated user's library without adopting annotations.""" + self.remove_label() + self.review = None + self.rating = None + + + def get_google_id(self): + """Get Google's ID of the item.""" + return self.id.text.split('/')[-1] + + +class BookFeed(_AtomFromString, gdata.GDataFeed): + """Represents a feed of entries from a search.""" + + _tag = 'feed' + _namespace = atom.ATOM_NAMESPACE + _children = gdata.GDataFeed._children.copy() + _children['{%s}%s' % (Book._namespace, Book._tag)] = (Book._tag, [Book]) + + +if __name__ == '__main__': + import doctest + doctest.testfile('datamodels.txt') diff --git a/gam/gdata/analytics/books/data.py b/gam/gdata/analytics/books/data.py new file mode 100755 index 00000000000..3f7f978b34d --- /dev/null +++ b/gam/gdata/analytics/books/data.py @@ -0,0 +1,90 @@ +#!/usr/bin/python +# +# Copyright (C) 2009 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
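# Illustrative usage of the Book annotation helpers defined in
# gdata/books/__init__.py above (the service object and volume ID are hypothetical):
#
#   book = service.get_by_google_id('s1gVAAAAYAAJ')
#   book.set_rating('4')                  # must be one of '1' through '5'
#   book.set_review('Great read.', type='text', lang='en')
#   book.set_label('to-read')             # replaces any existing label category
#   book.clean_annotations()              # strips label, review and rating again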
+ +"""Contains the data classes of the Google Book Search Data API""" + + +__author__ = 'j.s@google.com (Jeff Scudder)' + + +import atom.core +import atom.data +import gdata.data +import gdata.dublincore.data +import gdata.opensearch.data + + +GBS_TEMPLATE = '{http://schemas.google.com/books/2008/}%s' + + +class CollectionEntry(gdata.data.GDEntry): + """Describes an entry in a feed of collections.""" + + +class CollectionFeed(gdata.data.BatchFeed): + """Describes a Book Search collection feed.""" + entry = [CollectionEntry] + + +class Embeddability(atom.core.XmlElement): + """Describes an embeddability.""" + _qname = GBS_TEMPLATE % 'embeddability' + value = 'value' + + +class OpenAccess(atom.core.XmlElement): + """Describes an open access.""" + _qname = GBS_TEMPLATE % 'openAccess' + value = 'value' + + +class Review(atom.core.XmlElement): + """User-provided review.""" + _qname = GBS_TEMPLATE % 'review' + lang = 'lang' + type = 'type' + + +class Viewability(atom.core.XmlElement): + """Describes a viewability.""" + _qname = GBS_TEMPLATE % 'viewability' + value = 'value' + + +class VolumeEntry(gdata.data.GDEntry): + """Describes an entry in a feed of Book Search volumes.""" + comments = gdata.data.Comments + language = [gdata.dublincore.data.Language] + open_access = OpenAccess + format = [gdata.dublincore.data.Format] + dc_title = [gdata.dublincore.data.Title] + viewability = Viewability + embeddability = Embeddability + creator = [gdata.dublincore.data.Creator] + rating = gdata.data.Rating + description = [gdata.dublincore.data.Description] + publisher = [gdata.dublincore.data.Publisher] + date = [gdata.dublincore.data.Date] + subject = [gdata.dublincore.data.Subject] + identifier = [gdata.dublincore.data.Identifier] + review = Review + + +class VolumeFeed(gdata.data.BatchFeed): + """Describes a Book Search volume feed.""" + entry = [VolumeEntry] + + diff --git a/gam/gdata/analytics/books/service.py b/gam/gdata/analytics/books/service.py new file mode 100755 index 00000000000..cbb846fcd42 --- /dev/null +++ b/gam/gdata/analytics/books/service.py @@ -0,0 +1,266 @@ +#!/usr/bin/python + +""" + Extend gdata.service.GDataService to support authenticated CRUD ops on + Books API + + http://code.google.com/apis/books/docs/getting-started.html + http://code.google.com/apis/books/docs/gdata/developers_guide_protocol.html + + TODO: (here and __init__) + * search based on label, review, or other annotations (possible?) + * edit (specifically, Put requests) seem to fail effect a change + + Problems With API: + * Adding a book with a review to the library adds a note, not a review. + This does not get included in the returned item. You see this by + looking at My Library through the website. + * Editing a review never edits a review (unless it is freshly added, but + see above). More generally, + * a Put request with changed annotations (label/rating/review) does NOT + change the data. Note: Put requests only work on the href from + GetEditLink (as per the spec). Do not try to PUT to the annotate or + library feeds, this will cause a 400 Invalid URI Bad Request response. + Attempting to Post to one of the feeds with the updated annotations + does not update them. See the following for (hopefully) a follow up: + google.com/support/forum/p/booksearch-apis/thread?tid=27fd7f68de438fc8 + * Attempts to workaround the edit problem continue to fail. For example, + removing the item, editing the data, readding the item, gives us only + our originally added data (annotations). 
This occurs even if we + completely shut python down, refetch the book from the public feed, + and re-add it. There is some kind of persistence going on that I + cannot change. This is likely due to the annotations being cached in + the annotation feed and the inability to edit (see Put, above) + * GetAnnotationLink has www.books.... as the server, but hitting www... + results in a bad URI error. + * Spec indicates there may be multiple labels, but there does not seem + to be a way to get the server to accept multiple labels, nor does the + web interface have an obvious way to have multiple labels. Multiple + labels are never returned. +""" + +__author__ = "James Sams " +__copyright__ = "Apache License v2.0" + +from shlex import split + +import gdata.service +try: + import books +except ImportError: + import gdata.books as books + + +BOOK_SERVER = "books.google.com" +GENERAL_FEED = "/books/feeds/volumes" +ITEM_FEED = "/books/feeds/volumes/" +LIBRARY_FEED = "/books/feeds/users/%s/collections/library/volumes" +ANNOTATION_FEED = "/books/feeds/users/%s/volumes" +PARTNER_FEED = "/books/feeds/p/%s/volumes" +BOOK_SERVICE = "print" +ACCOUNT_TYPE = "HOSTED_OR_GOOGLE" + + +class BookService(gdata.service.GDataService): + + def __init__(self, email=None, password=None, source=None, + server=BOOK_SERVER, account_type=ACCOUNT_TYPE, + exception_handlers=tuple(), **kwargs): + """source should be of form 'ProgramCompany - ProgramName - Version'""" + + gdata.service.GDataService.__init__(self, email=email, + password=password, service=BOOK_SERVICE, source=source, + server=server, **kwargs) + self.exception_handlers = exception_handlers + + def search(self, q, start_index="1", max_results="10", + min_viewability="none", feed=GENERAL_FEED, + converter=books.BookFeed.FromString): + """ + Query the Public search feed. q is either a search string or a + gdata.service.Query instance with a query set. + + min_viewability must be "none", "partial", or "full". + + If you change the feed to a single item feed, note that you will + probably need to change the converter to be Book.FromString + """ + + if not isinstance(q, gdata.service.Query): + q = gdata.service.Query(text_query=q) + if feed: + q.feed = feed + q['start-index'] = start_index + q['max-results'] = max_results + q['min-viewability'] = min_viewability + return self.Get(uri=q.ToUri(),converter=converter) + + def search_by_keyword(self, q='', feed=GENERAL_FEED, start_index="1", + max_results="10", min_viewability="none", **kwargs): + """ + Query the Public Search Feed by keyword. Non-keyword strings can be + set in q. This is quite fragile. Is there a function somewhere in + the Google library that will parse a query the same way that Google + does? + + Legal Identifiers are listed below and correspond to their meaning + at http://books.google.com/advanced_book_search: + all_words + exact_phrase + at_least_one + without_words + title + author + publisher + subject + isbn + lccn + oclc + seemingly unsupported: + publication_date: a sequence of two, two tuples: + ((min_month,min_year),(max_month,max_year)) + where month is one/two digit month, year is 4 digit, eg: + (('1','2000'),('10','2003')). 
Lower bound is inclusive, + upper bound is exclusive + """ + + for k, v in kwargs.items(): + if not v: + continue + k = k.lower() + if k == 'all_words': + q = "%s %s" % (q, v) + elif k == 'exact_phrase': + q = '%s "%s"' % (q, v.strip('"')) + elif k == 'at_least_one': + q = '%s %s' % (q, ' '.join(['OR "%s"' % x for x in split(v)])) + elif k == 'without_words': + q = '%s %s' % (q, ' '.join(['-"%s"' % x for x in split(v)])) + elif k in ('author','title', 'publisher'): + q = '%s %s' % (q, ' '.join(['in%s:"%s"'%(k,x) for x in split(v)])) + elif k == 'subject': + q = '%s %s' % (q, ' '.join(['%s:"%s"' % (k,x) for x in split(v)])) + elif k == 'isbn': + q = '%s ISBN%s' % (q, v) + elif k == 'issn': + q = '%s ISSN%s' % (q,v) + elif k == 'oclc': + q = '%s OCLC%s' % (q,v) + else: + raise ValueError("Unsupported search keyword") + return self.search(q.strip(),start_index=start_index, feed=feed, + max_results=max_results, + min_viewability=min_viewability) + + def search_library(self, q, id='me', **kwargs): + """Like search, but in a library feed. Default is the authenticated + user's feed. Change by setting id.""" + + if 'feed' in kwargs: + raise ValueError("kwarg 'feed' conflicts with library_id") + feed = LIBRARY_FEED % id + return self.search(q, feed=feed, **kwargs) + + def search_library_by_keyword(self, id='me', **kwargs): + """Hybrid of search_by_keyword and search_library + """ + + if 'feed' in kwargs: + raise ValueError("kwarg 'feed' conflicts with library_id") + feed = LIBRARY_FEED % id + return self.search_by_keyword(feed=feed,**kwargs) + + def search_annotations(self, q, id='me', **kwargs): + """Like search, but in an annotation feed. Default is the authenticated + user's feed. Change by setting id.""" + + if 'feed' in kwargs: + raise ValueError("kwarg 'feed' conflicts with library_id") + feed = ANNOTATION_FEED % id + return self.search(q, feed=feed, **kwargs) + + def search_annotations_by_keyword(self, id='me', **kwargs): + """Hybrid of search_by_keyword and search_annotations + """ + + if 'feed' in kwargs: + raise ValueError("kwarg 'feed' conflicts with library_id") + feed = ANNOTATION_FEED % id + return self.search_by_keyword(feed=feed,**kwargs) + + def add_item_to_library(self, item): + """Add the item, either an XML string or books.Book instance, to the + user's library feed""" + + feed = LIBRARY_FEED % 'me' + return self.Post(data=item, uri=feed, converter=books.Book.FromString) + + def remove_item_from_library(self, item): + """ + Remove the item, a books.Book instance, from the authenticated user's + library feed. Using an item retrieved from a public search will fail. + """ + + return self.Delete(item.GetEditLink().href) + + def add_annotation(self, item): + """ + Add the item, either an XML string or books.Book instance, to the + user's annotation feed. + """ + # do not use GetAnnotationLink, results in 400 Bad URI due to www + return self.Post(data=item, uri=ANNOTATION_FEED % 'me', + converter=books.Book.FromString) + + def edit_annotation(self, item): + """ + Send an edited item, a books.Book instance, to the user's annotation + feed. Note that whereas extra annotations in add_annotations, minus + ratings which are immutable once set, are simply added to the item in + the annotation feed, if an annotation has been removed from the item, + sending an edit request will remove that annotation. This should not + happen with add_annotation. 
+ """ + + return self.Put(data=item, uri=item.GetEditLink().href, + converter=books.Book.FromString) + + def get_by_google_id(self, id): + return self.Get(ITEM_FEED + id, converter=books.Book.FromString) + + def get_library(self, id='me',feed=LIBRARY_FEED, start_index="1", + max_results="100", min_viewability="none", + converter=books.BookFeed.FromString): + """ + Return a generator object that will return gbook.Book instances until + the search feed no longer returns an item from the GetNextLink method. + Thus max_results is not the maximum number of items that will be + returned, but rather the number of items per page of searches. This has + been set high to reduce the required number of network requests. + """ + + q = gdata.service.Query() + q.feed = feed % id + q['start-index'] = start_index + q['max-results'] = max_results + q['min-viewability'] = min_viewability + x = self.Get(uri=q.ToUri(), converter=converter) + while 1: + for entry in x.entry: + yield entry + else: + l = x.GetNextLink() + if l: # hope the server preserves our preferences + x = self.Get(uri=l.href, converter=converter) + else: + break + + def get_annotations(self, id='me', start_index="1", max_results="100", + min_viewability="none", converter=books.BookFeed.FromString): + """ + Like get_library, but for the annotation feed + """ + + return self.get_library(id=id, feed=ANNOTATION_FEED, + max_results=max_results, min_viewability = min_viewability, + converter=converter) diff --git a/gam/gdata/analytics/calendar/__init__.py b/gam/gdata/analytics/calendar/__init__.py new file mode 100755 index 00000000000..06c041075a9 --- /dev/null +++ b/gam/gdata/analytics/calendar/__init__.py @@ -0,0 +1,1044 @@ +#!/usr/bin/python +# +# Copyright (C) 2006 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +"""Contains extensions to ElementWrapper objects used with Google Calendar.""" + + +__author__ = 'api.vli (Vivian Li), api.rboyd (Ryan Boyd)' + + +try: + from xml.etree import cElementTree as ElementTree +except ImportError: + try: + import cElementTree as ElementTree + except ImportError: + try: + from xml.etree import ElementTree + except ImportError: + from elementtree import ElementTree +import atom +import gdata + + +# XML namespaces which are often used in Google Calendar entities. +GCAL_NAMESPACE = 'http://schemas.google.com/gCal/2005' +GCAL_TEMPLATE = '{http://schemas.google.com/gCal/2005}%s' +WEB_CONTENT_LINK_REL = '%s/%s' % (GCAL_NAMESPACE, 'webContent') +GACL_NAMESPACE = gdata.GACL_NAMESPACE +GACL_TEMPLATE = gdata.GACL_TEMPLATE + + + +class ValueAttributeContainer(atom.AtomBase): + """A parent class for all Calendar classes which have a value attribute. 
+ + Children include Color, AccessLevel, Hidden + """ + + _children = atom.AtomBase._children.copy() + _attributes = atom.AtomBase._attributes.copy() + _attributes['value'] = 'value' + + def __init__(self, value=None, extension_elements=None, + extension_attributes=None, text=None): + self.value = value + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + +class Color(ValueAttributeContainer): + """The Google Calendar color element""" + + _tag = 'color' + _namespace = GCAL_NAMESPACE + _children = ValueAttributeContainer._children.copy() + _attributes = ValueAttributeContainer._attributes.copy() + + + +class AccessLevel(ValueAttributeContainer): + """The Google Calendar accesslevel element""" + + _tag = 'accesslevel' + _namespace = GCAL_NAMESPACE + _children = ValueAttributeContainer._children.copy() + _attributes = ValueAttributeContainer._attributes.copy() + + +class Hidden(ValueAttributeContainer): + """The Google Calendar hidden element""" + + _tag = 'hidden' + _namespace = GCAL_NAMESPACE + _children = ValueAttributeContainer._children.copy() + _attributes = ValueAttributeContainer._attributes.copy() + + +class Selected(ValueAttributeContainer): + """The Google Calendar selected element""" + + _tag = 'selected' + _namespace = GCAL_NAMESPACE + _children = ValueAttributeContainer._children.copy() + _attributes = ValueAttributeContainer._attributes.copy() + + +class Timezone(ValueAttributeContainer): + """The Google Calendar timezone element""" + + _tag = 'timezone' + _namespace = GCAL_NAMESPACE + _children = ValueAttributeContainer._children.copy() + _attributes = ValueAttributeContainer._attributes.copy() + + +class Where(atom.AtomBase): + """The Google Calendar Where element""" + + _tag = 'where' + _namespace = gdata.GDATA_NAMESPACE + _children = atom.AtomBase._children.copy() + _attributes = atom.AtomBase._attributes.copy() + _attributes['valueString'] = 'value_string' + + def __init__(self, value_string=None, extension_elements=None, + extension_attributes=None, text=None): + self.value_string = value_string + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + +class CalendarListEntry(gdata.GDataEntry, gdata.LinkFinder): + """A Google Calendar meta Entry flavor of an Atom Entry """ + + _tag = gdata.GDataEntry._tag + _namespace = gdata.GDataEntry._namespace + _children = gdata.GDataEntry._children.copy() + _attributes = gdata.GDataEntry._attributes.copy() + _children['{%s}color' % GCAL_NAMESPACE] = ('color', Color) + _children['{%s}accesslevel' % GCAL_NAMESPACE] = ('access_level', + AccessLevel) + _children['{%s}hidden' % GCAL_NAMESPACE] = ('hidden', Hidden) + _children['{%s}selected' % GCAL_NAMESPACE] = ('selected', Selected) + _children['{%s}timezone' % GCAL_NAMESPACE] = ('timezone', Timezone) + _children['{%s}where' % gdata.GDATA_NAMESPACE] = ('where', Where) + + def __init__(self, author=None, category=None, content=None, + atom_id=None, link=None, published=None, + title=None, updated=None, + color=None, access_level=None, hidden=None, timezone=None, + selected=None, + where=None, + extension_elements=None, extension_attributes=None, text=None): + gdata.GDataEntry.__init__(self, author=author, category=category, + content=content, atom_id=atom_id, link=link, + published=published, title=title, + updated=updated, text=None) + + self.color = color + self.access_level = access_level + self.hidden = hidden + self.selected = 
selected + self.timezone = timezone + self.where = where + + +class CalendarListFeed(gdata.GDataFeed, gdata.LinkFinder): + """A Google Calendar meta feed flavor of an Atom Feed""" + + _tag = gdata.GDataFeed._tag + _namespace = gdata.GDataFeed._namespace + _children = gdata.GDataFeed._children.copy() + _attributes = gdata.GDataFeed._attributes.copy() + _children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry', [CalendarListEntry]) + + +class Scope(atom.AtomBase): + """The Google ACL scope element""" + + _tag = 'scope' + _namespace = GACL_NAMESPACE + _children = atom.AtomBase._children.copy() + _attributes = atom.AtomBase._attributes.copy() + _attributes['value'] = 'value' + _attributes['type'] = 'type' + + def __init__(self, extension_elements=None, value=None, scope_type=None, + extension_attributes=None, text=None): + self.value = value + self.type = scope_type + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + +class Role(ValueAttributeContainer): + """The Google Calendar timezone element""" + + _tag = 'role' + _namespace = GACL_NAMESPACE + _children = ValueAttributeContainer._children.copy() + _attributes = ValueAttributeContainer._attributes.copy() + + +class CalendarAclEntry(gdata.GDataEntry, gdata.LinkFinder): + """A Google Calendar ACL Entry flavor of an Atom Entry """ + + _tag = gdata.GDataEntry._tag + _namespace = gdata.GDataEntry._namespace + _children = gdata.GDataEntry._children.copy() + _attributes = gdata.GDataEntry._attributes.copy() + _children['{%s}scope' % GACL_NAMESPACE] = ('scope', Scope) + _children['{%s}role' % GACL_NAMESPACE] = ('role', Role) + + def __init__(self, author=None, category=None, content=None, + atom_id=None, link=None, published=None, + title=None, updated=None, + scope=None, role=None, + extension_elements=None, extension_attributes=None, text=None): + gdata.GDataEntry.__init__(self, author=author, category=category, + content=content, atom_id=atom_id, link=link, + published=published, title=title, + updated=updated, text=None) + self.scope = scope + self.role = role + + +class CalendarAclFeed(gdata.GDataFeed, gdata.LinkFinder): + """A Google Calendar ACL feed flavor of an Atom Feed""" + + _tag = gdata.GDataFeed._tag + _namespace = gdata.GDataFeed._namespace + _children = gdata.GDataFeed._children.copy() + _attributes = gdata.GDataFeed._attributes.copy() + _children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry', [CalendarAclEntry]) + + +class CalendarEventCommentEntry(gdata.GDataEntry, gdata.LinkFinder): + """A Google Calendar event comments entry flavor of an Atom Entry""" + + _tag = gdata.GDataEntry._tag + _namespace = gdata.GDataEntry._namespace + _children = gdata.GDataEntry._children.copy() + _attributes = gdata.GDataEntry._attributes.copy() + + +class CalendarEventCommentFeed(gdata.GDataFeed, gdata.LinkFinder): + """A Google Calendar event comments feed flavor of an Atom Feed""" + + _tag = gdata.GDataFeed._tag + _namespace = gdata.GDataFeed._namespace + _children = gdata.GDataFeed._children.copy() + _attributes = gdata.GDataFeed._attributes.copy() + _children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry', + [CalendarEventCommentEntry]) + + +class ExtendedProperty(gdata.ExtendedProperty): + """A transparent subclass of gdata.ExtendedProperty added to this module + for backwards compatibility.""" + + +class Reminder(atom.AtomBase): + """The Google Calendar reminder element""" + + _tag = 'reminder' + _namespace = gdata.GDATA_NAMESPACE + _children = 
atom.AtomBase._children.copy() + _attributes = atom.AtomBase._attributes.copy() + _attributes['absoluteTime'] = 'absolute_time' + _attributes['days'] = 'days' + _attributes['hours'] = 'hours' + _attributes['minutes'] = 'minutes' + _attributes['method'] = 'method' + + def __init__(self, absolute_time=None, + days=None, hours=None, minutes=None, method=None, + extension_elements=None, + extension_attributes=None, text=None): + self.absolute_time = absolute_time + if days is not None: + self.days = str(days) + else: + self.days = None + if hours is not None: + self.hours = str(hours) + else: + self.hours = None + if minutes is not None: + self.minutes = str(minutes) + else: + self.minutes = None + self.method = method + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + +class When(atom.AtomBase): + """The Google Calendar When element""" + + _tag = 'when' + _namespace = gdata.GDATA_NAMESPACE + _children = atom.AtomBase._children.copy() + _attributes = atom.AtomBase._attributes.copy() + _children['{%s}reminder' % gdata.GDATA_NAMESPACE] = ('reminder', [Reminder]) + _attributes['startTime'] = 'start_time' + _attributes['endTime'] = 'end_time' + + def __init__(self, start_time=None, end_time=None, reminder=None, + extension_elements=None, extension_attributes=None, text=None): + self.start_time = start_time + self.end_time = end_time + self.reminder = reminder or [] + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + +class Recurrence(atom.AtomBase): + """The Google Calendar Recurrence element""" + + _tag = 'recurrence' + _namespace = gdata.GDATA_NAMESPACE + _children = atom.AtomBase._children.copy() + _attributes = atom.AtomBase._attributes.copy() + + +class UriEnumElement(atom.AtomBase): + + _children = atom.AtomBase._children.copy() + _attributes = atom.AtomBase._attributes.copy() + + def __init__(self, tag, enum_map, attrib_name='value', + extension_elements=None, extension_attributes=None, text=None): + self.tag=tag + self.enum_map=enum_map + self.attrib_name=attrib_name + self.value=None + self.text=text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + def findKey(self, value): + res=[item[0] for item in self.enum_map.items() if item[1] == value] + if res is None or len(res) == 0: + return None + return res[0] + + def _ConvertElementAttributeToMember(self, attribute, value): + # Special logic to use the enum_map to set the value of the object's value member. + if attribute == self.attrib_name and value != '': + self.value = self.enum_map[value] + return + # Find the attribute in this class's list of attributes. + if self.__class__._attributes.has_key(attribute): + # Find the member of this class which corresponds to the XML attribute + # (lookup in current_class._attributes) and set this member to the + # desired value (using self.__dict__). + setattr(self, self.__class__._attributes[attribute], value) + else: + # The current class doesn't map this attribute, so try to parent class. + atom.ExtensionContainer._ConvertElementAttributeToMember(self, + attribute, + value) + + def _AddMembersToElementTree(self, tree): + # Convert the members of this class which are XML child nodes. + # This uses the class's _children dictionary to find the members which + # should become XML child nodes. 
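    # Illustration: for AttendeeStatus (defined below), enum_map turns
    # 'http://schemas.google.com/g/2005#event.accepted' into 'ACCEPTED' when the
    # XML is parsed, and findKey() reverses that mapping here so the friendly value
    # is written back out as the original URI in the element's 'value' attribute.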
+ member_node_names = [values[0] for tag, values in + self.__class__._children.iteritems()] + for member_name in member_node_names: + member = getattr(self, member_name) + if member is None: + pass + elif isinstance(member, list): + for instance in member: + instance._BecomeChildElement(tree) + else: + member._BecomeChildElement(tree) + # Special logic to set the desired XML attribute. + key = self.findKey(self.value) + if key is not None: + tree.attrib[self.attrib_name]=key + # Convert the members of this class which are XML attributes. + for xml_attribute, member_name in self.__class__._attributes.iteritems(): + member = getattr(self, member_name) + if member is not None: + tree.attrib[xml_attribute] = member + # Lastly, call the parent's _AddMembersToElementTree to get any + # extension elements. + atom.ExtensionContainer._AddMembersToElementTree(self, tree) + + + +class AttendeeStatus(UriEnumElement): + """The Google Calendar attendeeStatus element""" + + _tag = 'attendeeStatus' + _namespace = gdata.GDATA_NAMESPACE + _children = UriEnumElement._children.copy() + _attributes = UriEnumElement._attributes.copy() + + attendee_enum = { + 'http://schemas.google.com/g/2005#event.accepted' : 'ACCEPTED', + 'http://schemas.google.com/g/2005#event.declined' : 'DECLINED', + 'http://schemas.google.com/g/2005#event.invited' : 'INVITED', + 'http://schemas.google.com/g/2005#event.tentative' : 'TENTATIVE'} + + def __init__(self, extension_elements=None, + extension_attributes=None, text=None): + UriEnumElement.__init__(self, 'attendeeStatus', AttendeeStatus.attendee_enum, + extension_elements=extension_elements, + extension_attributes=extension_attributes, + text=text) + + +class AttendeeType(UriEnumElement): + """The Google Calendar attendeeType element""" + + _tag = 'attendeeType' + _namespace = gdata.GDATA_NAMESPACE + _children = UriEnumElement._children.copy() + _attributes = UriEnumElement._attributes.copy() + + attendee_type_enum = { + 'http://schemas.google.com/g/2005#event.optional' : 'OPTIONAL', + 'http://schemas.google.com/g/2005#event.required' : 'REQUIRED' } + + def __init__(self, extension_elements=None, + extension_attributes=None, text=None): + UriEnumElement.__init__(self, 'attendeeType', + AttendeeType.attendee_type_enum, + extension_elements=extension_elements, + extension_attributes=extension_attributes,text=text) + + +class Visibility(UriEnumElement): + """The Google Calendar Visibility element""" + + _tag = 'visibility' + _namespace = gdata.GDATA_NAMESPACE + _children = UriEnumElement._children.copy() + _attributes = UriEnumElement._attributes.copy() + + visibility_enum = { + 'http://schemas.google.com/g/2005#event.confidential' : 'CONFIDENTIAL', + 'http://schemas.google.com/g/2005#event.default' : 'DEFAULT', + 'http://schemas.google.com/g/2005#event.private' : 'PRIVATE', + 'http://schemas.google.com/g/2005#event.public' : 'PUBLIC' } + + def __init__(self, extension_elements=None, + extension_attributes=None, text=None): + UriEnumElement.__init__(self, 'visibility', Visibility.visibility_enum, + extension_elements=extension_elements, + extension_attributes=extension_attributes, + text=text) + + +class Transparency(UriEnumElement): + """The Google Calendar Transparency element""" + + _tag = 'transparency' + _namespace = gdata.GDATA_NAMESPACE + _children = UriEnumElement._children.copy() + _attributes = UriEnumElement._attributes.copy() + + transparency_enum = { + 'http://schemas.google.com/g/2005#event.opaque' : 'OPAQUE', + 'http://schemas.google.com/g/2005#event.transparent' : 
'TRANSPARENT' } + + def __init__(self, extension_elements=None, + extension_attributes=None, text=None): + UriEnumElement.__init__(self, tag='transparency', + enum_map=Transparency.transparency_enum, + extension_elements=extension_elements, + extension_attributes=extension_attributes, + text=text) + + +class Comments(atom.AtomBase): + """The Google Calendar comments element""" + + _tag = 'comments' + _namespace = gdata.GDATA_NAMESPACE + _children = atom.AtomBase._children.copy() + _attributes = atom.AtomBase._attributes.copy() + _children['{%s}feedLink' % gdata.GDATA_NAMESPACE] = ('feed_link', + gdata.FeedLink) + _attributes['rel'] = 'rel' + + def __init__(self, rel=None, feed_link=None, extension_elements=None, + extension_attributes=None, text=None): + self.rel = rel + self.feed_link = feed_link + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + +class EventStatus(UriEnumElement): + """The Google Calendar eventStatus element""" + + _tag = 'eventStatus' + _namespace = gdata.GDATA_NAMESPACE + _children = UriEnumElement._children.copy() + _attributes = UriEnumElement._attributes.copy() + + status_enum = { 'http://schemas.google.com/g/2005#event.canceled' : 'CANCELED', + 'http://schemas.google.com/g/2005#event.confirmed' : 'CONFIRMED', + 'http://schemas.google.com/g/2005#event.tentative' : 'TENTATIVE'} + + def __init__(self, extension_elements=None, + extension_attributes=None, text=None): + UriEnumElement.__init__(self, tag='eventStatus', + enum_map=EventStatus.status_enum, + extension_elements=extension_elements, + extension_attributes=extension_attributes, + text=text) + + +class Who(UriEnumElement): + """The Google Calendar Who element""" + + _tag = 'who' + _namespace = gdata.GDATA_NAMESPACE + _children = UriEnumElement._children.copy() + _attributes = UriEnumElement._attributes.copy() + _children['{%s}attendeeStatus' % gdata.GDATA_NAMESPACE] = ( + 'attendee_status', AttendeeStatus) + _children['{%s}attendeeType' % gdata.GDATA_NAMESPACE] = ('attendee_type', + AttendeeType) + _attributes['valueString'] = 'name' + _attributes['email'] = 'email' + + relEnum = { 'http://schemas.google.com/g/2005#event.attendee' : 'ATTENDEE', + 'http://schemas.google.com/g/2005#event.organizer' : 'ORGANIZER', + 'http://schemas.google.com/g/2005#event.performer' : 'PERFORMER', + 'http://schemas.google.com/g/2005#event.speaker' : 'SPEAKER', + 'http://schemas.google.com/g/2005#message.bcc' : 'BCC', + 'http://schemas.google.com/g/2005#message.cc' : 'CC', + 'http://schemas.google.com/g/2005#message.from' : 'FROM', + 'http://schemas.google.com/g/2005#message.reply-to' : 'REPLY_TO', + 'http://schemas.google.com/g/2005#message.to' : 'TO' } + + def __init__(self, name=None, email=None, attendee_status=None, + attendee_type=None, rel=None, extension_elements=None, + extension_attributes=None, text=None): + UriEnumElement.__init__(self, 'who', Who.relEnum, attrib_name='rel', + extension_elements=extension_elements, + extension_attributes=extension_attributes, + text=text) + self.name = name + self.email = email + self.attendee_status = attendee_status + self.attendee_type = attendee_type + self.rel = rel + + +class OriginalEvent(atom.AtomBase): + """The Google Calendar OriginalEvent element""" + + _tag = 'originalEvent' + _namespace = gdata.GDATA_NAMESPACE + _children = atom.AtomBase._children.copy() + _attributes = atom.AtomBase._attributes.copy() + # TODO: The when tag used to map to a EntryLink, make sure it should really be a When. 
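  # Illustration (hypothetical values): a recurrence exception refers back to the
  # event it overrides, roughly
  #   OriginalEvent(id='recurringEventId', href='link to the original event entry',
  #                 when=When(start_time='2007-05-01T10:00:00.000Z'))
  # where 'when' carries the start time of the overridden instance.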
+ _children['{%s}when' % gdata.GDATA_NAMESPACE] = ('when', When) + _attributes['id'] = 'id' + _attributes['href'] = 'href' + + def __init__(self, id=None, href=None, when=None, + extension_elements=None, extension_attributes=None, text=None): + self.id = id + self.href = href + self.when = when + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + +def GetCalendarEventEntryClass(): + return CalendarEventEntry + + +# This class is not completely defined here, because of a circular reference +# in which CalendarEventEntryLink and CalendarEventEntry refer to one another. +class CalendarEventEntryLink(gdata.EntryLink): + """An entryLink which contains a calendar event entry + + Within an event's recurranceExceptions, an entry link + points to a calendar event entry. This class exists + to capture the calendar specific extensions in the entry. + """ + + _tag = 'entryLink' + _namespace = gdata.GDATA_NAMESPACE + _children = gdata.EntryLink._children.copy() + _attributes = gdata.EntryLink._attributes.copy() + # The CalendarEventEntryLink should like CalendarEventEntry as a child but + # that class hasn't been defined yet, so we will wait until after defining + # CalendarEventEntry to list it in _children. + + +class RecurrenceException(atom.AtomBase): + """The Google Calendar RecurrenceException element""" + + _tag = 'recurrenceException' + _namespace = gdata.GDATA_NAMESPACE + _children = atom.AtomBase._children.copy() + _attributes = atom.AtomBase._attributes.copy() + _children['{%s}entryLink' % gdata.GDATA_NAMESPACE] = ('entry_link', + CalendarEventEntryLink) + _children['{%s}originalEvent' % gdata.GDATA_NAMESPACE] = ('original_event', + OriginalEvent) + _attributes['specialized'] = 'specialized' + + def __init__(self, specialized=None, entry_link=None, + original_event=None, extension_elements=None, + extension_attributes=None, text=None): + self.specialized = specialized + self.entry_link = entry_link + self.original_event = original_event + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + +class SendEventNotifications(atom.AtomBase): + """The Google Calendar sendEventNotifications element""" + + _tag = 'sendEventNotifications' + _namespace = GCAL_NAMESPACE + _children = atom.AtomBase._children.copy() + _attributes = atom.AtomBase._attributes.copy() + _attributes['value'] = 'value' + + def __init__(self, extension_elements=None, + value=None, extension_attributes=None, text=None): + self.value = value + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + +class QuickAdd(atom.AtomBase): + """The Google Calendar quickadd element""" + + _tag = 'quickadd' + _namespace = GCAL_NAMESPACE + _children = atom.AtomBase._children.copy() + _attributes = atom.AtomBase._attributes.copy() + _attributes['value'] = 'value' + + def __init__(self, extension_elements=None, + value=None, extension_attributes=None, text=None): + self.value = value + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + def _TransferToElementTree(self, element_tree): + if self.value: + element_tree.attrib['value'] = self.value + element_tree.tag = GCAL_TEMPLATE % 'quickadd' + atom.AtomBase._TransferToElementTree(self, element_tree) + return element_tree + + def _TakeAttributeFromElementTree(self, attribute, element_tree): 
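    # Pull the 'value' attribute off the parsed element into self.value and remove
    # it from the tree; any other attribute is delegated to atom.AtomBase so the
    # normal attribute handling still applies.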
+ if attribute == 'value': + self.value = element_tree.attrib[attribute] + del element_tree.attrib[attribute] + else: + atom.AtomBase._TakeAttributeFromElementTree(self, attribute, + element_tree) + + +class SyncEvent(atom.AtomBase): + _tag = 'syncEvent' + _namespace = GCAL_NAMESPACE + _children = atom.AtomBase._children.copy() + _attributes = atom.AtomBase._attributes.copy() + _attributes['value'] = 'value' + + def __init__(self, value='false', extension_elements=None, + extension_attributes=None, text=None): + self.value = value + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + +class UID(atom.AtomBase): + _tag = 'uid' + _namespace = GCAL_NAMESPACE + _children = atom.AtomBase._children.copy() + _attributes = atom.AtomBase._attributes.copy() + _attributes['value'] = 'value' + + def __init__(self, value=None, extension_elements=None, + extension_attributes=None, text=None): + self.value = value + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + +class Sequence(atom.AtomBase): + _tag = 'sequence' + _namespace = GCAL_NAMESPACE + _children = atom.AtomBase._children.copy() + _attributes = atom.AtomBase._attributes.copy() + _attributes['value'] = 'value' + + def __init__(self, value=None, extension_elements=None, + extension_attributes=None, text=None): + self.value = value + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + +class WebContentGadgetPref(atom.AtomBase): + + _tag = 'webContentGadgetPref' + _namespace = GCAL_NAMESPACE + _children = atom.AtomBase._children.copy() + _attributes = atom.AtomBase._attributes.copy() + _attributes['name'] = 'name' + _attributes['value'] = 'value' + + """The Google Calendar Web Content Gadget Preferences element""" + + def __init__(self, name=None, value=None, extension_elements=None, + extension_attributes=None, text=None): + self.name = name + self.value = value + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + +class WebContent(atom.AtomBase): + + _tag = 'webContent' + _namespace = GCAL_NAMESPACE + _children = atom.AtomBase._children.copy() + _attributes = atom.AtomBase._attributes.copy() + _children['{%s}webContentGadgetPref' % GCAL_NAMESPACE] = ('gadget_pref', + [WebContentGadgetPref]) + _attributes['url'] = 'url' + _attributes['width'] = 'width' + _attributes['height'] = 'height' + + def __init__(self, url=None, width=None, height=None, text=None, + gadget_pref=None, extension_elements=None, extension_attributes=None): + self.url = url + self.width = width + self.height = height + self.text = text + self.gadget_pref = gadget_pref or [] + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + +class WebContentLink(atom.Link): + + _tag = 'link' + _namespace = atom.ATOM_NAMESPACE + _children = atom.Link._children.copy() + _attributes = atom.Link._attributes.copy() + _children['{%s}webContent' % GCAL_NAMESPACE] = ('web_content', WebContent) + + def __init__(self, title=None, href=None, link_type=None, + web_content=None): + atom.Link.__init__(self, rel=WEB_CONTENT_LINK_REL, title=title, href=href, + link_type=link_type) + self.web_content = web_content + + +class GuestsCanInviteOthers(atom.AtomBase): + """Indicates whether event attendees may invite others to the event. 
+ + This element may only be changed by the organizer of the event. If not + included as part of the event entry, this element will default to true + during a POST request, and will inherit its previous value during a PUT + request. + """ + _tag = 'guestsCanInviteOthers' + _namespace = GCAL_NAMESPACE + _attributes = atom.AtomBase._attributes.copy() + _attributes['value'] = 'value' + + def __init__(self, value='true', *args, **kwargs): + atom.AtomBase.__init__(self, *args, **kwargs) + self.value = value + + +class GuestsCanSeeGuests(atom.AtomBase): + """Indicates whether attendees can see other people invited to the event. + + The organizer always sees all attendees. Guests always see themselves. This + property affects what attendees see in the event's guest list via both the + Calendar UI and API feeds. + + This element may only be changed by the organizer of the event. + + If not included as part of the event entry, this element will default to + true during a POST request, and will inherit its previous value during a + PUT request. + """ + _tag = 'guestsCanSeeGuests' + _namespace = GCAL_NAMESPACE + _attributes = atom.AtomBase._attributes.copy() + _attributes['value'] = 'value' + + def __init__(self, value='true', *args, **kwargs): + atom.AtomBase.__init__(self, *args, **kwargs) + self.value = value + + +class GuestsCanModify(atom.AtomBase): + """Indicates whether event attendees may modify the original event. + + If yes, changes are visible to organizer and other attendees. Otherwise, + any changes made by attendees will be restricted to that attendee's + calendar. + + This element may only be changed by the organizer of the event, and may + be set to 'true' only if both gCal:guestsCanInviteOthers and + gCal:guestsCanSeeGuests are set to true in the same PUT/POST request. + Otherwise, request fails with HTTP error code 400 (Bad Request). + + If not included as part of the event entry, this element will default to + false during a POST request, and will inherit its previous value during a + PUT request.""" + _tag = 'guestsCanModify' + _namespace = GCAL_NAMESPACE + _attributes = atom.AtomBase._attributes.copy() + _attributes['value'] = 'value' + + def __init__(self, value='false', *args, **kwargs): + atom.AtomBase.__init__(self, *args, **kwargs) + self.value = value + + + +class CalendarEventEntry(gdata.BatchEntry): + """A Google Calendar flavor of an Atom Entry """ + + _tag = gdata.BatchEntry._tag + _namespace = gdata.BatchEntry._namespace + _children = gdata.BatchEntry._children.copy() + _attributes = gdata.BatchEntry._attributes.copy() + # This class also contains WebContentLinks but converting those members + # is handled in a special version of _ConvertElementTreeToMember. 
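+ # Each _children entry maps a fully qualified tag ('{namespace}localName') to
+ # a (member_name, member_class) pair. Wrapping the class in a list, e.g.
+ # [Where], marks the element as repeatable and the member is accumulated as a
+ # Python list; a bare class means the element appears at most once.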
+ _children['{%s}where' % gdata.GDATA_NAMESPACE] = ('where', [Where]) + _children['{%s}when' % gdata.GDATA_NAMESPACE] = ('when', [When]) + _children['{%s}who' % gdata.GDATA_NAMESPACE] = ('who', [Who]) + _children['{%s}extendedProperty' % gdata.GDATA_NAMESPACE] = ( + 'extended_property', [ExtendedProperty]) + _children['{%s}visibility' % gdata.GDATA_NAMESPACE] = ('visibility', + Visibility) + _children['{%s}transparency' % gdata.GDATA_NAMESPACE] = ('transparency', + Transparency) + _children['{%s}eventStatus' % gdata.GDATA_NAMESPACE] = ('event_status', + EventStatus) + _children['{%s}recurrence' % gdata.GDATA_NAMESPACE] = ('recurrence', + Recurrence) + _children['{%s}recurrenceException' % gdata.GDATA_NAMESPACE] = ( + 'recurrence_exception', [RecurrenceException]) + _children['{%s}sendEventNotifications' % GCAL_NAMESPACE] = ( + 'send_event_notifications', SendEventNotifications) + _children['{%s}quickadd' % GCAL_NAMESPACE] = ('quick_add', QuickAdd) + _children['{%s}comments' % gdata.GDATA_NAMESPACE] = ('comments', Comments) + _children['{%s}originalEvent' % gdata.GDATA_NAMESPACE] = ('original_event', + OriginalEvent) + _children['{%s}sequence' % GCAL_NAMESPACE] = ('sequence', Sequence) + _children['{%s}reminder' % gdata.GDATA_NAMESPACE] = ('reminder', [Reminder]) + _children['{%s}syncEvent' % GCAL_NAMESPACE] = ('sync_event', SyncEvent) + _children['{%s}uid' % GCAL_NAMESPACE] = ('uid', UID) + _children['{%s}guestsCanInviteOthers' % GCAL_NAMESPACE] = ( + 'guests_can_invite_others', GuestsCanInviteOthers) + _children['{%s}guestsCanModify' % GCAL_NAMESPACE] = ( + 'guests_can_modify', GuestsCanModify) + _children['{%s}guestsCanSeeGuests' % GCAL_NAMESPACE] = ( + 'guests_can_see_guests', GuestsCanSeeGuests) + + def __init__(self, author=None, category=None, content=None, + atom_id=None, link=None, published=None, + title=None, updated=None, + transparency=None, comments=None, event_status=None, + send_event_notifications=None, visibility=None, + recurrence=None, recurrence_exception=None, + where=None, when=None, who=None, quick_add=None, + extended_property=None, original_event=None, + batch_operation=None, batch_id=None, batch_status=None, + sequence=None, reminder=None, sync_event=None, uid=None, + guests_can_invite_others=None, guests_can_modify=None, + guests_can_see_guests=None, + extension_elements=None, extension_attributes=None, text=None): + + gdata.BatchEntry.__init__(self, author=author, category=category, + content=content, + atom_id=atom_id, link=link, published=published, + batch_operation=batch_operation, batch_id=batch_id, + batch_status=batch_status, + title=title, updated=updated) + + self.transparency = transparency + self.comments = comments + self.event_status = event_status + self.send_event_notifications = send_event_notifications + self.visibility = visibility + self.recurrence = recurrence + self.recurrence_exception = recurrence_exception or [] + self.where = where or [] + self.when = when or [] + self.who = who or [] + self.quick_add = quick_add + self.extended_property = extended_property or [] + self.original_event = original_event + self.sequence = sequence + self.reminder = reminder or [] + self.sync_event = sync_event + self.uid = uid + self.text = text + self.guests_can_invite_others = guests_can_invite_others + self.guests_can_modify = guests_can_modify + self.guests_can_see_guests = guests_can_see_guests + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + # We needed to add special logic to 
_ConvertElementTreeToMember because we + # want to make links with a rel of WEB_CONTENT_LINK_REL into a + # WebContentLink + def _ConvertElementTreeToMember(self, child_tree): + # Special logic to handle Web Content links + if (child_tree.tag == '{%s}link' % atom.ATOM_NAMESPACE and + child_tree.attrib['rel'] == WEB_CONTENT_LINK_REL): + if self.link is None: + self.link = [] + self.link.append(atom._CreateClassFromElementTree(WebContentLink, + child_tree)) + return + # Find the element's tag in this class's list of child members + if self.__class__._children.has_key(child_tree.tag): + member_name = self.__class__._children[child_tree.tag][0] + member_class = self.__class__._children[child_tree.tag][1] + # If the class member is supposed to contain a list, make sure the + # matching member is set to a list, then append the new member + # instance to the list. + if isinstance(member_class, list): + if getattr(self, member_name) is None: + setattr(self, member_name, []) + getattr(self, member_name).append(atom._CreateClassFromElementTree( + member_class[0], child_tree)) + else: + setattr(self, member_name, + atom._CreateClassFromElementTree(member_class, child_tree)) + else: + atom.ExtensionContainer._ConvertElementTreeToMember(self, child_tree) + + + def GetWebContentLink(self): + """Finds the first link with rel set to WEB_CONTENT_REL + + Returns: + A gdata.calendar.WebContentLink or none if none of the links had rel + equal to WEB_CONTENT_REL + """ + + for a_link in self.link: + if a_link.rel == WEB_CONTENT_LINK_REL: + return a_link + return None + + +def CalendarEventEntryFromString(xml_string): + return atom.CreateClassFromXMLString(CalendarEventEntry, xml_string) + + +def CalendarEventCommentEntryFromString(xml_string): + return atom.CreateClassFromXMLString(CalendarEventCommentEntry, xml_string) + + +CalendarEventEntryLink._children = {'{%s}entry' % atom.ATOM_NAMESPACE: + ('entry', CalendarEventEntry)} + + +def CalendarEventEntryLinkFromString(xml_string): + return atom.CreateClassFromXMLString(CalendarEventEntryLink, xml_string) + + +class CalendarEventFeed(gdata.BatchFeed, gdata.LinkFinder): + """A Google Calendar event feed flavor of an Atom Feed""" + + _tag = gdata.BatchFeed._tag + _namespace = gdata.BatchFeed._namespace + _children = gdata.BatchFeed._children.copy() + _attributes = gdata.BatchFeed._attributes.copy() + _children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry', + [CalendarEventEntry]) + _children['{%s}timezone' % GCAL_NAMESPACE] = ('timezone', Timezone) + + def __init__(self, author=None, category=None, contributor=None, + generator=None, icon=None, atom_id=None, link=None, logo=None, + rights=None, subtitle=None, title=None, updated=None, entry=None, + total_results=None, start_index=None, items_per_page=None, + interrupted=None, timezone=None, + extension_elements=None, extension_attributes=None, text=None): + gdata.BatchFeed.__init__(self, author=author, category=category, + contributor=contributor, generator=generator, + icon=icon, atom_id=atom_id, link=link, + logo=logo, rights=rights, subtitle=subtitle, + title=title, updated=updated, entry=entry, + total_results=total_results, + start_index=start_index, + items_per_page=items_per_page, + interrupted=interrupted, + extension_elements=extension_elements, + extension_attributes=extension_attributes, + text=text) + self.timezone = timezone + + +def CalendarListEntryFromString(xml_string): + return atom.CreateClassFromXMLString(CalendarListEntry, xml_string) + + +def CalendarAclEntryFromString(xml_string): + return 
atom.CreateClassFromXMLString(CalendarAclEntry, xml_string) + + +def CalendarListFeedFromString(xml_string): + return atom.CreateClassFromXMLString(CalendarListFeed, xml_string) + + +def CalendarAclFeedFromString(xml_string): + return atom.CreateClassFromXMLString(CalendarAclFeed, xml_string) + + +def CalendarEventFeedFromString(xml_string): + return atom.CreateClassFromXMLString(CalendarEventFeed, xml_string) + + +def CalendarEventCommentFeedFromString(xml_string): + return atom.CreateClassFromXMLString(CalendarEventCommentFeed, xml_string) diff --git a/gam/gdata/analytics/calendar/__init__.pyc b/gam/gdata/analytics/calendar/__init__.pyc new file mode 100755 index 0000000000000000000000000000000000000000..3796f16ce02e6b25f52fa8ed1cefdd8ffd1fe6f4 GIT binary patch literal 37831 zcmdsgdvsjKecmh<1PSo{0tHeMS0X8a5(!eWMB6g;06~y~M1s&IC`u^h)pG9zu;gNQ zx%Yw;Y^v$0n3MQ%l*V?_rhoMG*e6Zg`qXvYrs-+wsOiIQ;#1dko3=?)#V7qEkG5%& zwrSJ4?)QDOckabvL4v@NQVrk^=FZHWJ9p;yp5M%5|9E}R6TOAeqG|lB!vE)Rg`Y&s zH)aB1#!O_)+&1HP7(Z*u9cCtLCOS>#d}YRT2CK~F9#j7@lgVV}HmOXfDXYNwD}TY| ze@gkbj#a9o)7Ek2)2_^qunb><0$pat7C`UNikp6E?AV~5MwZv1=H zR%D~;JNO-bpUb|-_&b%+m&)GdvUeK)-c+^sxr|-LzdvrpZkKwm@pB7a_JGU2&-f26 zc-bD8eZTSdE_iyM%ieALhZa2ju*=RFzhBu8sLS`ej0cSWNa`CNbr}yD|3E6^pv%}} z{DD-)A(ye&_=i&&kGYI}#(&%d`}F1yxYUP?|3sYnq)UC+_($T@54zNT<9{el{jf{j zZ~UiHFZ_thc*OWeR;8~&LMb&rA^d1QgoBWXRiz;>8_`WK{ z5IEEgT(u{RKdEvjjX$Nx^TwZ6WW@L-MP4xeB}HB|{$)i@k>s82FOw!ek6@@;i3+7k zm)91@Qqy1(>&{Pbt=OtcO` zvGKm~dJv4-fVbp`zwcsLq1D_r3!0pS0@@y~|MfK9f*(hlAQ&6AB+K!=I_!5Eu z!;2wB*kOXKxzuSctumLo$Oo7OKrn-%Hw!f2AUqdjM zs}<^KRIXSqgkf&7TF({AW!LjIIS6ytrc1@?+;rh;kSpY_vhle>-1>o@o?LEdx>WX2 zQ?67gmS=qoUbS4UAIJ?Bi$NHk3a$p_1G(o*z8_RjI%4Ojnj&Mo*rXv`;S&h1XUuhD z@`!XQg4+&L+hwMZg7gk^J!@)@n3BE4M;d?~2YghY=CQI!0U$5hf;$I+`M^S77;T%E z>^>*#XLIy)7?dYfm)C_PZ#4sE_T#ca_!weCPhGIkau5I=9-Jvu3R6M-!jxZ#3KxnE zAij{#mnx+wpC72rMQiX>eE(s@LnfGx?o4-PV`j9U0MjT_Da-`ipmtLcrgJ#pM0t^Myr*;WBKH>6kFc6d0EtS z2v+0<4*pk_j>FMaoXwU@~Mdav*Lnbi)5F$Ye%qhoZO#pZWfOnZofbKPd_tm=X zj&B9c9e6l=0Kxcl&`djWMZ#gOG3aW7Qq;#DJKAHPcxcWC>xJ(>&9m!5hrMK!%Z#&9(y$ne5?p&AMgW6xf)!b!7T1#^7L~lKa+FSM3w1}MR zC^o_-y{CP&6U{h_tGSOzc+LVlictW`pz+g2wA_m-TlEr1tKRAQi4BPp-_xGD7Y(_< z#xx-y8#rE?2|fmLc{h71YP@q^PsNGvX;0mc9s$u_&{JfQ=cj{u(oIYeoqrvXmUJEx z%|w?(RY;>C?>(aS-Eqp=IAtxB=>TCC(#}(0Bq9LyVr1Uz5l|j@QHvMZd){CCNnO zb|;Syjt?eW(GvFcd{!0AXNVpJuQo~+IaF7{6_Vvz*4Pc%q9I+71;-l0sZtmXSEBly zLsT|J5SRgROGBU&s1s2d78-z}6|~cWhfPVz9u%~DsAW;y7FhOD=EV zostr~U3`vol^K>RdiODn;uq6m4&d!hr^eLIqmbv`E4(doV0p1gL(Kkj2qI%HWy~e$ z2{H*noxt!wg}~rIbAXZxy#j-B9R#GyT<@Ye?mC8Wjkyj39V~J%vq2QU-YsndrF#^? 
z18Wt*GwT(>W9t;blN%J#!)Ha7MB-~JI5c#^I*Who7>}Zz zdSBhG-c@Jz3q?h2`dMT@gGU%V%HRNlgA4{39AeN^9|y`-%E3vRmwgb(!^2l&CCqLs?RL)DKROSC4py*&fND5~?<|9MqOik-fK)QA(Cb`E!A3x zJ8wbt1rM-_ZVv?@reJ~q9j{Pq1+iwS2Lud)4`ioxPVgZRrzGydp;Nh#t(zzFC?#bk znM3Q@YF>mWQPQUo0nr2vr6mTW6A@TqKn4*(ddAfD#wHo$cADA;Vv`KWYhse2kY&v% zA7o6oe31+)lGd!aqcpx<#7s@hM`&653@@T5KaV$v30Z@SHMz6`K++Ct3P2|&@N{>D z9x~}KvBe}ru{}US4~#0^8SxCS3S|n}`ne0rTg;CgNO92WXvE^`2(4Uq*bML#Uhzw~ zniaP+`5Y{k#UEX7QEU~e61UT+dROE^NFGc1TV-xwHgE$`oZ^B^qzj2JFFEUoqfeVO4cSxcVnIPdG^_D7tkbYRIu@^DR=J`#Q zRr3=)epEld!4j6G*6g0W6JpH2M03PE%&LYMBk@IINej+*3-N_y!4pyu?AtA+97Ubq zV0$SW&6iTPHDnXmbJ_G*E&*b_AH))J>DHKW6RE_bFyqkT<^qZL0hC=_4q?Yh4)LBq zjBu9lw~o zv=g5&amdIu&|G`2fGJC#v)0jcDa_d!ILu&jru1T=c==kP?uUf*S|KW3ER{>q+<-Ks zCSzKA!1XzN!S`^59SFKQWL5BjnG*UVnL8#gJjOzwLy+zVr+qBT*eY8Vv<+FaV1QkP zm~2@HkHUZgYZbzyG@rEmTVrm(0ZIm1B3nolPw6Q!w&m)L?{UKA8WfBZBg*N8i($2l zY4begP-}QMA66Gi;OTqwnCx;k4AWj8@D;h9iI8I+o?`bG8>FFqkj=jRYf=AL0M zGUjr(sXuHYnUCJG)6!W}k(uW@)F5k6vc*PKEDPI_G^EZ{F>7(M#b#BkGcL9&RZJ!+ zRtpOgOiGBr!qjDIKNYK4NL+1dKO3uA4Qmr2-_9C){0vD0Q5G?nvb|xZoM3R0!Sf7O zF-TM}_JNg@^mwZ$T;cZFbvB~e;k|gZ>FlNv1cF66j3>h-#x)5@VhRKxYon2UM;e?8 zH(7eNupB?JJvcasnmXd9TZOD)h0gTY-;S&-Hv&XVH&lvvLO&GVg_)m3qz#z4&NNIk zfCJ5z1F>NmwwRjWXKSnkKElu~r9$|aLz`ORBfr4mA~6Ma*NUglh^@8`J5^Css7Dqx z+36u~g_p!*P;Q&WuD}g5ij4CjR1yHgQwY*%NJIcm!RI6G0sAvpA2P@JuUu@+lcBiO1Hf;{+m2WpY&v=yW#uXpguR$(5p(W-v- zb`xW^viuThNqZ>B7+UcWb|zbwbf!#A=jx^5%IwUrby9MagQy{tyMT!8#-PJoik-pU zg0&D>Z&P4JTqXMcQGT6W@#^5yV+qqG*-tP|$6w2tH|=Cp z#`K{(FL#*wCQVLtn7)s8m@Avin@*}&W5RLv3}_#u3XB{}zoM?=L>8W~_V`iwd>VEw zxSV>2-!B2E6a_P3A`!r&(iUJZCk9}a(g4(hFah%ADXfyVCLxo)7|fkzPw`bIb0l+r zrZZE_BF@VZ`;X(sLKXUmpLa-|fsV;4u8xBly3aw`S;QRZ<5BcXyCymy$?^l zl{J}4uR)vOKnA-yi-48b44)9lK^u(_7^{g2Xaqj&Nb5+KmzI#v%g;;SH(dzxmjgSM z2X{MysGWbHv>_#e?yk%SFs*b{ zyw_3hs~is6Lrur7%%)6d$M(+qvYcfS=#m^c&?R?DgDxgh1--rILI(l30i)<;!t7S_ zW~X_hM2tWFTEV=zNpS42_7<O~gUjtbqMZ3D21S)wJ!UwdQEWm^WS&Y~ZnV zoEX-(d{E!gy?ApW`@hR~qcNgs*f|0Q+pr1cyXD{1n>mnK4?jr)^G3wBb!>*? z>AK9;j@?S#mK|;8{crcXwC!W;=Nq z$-WVs1yPv|izuWY4v-{(Dk4h}qyXWZQv~5lO(>QyF()^OBjEWE4UTaBVKx5A@8JkU z0IV`<{Pma{SQ;gTopMw72!d8(7l4ljnMQ-1=*7>QpC!;392y!P8y`L%9zy<2Tu=8X3L43jCl5zY0`9`F>>dX0CU4Af#~?){1`4MHQ@i* ztfevJoTFYHVY6(*dIewB{o@L2mcgVkLiqrqWs3dZ0nz9u3E6A#V$-o5j>}ze&>$cz zMd&7CG`UoJ*O*2E*+vDP-SVSsW<0skg^8GrnTai~+lHCmT*g%JPMAjJbm?5k=Gd4bQW_0k>?Vw$?7D0w7 zlvRN@{L;A*@AlBH9$c9%)#0qtE});or`-bR-#`(q_=vkeE(CW0^8b1Q`Rf2sD}{Uz zgSJCH`{u8(ncEPF70(#*1@)v#FGCKomh6Qv|DG|QhtRNA7?3p9!9Ty@s`~~Z^3a*l z6C=k#xJQ5ioWWZnmDM!SUhS zdAwFHU4_4&B&@M>$4-q5g$I}Z0dgWNwhjOOEZ%$z{Cg8cG&7tG!CgSUcM{}l2it+{ z)OO@!$NU_dc}uo4?q;Zp$?VqqiS48t=+KUACz)F-&Iw}Aj19hYZuoW}wpuG(nGK|1 zjC+HlSbXIHZ^Oq?`x22hX+-1zpN*5W#d^}xmSw^XnG13jWG<+t;LK3^XmeR*WiI0W zsB|OmhY_{rr6%#IN&33MH&J^^+Ebjn3#jm~2xFvy1_}`Q&`LSckD{t}%V6xBZ`_m< zQSNEDGbBn7dt6Eok!dNi*DcnOU?r{f)o(R{!l_PZN9jK}V!_ukam3n-X0+l;iRXj0 z<4S^x6QxCPem+lMZ_NPE_lhZ}==_~UrS+h^h(D&CTZCl=3Ia?6xCOk*{t9@#4S&o} z<1OB&7*L-`h3F~@!^>2GIWP1(37A?>ZZ3SKrDQ3p!eXks`k6i&?8tV* z>?hE;-)x3idSYfXJM0_?9nj^6vzYEajRH#nTkxM2y1TSOsDw?rJ2eNuul>WUUaYi} z257~4D+rZhZJX&RIGdkJS7#0ljt&iH+Ej+1BA7w&vM@f7pm;6aHb3oh|sM?Q@R!*cPW3cRRQ{&JpZ~gI#cr za4#S1VAJvsTjIboW+P|c?!$M6yAia)S@ev8d1cr|DX7lczX2N}O)de7+@! 
ze)XCONEei~fMSfM+V&u#DJ1NNQL;vW!Eso>jt>vtjujzph1lL1@8sa<$i%RBJ2+jf zPZcVqkHNM~co-Y@PMiTjxveVTDF%HY|W)Eht>Z<8fukDYooKYm7x zddcst2etCt!KkW6UBQ7HrIXK^clfVjn3ckeGU z|1UB42?jsOfc5n2L5ie{7D*&12~_0i2#z=?!% z-I;o6ssyW}1gRnt^h2~ghAaFL1WVxrw~+jAF7|^PfX>r{%9&(c1h(1OZ;AjB?nlsy zTF?6qN4NQf)$|v7FR~KWL`qEGDq&q?HRyCbm|W|a2g2iV@UO%+-}2Xq2y1rCHE`B)nSX*z5W@s34Im&H1EffeKr(`P=RB8n|f z-HN!j^<)q&tbT}_J{@D@alu6+>P5J)7?r7cUvUh@!a)Q#Lqv#EvD3v{R-LgasoBWb zq3gF$)gYV<`N@1)&TcmaEx>LcR>&0_yC~(PpV*SzcIrH~3oBK)JY5+Nhj1UJw-+^# z;Ws#I$W6zvPgtY&Tp88lxmvYU!Iooi>uz25KyIAw-qtxhcmvxBgxt2OiiXsp*?ItP z@E~avgo9#fvQ$j(bB0#2jJ|1r&yt0>FYI57yCrD4dP<-mjdng6ZwqvIUqnluCk&96 zbaipVA^Nk*^C0o4bDik`!BJ$g+?xSH3&&DDRN&HfUn#Bx2f@bdv z{H`_&HD5xVud>mkrm;|Ctv_7453xo&b%}(Yg+GMGd9)glE@0*#RLEftyImyoHAGrs zPI}j~r7aJKH;4Pn%F$CR!uhK3aRjYUAZ*!8hXrH?t&3RyP}a?h>}-f-pFq$M*AcN& zI>dM^6HmKs-+Jf1X1L@_EL;i@`Mx$U3I7-emNHoEiapwLM}qBqGrQ&PE&=}v-gt#A zBz9QP>(ZVPvYD{iFU^*Um*M2K5X7p;4ZwaeuYnNr8m%_gTN;R^4dF3}0qAa^UKlrE z8b{>Trg3-#6fa{@l>;-jKTiCC1FTOq1C|^uktiQnVa;>6fw+DNEa%=v1SkV{APf~9 z7oiTofYpmMNc6A=z_}Ycec}nE(C-lc8a}}4J@0R$K1<4lC{wtVUZwpE$A?dkox%*X zoyu}lR8%XkX}!syKHo=>jTQE3CLMNNt}x9 z3Yy^Z<1qFN#{Rh{U`QEZiY-s0@V<(g{ybZ|4UuMww56+iwdB^bbCsglU=gMCLrQ)OS9l&lnk-?r1ukF^ zXwTd1)21}-xdmyEo7)00ns}a8Fz_XMe;B`CLo8F~WT6}eMV8>@;7gp#TATn@j&t4i zp0kT{F5%)GkfXlHQ6MZPoQsNcO0{^8nAYDzD?ZQ0Z9oJdf?w@g!i`{wK<7q|-yyJo zNzwh;J&y$z909#~w&V*SevrW67v8U6@Qdj6uPg!v1lzN45uCSRjCgqrS9?f=0|Z2x zVS%p?8?XO?1BkD`UjgE4==FCN0SI!F^KPT`lLa43B*tg~MY?`SKm{D&=I!7gJbj}B zoW!va8-a8SH;;swuwBUp?n%4sWB>`v9d_2xyo6W?(Icg@i=z;blcprO?bFtOodYHz zO?G;_Q1$)6XWqfbe~ZnNawHvGd?wR_RpqkSBoO7`h&?7a`V=BTNh; zjb{@yT~~WCo=t#mm>9G*Rq&7T67Qcd_@@j6xBrMSCj|BDV-+l+Fdkizu0dnq9}rF` zkEG`nC}d0eCw$wvw55tn@9>E>+nIhDb^bQnN%`LH{vKPIw7mc}jFg!%f$DkYdG_-$ zTp`Az8M1M3L}QcNrXBUMj2J0RLiU1i=3Bq>(*oapG`cM41OuJ?aDMn$9==Ij&6CG^q8Ibt@G0*b$W86P?CSOY8Q-8>L{6r& zNX4fyRp*gl2N#?jBO7}zb}_;JJcT)&K*GgZxvCmvckaPF40oi`NhO-tX_zrozFH~I zkYHB>I`6iCjRI72a+HfbbBfMV*DcR9pZ>Cg!SL;B|ep;#o@n;5mdtVJgJF^N{ ztu^-V$`vLjb^g<}=|Yr3WwtAxL?!ZKZdc%pDcms1;RvYQMW8rqMrHA&b?)4V@Mdsq zBu9tpupKDRiDrxSE5~_MMXYNF&ZDZabBy3po5zcQxu2ayQ6Uh@BCLqKDFaop_pbSj@3=lW?)cF>5^i z?)mZYv0PBEV~4>ac3j9karkhq|5(A#c~&ymuTE(rL6TAY4p1aHC%gW%_+V+#-v^@n z|4EBZyuWDCchHfKJ6g0Mp+(-$u#0i&XAeROwW9leKZ0ZWTn%zNGvzROq=fqpfdR*`9 z%oxdnW~~8rt0&UyJMA1f#Z??5#kD`~7#t~}b|r@QQo7p2(M)!UXLS>e*~+Uyg54+ZI#% zT6~K-x5`wEI=7kHuf^%8dAriF2UedV*u-w9sr^Y@V6(ZtOX*w8bvd@=QO|=)*=DZa zrwD4eUy)vOeYYYz%=H}1%$X@zreSKh$6SBF)LzV#>@C(((egl8y2W_`l07aKqT+O( zNG=aNEoii9fF@RAZ!D%-E8-!f_S3~rX9#Kj;23Kerf;&fZu~HFi5lLoG5EI(v@rtF z+WR*QNSv$}{pT3_cMSeDgD*26me}1#f1NSHxwVh~24nx8!EZ81EaikN>v2Mb%G=L? 
z^m6f2npWJ%|G?_sW$;@JTD5tx=C*I%X*LTydJ6A18@K?58+vukJN`-QO1OCsoQHVD zVd7Kj#7t5W;pKC{OCaJUyW0{X@UEg;k*`!4pZj30Y9zrnt)&v&y6$)k+t{;N07+l0 z)h0q6Yv^)S*w6c0SbHk9aK(@)}>ax;@98|^dRuQE8mfaVI# zRPiA2iXZL5eSWAOz{IPi2~Cby-q-oi1qRf#z1J9^6R__rgCc{EGRPxnR5owNMecd$ zHe2ju$u%t5gFr_m%d5>RGmX@tvB8}d0j$`N@gGs)PZON?B4Rpv;pfKP>Tq}=wqd*n zE68_rZp>^)>UNpgdpnBkMTqN(r{JTwd1Eg}0iPUxC>RQ08?V4_M;kf*H!SA_j}WJa`%rhwjjEu_7RuDbo8QaVHjeQYkbD{+ zU}1*fqU}2Eu#r3#247<^$bb%3 z-Y+uvCWG%V_%;JK7vvGcfr`hR#8_M9W!~T*!bDaqlR?>8C{oaLgTlRL}E5Il4<5iYovnQ=21waS*g9^oq zr&vbQ5BA88`LyGe)2Hm=t{^IF~5X{D+0-$@$Z;*6Y?e9ss80UYD@ zd{3_Vd-Nfk@J&YIHoVd_ zhlQzR11$|{XZvNu;f=rDOl+cZy&4~(J@*LL=l^CT?}=V6z$81>dCvliKBydJ?d z#5eu~g5_vG&~@xyz7VJ2Gy|4~Ef}DaXx{~BIM{gs>J2tuz=5mSegTRNc3^-$gFP6a z#$X!;s4Lir0lJD0HDf!Zv27VBG@u&syd}wWN_&X76pt5D^H#wG>P}Y?&%Kw6j-{p6Z zJERyT7~Bp?cUEEPS`kZl2z9rSgi`j7NuoPGJE5N6=jW`Pj~#7qqX{Q*qvN9;lQw68 zByDK(V|T92>}aBG(&p9eY4g+VZ8ME)seYc{Sr~&A(zc>`_`oG{ipGS=${1JeBK^6p3jSmLCzfrxKhhQDdSPz5?>jase!W%bBa;- z4gVySlq=6(78hmwJBqrDS3d{CvIe3N)E&L3Eu51_f4c~@!by+SclPb18 zb1yjSx=!-bO;EkMJNq{G_Vjk!-@eVe_w4TJ+uGZ`d&lmc-QBy__O0sM+qb>1yYF~! zZ|^o-+xxclZts1lueZCaZ*$*0xN?0P^}DTa2mWtCxU=uA?#}McY-VS+yK{Grar^uK E0NkRPy#N3J literal 0 HcmV?d00001 diff --git a/gam/gdata/analytics/calendar/client.py b/gam/gdata/analytics/calendar/client.py new file mode 100755 index 00000000000..414338d4990 --- /dev/null +++ b/gam/gdata/analytics/calendar/client.py @@ -0,0 +1,538 @@ +#!/usr/bin/python +# +# Copyright (C) 2011 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""CalendarClient extends the GDataService to streamline Google Calendar operations. + + CalendarService: Provides methods to query feeds and manipulate items. Extends + GDataService. + + DictionaryToParamList: Function which converts a dictionary into a list of + URL arguments (represented as strings). This is a + utility function used in CRUD operations. +""" + + +__author__ = 'alainv (Alain Vongsouvanh)' + + +import urllib +import gdata.client +import gdata.calendar.data +import atom.data +import atom.http_core +import gdata.gauth + + +DEFAULT_BATCH_URL = ('https://www.google.com/calendar/feeds/default/private' + '/full/batch') + + +class CalendarClient(gdata.client.GDClient): + """Client for the Google Calendar service.""" + api_version = '2' + auth_service = 'cl' + server = "www.google.com" + contact_list = "default" + auth_scopes = gdata.gauth.AUTH_SCOPES['cl'] + + def __init__(self, domain=None, auth_token=None, **kwargs): + """Constructs a new client for the Calendar API. + + Args: + domain: string The Google Apps domain (if any). + kwargs: The other parameters to pass to the gdata.client.GDClient + constructor. + """ + gdata.client.GDClient.__init__(self, auth_token=auth_token, **kwargs) + self.domain = domain + + def get_calendar_feed_uri(self, feed='', projection='full', scheme="https"): + """Builds a feed URI. + + Args: + projection: The projection to apply to the feed contents, for example + 'full', 'base', 'base/12345', 'full/batch'. Default value: 'full'. 
+ scheme: The URL scheme such as 'http' or 'https', None to return a + relative URI without hostname. + + Returns: + A feed URI using the given scheme and projection. + Example: '/calendar/feeds/default/owncalendars/full'. + """ + prefix = scheme and '%s://%s' % (scheme, self.server) or '' + suffix = feed and '/%s/%s' % (feed, projection) or '' + return '%s/calendar/feeds/default%s' % (prefix, suffix) + + GetCalendarFeedUri = get_calendar_feed_uri + + def get_calendar_event_feed_uri(self, calendar='default', visibility='private', + projection='full', scheme="https"): + """Builds a feed URI. + + Args: + projection: The projection to apply to the feed contents, for example + 'full', 'base', 'base/12345', 'full/batch'. Default value: 'full'. + scheme: The URL scheme such as 'http' or 'https', None to return a + relative URI without hostname. + + Returns: + A feed URI using the given scheme and projection. + Example: '/calendar/feeds/default/private/full'. + """ + prefix = scheme and '%s://%s' % (scheme, self.server) or '' + return '%s/calendar/feeds/%s/%s/%s' % (prefix, calendar, + visibility, projection) + + GetCalendarEventFeedUri = get_calendar_event_feed_uri + + def get_calendars_feed(self, uri, + desired_class=gdata.calendar.data.CalendarFeed, + auth_token=None, **kwargs): + """Obtains a calendar feed. + + Args: + uri: The uri of the calendar feed to request. + desired_class: class descended from atom.core.XmlElement to which a + successful response should be converted. If there is no + converter function specified (desired_class=None) then the + desired_class will be used in calling the + atom.core.parse function. If neither + the desired_class nor the converter is specified, an + HTTP reponse object will be returned. Defaults to + gdata.calendar.data.CalendarFeed. + auth_token: An object which sets the Authorization HTTP header in its + modify_request method. Recommended classes include + gdata.gauth.ClientLoginToken and gdata.gauth.AuthSubToken + among others. Represents the current user. Defaults to None + and if None, this method will look for a value in the + auth_token member of SpreadsheetsClient. + """ + return self.get_feed(uri, auth_token=auth_token, + desired_class=desired_class, **kwargs) + + GetCalendarsFeed = get_calendars_feed + + def get_own_calendars_feed(self, + desired_class=gdata.calendar.data.CalendarFeed, + auth_token=None, **kwargs): + """Obtains a feed containing the calendars owned by the current user. + + Args: + desired_class: class descended from atom.core.XmlElement to which a + successful response should be converted. If there is no + converter function specified (desired_class=None) then the + desired_class will be used in calling the + atom.core.parse function. If neither + the desired_class nor the converter is specified, an + HTTP reponse object will be returned. Defaults to + gdata.calendar.data.CalendarFeed. + auth_token: An object which sets the Authorization HTTP header in its + modify_request method. Recommended classes include + gdata.gauth.ClientLoginToken and gdata.gauth.AuthSubToken + among others. Represents the current user. Defaults to None + and if None, this method will look for a value in the + auth_token member of SpreadsheetsClient. 
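+
+      Example (illustrative sketch only; assumes `client` is a CalendarClient
+      that has already been authorized, for example via client_login):
+
+        feed = client.GetOwnCalendarsFeed()
+        for calendar in feed.entry:
+          print calendar.title.text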
+ """ + return self.GetCalendarsFeed(uri=self.GetCalendarFeedUri(feed='owncalendars'), + desired_class=desired_class, auth_token=auth_token, + **kwargs) + + GetOwnCalendarsFeed = get_own_calendars_feed + + def get_all_calendars_feed(self, desired_class=gdata.calendar.data.CalendarFeed, + auth_token=None, **kwargs): + """Obtains a feed containing all the ccalendars the current user has access to. + + Args: + desired_class: class descended from atom.core.XmlElement to which a + successful response should be converted. If there is no + converter function specified (desired_class=None) then the + desired_class will be used in calling the + atom.core.parse function. If neither + the desired_class nor the converter is specified, an + HTTP reponse object will be returned. Defaults to + gdata.calendar.data.CalendarFeed. + auth_token: An object which sets the Authorization HTTP header in its + modify_request method. Recommended classes include + gdata.gauth.ClientLoginToken and gdata.gauth.AuthSubToken + among others. Represents the current user. Defaults to None + and if None, this method will look for a value in the + auth_token member of SpreadsheetsClient. + """ + return self.GetCalendarsFeed(uri=self.GetCalendarFeedUri(feed='allcalendars'), + desired_class=desired_class, auth_token=auth_token, + **kwargs) + + GetAllCalendarsFeed = get_all_calendars_feed + + def get_calendar_entry(self, uri, desired_class=gdata.calendar.data.CalendarEntry, + auth_token=None, **kwargs): + """Obtains a single calendar entry. + + Args: + uri: The uri of the desired calendar entry. + desired_class: class descended from atom.core.XmlElement to which a + successful response should be converted. If there is no + converter function specified (desired_class=None) then the + desired_class will be used in calling the + atom.core.parse function. If neither + the desired_class nor the converter is specified, an + HTTP reponse object will be returned. Defaults to + gdata.calendar.data.CalendarEntry. + auth_token: An object which sets the Authorization HTTP header in its + modify_request method. Recommended classes include + gdata.gauth.ClientLoginToken and gdata.gauth.AuthSubToken + among others. Represents the current user. Defaults to None + and if None, this method will look for a value in the + auth_token member of SpreadsheetsClient. + """ + return self.get_entry(uri, auth_token=auth_token, desired_class=desired_class, + **kwargs) + + GetCalendarEntry = get_calendar_entry + + def get_calendar_event_feed(self, uri=None, + desired_class=gdata.calendar.data.CalendarEventFeed, + auth_token=None, **kwargs): + """Obtains a feed of events for the desired calendar. + + Args: + uri: The uri of the desired calendar entry. + Defaults to https://www.google.com/calendar/feeds/default/private/full. + desired_class: class descended from atom.core.XmlElement to which a + successful response should be converted. If there is no + converter function specified (desired_class=None) then the + desired_class will be used in calling the + atom.core.parse function. If neither + the desired_class nor the converter is specified, an + HTTP reponse object will be returned. Defaults to + gdata.calendar.data.CalendarEventFeed. + auth_token: An object which sets the Authorization HTTP header in its + modify_request method. Recommended classes include + gdata.gauth.ClientLoginToken and gdata.gauth.AuthSubToken + among others. Represents the current user. 
Defaults to None + and if None, this method will look for a value in the + auth_token member of SpreadsheetsClient. + """ + uri = uri or self.GetCalendarEventFeedUri() + return self.get_feed(uri, auth_token=auth_token, + desired_class=desired_class, **kwargs) + + GetCalendarEventFeed = get_calendar_event_feed + + def get_event_entry(self, uri, desired_class=gdata.calendar.data.CalendarEventEntry, + auth_token=None, **kwargs): + """Obtains a single event entry. + + Args: + uri: The uri of the desired calendar event entry. + desired_class: class descended from atom.core.XmlElement to which a + successful response should be converted. If there is no + converter function specified (desired_class=None) then the + desired_class will be used in calling the + atom.core.parse function. If neither + the desired_class nor the converter is specified, an + HTTP reponse object will be returned. Defaults to + gdata.calendar.data.CalendarEventEntry. + auth_token: An object which sets the Authorization HTTP header in its + modify_request method. Recommended classes include + gdata.gauth.ClientLoginToken and gdata.gauth.AuthSubToken + among others. Represents the current user. Defaults to None + and if None, this method will look for a value in the + auth_token member of SpreadsheetsClient. + """ + return self.get_entry(uri, auth_token=auth_token, desired_class=desired_class, + **kwargs) + + GetEventEntry = get_event_entry + + def get_calendar_acl_feed(self, uri='https://www.google.com/calendar/feeds/default/acl/full', + desired_class=gdata.calendar.data.CalendarAclFeed, + auth_token=None, **kwargs): + """Obtains an Access Control List feed. + + Args: + uri: The uri of the desired Acl feed. + Defaults to https://www.google.com/calendar/feeds/default/acl/full. + desired_class: class descended from atom.core.XmlElement to which a + successful response should be converted. If there is no + converter function specified (desired_class=None) then the + desired_class will be used in calling the + atom.core.parse function. If neither + the desired_class nor the converter is specified, an + HTTP reponse object will be returned. Defaults to + gdata.calendar.data.CalendarAclFeed. + auth_token: An object which sets the Authorization HTTP header in its + modify_request method. Recommended classes include + gdata.gauth.ClientLoginToken and gdata.gauth.AuthSubToken + among others. Represents the current user. Defaults to None + and if None, this method will look for a value in the + auth_token member of SpreadsheetsClient. + """ + return self.get_feed(uri, auth_token=auth_token, desired_class=desired_class, + **kwargs) + + GetCalendarAclFeed = get_calendar_acl_feed + + def get_calendar_acl_entry(self, uri, desired_class=gdata.calendar.data.CalendarAclEntry, + auth_token=None, **kwargs): + """Obtains a single Access Control List entry. + + Args: + uri: The uri of the desired Acl feed. + desired_class: class descended from atom.core.XmlElement to which a + successful response should be converted. If there is no + converter function specified (desired_class=None) then the + desired_class will be used in calling the + atom.core.parse function. If neither + the desired_class nor the converter is specified, an + HTTP reponse object will be returned. Defaults to + gdata.calendar.data.CalendarAclEntry. + auth_token: An object which sets the Authorization HTTP header in its + modify_request method. Recommended classes include + gdata.gauth.ClientLoginToken and gdata.gauth.AuthSubToken + among others. Represents the current user. 
Defaults to None + and if None, this method will look for a value in the + auth_token member of SpreadsheetsClient. + """ + return self.get_entry(uri, auth_token=auth_token, desired_class=desired_class, + **kwargs) + + GetCalendarAclEntry = get_calendar_acl_entry + + def insert_calendar(self, new_calendar, insert_uri=None, auth_token=None, **kwargs): + """Adds a new calendar to Google Calendar. + + Args: + new_calendar: atom.Entry or subclass A new calendar which is to be added to + Google Calendar. + insert_uri: the URL to post new calendars to the feed + url_params: dict (optional) Additional URL parameters to be included + in the insertion request. + escape_params: boolean (optional) If true, the url_parameters will be + escaped before they are included in the request. + + Returns: + On successful insert, an entry containing the calendar created + On failure, a RequestError is raised of the form: + {'status': HTTP status code from server, + 'reason': HTTP reason from the server, + 'body': HTTP body of the server's response} + """ + insert_uri = insert_uri or self.GetCalendarFeedUri(feed='owncalendars') + return self.Post(new_calendar, insert_uri, + auth_token=auth_token, **kwargs) + + InsertCalendar = insert_calendar + + def insert_calendar_subscription(self, calendar, insert_uri=None, + auth_token=None, **kwargs): + """Subscribes the authenticated user to the provided calendar. + + Args: + calendar: The calendar to which the user should be subscribed. + url_params: dict (optional) Additional URL parameters to be included + in the insertion request. + escape_params: boolean (optional) If true, the url_parameters will be + escaped before they are included in the request. + + Returns: + On successful insert, an entry containing the subscription created + On failure, a RequestError is raised of the form: + {'status': HTTP status code from server, + 'reason': HTTP reason from the server, + 'body': HTTP body of the server's response} + """ + insert_uri = insert_uri or self.GetCalendarFeedUri(feed='allcalendars') + return self.Post(calendar, insert_uri, auth_token=auth_token, **kwargs) + + InsertCalendarSubscription = insert_calendar_subscription + + def insert_event(self, new_event, insert_uri=None, auth_token=None, **kwargs): + """Adds a new event to Google Calendar. + + Args: + new_event: atom.Entry or subclass A new event which is to be added to + Google Calendar. + insert_uri: the URL to post new events to the feed + url_params: dict (optional) Additional URL parameters to be included + in the insertion request. + escape_params: boolean (optional) If true, the url_parameters will be + escaped before they are included in the request. + + Returns: + On successful insert, an entry containing the event created + On failure, a RequestError is raised of the form: + {'status': HTTP status code from server, + 'reason': HTTP reason from the server, + 'body': HTTP body of the server's response} + """ + insert_uri = insert_uri or self.GetCalendarEventFeedUri() + return self.Post(new_event, insert_uri, + auth_token=auth_token, **kwargs) + + + InsertEvent = insert_event + + def insert_acl_entry(self, new_acl_entry, + insert_uri = 'https://www.google.com/calendar/feeds/default/acl/full', + auth_token=None, **kwargs): + """Adds a new ACL entry to Google Calendar. + + Args: + new_acl_entry: atom.Entry or subclass A new ACL entry which is to be added to + Google Calendar.
+ insert_uri: the URL to post new contacts to the feed + url_params: dict (optional) Additional URL parameters to be included + in the insertion request. + escape_params: boolean (optional) If true, the url_parameters will be + escaped before they are included in the request. + + Returns: + On successful insert, an entry containing the contact created + On failure, a RequestError is raised of the form: + {'status': HTTP status code from server, + 'reason': HTTP reason from the server, + 'body': HTTP body of the server's response} + """ + return self.Post(new_acl_entry, insert_uri, auth_token=auth_token, **kwargs) + + InsertAclEntry = insert_acl_entry + + def execute_batch(self, batch_feed, url, desired_class=None): + """Sends a batch request feed to the server. + + Args: + batch_feed: gdata.contacts.CalendarEventFeed A feed containing batch + request entries. Each entry contains the operation to be performed + on the data contained in the entry. For example an entry with an + operation type of insert will be used as if the individual entry + had been inserted. + url: str The batch URL to which these operations should be applied. + converter: Function (optional) The function used to convert the server's + response to an object. + + Returns: + The results of the batch request's execution on the server. If the + default converter is used, this is stored in a ContactsFeed. + """ + return self.Post(batch_feed, url, desired_class=desired_class) + + ExecuteBatch = execute_batch + + def update(self, entry, auth_token=None, **kwargs): + """Edits the entry on the server by sending the XML for this entry. + + Performs a PUT and converts the response to a new entry object with a + matching class to the entry passed in. + + Args: + entry: + auth_token: + + Returns: + A new Entry object of a matching type to the entry which was passed in. + """ + return gdata.client.GDClient.Update(self, entry, auth_token=auth_token, + force=True, **kwargs) + + Update = update + + +class CalendarEventQuery(gdata.client.Query): + """ + Create a custom Calendar Query + + Full specs can be found at: U{Calendar query parameters reference + } + """ + + def __init__(self, feed=None, ctz=None, fields=None, futureevents=None, + max_attendees=None, orderby=None, recurrence_expansion_start=None, + recurrence_expansion_end=None, singleevents=None, showdeleted=None, + showhidden=None, sortorder=None, start_min=None, start_max=None, + updated_min=None, **kwargs): + """ + @param max_results: The maximum number of entries to return. If you want + to receive all of the contacts, rather than only the default maximum, you + can specify a very large number for max-results. + @param start-index: The 1-based index of the first result to be retrieved. + @param updated-min: The lower bound on entry update dates. + @param group: Constrains the results to only the contacts belonging to the + group specified. Value of this parameter specifies group ID + @param orderby: Sorting criterion. The only supported value is + lastmodified. + @param showdeleted: Include deleted contacts in the returned contacts feed + @pram sortorder: Sorting order direction. Can be either ascending or + descending. + @param requirealldeleted: Only relevant if showdeleted and updated-min + are also provided. It dictates the behavior of the server in case it + detects that placeholders of some entries deleted since the point in + time specified as updated-min may have been lost. 
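+
+    Example (illustrative sketch only; the dates are placeholder values):
+
+      query = CalendarEventQuery(start_min='2013-03-01',
+                                 start_max='2013-04-01',
+                                 singleevents='true',
+                                 orderby='starttime')
+
+    modify_request (below) then copies every parameter that is set onto the
+    request URI as the matching query parameter, e.g. start_min is sent as
+    'start-min'.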
+ """ + gdata.client.Query.__init__(self, **kwargs) + self.ctz = ctz + self.fields = fields + self.futureevents = futureevents + self.max_attendees = max_attendees + self.orderby = orderby + self.recurrence_expansion_start = recurrence_expansion_start + self.recurrence_expansion_end = recurrence_expansion_end + self.singleevents = singleevents + self.showdeleted = showdeleted + self.showhidden = showhidden + self.sortorder = sortorder + self.start_min = start_min + self.start_max = start_max + self.updated_min = updated_min + + def modify_request(self, http_request): + if self.ctz: + gdata.client._add_query_param('ctz', self.ctz, http_request) + if self.fields: + gdata.client._add_query_param('fields', self.fields, http_request) + if self.futureevents: + gdata.client._add_query_param('futureevents', self.futureevents, http_request) + if self.max_attendees: + gdata.client._add_query_param('max-attendees', self.max_attendees, http_request) + if self.orderby: + gdata.client._add_query_param('orderby', self.orderby, http_request) + if self.recurrence_expansion_start: + gdata.client._add_query_param('recurrence-expansion-start', + self.recurrence_expansion_start, http_request) + if self.recurrence_expansion_end: + gdata.client._add_query_param('recurrence-expansion-end', + self.recurrence_expansion_end, http_request) + if self.singleevents: + gdata.client._add_query_param('singleevents', self.singleevents, http_request) + if self.showdeleted: + gdata.client._add_query_param('showdeleted', self.showdeleted, http_request) + if self.showhidden: + gdata.client._add_query_param('showhidden', self.showhidden, http_request) + if self.sortorder: + gdata.client._add_query_param('sortorder', self.sortorder, http_request) + if self.start_min: + gdata.client._add_query_param('start-min', self.start_min, http_request) + if self.start_max: + gdata.client._add_query_param('start-max', self.start_max, http_request) + if self.updated_min: + gdata.client._add_query_param('updated-min', self.updated_min, http_request) + gdata.client.Query.modify_request(self, http_request) + + ModifyRequest = modify_request + + + diff --git a/gam/gdata/analytics/calendar/data.py b/gam/gdata/analytics/calendar/data.py new file mode 100755 index 00000000000..0a0235ef4a0 --- /dev/null +++ b/gam/gdata/analytics/calendar/data.py @@ -0,0 +1,327 @@ +#!/usr/bin/python +# +# Copyright (C) 2009 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ + +"""Contains the data classes of the Google Calendar Data API""" + + +__author__ = 'j.s@google.com (Jeff Scudder)' + + +import atom.core +import atom.data +import gdata.acl.data +import gdata.data +import gdata.geo.data +import gdata.opensearch.data + + +GCAL_NAMESPACE = 'http://schemas.google.com/gCal/2005' +GCAL_TEMPLATE = '{%s}%%s' % GCAL_NAMESPACE +WEB_CONTENT_LINK_REL = '%s/%s' % (GCAL_NAMESPACE, 'webContent') + + +class AccessLevelProperty(atom.core.XmlElement): + """Describes how much a given user may do with an event or calendar""" + _qname = GCAL_TEMPLATE % 'accesslevel' + value = 'value' + + +class AllowGSync2Property(atom.core.XmlElement): + """Whether the user is permitted to run Google Apps Sync""" + _qname = GCAL_TEMPLATE % 'allowGSync2' + value = 'value' + + +class AllowGSyncProperty(atom.core.XmlElement): + """Whether the user is permitted to run Google Apps Sync""" + _qname = GCAL_TEMPLATE % 'allowGSync' + value = 'value' + + +class AnyoneCanAddSelfProperty(atom.core.XmlElement): + """Whether anyone can add self as attendee""" + _qname = GCAL_TEMPLATE % 'anyoneCanAddSelf' + value = 'value' + + +class CalendarAclRole(gdata.acl.data.AclRole): + """Describes the Calendar roles of an entry in the Calendar access control list""" + _qname = gdata.acl.data.GACL_TEMPLATE % 'role' + + +class CalendarCommentEntry(gdata.data.GDEntry): + """Describes an entry in a feed of a Calendar event's comments""" + + +class CalendarCommentFeed(gdata.data.GDFeed): + """Describes feed of a Calendar event's comments""" + entry = [CalendarCommentEntry] + + +class CalendarComments(gdata.data.Comments): + """Describes a container of a feed link for Calendar comment entries""" + _qname = gdata.data.GD_TEMPLATE % 'comments' + + +class CalendarExtendedProperty(gdata.data.ExtendedProperty): + """Defines a value for the realm attribute that is used only in the calendar API""" + _qname = gdata.data.GD_TEMPLATE % 'extendedProperty' + + +class CalendarWhere(gdata.data.Where): + """Extends the base Where class with Calendar extensions""" + _qname = gdata.data.GD_TEMPLATE % 'where' + + +class ColorProperty(atom.core.XmlElement): + """Describes the color of a calendar""" + _qname = GCAL_TEMPLATE % 'color' + value = 'value' + + +class GuestsCanInviteOthersProperty(atom.core.XmlElement): + """Whether guests can invite others to the event""" + _qname = GCAL_TEMPLATE % 'guestsCanInviteOthers' + value = 'value' + + +class GuestsCanModifyProperty(atom.core.XmlElement): + """Whether guests can modify event""" + _qname = GCAL_TEMPLATE % 'guestsCanModify' + value = 'value' + + +class GuestsCanSeeGuestsProperty(atom.core.XmlElement): + """Whether guests can see other attendees""" + _qname = GCAL_TEMPLATE % 'guestsCanSeeGuests' + value = 'value' + + +class HiddenProperty(atom.core.XmlElement): + """Describes whether a calendar is hidden""" + _qname = GCAL_TEMPLATE % 'hidden' + value = 'value' + + +class IcalUIDProperty(atom.core.XmlElement): + """Describes the UID in the ical export of the event""" + _qname = GCAL_TEMPLATE % 'uid' + value = 'value' + + +class OverrideNameProperty(atom.core.XmlElement): + """Describes the override name property of a calendar""" + _qname = GCAL_TEMPLATE % 'overridename' + value = 'value' + + +class PrivateCopyProperty(atom.core.XmlElement): + """Indicates whether this is a private copy of the event, changes to which should not be sent to other calendars""" + _qname = GCAL_TEMPLATE % 'privateCopy' + value = 'value' + + +class QuickAddProperty(atom.core.XmlElement): + """Describes whether 
gd:content is for quick-add processing""" + _qname = GCAL_TEMPLATE % 'quickadd' + value = 'value' + + +class ResourceProperty(atom.core.XmlElement): + """Describes whether gd:who is a resource such as a conference room""" + _qname = GCAL_TEMPLATE % 'resource' + value = 'value' + id = 'id' + + +class EventWho(gdata.data.Who): + """Extends the base Who class with Calendar extensions""" + _qname = gdata.data.GD_TEMPLATE % 'who' + resource = ResourceProperty + + +class SelectedProperty(atom.core.XmlElement): + """Describes whether a calendar is selected""" + _qname = GCAL_TEMPLATE % 'selected' + value = 'value' + + +class SendAclNotificationsProperty(atom.core.XmlElement): + """Describes whether to send ACL notifications to grantees""" + _qname = GCAL_TEMPLATE % 'sendAclNotifications' + value = 'value' + + +class CalendarAclEntry(gdata.acl.data.AclEntry): + """Describes an entry in a feed of a Calendar access control list (ACL)""" + send_acl_notifications = SendAclNotificationsProperty + + +class CalendarAclFeed(gdata.data.GDFeed): + """Describes a Calendar access contorl list (ACL) feed""" + entry = [CalendarAclEntry] + + +class SendEventNotificationsProperty(atom.core.XmlElement): + """Describes whether to send event notifications to other participants of the event""" + _qname = GCAL_TEMPLATE % 'sendEventNotifications' + value = 'value' + + +class SequenceNumberProperty(atom.core.XmlElement): + """Describes sequence number of an event""" + _qname = GCAL_TEMPLATE % 'sequence' + value = 'value' + + +class CalendarRecurrenceExceptionEntry(gdata.data.GDEntry): + """Describes an entry used by a Calendar recurrence exception entry link""" + uid = IcalUIDProperty + sequence = SequenceNumberProperty + + +class CalendarRecurrenceException(gdata.data.RecurrenceException): + """Describes an exception to a recurring Calendar event""" + _qname = gdata.data.GD_TEMPLATE % 'recurrenceException' + + +class SettingsProperty(atom.core.XmlElement): + """User preference name-value pair""" + _qname = GCAL_TEMPLATE % 'settingsProperty' + name = 'name' + value = 'value' + + +class SettingsEntry(gdata.data.GDEntry): + """Describes a Calendar Settings property entry""" + settings_property = SettingsProperty + + +class CalendarSettingsFeed(gdata.data.GDFeed): + """Personal settings for Calendar application""" + entry = [SettingsEntry] + + +class SuppressReplyNotificationsProperty(atom.core.XmlElement): + """Lists notification methods to be suppressed for this reply""" + _qname = GCAL_TEMPLATE % 'suppressReplyNotifications' + methods = 'methods' + + +class SyncEventProperty(atom.core.XmlElement): + """Describes whether this is a sync scenario where the Ical UID and Sequence number are honored during inserts and updates""" + _qname = GCAL_TEMPLATE % 'syncEvent' + value = 'value' + + +class When(gdata.data.When): + """Extends the gd:when element to add reminders""" + reminder = [gdata.data.Reminder] + + +class CalendarEventEntry(gdata.data.BatchEntry): + """Describes a Calendar event entry""" + quick_add = QuickAddProperty + send_event_notifications = SendEventNotificationsProperty + sync_event = SyncEventProperty + anyone_can_add_self = AnyoneCanAddSelfProperty + extended_property = [CalendarExtendedProperty] + sequence = SequenceNumberProperty + guests_can_invite_others = GuestsCanInviteOthersProperty + guests_can_modify = GuestsCanModifyProperty + guests_can_see_guests = GuestsCanSeeGuestsProperty + georss_where = gdata.geo.data.GeoRssWhere + private_copy = PrivateCopyProperty + suppress_reply_notifications = 
SuppressReplyNotificationsProperty + uid = IcalUIDProperty + where = [gdata.data.Where] + when = [When] + who = [gdata.data.Who] + transparency = gdata.data.Transparency + comments = gdata.data.Comments + event_status = gdata.data.EventStatus + visibility = gdata.data.Visibility + recurrence = gdata.data.Recurrence + recurrence_exception = [gdata.data.RecurrenceException] + original_event = gdata.data.OriginalEvent + reminder = [gdata.data.Reminder] + + +class TimeZoneProperty(atom.core.XmlElement): + """Describes the time zone of a calendar""" + _qname = GCAL_TEMPLATE % 'timezone' + value = 'value' + + +class TimesCleanedProperty(atom.core.XmlElement): + """Describes how many times calendar was cleaned via Manage Calendars""" + _qname = GCAL_TEMPLATE % 'timesCleaned' + value = 'value' + + +class CalendarEntry(gdata.data.GDEntry): + """Describes a Calendar entry in the feed of a user's calendars""" + timezone = TimeZoneProperty + overridename = OverrideNameProperty + hidden = HiddenProperty + selected = SelectedProperty + times_cleaned = TimesCleanedProperty + color = ColorProperty + where = [CalendarWhere] + accesslevel = AccessLevelProperty + + +class CalendarEventFeed(gdata.data.BatchFeed): + """Describes a Calendar event feed""" + allow_g_sync2 = AllowGSync2Property + timezone = TimeZoneProperty + entry = [CalendarEventEntry] + times_cleaned = TimesCleanedProperty + allow_g_sync = AllowGSyncProperty + + +class CalendarFeed(gdata.data.GDFeed): + """Describes a feed of Calendars""" + entry = [CalendarEntry] + + +class WebContentGadgetPref(atom.core.XmlElement): + """Describes a single web content gadget preference""" + _qname = GCAL_TEMPLATE % 'webContentGadgetPref' + name = 'name' + value = 'value' + + +class WebContent(atom.core.XmlElement): + """Describes a "web content" extension""" + _qname = GCAL_TEMPLATE % 'webContent' + height = 'height' + width = 'width' + web_content_gadget_pref = [WebContentGadgetPref] + url = 'url' + display = 'display' + + +class WebContentLink(atom.data.Link): + """Describes a "web content" link""" + def __init__(self, title=None, href=None, link_type=None, + web_content=None): + atom.data.Link.__init__(self, rel=WEB_CONTENT_LINK_REL, title=title, href=href, + link_type=link_type) + + web_content = WebContent + diff --git a/gam/gdata/analytics/calendar/service.py b/gam/gdata/analytics/calendar/service.py new file mode 100755 index 00000000000..53a94e31f89 --- /dev/null +++ b/gam/gdata/analytics/calendar/service.py @@ -0,0 +1,595 @@ +#!/usr/bin/python +# +# Copyright (C) 2006 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""CalendarService extends the GDataService to streamline Google Calendar operations. + + CalendarService: Provides methods to query feeds and manipulate items. Extends + GDataService. + + DictionaryToParamList: Function which converts a dictionary into a list of + URL arguments (represented as strings). This is a + utility function used in CRUD operations. 
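+
+  Example (illustrative sketch only; the address, password and source name
+  are placeholders):
+
+    client = CalendarService(email='user@example.com', password='secret',
+                             source='example-app-v1')
+    client.ProgrammaticLogin()
+    feed = client.GetCalendarEventFeed()
+    for event in feed.entry:
+      print event.title.text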
+""" + + +__author__ = 'api.vli (Vivian Li)' + + +import urllib +import gdata +import atom.service +import gdata.service +import gdata.calendar +import atom + + +DEFAULT_BATCH_URL = ('http://www.google.com/calendar/feeds/default/private' + '/full/batch') + + +class Error(Exception): + pass + + +class RequestError(Error): + pass + + +class CalendarService(gdata.service.GDataService): + """Client for the Google Calendar service.""" + + def __init__(self, email=None, password=None, source=None, + server='www.google.com', additional_headers=None, **kwargs): + """Creates a client for the Google Calendar service. + + Args: + email: string (optional) The user's email address, used for + authentication. + password: string (optional) The user's password. + source: string (optional) The name of the user's application. + server: string (optional) The name of the server to which a connection + will be opened. Default value: 'www.google.com'. + **kwargs: The other parameters to pass to gdata.service.GDataService + constructor. + """ + gdata.service.GDataService.__init__( + self, email=email, password=password, service='cl', source=source, + server=server, additional_headers=additional_headers, **kwargs) + + def GetCalendarEventFeed(self, uri='/calendar/feeds/default/private/full'): + return self.Get(uri, converter=gdata.calendar.CalendarEventFeedFromString) + + def GetCalendarEventEntry(self, uri): + return self.Get(uri, converter=gdata.calendar.CalendarEventEntryFromString) + + def GetCalendarListFeed(self, uri='/calendar/feeds/default/allcalendars/full'): + return self.Get(uri, converter=gdata.calendar.CalendarListFeedFromString) + + def GetAllCalendarsFeed(self, uri='/calendar/feeds/default/allcalendars/full'): + return self.Get(uri, converter=gdata.calendar.CalendarListFeedFromString) + + def GetOwnCalendarsFeed(self, uri='/calendar/feeds/default/owncalendars/full'): + return self.Get(uri, converter=gdata.calendar.CalendarListFeedFromString) + + def GetCalendarListEntry(self, uri): + return self.Get(uri, converter=gdata.calendar.CalendarListEntryFromString) + + def GetCalendarAclFeed(self, uri='/calendar/feeds/default/acl/full'): + return self.Get(uri, converter=gdata.calendar.CalendarAclFeedFromString) + + def GetCalendarAclEntry(self, uri): + return self.Get(uri, converter=gdata.calendar.CalendarAclEntryFromString) + + def GetCalendarEventCommentFeed(self, uri): + return self.Get(uri, converter=gdata.calendar.CalendarEventCommentFeedFromString) + + def GetCalendarEventCommentEntry(self, uri): + return self.Get(uri, converter=gdata.calendar.CalendarEventCommentEntryFromString) + + def Query(self, uri, converter=None): + """Performs a query and returns a resulting feed or entry. + + Args: + feed: string The feed which is to be queried + + Returns: + On success, a GDataFeed or Entry depending on which is sent from the + server. 
+ On failure, a RequestError is raised of the form: + {'status': HTTP status code from server, + 'reason': HTTP reason from the server, + 'body': HTTP body of the server's response} + """ + + if converter: + result = self.Get(uri, converter=converter) + else: + result = self.Get(uri) + return result + + def CalendarQuery(self, query): + if isinstance(query, CalendarEventQuery): + return self.Query(query.ToUri(), + converter=gdata.calendar.CalendarEventFeedFromString) + elif isinstance(query, CalendarListQuery): + return self.Query(query.ToUri(), + converter=gdata.calendar.CalendarListFeedFromString) + elif isinstance(query, CalendarEventCommentQuery): + return self.Query(query.ToUri(), + converter=gdata.calendar.CalendarEventCommentFeedFromString) + else: + return self.Query(query.ToUri()) + + def InsertEvent(self, new_event, insert_uri, url_params=None, + escape_params=True): + """Adds an event to Google Calendar. + + Args: + new_event: atom.Entry or subclass A new event which is to be added to + Google Calendar. + insert_uri: the URL to post new events to the feed + url_params: dict (optional) Additional URL parameters to be included + in the insertion request. + escape_params: boolean (optional) If true, the url_parameters will be + escaped before they are included in the request. + + Returns: + On successful insert, an entry containing the event created + On failure, a RequestError is raised of the form: + {'status': HTTP status code from server, + 'reason': HTTP reason from the server, + 'body': HTTP body of the server's response} + """ + + return self.Post(new_event, insert_uri, url_params=url_params, + escape_params=escape_params, + converter=gdata.calendar.CalendarEventEntryFromString) + + def InsertCalendarSubscription(self, calendar, url_params=None, + escape_params=True): + """Subscribes the authenticated user to the provided calendar. + + Args: + calendar: The calendar to which the user should be subscribed. + url_params: dict (optional) Additional URL parameters to be included + in the insertion request. + escape_params: boolean (optional) If true, the url_parameters will be + escaped before they are included in the request. + + Returns: + On successful insert, an entry containing the subscription created + On failure, a RequestError is raised of the form: + {'status': HTTP status code from server, + 'reason': HTTP reason from the server, + 'body': HTTP body of the server's response} + """ + + insert_uri = '/calendar/feeds/default/allcalendars/full' + return self.Post(calendar, insert_uri, url_params=url_params, + escape_params=escape_params, + converter=gdata.calendar.CalendarListEntryFromString) + + def InsertCalendar(self, new_calendar, url_params=None, + escape_params=True): + """Creates a new calendar. + + Args: + new_calendar: The calendar to be created + url_params: dict (optional) Additional URL parameters to be included + in the insertion request. + escape_params: boolean (optional) If true, the url_parameters will be + escaped before they are included in the request. 
+ + Returns: + On successful insert, an entry containing the calendar created + On failure, a RequestError is raised of the form: + {'status': HTTP status code from server, + 'reason': HTTP reason from the server, + 'body': HTTP body of the server's response} + """ + + insert_uri = '/calendar/feeds/default/owncalendars/full' + response = self.Post(new_calendar, insert_uri, url_params=url_params, + escape_params=escape_params, + converter=gdata.calendar.CalendarListEntryFromString) + return response + + def UpdateCalendar(self, calendar, url_params=None, + escape_params=True): + """Updates a calendar. + + Args: + calendar: The calendar which should be updated + url_params: dict (optional) Additional URL parameters to be included + in the insertion request. + escape_params: boolean (optional) If true, the url_parameters will be + escaped before they are included in the request. + + Returns: + On successful insert, an entry containing the calendar created + On failure, a RequestError is raised of the form: + {'status': HTTP status code from server, + 'reason': HTTP reason from the server, + 'body': HTTP body of the server's response} + """ + + update_uri = calendar.GetEditLink().href + response = self.Put(data=calendar, uri=update_uri, url_params=url_params, + escape_params=escape_params, + converter=gdata.calendar.CalendarListEntryFromString) + return response + + def InsertAclEntry(self, new_entry, insert_uri, url_params=None, + escape_params=True): + """Adds an ACL entry (rule) to Google Calendar. + + Args: + new_entry: atom.Entry or subclass A new ACL entry which is to be added to + Google Calendar. + insert_uri: the URL to post new entries to the ACL feed + url_params: dict (optional) Additional URL parameters to be included + in the insertion request. + escape_params: boolean (optional) If true, the url_parameters will be + escaped before they are included in the request. + + Returns: + On successful insert, an entry containing the ACL entry created + On failure, a RequestError is raised of the form: + {'status': HTTP status code from server, + 'reason': HTTP reason from the server, + 'body': HTTP body of the server's response} + """ + + return self.Post(new_entry, insert_uri, url_params=url_params, + escape_params=escape_params, + converter=gdata.calendar.CalendarAclEntryFromString) + + def InsertEventComment(self, new_entry, insert_uri, url_params=None, + escape_params=True): + """Adds an entry to Google Calendar. + + Args: + new_entry: atom.Entry or subclass A new entry which is to be added to + Google Calendar. + insert_uri: the URL to post new entrys to the feed + url_params: dict (optional) Additional URL parameters to be included + in the insertion request. + escape_params: boolean (optional) If true, the url_parameters will be + escaped before they are included in the request. 
+ + Returns: + On successful insert, an entry containing the comment created + On failure, a RequestError is raised of the form: + {'status': HTTP status code from server, + 'reason': HTTP reason from the server, + 'body': HTTP body of the server's response} + """ + + return self.Post(new_entry, insert_uri, url_params=url_params, + escape_params=escape_params, + converter=gdata.calendar.CalendarEventCommentEntryFromString) + + def _RemoveStandardUrlPrefix(self, url): + url_prefix = 'http://%s/' % self.server + if url.startswith(url_prefix): + return url[len(url_prefix) - 1:] + return url + + def DeleteEvent(self, edit_uri, extra_headers=None, + url_params=None, escape_params=True): + """Removes an event with the specified ID from Google Calendar. + + Args: + edit_uri: string The edit URL of the entry to be deleted. Example: + 'http://www.google.com/calendar/feeds/default/private/full/abx' + url_params: dict (optional) Additional URL parameters to be included + in the deletion request. + escape_params: boolean (optional) If true, the url_parameters will be + escaped before they are included in the request. + + Returns: + On successful delete, a httplib.HTTPResponse containing the server's + response to the DELETE request. + On failure, a RequestError is raised of the form: + {'status': HTTP status code from server, + 'reason': HTTP reason from the server, + 'body': HTTP body of the server's response} + """ + + edit_uri = self._RemoveStandardUrlPrefix(edit_uri) + return self.Delete('%s' % edit_uri, + url_params=url_params, escape_params=escape_params) + + def DeleteAclEntry(self, edit_uri, extra_headers=None, + url_params=None, escape_params=True): + """Removes an ACL entry at the given edit_uri from Google Calendar. + + Args: + edit_uri: string The edit URL of the entry to be deleted. Example: + 'http://www.google.com/calendar/feeds/liz%40gmail.com/acl/full/default' + url_params: dict (optional) Additional URL parameters to be included + in the deletion request. + escape_params: boolean (optional) If true, the url_parameters will be + escaped before they are included in the request. + + Returns: + On successful delete, a httplib.HTTPResponse containing the server's + response to the DELETE request. + On failure, a RequestError is raised of the form: + {'status': HTTP status code from server, + 'reason': HTTP reason from the server, + 'body': HTTP body of the server's response} + """ + + edit_uri = self._RemoveStandardUrlPrefix(edit_uri) + return self.Delete('%s' % edit_uri, + url_params=url_params, escape_params=escape_params) + + def DeleteCalendarEntry(self, edit_uri, extra_headers=None, + url_params=None, escape_params=True): + """Removes a calendar entry at the given edit_uri from Google Calendar. + + Args: + edit_uri: string The edit URL of the entry to be deleted. Example: + 'http://www.google.com/calendar/feeds/default/allcalendars/abcdef@group.calendar.google.com' + url_params: dict (optional) Additional URL parameters to be included + in the deletion request. + escape_params: boolean (optional) If true, the url_parameters will be + escaped before they are included in the request. 
+ + Returns: + On successful delete, True is returned + On failure, a RequestError is raised of the form: + {'status': HTTP status code from server, + 'reason': HTTP reason from the server, + 'body': HTTP body of the server's response} + """ + + return self.Delete(edit_uri, url_params=url_params, + escape_params=escape_params) + + def UpdateEvent(self, edit_uri, updated_event, url_params=None, + escape_params=True): + """Updates an existing event. + + Args: + edit_uri: string The edit link URI for the element being updated + updated_event: string, atom.Entry, or subclass containing + the Atom Entry which will replace the event which is + stored at the edit_url + url_params: dict (optional) Additional URL parameters to be included + in the update request. + escape_params: boolean (optional) If true, the url_parameters will be + escaped before they are included in the request. + + Returns: + On successful update, a httplib.HTTPResponse containing the server's + response to the PUT request. + On failure, a RequestError is raised of the form: + {'status': HTTP status code from server, + 'reason': HTTP reason from the server, + 'body': HTTP body of the server's response} + """ + + edit_uri = self._RemoveStandardUrlPrefix(edit_uri) + return self.Put(updated_event, '%s' % edit_uri, + url_params=url_params, + escape_params=escape_params, + converter=gdata.calendar.CalendarEventEntryFromString) + + def UpdateAclEntry(self, edit_uri, updated_rule, url_params=None, + escape_params=True): + """Updates an existing ACL rule. + + Args: + edit_uri: string The edit link URI for the element being updated + updated_rule: string, atom.Entry, or subclass containing + the Atom Entry which will replace the event which is + stored at the edit_url + url_params: dict (optional) Additional URL parameters to be included + in the update request. + escape_params: boolean (optional) If true, the url_parameters will be + escaped before they are included in the request. + + Returns: + On successful update, a httplib.HTTPResponse containing the server's + response to the PUT request. + On failure, a RequestError is raised of the form: + {'status': HTTP status code from server, + 'reason': HTTP reason from the server, + 'body': HTTP body of the server's response} + """ + + edit_uri = self._RemoveStandardUrlPrefix(edit_uri) + return self.Put(updated_rule, '%s' % edit_uri, + url_params=url_params, + escape_params=escape_params, + converter=gdata.calendar.CalendarAclEntryFromString) + + def ExecuteBatch(self, batch_feed, url, + converter=gdata.calendar.CalendarEventFeedFromString): + """Sends a batch request feed to the server. + + The batch request needs to be sent to the batch URL for a particular + calendar. You can find the URL by calling GetBatchLink().href on the + CalendarEventFeed. + + Args: + batch_feed: gdata.calendar.CalendarEventFeed A feed containing batch + request entries. Each entry contains the operation to be performed + on the data contained in the entry. For example an entry with an + operation type of insert will be used as if the individual entry + had been inserted. + url: str The batch URL for the Calendar to which these operations should + be applied. + converter: Function (optional) The function used to convert the server's + response to an object. The default value is + CalendarEventFeedFromString. + + Returns: + The results of the batch request's execution on the server. If the + default converter is used, this is stored in a CalendarEventFeed. 
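+
+    Example (an illustrative sketch only; request_feed, new_event and
+    event_feed are placeholder names, not objects defined in this module):
+
+      request_feed = gdata.calendar.CalendarEventFeed()
+      request_feed.AddInsert(entry=new_event)
+      response_feed = calendar_service.ExecuteBatch(
+          request_feed, event_feed.GetBatchLink().href)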
+ """ + return self.Post(batch_feed, url, converter=converter) + + +class CalendarEventQuery(gdata.service.Query): + + def __init__(self, user='default', visibility='private', projection='full', + text_query=None, params=None, categories=None): + gdata.service.Query.__init__(self, + feed='http://www.google.com/calendar/feeds/%s/%s/%s' % ( + urllib.quote(user), + urllib.quote(visibility), + urllib.quote(projection)), + text_query=text_query, params=params, categories=categories) + + def _GetStartMin(self): + if 'start-min' in self.keys(): + return self['start-min'] + else: + return None + + def _SetStartMin(self, val): + self['start-min'] = val + + start_min = property(_GetStartMin, _SetStartMin, + doc="""The start-min query parameter""") + + def _GetStartMax(self): + if 'start-max' in self.keys(): + return self['start-max'] + else: + return None + + def _SetStartMax(self, val): + self['start-max'] = val + + start_max = property(_GetStartMax, _SetStartMax, + doc="""The start-max query parameter""") + + def _GetOrderBy(self): + if 'orderby' in self.keys(): + return self['orderby'] + else: + return None + + def _SetOrderBy(self, val): + if val is not 'lastmodified' and val is not 'starttime': + raise Error, "Order By must be either 'lastmodified' or 'starttime'" + self['orderby'] = val + + orderby = property(_GetOrderBy, _SetOrderBy, + doc="""The orderby query parameter""") + + def _GetSortOrder(self): + if 'sortorder' in self.keys(): + return self['sortorder'] + else: + return None + + def _SetSortOrder(self, val): + if (val is not 'ascending' and val is not 'descending' + and val is not 'a' and val is not 'd' and val is not 'ascend' + and val is not 'descend'): + raise Error, "Sort order must be either ascending, ascend, " + ( + "a or descending, descend, or d") + self['sortorder'] = val + + sortorder = property(_GetSortOrder, _SetSortOrder, + doc="""The sortorder query parameter""") + + def _GetSingleEvents(self): + if 'singleevents' in self.keys(): + return self['singleevents'] + else: + return None + + def _SetSingleEvents(self, val): + self['singleevents'] = val + + singleevents = property(_GetSingleEvents, _SetSingleEvents, + doc="""The singleevents query parameter""") + + def _GetFutureEvents(self): + if 'futureevents' in self.keys(): + return self['futureevents'] + else: + return None + + def _SetFutureEvents(self, val): + self['futureevents'] = val + + futureevents = property(_GetFutureEvents, _SetFutureEvents, + doc="""The futureevents query parameter""") + + def _GetRecurrenceExpansionStart(self): + if 'recurrence-expansion-start' in self.keys(): + return self['recurrence-expansion-start'] + else: + return None + + def _SetRecurrenceExpansionStart(self, val): + self['recurrence-expansion-start'] = val + + recurrence_expansion_start = property(_GetRecurrenceExpansionStart, + _SetRecurrenceExpansionStart, + doc="""The recurrence-expansion-start query parameter""") + + def _GetRecurrenceExpansionEnd(self): + if 'recurrence-expansion-end' in self.keys(): + return self['recurrence-expansion-end'] + else: + return None + + def _SetRecurrenceExpansionEnd(self, val): + self['recurrence-expansion-end'] = val + + recurrence_expansion_end = property(_GetRecurrenceExpansionEnd, + _SetRecurrenceExpansionEnd, + doc="""The recurrence-expansion-end query parameter""") + + def _SetTimezone(self, val): + self['ctz'] = val + + def _GetTimezone(self): + if 'ctz' in self.keys(): + return self['ctz'] + else: + return None + + ctz = property(_GetTimezone, _SetTimezone, + doc="""The ctz query parameter 
which sets report time on the server.""") + + +class CalendarListQuery(gdata.service.Query): + """Queries the Google Calendar meta feed""" + + def __init__(self, userId=None, text_query=None, + params=None, categories=None): + if userId is None: + userId = 'default' + + gdata.service.Query.__init__(self, feed='http://www.google.com/calendar/feeds/' + +userId, + text_query=text_query, params=params, + categories=categories) + +class CalendarEventCommentQuery(gdata.service.Query): + """Queries the Google Calendar event comments feed""" + + def __init__(self, feed=None): + gdata.service.Query.__init__(self, feed=feed) diff --git a/gam/gdata/analytics/calendar_resource/__init__.py b/gam/gdata/analytics/calendar_resource/__init__.py new file mode 100755 index 00000000000..8b137891791 --- /dev/null +++ b/gam/gdata/analytics/calendar_resource/__init__.py @@ -0,0 +1 @@ + diff --git a/gam/gdata/analytics/calendar_resource/client.py b/gam/gdata/analytics/calendar_resource/client.py new file mode 100755 index 00000000000..73ddff805f6 --- /dev/null +++ b/gam/gdata/analytics/calendar_resource/client.py @@ -0,0 +1,199 @@ +#!/usr/bin/python +# +# Copyright 2009 Google Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""CalendarResourceClient simplifies Calendar Resources API calls. + +CalendarResourceClient extends gdata.client.GDClient to ease interaction with +the Google Apps Calendar Resources API. These interactions include the ability +to create, retrieve, update, and delete calendar resources in a Google Apps +domain. +""" + + +__author__ = 'Vic Fryzel ' + + +import gdata.calendar_resource.data +import gdata.client +import urllib + + +# Feed URI template. This must end with a / +# The strings in this template are eventually replaced with the API version +# and Google Apps domain name, respectively. +RESOURCE_FEED_TEMPLATE = '/a/feeds/calendar/resource/%s/%s/' + + +class CalendarResourceClient(gdata.client.GDClient): + """Client extension for the Google Calendar Resource API service. + + Attributes: + host: string The hostname for the Calendar Resouce API service. + api_version: string The version of the Calendar Resource API. + """ + + host = 'apps-apis.google.com' + api_version = '2.0' + auth_service = 'apps' + auth_scopes = gdata.gauth.AUTH_SCOPES['apps'] + ssl = True + + def __init__(self, domain, auth_token=None, **kwargs): + """Constructs a new client for the Calendar Resource API. + + Args: + domain: string The Google Apps domain with Calendar Resources. + auth_token: (optional) gdata.gauth.ClientLoginToken, AuthSubToken, or + OAuthToken which authorizes this client to edit the calendar resource + data. + kwargs: The other parameters to pass to the gdata.client.GDClient + constructor. + """ + gdata.client.GDClient.__init__(self, auth_token=auth_token, **kwargs) + self.domain = domain + + def make_resource_feed_uri(self, resource_id=None, params=None): + """Creates a resource feed URI for the Calendar Resource API. 
+ + Using this client's Google Apps domain, create a feed URI for calendar + resources in that domain. If a resource_id is provided, return a URI + for that specific resource. If params are provided, append them as GET + params. + + Args: + resource_id: string (optional) The ID of the calendar resource for which + to make a feed URI. + params: dict (optional) key -> value params to append as GET vars to the + URI. Example: params={'start': 'my-resource-id'} + Returns: + A string giving the URI for calendar resources for this client's Google + Apps domain. + """ + uri = RESOURCE_FEED_TEMPLATE % (self.api_version, self.domain) + if resource_id: + uri += resource_id + if params: + uri += '?' + urllib.urlencode(params) + return uri + + MakeResourceFeedUri = make_resource_feed_uri + + def get_resource_feed(self, uri=None, **kwargs): + """Fetches a ResourceFeed of calendar resources at the given URI. + + Args: + uri: string The URI of the feed to pull. + kwargs: The other parameters to pass to gdata.client.GDClient.get_feed(). + + Returns: + A ResourceFeed object representing the feed at the given URI. + """ + + if uri is None: + uri = self.MakeResourceFeedUri() + return self.get_feed( + uri, + desired_class=gdata.calendar_resource.data.CalendarResourceFeed, + **kwargs) + + GetResourceFeed = get_resource_feed + + def get_resource(self, uri=None, resource_id=None, **kwargs): + """Fetches a single calendar resource by resource ID. + + Args: + uri: string The base URI of the feed from which to fetch the resource. + resource_id: string The string ID of the Resource to fetch. + kwargs: The other parameters to pass to gdata.client.GDClient.get_entry(). + + Returns: + A Resource object representing the calendar resource with the given + base URI and resource ID. + """ + + if uri is None: + uri = self.MakeResourceFeedUri(resource_id) + return self.get_entry( + uri, + desired_class=gdata.calendar_resource.data.CalendarResourceEntry, + **kwargs) + + GetResource = get_resource + + def create_resource(self, resource_id, resource_common_name=None, + resource_description=None, resource_type=None, **kwargs): + """Creates a calendar resource with the given properties. + + Args: + resource_id: string The resource ID of the calendar resource. + resource_common_name: string (optional) The common name of the resource. + resource_description: string (optional) The description of the resource. + resource_type: string (optional) The type of the resource. + kwargs: The other parameters to pass to gdata.client.GDClient.post(). + + Returns: + gdata.calendar_resource.data.CalendarResourceEntry of the new resource. + """ + new_resource = gdata.calendar_resource.data.CalendarResourceEntry( + resource_id=resource_id, + resource_common_name=resource_common_name, + resource_description=resource_description, + resource_type=resource_type) + return self.post(new_resource, self.MakeResourceFeedUri(), **kwargs) + + CreateResource = create_resource + + def update_resource(self, resource_id, resource_common_name=None, + resource_description=None, resource_type=None, **kwargs): + """Updates the calendar resource with the given resource ID. + + Args: + resource_id: string The resource ID of the calendar resource to update. + resource_common_name: string (optional) The common name to give the + resource. + resource_description: string (optional) The description to give the + resource. + resource_type: string (optional) The type to give the resource. + kwargs: The other parameters to pass to gdata.client.GDClient.update(). 
+ + Returns: + gdata.calendar_resource.data.CalendarResourceEntry of the updated + resource. + """ + new_resource = gdata.calendar_resource.data.CalendarResourceEntry( + resource_id=resource_id, + resource_common_name=resource_common_name, + resource_description=resource_description, + resource_type=resource_type) + return self.update(new_resource, uri=self.MakeResourceFeedUri(resource_id), + **kwargs) + + UpdateResource = update_resource + + def delete_resource(self, resource_id, **kwargs): + """Deletes the calendar resource with the given resource ID. + + Args: + resource_id: string The resource ID of the calendar resource to delete. + kwargs: The other parameters to pass to gdata.client.GDClient.delete() + + Returns: + An HTTP response object. See gdata.client.request(). + """ + + return self.delete(self.MakeResourceFeedUri(resource_id), **kwargs) + + DeleteResource = delete_resource diff --git a/gam/gdata/analytics/calendar_resource/data.py b/gam/gdata/analytics/calendar_resource/data.py new file mode 100755 index 00000000000..82c152a6c6f --- /dev/null +++ b/gam/gdata/analytics/calendar_resource/data.py @@ -0,0 +1,206 @@ +#!/usr/bin/python +# +# Copyright 2009 Google Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Data model for parsing and generating XML for the Calendar Resource API.""" + + +__author__ = 'Vic Fryzel ' + + +import atom.core +import atom.data +import gdata.apps +import gdata.apps_property +import gdata.data + + +# This is required to work around a naming conflict between the Google +# Spreadsheets API and Python's built-in property function +pyproperty = property + + +# The apps:property name of the resourceId property +RESOURCE_ID_NAME = 'resourceId' +# The apps:property name of the resourceCommonName property +RESOURCE_COMMON_NAME_NAME = 'resourceCommonName' +# The apps:property name of the resourceDescription property +RESOURCE_DESCRIPTION_NAME = 'resourceDescription' +# The apps:property name of the resourceType property +RESOURCE_TYPE_NAME = 'resourceType' +# The apps:property name of the resourceEmail property +RESOURCE_EMAIL_NAME = 'resourceEmail' + + +class CalendarResourceEntry(gdata.data.GDEntry): + """Represents a Calendar Resource entry in object form.""" + + property = [gdata.apps_property.AppsProperty] + + def _GetProperty(self, name): + """Get the apps:property value with the given name. + + Args: + name: string Name of the apps:property value to get. + + Returns: + The apps:property value with the given name, or None if the name was + invalid. + """ + + for p in self.property: + if p.name == name: + return p.value + return None + + def _SetProperty(self, name, value): + """Set the apps:property value with the given name to the given value. + + Args: + name: string Name of the apps:property value to set. + value: string Value to give the apps:property value with the given name. 
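+
+    If a property with the given name already exists, its value is replaced
+    in place; otherwise a new apps:property element is appended to this
+    entry's property list.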
+ """ + + for i in range(len(self.property)): + if self.property[i].name == name: + self.property[i].value = value + return + self.property.append(gdata.apps_property.AppsProperty(name=name, value=value)) + + def GetResourceId(self): + """Get the resource ID of this Calendar Resource object. + + Returns: + The resource ID of this Calendar Resource object as a string or None. + """ + + return self._GetProperty(RESOURCE_ID_NAME) + + def SetResourceId(self, value): + """Set the resource ID of this Calendar Resource object. + + Args: + value: string The new resource ID value to give this object. + """ + + self._SetProperty(RESOURCE_ID_NAME, value) + + resource_id = pyproperty(GetResourceId, SetResourceId) + + def GetResourceCommonName(self): + """Get the common name of this Calendar Resource object. + + Returns: + The common name of this Calendar Resource object as a string or None. + """ + + return self._GetProperty(RESOURCE_COMMON_NAME_NAME) + + def SetResourceCommonName(self, value): + """Set the common name of this Calendar Resource object. + + Args: + value: string The new common name value to give this object. + """ + + self._SetProperty(RESOURCE_COMMON_NAME_NAME, value) + + resource_common_name = pyproperty( + GetResourceCommonName, + SetResourceCommonName) + + def GetResourceDescription(self): + """Get the description of this Calendar Resource object. + + Returns: + The description of this Calendar Resource object as a string or None. + """ + + return self._GetProperty(RESOURCE_DESCRIPTION_NAME) + + def SetResourceDescription(self, value): + """Set the description of this Calendar Resource object. + + Args: + value: string The new description value to give this object. + """ + + self._SetProperty(RESOURCE_DESCRIPTION_NAME, value) + + resource_description = pyproperty( + GetResourceDescription, + SetResourceDescription) + + def GetResourceType(self): + """Get the type of this Calendar Resource object. + + Returns: + The type of this Calendar Resource object as a string or None. + """ + + return self._GetProperty(RESOURCE_TYPE_NAME) + + def SetResourceType(self, value): + """Set the type value of this Calendar Resource object. + + Args: + value: string The new type value to give this object. + """ + + self._SetProperty(RESOURCE_TYPE_NAME, value) + + resource_type = pyproperty(GetResourceType, SetResourceType) + + def GetResourceEmail(self): + """Get the email of this Calendar Resource object. + + Returns: + The email of this Calendar Resource object as a string or None. + """ + + return self._GetProperty(RESOURCE_EMAIL_NAME) + + resource_email = pyproperty(GetResourceEmail) + + def __init__(self, resource_id=None, resource_common_name=None, + resource_description=None, resource_type=None, *args, **kwargs): + """Constructs a new CalendarResourceEntry object with the given arguments. + + Args: + resource_id: string (optional) The resource ID to give this new object. + resource_common_name: string (optional) The common name to give this new + object. + resource_description: string (optional) The description to give this new + object. + resource_type: string (optional) The type to give this new object. + args: The other parameters to pass to gdata.entry.GDEntry constructor. + kwargs: The other parameters to pass to gdata.entry.GDEntry constructor. 
+ """ + super(CalendarResourceEntry, self).__init__(*args, **kwargs) + if resource_id: + self.resource_id = resource_id + if resource_common_name: + self.resource_common_name = resource_common_name + if resource_description: + self.resource_description = resource_description + if resource_type: + self.resource_type = resource_type + + +class CalendarResourceFeed(gdata.data.GDFeed): + """Represents a feed of CalendarResourceEntry objects.""" + + # Override entry so that this feed knows how to type its list of entries. + entry = [CalendarResourceEntry] diff --git a/gam/gdata/analytics/client.py b/gam/gdata/analytics/client.py new file mode 100755 index 00000000000..17ec850536a --- /dev/null +++ b/gam/gdata/analytics/client.py @@ -0,0 +1,1163 @@ +#!/usr/bin/env python +# +# Copyright (C) 2008, 2009 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +# This module is used for version 2 of the Google Data APIs. + + +"""Provides a client to interact with Google Data API servers. + +This module is used for version 2 of the Google Data APIs. The primary class +in this module is GDClient. + + GDClient: handles auth and CRUD operations when communicating with servers. + GDataClient: deprecated client for version one services. Will be removed. +""" + + +__author__ = 'j.s@google.com (Jeff Scudder)' + + +import re +import atom.client +import atom.core +import atom.http_core +import gdata.gauth +import gdata.data + + +class Error(Exception): + pass + + +class RequestError(Error): + status = None + reason = None + body = None + headers = None + + +class RedirectError(RequestError): + pass + + +class CaptchaChallenge(RequestError): + captcha_url = None + captcha_token = None + + +class ClientLoginTokenMissing(Error): + pass + + +class MissingOAuthParameters(Error): + pass + + +class ClientLoginFailed(RequestError): + pass + + +class UnableToUpgradeToken(RequestError): + pass + + +class Unauthorized(Error): + pass + + +class BadAuthenticationServiceURL(RedirectError): + pass + + +class BadAuthentication(RequestError): + pass + + +class NotModified(RequestError): + pass + +class NotImplemented(RequestError): + pass + + +def error_from_response(message, http_response, error_class, + response_body=None): + + """Creates a new exception and sets the HTTP information in the error. + + Args: + message: str human readable message to be displayed if the exception is + not caught. + http_response: The response from the server, contains error information. + error_class: The exception to be instantiated and populated with + information from the http_response + response_body: str (optional) specify if the response has already been read + from the http_response object. 
+ """ + if response_body is None: + body = http_response.read() + else: + body = response_body + error = error_class('%s: %i, %s' % (message, http_response.status, body)) + error.status = http_response.status + error.reason = http_response.reason + error.body = body + error.headers = atom.http_core.get_headers(http_response) + return error + + +def get_xml_version(version): + """Determines which XML schema to use based on the client API version. + + Args: + version: string which is converted to an int. The version string is in + the form 'Major.Minor.x.y.z' and only the major version number + is considered. If None is provided assume version 1. + """ + if version is None: + return 1 + return int(version.split('.')[0]) + + +class GDClient(atom.client.AtomPubClient): + """Communicates with Google Data servers to perform CRUD operations. + + This class is currently experimental and may change in backwards + incompatible ways. + + This class exists to simplify the following three areas involved in using + the Google Data APIs. + + CRUD Operations: + + The client provides a generic 'request' method for making HTTP requests. + There are a number of convenience methods which are built on top of + request, which include get_feed, get_entry, get_next, post, update, and + delete. These methods contact the Google Data servers. + + Auth: + + Reading user-specific private data requires authorization from the user as + do any changes to user data. An auth_token object can be passed into any + of the HTTP requests to set the Authorization header in the request. + + You may also want to set the auth_token member to a an object which can + use modify_request to set the Authorization header in the HTTP request. + + If you are authenticating using the email address and password, you can + use the client_login method to obtain an auth token and set the + auth_token member. + + If you are using browser redirects, specifically AuthSub, you will want + to use gdata.gauth.AuthSubToken.from_url to obtain the token after the + redirect, and you will probably want to updgrade this since use token + to a multiple use (session) token using the upgrade_token method. + + API Versions: + + This client is multi-version capable and can be used with Google Data API + version 1 and version 2. The version should be specified by setting the + api_version member to a string, either '1' or '2'. + """ + + # The gsessionid is used by Google Calendar to prevent redirects. + __gsessionid = None + api_version = None + # Name of the Google Data service when making a ClientLogin request. + auth_service = None + # URL prefixes which should be requested for AuthSub and OAuth. + auth_scopes = None + # Name of alternate auth service to use in certain cases + alt_auth_service = None + + def request(self, method=None, uri=None, auth_token=None, + http_request=None, converter=None, desired_class=None, + redirects_remaining=4, **kwargs): + """Make an HTTP request to the server. + + See also documentation for atom.client.AtomPubClient.request. + + If a 302 redirect is sent from the server to the client, this client + assumes that the redirect is in the form used by the Google Calendar API. + The same request URI and method will be used as in the original request, + but a gsessionid URL parameter will be added to the request URI with + the value provided in the server's 302 redirect response. 
If the 302 + redirect is not in the format specified by the Google Calendar API, a + RedirectError will be raised containing the body of the server's + response. + + The method calls the client's modify_request method to make any changes + required by the client before the request is made. For example, a + version 2 client could add a GData-Version: 2 header to the request in + its modify_request method. + + Args: + method: str The HTTP verb for this request, usually 'GET', 'POST', + 'PUT', or 'DELETE' + uri: atom.http_core.Uri, str, or unicode The URL being requested. + auth_token: An object which sets the Authorization HTTP header in its + modify_request method. Recommended classes include + gdata.gauth.ClientLoginToken and gdata.gauth.AuthSubToken + among others. + http_request: (optional) atom.http_core.HttpRequest + converter: function which takes the body of the response as it's only + argument and returns the desired object. + desired_class: class descended from atom.core.XmlElement to which a + successful response should be converted. If there is no + converter function specified (converter=None) then the + desired_class will be used in calling the + atom.core.parse function. If neither + the desired_class nor the converter is specified, an + HTTP reponse object will be returned. + redirects_remaining: (optional) int, if this number is 0 and the + server sends a 302 redirect, the request method + will raise an exception. This parameter is used in + recursive request calls to avoid an infinite loop. + + Any additional arguments are passed through to + atom.client.AtomPubClient.request. + + Returns: + An HTTP response object (see atom.http_core.HttpResponse for a + description of the object's interface) if no converter was + specified and no desired_class was specified. If a converter function + was provided, the results of calling the converter are returned. If no + converter was specified but a desired_class was provided, the response + body will be converted to the class using + atom.core.parse. + """ + if isinstance(uri, (str, unicode)): + uri = atom.http_core.Uri.parse_uri(uri) + + # Add the gsession ID to the URL to prevent further redirects. + # TODO: If different sessions are using the same client, there will be a + # multitude of redirects and session ID shuffling. + # If the gsession ID is in the URL, adopt it as the standard location. + if uri is not None and uri.query is not None and 'gsessionid' in uri.query: + self.__gsessionid = uri.query['gsessionid'] + # The gsession ID could also be in the HTTP request. + elif (http_request is not None and http_request.uri is not None + and http_request.uri.query is not None + and 'gsessionid' in http_request.uri.query): + self.__gsessionid = http_request.uri.query['gsessionid'] + # If the gsession ID is stored in the client, and was not present in the + # URI then add it to the URI. + elif self.__gsessionid is not None: + uri.query['gsessionid'] = self.__gsessionid + + # The AtomPubClient should call this class' modify_request before + # performing the HTTP request. + #http_request = self.modify_request(http_request) + + response = atom.client.AtomPubClient.request(self, method=method, + uri=uri, auth_token=auth_token, http_request=http_request, **kwargs) + # On success, convert the response body using the desired converter + # function if present. 
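+    # The status handling below treats 200 and 201 as success, follows
+    # Calendar-style 302 redirects (decrementing redirects_remaining on each
+    # recursive call), maps 401, 304, and 501 to Unauthorized, NotModified,
+    # and NotImplemented respectively, and raises a generic RequestError for
+    # any other status.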
+ if response is None: + return None + if response.status == 200 or response.status == 201: + if converter is not None: + return converter(response) + elif desired_class is not None: + if self.api_version is not None: + return atom.core.parse(response.read(), desired_class, + version=get_xml_version(self.api_version)) + else: + # No API version was specified, so allow parse to + # use the default version. + return atom.core.parse(response.read(), desired_class) + else: + return response + # TODO: move the redirect logic into the Google Calendar client once it + # exists since the redirects are only used in the calendar API. + elif response.status == 302: + if redirects_remaining > 0: + location = (response.getheader('Location') + or response.getheader('location')) + if location is not None: + # Make a recursive call with the gsession ID in the URI to follow + # the redirect. + return self.request(method=method, uri=location, + auth_token=auth_token, http_request=http_request, + converter=converter, desired_class=desired_class, + redirects_remaining=redirects_remaining-1, + **kwargs) + else: + raise error_from_response('302 received without Location header', + response, RedirectError) + else: + raise error_from_response('Too many redirects from server', + response, RedirectError) + elif response.status == 401: + raise error_from_response('Unauthorized - Server responded with', + response, Unauthorized) + elif response.status == 304: + raise error_from_response('Entry Not Modified - Server responded with', + response, NotModified) + elif response.status == 501: + raise error_from_response( + 'This API operation is not implemented. - Server responded with', + response, NotImplemented) + # If the server's response was not a 200, 201, 302, 304, 401, or 501, raise + # an exception. + else: + raise error_from_response('Server responded with', response, + RequestError) + + Request = request + + def request_client_login_token( + self, email, password, source, service=None, + account_type='HOSTED_OR_GOOGLE', + auth_url=atom.http_core.Uri.parse_uri( + 'https://www.google.com/accounts/ClientLogin'), + captcha_token=None, captcha_response=None): + service = service or self.auth_service + # Set the target URL. + http_request = atom.http_core.HttpRequest(uri=auth_url, method='POST') + http_request.add_body_part( + gdata.gauth.generate_client_login_request_body(email=email, + password=password, service=service, source=source, + account_type=account_type, captcha_token=captcha_token, + captcha_response=captcha_response), + 'application/x-www-form-urlencoded') + + # Use the underlying http_client to make the request. + response = self.http_client.request(http_request) + + response_body = response.read() + if response.status == 200: + token_string = gdata.gauth.get_client_login_token_string(response_body) + if token_string is not None: + return gdata.gauth.ClientLoginToken(token_string) + else: + raise ClientLoginTokenMissing( + 'Recieved a 200 response to client login request,' + ' but no token was present. 
%s' % (response_body,)) + elif response.status == 403: + captcha_challenge = gdata.gauth.get_captcha_challenge(response_body) + if captcha_challenge: + challenge = CaptchaChallenge('CAPTCHA required') + challenge.captcha_url = captcha_challenge['url'] + challenge.captcha_token = captcha_challenge['token'] + raise challenge + elif response_body.splitlines()[0] == 'Error=BadAuthentication': + raise BadAuthentication('Incorrect username or password') + else: + raise error_from_response('Server responded with a 403 code', + response, RequestError, response_body) + elif response.status == 302: + # Google tries to redirect all bad URLs back to + # http://www.google.. If a redirect + # attempt is made, assume the user has supplied an incorrect + # authentication URL + raise error_from_response('Server responded with a redirect', + response, BadAuthenticationServiceURL, + response_body) + else: + raise error_from_response('Server responded to ClientLogin request', + response, ClientLoginFailed, response_body) + + RequestClientLoginToken = request_client_login_token + + def client_login(self, email, password, source, service=None, + account_type='HOSTED_OR_GOOGLE', + auth_url=atom.http_core.Uri.parse_uri( + 'https://www.google.com/accounts/ClientLogin'), + captcha_token=None, captcha_response=None): + """Performs an auth request using the user's email address and password. + + In order to modify user specific data and read user private data, your + application must be authorized by the user. One way to demonstrage + authorization is by including a Client Login token in the Authorization + HTTP header of all requests. This method requests the Client Login token + by sending the user's email address, password, the name of the + application, and the service code for the service which will be accessed + by the application. If the username and password are correct, the server + will respond with the client login code and a new ClientLoginToken + object will be set in the client's auth_token member. With the auth_token + set, future requests from this client will include the Client Login + token. + + For a list of service names, see + http://code.google.com/apis/gdata/faq.html#clientlogin + For more information on Client Login, see: + http://code.google.com/apis/accounts/docs/AuthForInstalledApps.html + + Args: + email: str The user's email address or username. + password: str The password for the user's account. + source: str The name of your application. This can be anything you + like but should should give some indication of which app is + making the request. + service: str The service code for the service you would like to access. + For example, 'cp' for contacts, 'cl' for calendar. For a full + list see + http://code.google.com/apis/gdata/faq.html#clientlogin + If you are using a subclass of the gdata.client.GDClient, the + service will usually be filled in for you so you do not need + to specify it. For example see BloggerClient, + SpreadsheetsClient, etc. + account_type: str (optional) The type of account which is being + authenticated. This can be either 'GOOGLE' for a Google + Account, 'HOSTED' for a Google Apps Account, or the + default 'HOSTED_OR_GOOGLE' which will select the Google + Apps Account if the same email address is used for both + a Google Account and a Google Apps Account. + auth_url: str (optional) The URL to which the login request should be + sent. 
+ captcha_token: str (optional) If a previous login attempt was reponded + to with a CAPTCHA challenge, this is the token which + identifies the challenge (from the CAPTCHA's URL). + captcha_response: str (optional) If a previous login attempt was + reponded to with a CAPTCHA challenge, this is the + response text which was contained in the challenge. + + Returns: + Generated token, which is also stored in this object. + + Raises: + A RequestError or one of its suclasses: BadAuthentication, + BadAuthenticationServiceURL, ClientLoginFailed, + ClientLoginTokenMissing, or CaptchaChallenge + """ + service = service or self.auth_service + self.auth_token = self.request_client_login_token(email, password, + source, service=service, account_type=account_type, auth_url=auth_url, + captcha_token=captcha_token, captcha_response=captcha_response) + if self.alt_auth_service is not None: + self.alt_auth_token = self.request_client_login_token( + email, password, source, service=self.alt_auth_service, + account_type=account_type, auth_url=auth_url, + captcha_token=captcha_token, captcha_response=captcha_response) + return self.auth_token + + ClientLogin = client_login + + def upgrade_token(self, token=None, url=atom.http_core.Uri.parse_uri( + 'https://www.google.com/accounts/AuthSubSessionToken')): + """Asks the Google auth server for a multi-use AuthSub token. + + For details on AuthSub, see: + http://code.google.com/apis/accounts/docs/AuthSub.html + + Args: + token: gdata.gauth.AuthSubToken or gdata.gauth.SecureAuthSubToken + (optional) If no token is passed in, the client's auth_token member + is used to request the new token. The token object will be modified + to contain the new session token string. + url: str or atom.http_core.Uri (optional) The URL to which the token + upgrade request should be sent. Defaults to: + https://www.google.com/accounts/AuthSubSessionToken + + Returns: + The upgraded gdata.gauth.AuthSubToken object. + """ + # Default to using the auth_token member if no token is provided. + if token is None: + token = self.auth_token + # We cannot upgrade a None token. + if token is None: + raise UnableToUpgradeToken('No token was provided.') + if not isinstance(token, gdata.gauth.AuthSubToken): + raise UnableToUpgradeToken( + 'Cannot upgrade the token because it is not an AuthSubToken object.') + http_request = atom.http_core.HttpRequest(uri=url, method='GET') + token.modify_request(http_request) + # Use the lower level HttpClient to make the request. + response = self.http_client.request(http_request) + if response.status == 200: + token._upgrade_token(response.read()) + return token + else: + raise UnableToUpgradeToken( + 'Server responded to token upgrade request with %s: %s' % ( + response.status, response.read())) + + UpgradeToken = upgrade_token + + def revoke_token(self, token=None, url=atom.http_core.Uri.parse_uri( + 'https://www.google.com/accounts/AuthSubRevokeToken')): + """Requests that the token be invalidated. + + This method can be used for both AuthSub and OAuth tokens (to invalidate + a ClientLogin token, the user must change their password). + + Returns: + True if the server responded with a 200. + + Raises: + A RequestError if the server responds with a non-200 status. + """ + # Default to using the auth_token member if no token is provided. 
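+    # The revoke request itself is an authorized GET against the revoke URL;
+    # the token's modify_request sets the Authorization header, and any
+    # non-200 response is surfaced below as a RequestError.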
+ if token is None: + token = self.auth_token + + http_request = atom.http_core.HttpRequest(uri=url, method='GET') + token.modify_request(http_request) + response = self.http_client.request(http_request) + if response.status != 200: + raise error_from_response('Server sent non-200 to revoke token', + response, RequestError, response.read()) + + return True + + RevokeToken = revoke_token + + def get_oauth_token(self, scopes, next, consumer_key, consumer_secret=None, + rsa_private_key=None, + url=gdata.gauth.REQUEST_TOKEN_URL): + """Obtains an OAuth request token to allow the user to authorize this app. + + Once this client has a request token, the user can authorize the request + token by visiting the authorization URL in their browser. After being + redirected back to this app at the 'next' URL, this app can then exchange + the authorized request token for an access token. + + For more information see the documentation on Google Accounts with OAuth: + http://code.google.com/apis/accounts/docs/OAuth.html#AuthProcess + + Args: + scopes: list of strings or atom.http_core.Uri objects which specify the + URL prefixes which this app will be accessing. For example, to access + the Google Calendar API, you would want to use scopes: + ['https://www.google.com/calendar/feeds/', + 'http://www.google.com/calendar/feeds/'] + next: str or atom.http_core.Uri object, The URL which the user's browser + should be sent to after they authorize access to their data. This + should be a URL in your application which will read the token + information from the URL and upgrade the request token to an access + token. + consumer_key: str This is the identifier for this application which you + should have received when you registered your application with Google + to use OAuth. + consumer_secret: str (optional) The shared secret between your app and + Google which provides evidence that this request is coming from you + application and not another app. If present, this libraries assumes + you want to use an HMAC signature to verify requests. Keep this data + a secret. + rsa_private_key: str (optional) The RSA private key which is used to + generate a digital signature which is checked by Google's server. If + present, this library assumes that you want to use an RSA signature + to verify requests. Keep this data a secret. + url: The URL to which a request for a token should be made. The default + is Google's OAuth request token provider. 
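+
+    Example (a hedged sketch; the scope list, callback URL, consumer key,
+    and secret below are placeholders, not values defined in this module):
+
+      request_token = client.get_oauth_token(
+          ['https://www.google.com/calendar/feeds/'],
+          'http://www.example.com/oauth_callback',
+          'example.com',
+          consumer_secret='my-consumer-secret')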
+ """ + http_request = None + if rsa_private_key is not None: + http_request = gdata.gauth.generate_request_for_request_token( + consumer_key, gdata.gauth.RSA_SHA1, scopes, + rsa_key=rsa_private_key, auth_server_url=url, next=next) + elif consumer_secret is not None: + http_request = gdata.gauth.generate_request_for_request_token( + consumer_key, gdata.gauth.HMAC_SHA1, scopes, + consumer_secret=consumer_secret, auth_server_url=url, next=next) + else: + raise MissingOAuthParameters( + 'To request an OAuth token, you must provide your consumer secret' + ' or your private RSA key.') + + response = self.http_client.request(http_request) + response_body = response.read() + + if response.status != 200: + raise error_from_response('Unable to obtain OAuth request token', + response, RequestError, response_body) + + if rsa_private_key is not None: + return gdata.gauth.rsa_token_from_body(response_body, consumer_key, + rsa_private_key, + gdata.gauth.REQUEST_TOKEN) + elif consumer_secret is not None: + return gdata.gauth.hmac_token_from_body(response_body, consumer_key, + consumer_secret, + gdata.gauth.REQUEST_TOKEN) + + GetOAuthToken = get_oauth_token + + def get_access_token(self, request_token, + url=gdata.gauth.ACCESS_TOKEN_URL): + """Exchanges an authorized OAuth request token for an access token. + + Contacts the Google OAuth server to upgrade a previously authorized + request token. Once the request token is upgraded to an access token, + the access token may be used to access the user's data. + + For more details, see the Google Accounts OAuth documentation: + http://code.google.com/apis/accounts/docs/OAuth.html#AccessToken + + Args: + request_token: An OAuth token which has been authorized by the user. + url: (optional) The URL to which the upgrade request should be sent. + Defaults to: https://www.google.com/accounts/OAuthAuthorizeToken + """ + http_request = gdata.gauth.generate_request_for_access_token( + request_token, auth_server_url=url) + response = self.http_client.request(http_request) + response_body = response.read() + if response.status != 200: + raise error_from_response( + 'Unable to upgrade OAuth request token to access token', + response, RequestError, response_body) + + return gdata.gauth.upgrade_to_access_token(request_token, response_body) + + GetAccessToken = get_access_token + + def modify_request(self, http_request): + """Adds or changes request before making the HTTP request. + + This client will add the API version if it is specified. + Subclasses may override this method to add their own request + modifications before the request is made. 
+ """ + http_request = atom.client.AtomPubClient.modify_request(self, + http_request) + if self.api_version is not None: + http_request.headers['GData-Version'] = self.api_version + return http_request + + ModifyRequest = modify_request + + def get_feed(self, uri, auth_token=None, converter=None, + desired_class=gdata.data.GDFeed, **kwargs): + return self.request(method='GET', uri=uri, auth_token=auth_token, + converter=converter, desired_class=desired_class, + **kwargs) + + GetFeed = get_feed + + def get_entry(self, uri, auth_token=None, converter=None, + desired_class=gdata.data.GDEntry, etag=None, **kwargs): + http_request = atom.http_core.HttpRequest() + # Conditional retrieval + if etag is not None: + http_request.headers['If-None-Match'] = etag + return self.request(method='GET', uri=uri, auth_token=auth_token, + http_request=http_request, converter=converter, + desired_class=desired_class, **kwargs) + + GetEntry = get_entry + + def get_next(self, feed, auth_token=None, converter=None, + desired_class=None, **kwargs): + """Fetches the next set of results from the feed. + + When requesting a feed, the number of entries returned is capped at a + service specific default limit (often 25 entries). You can specify your + own entry-count cap using the max-results URL query parameter. If there + are more results than could fit under max-results, the feed will contain + a next link. This method performs a GET against this next results URL. + + Returns: + A new feed object containing the next set of entries in this feed. + """ + if converter is None and desired_class is None: + desired_class = feed.__class__ + return self.get_feed(feed.find_next_link(), auth_token=auth_token, + converter=converter, desired_class=desired_class, + **kwargs) + + GetNext = get_next + + # TODO: add a refresh method to re-fetch the entry/feed from the server + # if it has been updated. + + def post(self, entry, uri, auth_token=None, converter=None, + desired_class=None, **kwargs): + if converter is None and desired_class is None: + desired_class = entry.__class__ + http_request = atom.http_core.HttpRequest() + http_request.add_body_part( + entry.to_string(get_xml_version(self.api_version)), + 'application/atom+xml') + return self.request(method='POST', uri=uri, auth_token=auth_token, + http_request=http_request, converter=converter, + desired_class=desired_class, **kwargs) + + Post = post + + def update(self, entry, auth_token=None, force=False, uri=None, **kwargs): + """Edits the entry on the server by sending the XML for this entry. + + Performs a PUT and converts the response to a new entry object with a + matching class to the entry passed in. + + Args: + entry: + auth_token: + force: boolean stating whether an update should be forced. Defaults to + False. Normally, if a change has been made since the passed in + entry was obtained, the server will not overwrite the entry since + the changes were based on an obsolete version of the entry. + Setting force to True will cause the update to silently + overwrite whatever version is present. + uri: The uri to put to. If provided, this uri is PUT to rather than the + inferred uri from the entry's edit link. + + Returns: + A new Entry object of a matching type to the entry which was passed in. + """ + http_request = atom.http_core.HttpRequest() + http_request.add_body_part( + entry.to_string(get_xml_version(self.api_version)), + 'application/atom+xml') + # Include the ETag in the request if present. 
+ if force: + http_request.headers['If-Match'] = '*' + elif hasattr(entry, 'etag') and entry.etag: + http_request.headers['If-Match'] = entry.etag + + if uri is None: + uri = entry.find_edit_link() + + return self.request(method='PUT', uri=uri, auth_token=auth_token, + http_request=http_request, + desired_class=entry.__class__, **kwargs) + + Update = update + + def delete(self, entry_or_uri, auth_token=None, force=False, **kwargs): + http_request = atom.http_core.HttpRequest() + + # Include the ETag in the request if present. + if force: + http_request.headers['If-Match'] = '*' + elif hasattr(entry_or_uri, 'etag') and entry_or_uri.etag: + http_request.headers['If-Match'] = entry_or_uri.etag + + # If the user passes in a URL, just delete directly, may not work as + # the service might require an ETag. + if isinstance(entry_or_uri, (str, unicode, atom.http_core.Uri)): + return self.request(method='DELETE', uri=entry_or_uri, + http_request=http_request, auth_token=auth_token, + **kwargs) + + return self.request(method='DELETE', uri=entry_or_uri.find_edit_link(), + http_request=http_request, auth_token=auth_token, + **kwargs) + + Delete = delete + + #TODO: implement batch requests. + #def batch(feed, uri, auth_token=None, converter=None, **kwargs): + # pass + + # TODO: add a refresh method to request a conditional update to an entry + # or feed. + + +def _add_query_param(param_string, value, http_request): + if value: + http_request.uri.query[param_string] = value + + +class Query(object): + + def __init__(self, text_query=None, categories=None, author=None, alt=None, + updated_min=None, updated_max=None, pretty_print=False, + published_min=None, published_max=None, start_index=None, + max_results=None, strict=False, **custom_parameters): + """Constructs a Google Data Query to filter feed contents serverside. + + Args: + text_query: Full text search str (optional) + categories: list of strings (optional). Each string is a required + category. To include an 'or' query, put a | in the string between + terms. For example, to find everything in the Fitz category and + the Laurie or Jane category (Fitz and (Laurie or Jane)) you would + set categories to ['Fitz', 'Laurie|Jane']. + author: str (optional) The service returns entries where the author + name and/or email address match your query string. + alt: str (optional) for the Alternative representation type you'd like + the feed in. If you don't specify an alt parameter, the service + returns an Atom feed. This is equivalent to alt='atom'. + alt='rss' returns an RSS 2.0 result feed. + alt='json' returns a JSON representation of the feed. + alt='json-in-script' Requests a response that wraps JSON in a script + tag. + alt='atom-in-script' Requests an Atom response that wraps an XML + string in a script tag. + alt='rss-in-script' Requests an RSS response that wraps an XML + string in a script tag. + updated_min: str (optional), RFC 3339 timestamp format, lower bounds. + For example: 2005-08-09T10:57:00-08:00 + updated_max: str (optional) updated time must be earlier than timestamp. + pretty_print: boolean (optional) If True the server's XML response will + be indented to make it more human readable. Defaults to False. + published_min: str (optional), Similar to updated_min but for published + time. + published_max: str (optional), Similar to updated_max but for published + time. + start_index: int or str (optional) 1-based index of the first result to + be retrieved. Note that this isn't a general cursoring mechanism. 
+ If you first send a query with ?start-index=1&max-results=10 and + then send another query with ?start-index=11&max-results=10, the + service cannot guarantee that the results are equivalent to + ?start-index=1&max-results=20, because insertions and deletions + could have taken place in between the two queries. + max_results: int or str (optional) Maximum number of results to be + retrieved. Each service has a default max (usually 25) which can + vary from service to service. There is also a service-specific + limit to the max_results you can fetch in a request. + strict: boolean (optional) If True, the server will return an error if + the server does not recognize any of the parameters in the request + URL. Defaults to False. + custom_parameters: other query parameters that are not explicitly defined. + """ + self.text_query = text_query + self.categories = categories or [] + self.author = author + self.alt = alt + self.updated_min = updated_min + self.updated_max = updated_max + self.pretty_print = pretty_print + self.published_min = published_min + self.published_max = published_max + self.start_index = start_index + self.max_results = max_results + self.strict = strict + self.custom_parameters = custom_parameters + + def add_custom_parameter(self, key, value): + self.custom_parameters[key] = value + + AddCustomParameter = add_custom_parameter + + def modify_request(self, http_request): + _add_query_param('q', self.text_query, http_request) + if self.categories: + http_request.uri.query['category'] = ','.join(self.categories) + _add_query_param('author', self.author, http_request) + _add_query_param('alt', self.alt, http_request) + _add_query_param('updated-min', self.updated_min, http_request) + _add_query_param('updated-max', self.updated_max, http_request) + if self.pretty_print: + http_request.uri.query['prettyprint'] = 'true' + _add_query_param('published-min', self.published_min, http_request) + _add_query_param('published-max', self.published_max, http_request) + if self.start_index is not None: + http_request.uri.query['start-index'] = str(self.start_index) + if self.max_results is not None: + http_request.uri.query['max-results'] = str(self.max_results) + if self.strict: + http_request.uri.query['strict'] = 'true' + http_request.uri.query.update(self.custom_parameters) + + ModifyRequest = modify_request + + +class GDQuery(atom.http_core.Uri): + + def _get_text_query(self): + return self.query['q'] + + def _set_text_query(self, value): + self.query['q'] = value + + text_query = property(_get_text_query, _set_text_query, + doc='The q parameter for searching for an exact text match on content') + + +class ResumableUploader(object): + """Resumable upload helper for the Google Data protocol.""" + + DEFAULT_CHUNK_SIZE = 5242880 # 5MB + # Initial chunks which are smaller than 256KB might be dropped. The last + # chunk for a file can be smaller tan this. + MIN_CHUNK_SIZE = 262144 # 256KB + + def __init__(self, client, file_handle, content_type, total_file_size, + chunk_size=None, desired_class=None): + """Starts a resumable upload to a service that supports the protocol. + + Args: + client: gdata.client.GDClient A Google Data API service. + file_handle: object A file-like object containing the file to upload. + content_type: str The mimetype of the file to upload. + total_file_size: int The file's total size in bytes. + chunk_size: int The size of each upload chunk. If None, the + DEFAULT_CHUNK_SIZE will be used. 
+ desired_class: object (optional) The type of gdata.data.GDEntry to parse + the completed entry as. This should be specific to the API. + """ + self.client = client + self.file_handle = file_handle + self.content_type = content_type + self.total_file_size = total_file_size + self.chunk_size = chunk_size or self.DEFAULT_CHUNK_SIZE + if self.chunk_size < self.MIN_CHUNK_SIZE: + self.chunk_size = self.MIN_CHUNK_SIZE + self.desired_class = desired_class or gdata.data.GDEntry + self.upload_uri = None + + # Send the entire file in a single chunk if the total file size is no larger than the chunk size. + if self.total_file_size <= self.chunk_size: + self.chunk_size = total_file_size + + def _init_session(self, resumable_media_link, entry=None, headers=None, + auth_token=None, method='POST'): + """Starts a new resumable upload to a service that supports the protocol. + + The method makes a request to initiate a new upload session. The unique + upload uri returned by the server (and set in this method) should be used + to send upload chunks to the server. + + Args: + resumable_media_link: str The full URL for the #resumable-create-media or + #resumable-edit-media link for starting a resumable upload request or + updating media using a resumable PUT. + entry: An (optional) gdata.data.GDEntry containing metadata to create the + upload from. + headers: dict (optional) Additional headers to send in the initial request + to create the resumable upload request. These headers will override + any default headers sent in the request. For example: + headers={'Slug': 'MyTitle'}. + auth_token: (optional) An object which sets the Authorization HTTP header + in its modify_request method. Recommended classes include + gdata.gauth.ClientLoginToken and gdata.gauth.AuthSubToken + among others. + method: (optional) Type of HTTP request to start the session with. + Defaults to 'POST', but may also be 'PUT'. + + Returns: + Result of the HTTP request to initialize the session. See atom.client.request. + + Raises: + RequestError if the unique upload uri is not set or the + server returns something other than an HTTP 308 when the upload is + incomplete. + """ + http_request = atom.http_core.HttpRequest() + + # Send empty body if Atom XML wasn't specified. + if entry is None: + http_request.add_body_part('', self.content_type, size=0) + else: + http_request.add_body_part(str(entry), 'application/atom+xml', + size=len(str(entry))) + http_request.headers['X-Upload-Content-Type'] = self.content_type + http_request.headers['X-Upload-Content-Length'] = self.total_file_size + + if headers is not None: + http_request.headers.update(headers) + + response = self.client.request(method=method, + uri=resumable_media_link, + auth_token=auth_token, + http_request=http_request) + + self.upload_uri = (response.getheader('location') or + response.getheader('Location')) + + return response + + _InitSession = _init_session + + def upload_chunk(self, start_byte, content_bytes): + """Uploads a byte range (chunk) to the resumable upload server. + + Args: + start_byte: int The byte offset of the total file where the byte range + passed in lives. + content_bytes: str The file contents of this chunk. + + Returns: + The final Atom entry created on the server. The entry object's type will + be the class specified in self.desired_class. Returns None if the server + responds with HTTP 308, meaning the upload is not yet complete. + + Raises: + RequestError if the unique upload uri is not set or the + server returns something other than an HTTP 308 when the upload is + incomplete.
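+
+      Illustrative sketch (not part of the original docstring; the client,
+      file, and resumable-create-media link are hypothetical) of driving the
+      chunks by hand instead of calling upload_file():
+
+        uploader = ResumableUploader(
+            client, open('photo.jpg', 'rb'), 'image/jpeg', file_size)
+        uploader._init_session(create_media_link, entry=new_entry)
+        start = 0
+        result = None
+        while result is None:
+          chunk = uploader.file_handle.read(uploader.chunk_size)
+          result = uploader.upload_chunk(start, chunk)
+          start += uploader.chunk_size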
+ """ + if self.upload_uri is None: + raise RequestError('Resumable upload request not initialized.') + + # Adjustment if last byte range is less than defined chunk size. + chunk_size = self.chunk_size + if len(content_bytes) <= chunk_size: + chunk_size = len(content_bytes) + + http_request = atom.http_core.HttpRequest() + http_request.add_body_part(content_bytes, self.content_type, + size=len(content_bytes)) + http_request.headers['Content-Range'] = ('bytes %s-%s/%s' + % (start_byte, + start_byte + chunk_size - 1, + self.total_file_size)) + + try: + response = self.client.request(method='PUT', uri=self.upload_uri, + http_request=http_request, + desired_class=self.desired_class) + return response + except RequestError, error: + if error.status == 308: + return None + else: + raise error + + UploadChunk = upload_chunk + + def upload_file(self, resumable_media_link, entry=None, headers=None, + auth_token=None, **kwargs): + """Uploads an entire file in chunks using the resumable upload protocol. + + If you are interested in pausing an upload or controlling the chunking + yourself, use the upload_chunk() method instead. + + Args: + resumable_media_link: str The full URL for the #resumable-create-media for + starting a resumable upload request. + entry: A (optional) gdata.data.GDEntry containging metadata to create the + upload from. + headers: dict Additional headers to send in the initial request to create + the resumable upload request. These headers will override any default + headers sent in the request. For example: headers={'Slug': 'MyTitle'}. + auth_token: (optional) An object which sets the Authorization HTTP header + in its modify_request method. Recommended classes include + gdata.gauth.ClientLoginToken and gdata.gauth.AuthSubToken + among others. + kwargs: (optional) Other args to pass to self._init_session. + + Returns: + The final Atom entry created on the server. The entry object's type will + be the class specified in self.desired_class. + + Raises: + RequestError if anything other than a HTTP 308 is returned + when the request raises an exception. + """ + self._init_session(resumable_media_link, headers=headers, + auth_token=auth_token, entry=entry, **kwargs) + + start_byte = 0 + entry = None + + while not entry: + entry = self.upload_chunk( + start_byte, self.file_handle.read(self.chunk_size)) + start_byte += self.chunk_size + + return entry + + UploadFile = upload_file + + def update_file(self, entry_or_resumable_edit_link, headers=None, force=False, + auth_token=None, update_metadata=False, uri_params=None): + """Updates the contents of an existing file using the resumable protocol. + + If you are interested in pausing an upload or controlling the chunking + yourself, use the upload_chunk() method instead. + + Args: + entry_or_resumable_edit_link: object or string A gdata.data.GDEntry for + the entry/file to update or the full uri of the link with rel + #resumable-edit-media. + headers: dict Additional headers to send in the initial request to create + the resumable upload request. These headers will override any default + headers sent in the request. For example: headers={'Slug': 'MyTitle'}. + force boolean (optional) True to force an update and set the If-Match + header to '*'. If False and entry_or_resumable_edit_link is a + gdata.data.GDEntry object, its etag value is used. Otherwise this + parameter should be set to True to force the update. + auth_token: (optional) An object which sets the Authorization HTTP header + in its modify_request method. 
Recommended classes include + gdata.gauth.ClientLoginToken and gdata.gauth.AuthSubToken + among others. + update_metadata: (optional) True to also update the entry's metadata + with that in the given GDEntry object in entry_or_resumable_edit_link. + uri_params: (optional) Dict of additional parameters to attach to the URI. + Some non-dict types are valid here, too, like list of tuple pairs. + + Returns: + The final Atom entry created on the server. The entry object's type will + be the class specified in self.desired_class. + + Raises: + RequestError if anything other than a HTTP 308 is returned when the + request raises an exception. + """ + + custom_headers = {} + if headers is not None: + custom_headers.update(headers) + + uri = None + entry = None + if isinstance(entry_or_resumable_edit_link, gdata.data.GDEntry): + uri = entry_or_resumable_edit_link.find_url( + 'http://schemas.google.com/g/2005#resumable-edit-media') + custom_headers['If-Match'] = entry_or_resumable_edit_link.etag + if update_metadata: + entry = entry_or_resumable_edit_link + else: + uri = entry_or_resumable_edit_link + + uri = atom.http_core.parse_uri(uri) + if uri_params is not None: + uri.query.update(uri_params) + + if force: + custom_headers['If-Match'] = '*' + + return self.upload_file(str(uri), entry=entry, headers=custom_headers, + auth_token=auth_token, method='PUT') + + UpdateFile = update_file + + def query_upload_status(self, uri=None): + """Queries the current status of a resumable upload request. + + Args: + uri: str (optional) A resumable upload uri to query and override the one + that is set in this object. + + Returns: + An integer representing the file position (byte) to resume the upload from + or True if the upload is complete. + + Raises: + RequestError if anything other than a HTTP 308 is returned + when the request raises an exception. + """ + # Override object's unique upload uri. + if uri is None: + uri = self.upload_uri + + http_request = atom.http_core.HttpRequest() + http_request.headers['Content-Length'] = '0' + http_request.headers['Content-Range'] = 'bytes */%s' % self.total_file_size + + try: + response = self.client.request( + method='POST', uri=uri, http_request=http_request) + if response.status == 201: + return True + else: + raise error_from_response( + '%s returned by server' % response.status, response, RequestError) + except RequestError, error: + if error.status == 308: + for pair in error.headers: + if pair[0].capitalize() == 'Range': + return int(pair[1].split('-')[1]) + 1 + else: + raise error + + QueryUploadStatus = query_upload_status diff --git a/gam/gdata/analytics/codesearch/__init__.py b/gam/gdata/analytics/codesearch/__init__.py new file mode 100755 index 00000000000..fa23ef021dc --- /dev/null +++ b/gam/gdata/analytics/codesearch/__init__.py @@ -0,0 +1,136 @@ +# -*- coding: utf-8 -*- +# +# Copyright (c) 2007 Benoit Chesneau +# +# Permission to use, copy, modify, and distribute this software for any +# purpose with or without fee is hereby granted, provided that the above +# copyright notice and this permission notice appear in all copies. +# +# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES +# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +# MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR +# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF +# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + + +"""Contains extensions to Atom objects used by Google Codesearch""" + +__author__ = 'Benoit Chesneau' + + +import atom +import gdata + + +CODESEARCH_NAMESPACE='http://schemas.google.com/codesearch/2006' +CODESEARCH_TEMPLATE='{http://shema.google.com/codesearch/2006}%s' + + +class Match(atom.AtomBase): + """ The Google Codesearch match element """ + _tag = 'match' + _namespace = CODESEARCH_NAMESPACE + _children = atom.AtomBase._children.copy() + _attributes = atom.AtomBase._attributes.copy() + _attributes['lineNumber'] = 'line_number' + _attributes['type'] = 'type' + + def __init__(self, line_number=None, type=None, extension_elements=None, + extension_attributes=None, text=None): + self.text = text + self.type = type + self.line_number = line_number + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + +class File(atom.AtomBase): + """ The Google Codesearch file element""" + _tag = 'file' + _namespace = CODESEARCH_NAMESPACE + _children = atom.AtomBase._children.copy() + _attributes = atom.AtomBase._attributes.copy() + _attributes['name'] = 'name' + + def __init__(self, name=None, extension_elements=None, + extension_attributes=None, text=None): + self.text = text + self.name = name + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + +class Package(atom.AtomBase): + """ The Google Codesearch package element""" + _tag = 'package' + _namespace = CODESEARCH_NAMESPACE + _children = atom.AtomBase._children.copy() + _attributes = atom.AtomBase._attributes.copy() + _attributes['name'] = 'name' + _attributes['uri'] = 'uri' + + def __init__(self, name=None, uri=None, extension_elements=None, + extension_attributes=None, text=None): + self.text = text + self.name = name + self.uri = uri + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + +class CodesearchEntry(gdata.GDataEntry): + """ Google codesearch atom entry""" + _tag = gdata.GDataEntry._tag + _namespace = gdata.GDataEntry._namespace + _children = gdata.GDataEntry._children.copy() + _attributes = gdata.GDataEntry._attributes.copy() + + _children['{%s}file' % CODESEARCH_NAMESPACE] = ('file', File) + _children['{%s}package' % CODESEARCH_NAMESPACE] = ('package', Package) + _children['{%s}match' % CODESEARCH_NAMESPACE] = ('match', [Match]) + + def __init__(self, author=None, category=None, content=None, + atom_id=None, link=None, published=None, + title=None, updated=None, + match=None, + extension_elements=None, extension_attributes=None, text=None): + + gdata.GDataEntry.__init__(self, author=author, category=category, + content=content, atom_id=atom_id, link=link, + published=published, title=title, + updated=updated, text=None) + + self.match = match or [] + + +def CodesearchEntryFromString(xml_string): + """Converts an XML string into a CodesearchEntry object. + + Args: + xml_string: string The XML describing a Codesearch feed entry. + + Returns: + A CodesearchEntry object corresponding to the given XML. 
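+
+  Illustrative sketch (the XML string is hypothetical):
+
+    entry = CodesearchEntryFromString(xml_string)
+    for match in entry.match:
+      print match.line_number, match.text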
+ """ + return atom.CreateClassFromXMLString(CodesearchEntry, xml_string) + + +class CodesearchFeed(gdata.GDataFeed): + """feed containing list of Google codesearch Items""" + _tag = gdata.GDataFeed._tag + _namespace = gdata.GDataFeed._namespace + _children = gdata.GDataFeed._children.copy() + _attributes = gdata.GDataFeed._attributes.copy() + _children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry', [CodesearchEntry]) + + +def CodesearchFeedFromString(xml_string): + """Converts an XML string into a CodesearchFeed object. + Args: + xml_string: string The XML describing a Codesearch feed. + Returns: + A CodeseartchFeed object corresponding to the given XML. + """ + return atom.CreateClassFromXMLString(CodesearchFeed, xml_string) diff --git a/gam/gdata/analytics/codesearch/service.py b/gam/gdata/analytics/codesearch/service.py new file mode 100755 index 00000000000..1243d614ee2 --- /dev/null +++ b/gam/gdata/analytics/codesearch/service.py @@ -0,0 +1,110 @@ +# -*- coding: utf-8 -*- +# +# Copyright (c) 2007 Benoit Chesneau +# +# Permission to use, copy, modify, and distribute this software for any +# purpose with or without fee is hereby granted, provided that the above +# copyright notice and this permission notice appear in all copies. +# +# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES +# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR +# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF +# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + + +"""CodesearchService extends GDataService to streamline Google Codesearch +operations""" + + +__author__ = 'Benoit Chesneau' + + +import atom +import gdata.service +import gdata.codesearch + + +class CodesearchService(gdata.service.GDataService): + """Client extension for Google codesearch service""" + ssl = True + + def __init__(self, email=None, password=None, source=None, + server='www.google.com', additional_headers=None, **kwargs): + """Creates a client for the Google codesearch service. + + Args: + email: string (optional) The user's email address, used for + authentication. + password: string (optional) The user's password. + source: string (optional) The name of the user's application. + server: string (optional) The name of the server to which a connection + will be opened. Default value: 'www.google.com'. + **kwargs: The other parameters to pass to gdata.service.GDataService + constructor. + """ + gdata.service.GDataService.__init__( + self, email=email, password=password, service='codesearch', + source=source, server=server, additional_headers=additional_headers, + **kwargs) + + def Query(self, uri, converter=gdata.codesearch.CodesearchFeedFromString): + """Queries the Codesearch feed and returns the resulting feed of + entries. + + Args: + uri: string The full URI to be queried. This can contain query + parameters, a hostname, or simply the relative path to a Document + List feed. The DocumentQuery object is useful when constructing + query parameters. + converter: func (optional) A function which will be executed on the + retrieved item, generally to render it into a Python object. + By default the CodesearchFeedFromString function is used to + return a CodesearchFeed object. 
This is because most feed + queries will result in a feed and not a single entry. + + Returns: + A CodesearchFeed object representing the feed returned by the server. + """ + return self.Get(uri, converter=converter) + + def GetSnippetsFeed(self, text_query=None): + """Retrieves a Codesearch feed for a keyword. + + Args: + text_query: string (optional) The contents of the q query parameter. This + string is URL escaped upon conversion to a URI. + Returns: + A CodesearchFeed object representing the feed returned by the server. + """ + + query = gdata.codesearch.service.CodesearchQuery(text_query=text_query) + feed = self.Query(query.ToUri()) + return feed + + +class CodesearchQuery(gdata.service.Query): + """Object used to construct a query to the Google Codesearch feed; provided here only as a shortcut.""" + + def __init__(self, feed='/codesearch/feeds/search', text_query=None, + params=None, categories=None): + """Constructor for a Codesearch Query. + + Args: + feed: string (optional) The path for the feed. (e.g. '/codesearch/feeds/search') + text_query: string (optional) The contents of the q query parameter. This + string is URL escaped upon conversion to a URI. + params: dict (optional) Parameter value string pairs which become URL + params when translated to a URI. These parameters are added to + the query's items. + categories: list (optional) List of category strings which should be + included as query categories. See gdata.service.Query for + additional documentation. + + Returns: + A CodesearchQuery object used to construct a URI based on the Codesearch + feed. + """ + + gdata.service.Query.__init__(self, feed, text_query, params, categories) diff --git a/gam/gdata/analytics/contacts/__init__.py b/gam/gdata/analytics/contacts/__init__.py new file mode 100755 index 00000000000..41e7c31e4f8 --- /dev/null +++ b/gam/gdata/analytics/contacts/__init__.py @@ -0,0 +1,740 @@ +#!/usr/bin/env python +# +# Copyright 2009 Google Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License.
+ +"""Contains extensions to ElementWrapper objects used with Google Contacts.""" + +__author__ = 'dbrattli (Dag Brattli)' + + +import atom +import gdata + + +## Constants from http://code.google.com/apis/gdata/elements.html ## +REL_HOME = 'http://schemas.google.com/g/2005#home' +REL_WORK = 'http://schemas.google.com/g/2005#work' +REL_OTHER = 'http://schemas.google.com/g/2005#other' + +# AOL Instant Messenger protocol +IM_AIM = 'http://schemas.google.com/g/2005#AIM' +IM_MSN = 'http://schemas.google.com/g/2005#MSN' # MSN Messenger protocol +IM_YAHOO = 'http://schemas.google.com/g/2005#YAHOO' # Yahoo Messenger protocol +IM_SKYPE = 'http://schemas.google.com/g/2005#SKYPE' # Skype protocol +IM_QQ = 'http://schemas.google.com/g/2005#QQ' # QQ protocol +# Google Talk protocol +IM_GOOGLE_TALK = 'http://schemas.google.com/g/2005#GOOGLE_TALK' +IM_ICQ = 'http://schemas.google.com/g/2005#ICQ' # ICQ protocol +IM_JABBER = 'http://schemas.google.com/g/2005#JABBER' # Jabber protocol +IM_NETMEETING = 'http://schemas.google.com/g/2005#netmeeting' # NetMeeting + +PHOTO_LINK_REL = 'http://schemas.google.com/contacts/2008/rel#photo' +PHOTO_EDIT_LINK_REL = 'http://schemas.google.com/contacts/2008/rel#edit-photo' + +# Different phone types, for more info see: +# http://code.google.com/apis/gdata/docs/2.0/elements.html#gdPhoneNumber +PHONE_CAR = 'http://schemas.google.com/g/2005#car' +PHONE_FAX = 'http://schemas.google.com/g/2005#fax' +PHONE_GENERAL = 'http://schemas.google.com/g/2005#general' +PHONE_HOME = REL_HOME +PHONE_HOME_FAX = 'http://schemas.google.com/g/2005#home_fax' +PHONE_INTERNAL = 'http://schemas.google.com/g/2005#internal-extension' +PHONE_MOBILE = 'http://schemas.google.com/g/2005#mobile' +PHONE_OTHER = REL_OTHER +PHONE_PAGER = 'http://schemas.google.com/g/2005#pager' +PHONE_SATELLITE = 'http://schemas.google.com/g/2005#satellite' +PHONE_VOIP = 'http://schemas.google.com/g/2005#voip' +PHONE_WORK = REL_WORK +PHONE_WORK_FAX = 'http://schemas.google.com/g/2005#work_fax' +PHONE_WORK_MOBILE = 'http://schemas.google.com/g/2005#work_mobile' +PHONE_WORK_PAGER = 'http://schemas.google.com/g/2005#work_pager' +PHONE_MAIN = 'http://schemas.google.com/g/2005#main' +PHONE_ASSISTANT = 'http://schemas.google.com/g/2005#assistant' +PHONE_CALLBACK = 'http://schemas.google.com/g/2005#callback' +PHONE_COMPANY_MAIN = 'http://schemas.google.com/g/2005#company_main' +PHONE_ISDN = 'http://schemas.google.com/g/2005#isdn' +PHONE_OTHER_FAX = 'http://schemas.google.com/g/2005#other_fax' +PHONE_RADIO = 'http://schemas.google.com/g/2005#radio' +PHONE_TELEX = 'http://schemas.google.com/g/2005#telex' +PHONE_TTY_TDD = 'http://schemas.google.com/g/2005#tty_tdd' + +EXTERNAL_ID_ORGANIZATION = 'organization' + +RELATION_MANAGER = 'manager' + +CONTACTS_NAMESPACE = 'http://schemas.google.com/contact/2008' + + +class GDataBase(atom.AtomBase): + """The Google Contacts intermediate class from atom.AtomBase.""" + + _namespace = gdata.GDATA_NAMESPACE + _children = atom.AtomBase._children.copy() + _attributes = atom.AtomBase._attributes.copy() + + def __init__(self, text=None, + extension_elements=None, extension_attributes=None): + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + +class ContactsBase(GDataBase): + """The Google Contacts intermediate class for Contacts namespace.""" + + _namespace = CONTACTS_NAMESPACE + + +class OrgName(GDataBase): + """The Google Contacts OrgName element.""" + + _tag = 'orgName' + + +class OrgTitle(GDataBase): + """The Google 
Contacts OrgTitle element.""" + + _tag = 'orgTitle' + + +class OrgDepartment(GDataBase): + """The Google Contacts OrgDepartment element.""" + + _tag = 'orgDepartment' + + +class OrgJobDescription(GDataBase): + """The Google Contacts OrgJobDescription element.""" + + _tag = 'orgJobDescription' + + +class Where(GDataBase): + """The Google Contacts Where element.""" + + _tag = 'where' + _children = GDataBase._children.copy() + _attributes = GDataBase._attributes.copy() + _attributes['rel'] = 'rel' + _attributes['label'] = 'label' + _attributes['valueString'] = 'value_string' + + def __init__(self, value_string=None, rel=None, label=None, + text=None, extension_elements=None, extension_attributes=None): + GDataBase.__init__(self, text=text, extension_elements=extension_elements, + extension_attributes=extension_attributes) + self.rel = rel + self.label = label + self.value_string = value_string + + +class When(GDataBase): + """The Google Contacts When element.""" + + _tag = 'when' + _children = GDataBase._children.copy() + _attributes = GDataBase._attributes.copy() + _attributes['startTime'] = 'start_time' + _attributes['endTime'] = 'end_time' + _attributes['label'] = 'label' + + def __init__(self, start_time=None, end_time=None, label=None, + text=None, extension_elements=None, extension_attributes=None): + GDataBase.__init__(self, text=text, extension_elements=extension_elements, + extension_attributes=extension_attributes) + self.start_time = start_time + self.end_time = end_time + self.label = label + + +class Organization(GDataBase): + """The Google Contacts Organization element.""" + + _tag = 'organization' + _children = GDataBase._children.copy() + _attributes = GDataBase._attributes.copy() + _attributes['label'] = 'label' + _attributes['rel'] = 'rel' + _attributes['primary'] = 'primary' + _children['{%s}orgName' % GDataBase._namespace] = ( + 'org_name', OrgName) + _children['{%s}orgTitle' % GDataBase._namespace] = ( + 'org_title', OrgTitle) + _children['{%s}orgDepartment' % GDataBase._namespace] = ( + 'org_department', OrgDepartment) + _children['{%s}orgJobDescription' % GDataBase._namespace] = ( + 'org_job_description', OrgJobDescription) + #_children['{%s}where' % GDataBase._namespace] = ('where', Where) + + def __init__(self, label=None, rel=None, primary='false', org_name=None, + org_title=None, org_department=None, org_job_description=None, + where=None, text=None, + extension_elements=None, extension_attributes=None): + GDataBase.__init__(self, text=text, extension_elements=extension_elements, + extension_attributes=extension_attributes) + self.label = label + self.rel = rel or REL_OTHER + self.primary = primary + self.org_name = org_name + self.org_title = org_title + self.org_department = org_department + self.org_job_description = org_job_description + self.where = where + + +class PostalAddress(GDataBase): + """The Google Contacts PostalAddress element.""" + + _tag = 'postalAddress' + _children = GDataBase._children.copy() + _attributes = GDataBase._attributes.copy() + _attributes['rel'] = 'rel' + _attributes['primary'] = 'primary' + + def __init__(self, primary=None, rel=None, text=None, + extension_elements=None, extension_attributes=None): + GDataBase.__init__(self, text=text, extension_elements=extension_elements, + extension_attributes=extension_attributes) + self.rel = rel or REL_OTHER + self.primary = primary + + +class FormattedAddress(GDataBase): + """The Google Contacts FormattedAddress element.""" + + _tag = 'formattedAddress' + + +class 
StructuredPostalAddress(GDataBase): + """The Google Contacts StructuredPostalAddress element.""" + + _tag = 'structuredPostalAddress' + _children = GDataBase._children.copy() + _attributes = GDataBase._attributes.copy() + _attributes['rel'] = 'rel' + _attributes['primary'] = 'primary' + _children['{%s}formattedAddress' % GDataBase._namespace] = ( + 'formatted_address', FormattedAddress) + + def __init__(self, rel=None, primary=None, + formatted_address=None, text=None, + extension_elements=None, extension_attributes=None): + GDataBase.__init__(self, text=text, extension_elements=extension_elements, + extension_attributes=extension_attributes) + self.rel = rel or REL_OTHER + self.primary = primary + self.formatted_address = formatted_address + + +class IM(GDataBase): + """The Google Contacts IM element.""" + + _tag = 'im' + _children = GDataBase._children.copy() + _attributes = GDataBase._attributes.copy() + _attributes['address'] = 'address' + _attributes['primary'] = 'primary' + _attributes['protocol'] = 'protocol' + _attributes['label'] = 'label' + _attributes['rel'] = 'rel' + + def __init__(self, primary='false', rel=None, address=None, protocol=None, + label=None, text=None, + extension_elements=None, extension_attributes=None): + GDataBase.__init__(self, text=text, extension_elements=extension_elements, + extension_attributes=extension_attributes) + self.protocol = protocol + self.address = address + self.primary = primary + self.rel = rel or REL_OTHER + self.label = label + + +class Email(GDataBase): + """The Google Contacts Email element.""" + + _tag = 'email' + _children = GDataBase._children.copy() + _attributes = GDataBase._attributes.copy() + _attributes['address'] = 'address' + _attributes['primary'] = 'primary' + _attributes['rel'] = 'rel' + _attributes['label'] = 'label' + + def __init__(self, label=None, rel=None, address=None, primary='false', + text=None, extension_elements=None, extension_attributes=None): + GDataBase.__init__(self, text=text, extension_elements=extension_elements, + extension_attributes=extension_attributes) + self.label = label + self.rel = rel or REL_OTHER + self.address = address + self.primary = primary + + +class PhoneNumber(GDataBase): + """The Google Contacts PhoneNumber element.""" + + _tag = 'phoneNumber' + _children = GDataBase._children.copy() + _attributes = GDataBase._attributes.copy() + _attributes['label'] = 'label' + _attributes['rel'] = 'rel' + _attributes['uri'] = 'uri' + _attributes['primary'] = 'primary' + + def __init__(self, label=None, rel=None, uri=None, primary='false', + text=None, extension_elements=None, extension_attributes=None): + GDataBase.__init__(self, text=text, extension_elements=extension_elements, + extension_attributes=extension_attributes) + self.label = label + self.rel = rel or REL_OTHER + self.uri = uri + self.primary = primary + + +class Nickname(ContactsBase): + """The Google Contacts Nickname element.""" + + _tag = 'nickname' + + +class Occupation(ContactsBase): + """The Google Contacts Occupation element.""" + + _tag = 'occupation' + + +class Gender(ContactsBase): + """The Google Contacts Gender element.""" + + _tag = 'gender' + _children = ContactsBase._children.copy() + _attributes = ContactsBase._attributes.copy() + _attributes['value'] = 'value' + + def __init__(self, value=None, + text=None, extension_elements=None, extension_attributes=None): + ContactsBase.__init__(self, text=text, + extension_elements=extension_elements, + extension_attributes=extension_attributes) + self.value = value + + +class 
Birthday(ContactsBase): + """The Google Contacts Birthday element.""" + + _tag = 'birthday' + _children = ContactsBase._children.copy() + _attributes = ContactsBase._attributes.copy() + _attributes['when'] = 'when' + + def __init__(self, when=None, + text=None, extension_elements=None, extension_attributes=None): + ContactsBase.__init__(self, text=text, + extension_elements=extension_elements, + extension_attributes=extension_attributes) + self.when = when + + +class Relation(ContactsBase): + """The Google Contacts Relation element.""" + + _tag = 'relation' + _children = ContactsBase._children.copy() + _attributes = ContactsBase._attributes.copy() + _attributes['label'] = 'label' + _attributes['rel'] = 'rel' + + def __init__(self, label=None, rel=None, + text=None, extension_elements=None, extension_attributes=None): + ContactsBase.__init__(self, text=text, + extension_elements=extension_elements, + extension_attributes=extension_attributes) + self.label = label + self.rel = rel + + +def RelationFromString(xml_string): + return atom.CreateClassFromXMLString(Relation, xml_string) + + +class UserDefinedField(ContactsBase): + """The Google Contacts UserDefinedField element.""" + + _tag = 'userDefinedField' + _children = ContactsBase._children.copy() + _attributes = ContactsBase._attributes.copy() + _attributes['key'] = 'key' + _attributes['value'] = 'value' + + def __init__(self, key=None, value=None, + text=None, extension_elements=None, extension_attributes=None): + ContactsBase.__init__(self, text=text, + extension_elements=extension_elements, + extension_attributes=extension_attributes) + self.key = key + self.value = value + + +def UserDefinedFieldFromString(xml_string): + return atom.CreateClassFromXMLString(UserDefinedField, xml_string) + + +class Website(ContactsBase): + """The Google Contacts Website element.""" + + _tag = 'website' + _children = ContactsBase._children.copy() + _attributes = ContactsBase._attributes.copy() + _attributes['href'] = 'href' + _attributes['label'] = 'label' + _attributes['primary'] = 'primary' + _attributes['rel'] = 'rel' + + def __init__(self, href=None, label=None, primary='false', rel=None, + text=None, extension_elements=None, extension_attributes=None): + ContactsBase.__init__(self, text=text, + extension_elements=extension_elements, + extension_attributes=extension_attributes) + self.href = href + self.label = label + self.primary = primary + self.rel = rel + + +def WebsiteFromString(xml_string): + return atom.CreateClassFromXMLString(Website, xml_string) + + +class ExternalId(ContactsBase): + """The Google Contacts ExternalId element.""" + + _tag = 'externalId' + _children = ContactsBase._children.copy() + _attributes = ContactsBase._attributes.copy() + _attributes['label'] = 'label' + _attributes['rel'] = 'rel' + _attributes['value'] = 'value' + + def __init__(self, label=None, rel=None, value=None, + text=None, extension_elements=None, extension_attributes=None): + ContactsBase.__init__(self, text=text, + extension_elements=extension_elements, + extension_attributes=extension_attributes) + self.label = label + self.rel = rel + self.value = value + + +def ExternalIdFromString(xml_string): + return atom.CreateClassFromXMLString(ExternalId, xml_string) + + +class Event(ContactsBase): + """The Google Contacts Event element.""" + + _tag = 'event' + _children = ContactsBase._children.copy() + _attributes = ContactsBase._attributes.copy() + _attributes['label'] = 'label' + _attributes['rel'] = 'rel' + _children['{%s}when' % ContactsBase._namespace] = 
('when', When) + + def __init__(self, label=None, rel=None, when=None, + text=None, extension_elements=None, extension_attributes=None): + ContactsBase.__init__(self, text=text, + extension_elements=extension_elements, + extension_attributes=extension_attributes) + self.label = label + self.rel = rel + self.when = when + + +def EventFromString(xml_string): + return atom.CreateClassFromXMLString(Event, xml_string) + + +class Deleted(GDataBase): + """The Google Contacts Deleted element.""" + + _tag = 'deleted' + + +class GroupMembershipInfo(ContactsBase): + """The Google Contacts GroupMembershipInfo element.""" + + _tag = 'groupMembershipInfo' + + _children = ContactsBase._children.copy() + _attributes = ContactsBase._attributes.copy() + _attributes['deleted'] = 'deleted' + _attributes['href'] = 'href' + + def __init__(self, deleted=None, href=None, text=None, + extension_elements=None, extension_attributes=None): + ContactsBase.__init__(self, text=text, + extension_elements=extension_elements, + extension_attributes=extension_attributes) + self.deleted = deleted + self.href = href + + +class PersonEntry(gdata.BatchEntry): + """Base class for ContactEntry and ProfileEntry.""" + + _children = gdata.BatchEntry._children.copy() + _children['{%s}organization' % gdata.GDATA_NAMESPACE] = ( + 'organization', [Organization]) + _children['{%s}phoneNumber' % gdata.GDATA_NAMESPACE] = ( + 'phone_number', [PhoneNumber]) + _children['{%s}nickname' % CONTACTS_NAMESPACE] = ('nickname', Nickname) + _children['{%s}occupation' % CONTACTS_NAMESPACE] = ('occupation', Occupation) + _children['{%s}gender' % CONTACTS_NAMESPACE] = ('gender', Gender) + _children['{%s}birthday' % CONTACTS_NAMESPACE] = ('birthday', Birthday) + _children['{%s}postalAddress' % gdata.GDATA_NAMESPACE] = ('postal_address', + [PostalAddress]) + _children['{%s}structuredPostalAddress' % gdata.GDATA_NAMESPACE] = ( + 'structured_postal_address', [StructuredPostalAddress]) + _children['{%s}email' % gdata.GDATA_NAMESPACE] = ('email', [Email]) + _children['{%s}im' % gdata.GDATA_NAMESPACE] = ('im', [IM]) + _children['{%s}relation' % CONTACTS_NAMESPACE] = ('relation', [Relation]) + _children['{%s}userDefinedField' % CONTACTS_NAMESPACE] = ( + 'user_defined_field', [UserDefinedField]) + _children['{%s}website' % CONTACTS_NAMESPACE] = ('website', [Website]) + _children['{%s}externalId' % CONTACTS_NAMESPACE] = ( + 'external_id', [ExternalId]) + _children['{%s}event' % CONTACTS_NAMESPACE] = ('event', [Event]) + # The following line should be removed once the Python support + # for GData 2.0 is mature. 
+ _attributes = gdata.BatchEntry._attributes.copy() + _attributes['{%s}etag' % gdata.GDATA_NAMESPACE] = 'etag' + + def __init__(self, author=None, category=None, content=None, + atom_id=None, link=None, published=None, + title=None, updated=None, organization=None, phone_number=None, + nickname=None, occupation=None, gender=None, birthday=None, + postal_address=None, structured_postal_address=None, email=None, + im=None, relation=None, user_defined_field=None, website=None, + external_id=None, event=None, batch_operation=None, + batch_id=None, batch_status=None, text=None, + extension_elements=None, extension_attributes=None, etag=None): + gdata.BatchEntry.__init__(self, author=author, category=category, + content=content, atom_id=atom_id, link=link, + published=published, + batch_operation=batch_operation, + batch_id=batch_id, batch_status=batch_status, + title=title, updated=updated) + self.organization = organization or [] + self.phone_number = phone_number or [] + self.nickname = nickname + self.occupation = occupation + self.gender = gender + self.birthday = birthday + self.postal_address = postal_address or [] + self.structured_postal_address = structured_postal_address or [] + self.email = email or [] + self.im = im or [] + self.relation = relation or [] + self.user_defined_field = user_defined_field or [] + self.website = website or [] + self.external_id = external_id or [] + self.event = event or [] + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + # The following line should be removed once the Python support + # for GData 2.0 is mature. + self.etag = etag + + +class ContactEntry(PersonEntry): + """A Google Contact flavor of an Atom Entry.""" + + _children = PersonEntry._children.copy() + + _children['{%s}deleted' % gdata.GDATA_NAMESPACE] = ('deleted', Deleted) + _children['{%s}groupMembershipInfo' % CONTACTS_NAMESPACE] = ( + 'group_membership_info', [GroupMembershipInfo]) + _children['{%s}extendedProperty' % gdata.GDATA_NAMESPACE] = ( + 'extended_property', [gdata.ExtendedProperty]) + # Overwrite the organization rule in PersonEntry so that a ContactEntry + # may only contain one element. 
+ _children['{%s}organization' % gdata.GDATA_NAMESPACE] = ( + 'organization', Organization) + + def __init__(self, author=None, category=None, content=None, + atom_id=None, link=None, published=None, + title=None, updated=None, organization=None, phone_number=None, + nickname=None, occupation=None, gender=None, birthday=None, + postal_address=None, structured_postal_address=None, email=None, + im=None, relation=None, user_defined_field=None, website=None, + external_id=None, event=None, batch_operation=None, + batch_id=None, batch_status=None, text=None, + extension_elements=None, extension_attributes=None, etag=None, + deleted=None, extended_property=None, + group_membership_info=None): + PersonEntry.__init__(self, author=author, category=category, + content=content, atom_id=atom_id, link=link, + published=published, title=title, updated=updated, + organization=organization, phone_number=phone_number, + nickname=nickname, occupation=occupation, + gender=gender, birthday=birthday, + postal_address=postal_address, + structured_postal_address=structured_postal_address, + email=email, im=im, relation=relation, + user_defined_field=user_defined_field, + website=website, external_id=external_id, event=event, + batch_operation=batch_operation, batch_id=batch_id, + batch_status=batch_status, text=text, + extension_elements=extension_elements, + extension_attributes=extension_attributes, etag=etag) + self.deleted = deleted + self.extended_property = extended_property or [] + self.group_membership_info = group_membership_info or [] + + def GetPhotoLink(self): + for a_link in self.link: + if a_link.rel == PHOTO_LINK_REL: + return a_link + return None + + def GetPhotoEditLink(self): + for a_link in self.link: + if a_link.rel == PHOTO_EDIT_LINK_REL: + return a_link + return None + + +def ContactEntryFromString(xml_string): + return atom.CreateClassFromXMLString(ContactEntry, xml_string) + + +class ContactsFeed(gdata.BatchFeed, gdata.LinkFinder): + """A Google Contacts feed flavor of an Atom Feed.""" + + _children = gdata.BatchFeed._children.copy() + + _children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry', [ContactEntry]) + + def __init__(self, author=None, category=None, contributor=None, + generator=None, icon=None, atom_id=None, link=None, logo=None, + rights=None, subtitle=None, title=None, updated=None, + entry=None, total_results=None, start_index=None, + items_per_page=None, extension_elements=None, + extension_attributes=None, text=None): + gdata.BatchFeed.__init__(self, author=author, category=category, + contributor=contributor, generator=generator, + icon=icon, atom_id=atom_id, link=link, + logo=logo, rights=rights, subtitle=subtitle, + title=title, updated=updated, entry=entry, + total_results=total_results, + start_index=start_index, + items_per_page=items_per_page, + extension_elements=extension_elements, + extension_attributes=extension_attributes, + text=text) + + +def ContactsFeedFromString(xml_string): + return atom.CreateClassFromXMLString(ContactsFeed, xml_string) + + +class GroupEntry(gdata.BatchEntry): + """Represents a contact group.""" + _children = gdata.BatchEntry._children.copy() + _children['{%s}extendedProperty' % gdata.GDATA_NAMESPACE] = ( + 'extended_property', [gdata.ExtendedProperty]) + + def __init__(self, author=None, category=None, content=None, + contributor=None, atom_id=None, link=None, published=None, + rights=None, source=None, summary=None, control=None, + title=None, updated=None, + extended_property=None, batch_operation=None, batch_id=None, + 
batch_status=None, + extension_elements=None, extension_attributes=None, text=None): + gdata.BatchEntry.__init__(self, author=author, category=category, + content=content, + atom_id=atom_id, link=link, published=published, + batch_operation=batch_operation, + batch_id=batch_id, batch_status=batch_status, + title=title, updated=updated) + self.extended_property = extended_property or [] + + +def GroupEntryFromString(xml_string): + return atom.CreateClassFromXMLString(GroupEntry, xml_string) + + +class GroupsFeed(gdata.BatchFeed): + """A Google contact groups feed flavor of an Atom Feed.""" + _children = gdata.BatchFeed._children.copy() + _children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry', [GroupEntry]) + + +def GroupsFeedFromString(xml_string): + return atom.CreateClassFromXMLString(GroupsFeed, xml_string) + + +class ProfileEntry(PersonEntry): + """A Google Profiles flavor of an Atom Entry.""" + + +def ProfileEntryFromString(xml_string): + """Converts an XML string into a ProfileEntry object. + + Args: + xml_string: string The XML describing a Profile entry. + + Returns: + A ProfileEntry object corresponding to the given XML. + """ + return atom.CreateClassFromXMLString(ProfileEntry, xml_string) + + +class ProfilesFeed(gdata.BatchFeed, gdata.LinkFinder): + """A Google Profiles feed flavor of an Atom Feed.""" + + _children = gdata.BatchFeed._children.copy() + _children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry', [ProfileEntry]) + + def __init__(self, author=None, category=None, contributor=None, + generator=None, icon=None, atom_id=None, link=None, logo=None, + rights=None, subtitle=None, title=None, updated=None, + entry=None, total_results=None, start_index=None, + items_per_page=None, extension_elements=None, + extension_attributes=None, text=None): + gdata.BatchFeed.__init__(self, author=author, category=category, + contributor=contributor, generator=generator, + icon=icon, atom_id=atom_id, link=link, + logo=logo, rights=rights, subtitle=subtitle, + title=title, updated=updated, entry=entry, + total_results=total_results, + start_index=start_index, + items_per_page=items_per_page, + extension_elements=extension_elements, + extension_attributes=extension_attributes, + text=text) + + +def ProfilesFeedFromString(xml_string): + """Converts an XML string into a ProfilesFeed object. + + Args: + xml_string: string The XML describing a Profiles feed. + + Returns: + A ProfilesFeed object corresponding to the given XML. 
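+
+  Illustrative sketch (the XML string is hypothetical):
+
+    feed = ProfilesFeedFromString(xml_string)
+    for entry in feed.entry:
+      print entry.title.text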
+ """ + return atom.CreateClassFromXMLString(ProfilesFeed, xml_string) diff --git a/gam/gdata/analytics/contacts/__init__.pyc b/gam/gdata/analytics/contacts/__init__.pyc new file mode 100755 index 0000000000000000000000000000000000000000..f1b834c2b0908120e95d448b4eba581c6b86809d GIT binary patch literal 27620 zcmdU2YiwLsUjL8nII-iroWyxG>7;2|ciT99ZnxQN+p#@y+}LC1+HIO?c4x6KECWD&9PUdi@~>62`qm>rVcA*Nrl{bF`XcBhzKlHDa{K(Yg31|>TvX18Q_iy4ya zkeEG^-6LkNWcP~MC)s^s_DgoZm;;hMAZA#y!(t9f_Mn(Ul076YS~)D{uw+NXj7au~ zm?M%sDlS?)Cg!MQkBd1b*%M-pOZF*o(VvrIPDu8Yn5QKBw3w5UO^P`s+0$a4mh3ZP zl9D|m=Cov=74wW_&x$!C+2_POE7|A8oR#dTnCB$>f|%ze`x!B#l6_Ik3zB_F%xA>y zk(=ja88h>u+!whgay>gH<|T2*H1}MXdtS^~kb6GNeOb)QLGCMI?ki$m4RT)#b6*wn zdXW1@nERTT&jz_~hPkhc`CO3uR+#&SmIBZaU2UyqHTt?o^n2QOsp=&+D8`hq+%6^Inj9CCq(W%uJ9w z8|J@JGGReR zj4N(NbC<*1IWa3tWaq`K>UUQ)bYvMwTMXY@5OY)CTNHCER(~mc<9#t}`o;%h%JrHE z?UnG2YhtSU#)o1)4!Tkci?56Ev^Xoqf1oSt;d>v6xvlTrAca{Ny+iVI8No!Q?B|PR zFX?{byJfFfLDa7#(XYsCy;m2mT%S*S zXWHrK)vMl1?YuWNH#aqt&Ml^9uCzC<>4~e}^X=69UTSK%`Kg&u2%et_t{7M$~8s*oc*+|WiRC0gpcL?C*J5oJ5z__6}Rlx z@+I$uc3vjX=UDY??Yvzq`);kAFP)1pwyhOgt1J~uZhN3u&94AuKGRNjyu9z0N=3g- zw7p#^R@<8uBDl^`NA#l zTszYZT3OAP@8+~SZFRipnKnDFgi@!xExKCEn_{IscmjyHZDNk^-_7~P_(PbDN^K=y zE`BBNgDv?xk+GI9D`Y;re|0jVRKy>^XH%1TKR=%L+)xDXL+&^pZx(?IRt~0%ut!-a zkAP3Sm*uDKt$bCraL*guq_OjXw2o_h}W;U5r4u|D1a7d&*jFdcl}-XK1g3f-0NqJJp+k- ziOlF8MC)&q^Pr76e-Jsj+*-w~mzX3hFZ zS&3K87hIhT&b8Ja%(ucwuCQ7xnHr{qgDh0g$K7ltnh-6>RLZWy=A#+V*w&`O=|N5N zcszo8f1)1~Fq|0f=}jp72hvpo=yGWlbamkxK^IygAdqi(ZzE_I0xPvpkV}ShH8#qb zb+}Ltk;vg1C=gA|WfoHtiwn^-J6y5O%m_#IB|ILNfit3uJy%=FU=a(*(|?;z0u||A zP6B6EWu8`(P_HgoM}jeJ1Js6XDBL#>o1Mw|`IXL$QDtTJFB~Ih8J$`z`lUx6X z$so19Xq0RPqtx71!*q_~h0YA|7;3tSCmJF_1Vfy3tN9w_!Lt9Ti%I6Y$vC4*qrtL0 z3^pq2@`z8My(*q)#0QYVh~KL$O}bv8R;-ebJ?f~(P{}5vZmc*OIorj^8;iR<`jcq$ zHlAqoq#oCxc}4I{vZ<8o79wtpa#^H>{pscox!JGm77Q3kFkqX6tqZ(a!;4U08S9mc zrcYx#5JN`OZ@=7!;c9@ifN<|UgP_$!Y9FJ?W4G_H+derSbP#1QrTh|NB+|F@rFA#w zflHQGltXC7f~E@`As;q4fk&yZ4@^+jjbU!UjMpO@Fyr;g28NdCO@Nz|k0V4DA_y&` z$E#%Jq!F2B_RC%>^H5&ooMw^(bj~0cWfvSWR87W8Ii$`pac%oMsL~^Ake;K7orwYb zWJZ~8BB4rKY?t@5ltAiGBykQhV3#{Xd7eg1m`CwgIT=VlWQu)>LH>ZfDi_C^M~KL# zw+ohyz}T-~*$=RUb++kuX9Skysveha6R<2tP)Q&FRF+kqfpSsvbAEBnRmnw@i%b#h z+_JGn92OERP|(;Ka9U6Va9St@a9SvR1mJug9dTY@KnRTiXJbJ#h~r%g5nB|kIR0P7 z^F#sXS#;><*{Mx{lN1IlBh-2dNb5fqD3JaGA|0fieE?F>+FdkV!k~}c1=8IC86in^ z2ZL;AKSB0@xVwYw!66zTz;^nksnaWFi?HmJ&Db9HYFEdCY80FwB8{+qEe9;XO@P}yHqf#M#mOt%N9z{My zPDP!uZB9kGT~!^2;;lo8&!OPcX%_Y(>ii8n-YEplEbK^+{`3x^|NRM{V(xqeJfM^^ zP+vVSBpDO{lw?qy>IlgIeTe_|$$e}As)WV1)H>D*bsM@D;;#0*8t-`oAqCdQC&Z1< zBhi2x*cAnCfDHi!b_9Y{byWP2Q9ZI0^(xwk`Y_7&E%+*!tBlnJ0qSqijK^_W0gCpI zpQQLi8}whWOQgmPut(vEnU^ZHHCRPl(+qG46e_?G-5vzEKq)7^Xay$C^+u4wR+pPv z?Xo`e2AU+Rjr5r#NI}eBFZk;<*EGO%h;o!UbOt^_lBSQ94T|8I}Yf!inJ%;`1aD4fxzbF-k}S zK3l|&@w&~~m!fVDny~6oBVs#mp&%xPDq<^&v!EM1=wDH<6+?T;gUEq0mOFAjBJ0}b zIugOra-I%rOK+m$e`5EDvyJF}1ey~V45x8=HUx1hEehh_Mx;|94%NLbh$2RHKGL9& z0Ws*o-(7Ma-Y?30NG?69eXZbqdNu-W4yCxJQequq9j?3TP1PE98Vi+DBuzT6psc|0 z@(CgL1AY-#>EVq7&_qAMiFR2|WQ8!~TKNoNz>H|SpAblal)z$A+UBsVAQHjy7tp+O zk%5A21lrFaZ>&s% zs`mwED@&Arl9Cs9YzEk^2S~A+u7(n7Zk%7m4pe4+ZON@Yi6A?QL9`;nv2I6zODz*_ z8o_uX(yO-D^;*%=xF;BbNhq|FEg?88?FIzD@Z>@8yO@nr4~1aTpiHrFi$<9ceDOs) zdc#?~)(W(O`XabuE9H7CUDgvwOkd-W`w$_QQ48%{p|D<6w}nR?wL5qIl5JWvM0-(I;7hYV$ktlm3=pcNR{u0*rL z?!gTU-BTv7+cX8%z=sgRrBgS$20p0%9FW_{2m2cN;O1t0KZ= zHz>dqmxCjbo?*x`J0E5_(9Thh?%u}B6l#BmU7{oy6S(Njz+R3QYyPUq-wjcM3(;+8 zIg2)0K}*n?2wvF5rFs{g=a6_zz`~|tz`|rVfaThg1uXAjm~#&S7RN0$(obTN0?XsF zW^M*KLC+c?2OGuO-$;ge(1uARzJb=XuwlmR`y0>@@y4b$dUBi?(Rqf+(+FaQ%P_AS 
z5VgwXBM*`~`FZHh?_(f9Gw2HZ;aZt4`x6Be2p`gFCH%}lz@J&bUMlaJE`4gFWC|CW2#A;Wy!=(B(} zwc!~0gn8XSpRaQ@pCt65%(l)>acyG!8*UR@MIR@_S!KnFCe9pVc(#Qc$MJnLU`0ss z-58S4`f$x%^5C5w;)Y5h$bTH>ow#iWMxJK#&a>!7E7_#pb|mCbC)fn zQ5)cC6kB5@2Nyg_<0z9gu$+gRc`VBbT|DZW$@pedCPY#bV?uQ0ZR%M}Gj37Z4b1u> zK|sxsA!GV6Zb^@UIlYgW`IH@FTMgJ%x9)6>L0JrAdJM5hw$eB- z#&ams5#J%4s*H9E5qInAU$cWB_XdDVO}{%LPbxO-J&&LjIz=6gFp2}9FT5Q~HJ}*wMBh$fn+cTF}v89t6 z4O`nRCg8P19d? z#HW_dMXj*sEMKNKPp7?!ZlKOTZH-4qwuwiqx`TK`XDr2$2!;MOhC*EBNgUaL>q#ii zq1Yl3r__CbLL(l8^*TLWrHdXa>?IwbbW}Bys0zcRIRdJoprgrX^8PW7)^+*(byuiDlz96@B}pxJP2yr)+k8!jA^0VUF^46&GPL>eVy6U!^O* zr{z9wbU9AXvNHe)kH#LJNDR)eMyCfAev9nJj>O{ms_TlFLbhNHlI24O!A1$tTnI~Fdb+*xuOYYEXbQS&KLPz*g?v7 zYGrV7-Z7Nym?)4U@KK{EKcWTSS6`@`s`c=M6HZdA62uKOn6Kdq1!W2TPOu`t-w8S# z+|U8K@Il9=LU2aMr9y-1KDFDrPwh@@3`q5(;CwS`8IJ8 z-K!B)y-%vY7JPy>_Dl75f+QL{Ak{w!lBjK1s(&3MQRhLa{$r3t&4;A=Z$T3EAC~HW z2g!Z1F(TFe#ELz8yU0*5{y;axf zk?5toUs%mmaF2^UNKdWS=HWZGW)pBB_t!n$E7emGTK#$zN4Q-xs-EuzCAD;qvqr^j zyWfd?anN~jh^mU2iiV1J4x}=1mKhKe9KyZ=yn|^I1zKm7F@j+eJ9aQfGD?uMdhkug zZZRk^SYuFTP+{OP062;goDOxU%KAQLP-D=`ppU^02HHPFwqwxLIUE9y^YdJmZ(To7#f zRnSn24}=^myTqj$lt8K(r1%1n25;;RI5zt zy_`D;M!9ZJFoCzKYt&uzcH8tm)dLWHmNo4@$}2yVZqXyTU#Y)M_k#nF#;d6lA4GfC zq~xDGOW44APk!;3d`%@UoHNH>_N^YN0df#b^V0b#vtsObm(^0_~)3}3-<8+(HnHi;$(af$DHzH1*<15O-m0jnV zB!Sf?(sgkW*4;vXSPb3Z< z8tC7SZJy0_(56VqH7)`~rbXPd8oE=&1O@Ms8?!IctT>Lx1FE$a6v7KDS{Om}_9(~& zDk5wpg!3bAXw_3)B%$6If@VIAE2Pll7TlTx8B>8C+CuGaq~fWWha=-d&`^f?9Yid{ zd^BaQb3F52;$T9_75f!qWon|L)}vQbK_#NH3e}z(4N@d)MHEN+fK`VoD;1?+wc^Su zF1p%<*Sz&5Rc>Os!cQSv&!YMj9HPzP2<>_aZxi|KMGHm9CZ8y=gdf40mxCmdup<#v)Vn?k+=eilcO9_GmHBc^X@UY&mhufzRe_;!}%_QNUvdEou6j#GYr1R zK#98YSM3F|wnL}2w|9scL>%cq0mel3WK|Nfr~XaOt|_(p6yL-Y4&t))qoScEwKz8$ zyE7^zSW29tU~9zJ`6gPZvg5;ukW5i$AosKfrME+pSao+uk`kMUBz=$l$3qXVL4;~( zZ`54K1;+7s-$LNF(p(w{5{#Cc7-qLjlkg$P60VFMr23#_Nfl7H3fuvu@sjx@krg+2 zCROAgQ&DeIp}xQ&@d?dJ7*-pU2nHn>fz&C0r|)3?fiw>39nv_YcSz%qfgrI%dfx+C z8fK-v4Q3@JG^G16LetW$&}0D!vC)cC3ij)$#`0cDiK84$GqRVM$2E)Lxmqoh?DnI; ztE|^>MIEg=-ujx}VZpf4J)BDn+qHsoQ?I%}1%3cPv~DyzPt?Uynj|u#I%HDQ*vZvs z8X_U)>wHD!mky))_fhfp**j8IQl#OYlR$N<{{4_hdbi~pz{)#_?CfHM#u2LjCi{=4 z0o5C253cDr9<_q}cvNq~^SbN-6eVBqUPlm04h`t}aQVR@&uJ7LTHa5f0Prc^jgfy& zrILT=XAz4iyvDXj=6?}2{Wj;0D5Gn?t<`3A-B|4=!v4GLF&?fqF(SS{QeZa!CGd>n zQKXBnH`b74dcj7xhV?42dYhPOtU%I;r=3}YUqWa8AQ;gj`G$IbhEFFd<=ZgxfKQZ@ zaHdXT;{=zf@ov-#SVw$n>KBiX4Gt!gsoIKn0skkHk)!p6;9KrevnqQB&l0N(Yf7s0 zt93XoE^jE;t4XzL3Y@D3Hdd{a4J$#9d{n(s#I6V%7()l_yi%aC&Uc)R4uTNnuJ{~8 zB;${QDVKpb5H%7F(i;V$Cs1LDK=vUDOACTi;Gp;rd?)aD$~3pI@zi}?#q#)j=wv6Q z`d-!-kyy$F|G!j$2`@<$P+8`W(=DnqIf<#KjcG?%kLK%7v`o;QlAc{ZVpzk49|3E{K&_BVT788_Qy{bw-VzED?5Y&J_l%Q%pO6{LQP!j@D zne^gpI=wiZnR4!;Zw|?@z6m2_CY_r|IkD8G)Q9#0FD;u&XVOk;M#pL2)&Y^zM9Cb# ziL$0Mi)kl=k5sDAYG&ugr)SdEtw%HT!nRm?J~fqgbZgjFyO3H;&&*6O+S{^i-UoBj z^RbR_GGnbWsU4vy(|({?^`@x;#ByzWTtb^Uo0`tVnolh(OfM{^GK;YWCQ>sqH}fFnwXoNPi3y>^y{dy>4nL7Wja>v8X?1e>7*v7=VIf>Xw&g|UtGMNTb!KKvtH>B z?R@5@Cv$VoR4Ox_O)XB(Wz_Hr7}Es$kl`HK%~QIp*gG!Bq8t*r=KJAv2m34wUS#kZ zgEttw$w2p4b*J@RCduHMcVuUnH_zZIgGC1KGx(5!ZpMDZ*bN3>VxSh}JY(w&3Jh*D z_y&Vd5$N*LM16@CuA1{r7W^ComD_)TF^b2|4;U!j{UKwd0?w}>=!h_>sCUuc?EW^v z$@_f-xV04*+zs_n!0+$XkTJcHsQ$k#V{UstJ@^{69@{(8AO0L1IXH4=n- utR?$L_Kyq?^yxE#{}0ul{*i;nCPyv|^gVripzjz;j`gFYZ(!)@WB&)M;9F<_ literal 0 HcmV?d00001 diff --git a/gam/gdata/analytics/contacts/client.py b/gam/gdata/analytics/contacts/client.py new file mode 100755 index 00000000000..9ae9cd770e7 --- /dev/null +++ b/gam/gdata/analytics/contacts/client.py @@ -0,0 +1,547 @@ +#!/usr/bin/env python +# +# Copyright (C) 2009 Google Inc. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from types import ListType, DictionaryType + + +"""Contains a client to communicate with the Contacts servers. + +For documentation on the Contacts API, see: +http://code.google.com/apis/contatcs/ +""" + +__author__ = 'vinces1979@gmail.com (Vince Spicer)' + + +import gdata.client +import gdata.contacts.data +import atom.client +import atom.data +import atom.http_core +import gdata.gauth + +DEFAULT_BATCH_URL = ('https://www.google.com/m8/feeds/contacts/default/full' + '/batch') +DEFAULT_PROFILES_BATCH_URL = ('https://www.google.com/m8/feeds/profiles/domain/' + '%s/full/batch') + +class ContactsClient(gdata.client.GDClient): + api_version = '3' + auth_service = 'cp' + server = "www.google.com" + contact_list = "default" + auth_scopes = gdata.gauth.AUTH_SCOPES['cp'] + ssl = True + + + def __init__(self, domain=None, auth_token=None, **kwargs): + """Constructs a new client for the Email Settings API. + + Args: + domain: string The Google Apps domain (if any). + kwargs: The other parameters to pass to the gdata.client.GDClient + constructor. + """ + gdata.client.GDClient.__init__(self, auth_token=auth_token, **kwargs) + self.domain = domain + + def get_feed_uri(self, kind='contacts', contact_list=None, projection='full', + scheme="https"): + """Builds a feed URI. + + Args: + kind: The type of feed to return, typically 'groups' or 'contacts'. + Default value: 'contacts'. + contact_list: The contact list to return a feed for. + Default value: self.contact_list. + projection: The projection to apply to the feed contents, for example + 'full', 'base', 'base/12345', 'full/batch'. Default value: 'full'. + scheme: The URL scheme such as 'http' or 'https', None to return a + relative URI without hostname. + + Returns: + A feed URI using the given kind, contact list, and projection. + Example: '/m8/feeds/contacts/default/full'. + """ + contact_list = contact_list or self.contact_list + if kind == 'profiles': + contact_list = 'domain/%s' % self.domain + prefix = scheme and '%s://%s' % (scheme, self.server) or '' + return '%s/m8/feeds/%s/%s/%s' % (prefix, kind, contact_list, projection) + + GetFeedUri = get_feed_uri + + def get_contact(self, uri, desired_class=gdata.contacts.data.ContactEntry, + auth_token=None, **kwargs): + return self.get_entry(uri, auth_token=auth_token, + desired_class=desired_class, **kwargs) + + + GetContact = get_contact + + + def create_contact(self, new_contact, insert_uri=None, auth_token=None, **kwargs): + """Adds an new contact to Google Contacts. + + Args: + new_contact: atom.Entry or subclass A new contact which is to be added to + Google Contacts. + insert_uri: the URL to post new contacts to the feed + url_params: dict (optional) Additional URL parameters to be included + in the insertion request. + escape_params: boolean (optional) If true, the url_parameters will be + escaped before they are included in the request. 
+ + Returns: + On successful insert, an entry containing the contact created + On failure, a RequestError is raised of the form: + {'status': HTTP status code from server, + 'reason': HTTP reason from the server, + 'body': HTTP body of the server's response} + """ + insert_uri = insert_uri or self.GetFeedUri() + return self.Post(new_contact, insert_uri, + auth_token=auth_token, **kwargs) + + CreateContact = create_contact + + def add_contact(self, new_contact, insert_uri=None, auth_token=None, + billing_information=None, birthday=None, calendar_link=None, **kwargs): + """Adds an new contact to Google Contacts. + + Args: + new_contact: atom.Entry or subclass A new contact which is to be added to + Google Contacts. + insert_uri: the URL to post new contacts to the feed + url_params: dict (optional) Additional URL parameters to be included + in the insertion request. + escape_params: boolean (optional) If true, the url_parameters will be + escaped before they are included in the request. + + Returns: + On successful insert, an entry containing the contact created + On failure, a RequestError is raised of the form: + {'status': HTTP status code from server, + 'reason': HTTP reason from the server, + 'body': HTTP body of the server's response} + """ + + contact = gdata.contacts.data.ContactEntry() + + if billing_information is not None: + if not isinstance(billing_information, gdata.contacts.data.BillingInformation): + billing_information = gdata.contacts.data.BillingInformation(text=billing_information) + + contact.billing_information = billing_information + + if birthday is not None: + if not isinstance(birthday, gdata.contacts.data.Birthday): + birthday = gdata.contacts.data.Birthday(when=birthday) + + contact.birthday = birthday + + if calendar_link is not None: + if type(calendar_link) is not ListType: + calendar_link = [calendar_link] + + for link in calendar_link: + if not isinstance(link, gdata.contacts.data.CalendarLink): + if type(link) is not DictionaryType: + raise TypeError, "calendar_link Requires dictionary not %s" % type(link) + + link = gdata.contacts.data.CalendarLink( + rel=link.get("rel", None), + label=link.get("label", None), + primary=link.get("primary", None), + href=link.get("href", None), + ) + + contact.calendar_link.append(link) + + insert_uri = insert_uri or self.GetFeedUri() + return self.Post(contact, insert_uri, + auth_token=auth_token, **kwargs) + + AddContact = add_contact + + def get_contacts(self, uri=None, desired_class=gdata.contacts.data.ContactsFeed, + auth_token=None, **kwargs): + """Obtains a feed with the contacts belonging to the current user. + + Args: + auth_token: An object which sets the Authorization HTTP header in its + modify_request method. Recommended classes include + gdata.gauth.ClientLoginToken and gdata.gauth.AuthSubToken + among others. Represents the current user. Defaults to None + and if None, this method will look for a value in the + auth_token member of SpreadsheetsClient. + desired_class: class descended from atom.core.XmlElement to which a + successful response should be converted. If there is no + converter function specified (desired_class=None) then the + desired_class will be used in calling the + atom.core.parse function. If neither + the desired_class nor the converter is specified, an + HTTP reponse object will be returned. Defaults to + gdata.spreadsheets.data.SpreadsheetsFeed. 
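+
+    Example (an illustrative sketch, not part of the gdata sources; the
+    application name, credentials and the ClientLogin call are assumed):
+
+      client = ContactsClient(source='example-app')
+      client.ClientLogin('user@example.com', 'password', 'example-app')
+      feed = client.GetContacts()
+      for entry in feed.entry:
+        print entry.title.text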
+ """ + uri = uri or self.GetFeedUri() + return self.get_feed(uri, auth_token=auth_token, + desired_class=desired_class, **kwargs) + + GetContacts = get_contacts + + def get_group(self, uri=None, desired_class=gdata.contacts.data.GroupEntry, + auth_token=None, **kwargs): + """ Get a single groups details + Args: + uri: the group uri or id + """ + return self.get_entry(uri, desired_class=desired_class, auth_token=auth_token, **kwargs) + + GetGroup = get_group + + def get_groups(self, uri=None, desired_class=gdata.contacts.data.GroupsFeed, + auth_token=None, **kwargs): + uri = uri or self.GetFeedUri('groups') + return self.get_feed(uri, desired_class=desired_class, auth_token=auth_token, **kwargs) + + GetGroups = get_groups + + def create_group(self, new_group, insert_uri=None, url_params=None, + desired_class=None, **kwargs): + insert_uri = insert_uri or self.GetFeedUri('groups') + return self.Post(new_group, insert_uri, url_params=url_params, + desired_class=desired_class, **kwargs) + + CreateGroup = create_group + + def update_group(self, edit_uri, updated_group, url_params=None, + escape_params=True, desired_class=None, auth_token=None, **kwargs): + return self.Put(updated_group, self._CleanUri(edit_uri), + url_params=url_params, + escape_params=escape_params, + desired_class=desired_class, + auth_token=auth_token, **kwargs) + + UpdateGroup = update_group + + def delete_group(self, group_object, auth_token=None, force=False, **kws): + return self.Delete(group_object, auth_token=auth_token, force=force, **kws) + + DeleteGroup = delete_group + + def change_photo(self, media, contact_entry_or_url, content_type=None, + content_length=None, auth_token=None, **kwargs): + """Change the photo for the contact by uploading a new photo. + + Performs a PUT against the photo edit URL to send the binary data for the + photo. + + Args: + media: filename, file-like-object, or a gdata.data.MediaSource object to send. + contact_entry_or_url: ContactEntry or str If it is a ContactEntry, this + method will search for an edit photo link URL and + perform a PUT to the URL. + content_type: str (optional) the mime type for the photo data. This is + necessary if media is a file or file name, but if media + is a MediaSource object then the media object can contain + the mime type. If media_type is set, it will override the + mime type in the media object. + content_length: int or str (optional) Specifying the content length is + only required if media is a file-like object. If media + is a filename, the length is determined using + os.path.getsize. If media is a MediaSource object, it is + assumed that it already contains the content length. + """ + ifmatch_header = None + if isinstance(contact_entry_or_url, gdata.contacts.data.ContactEntry): + photo_link = contact_entry_or_url.GetPhotoLink() + uri = photo_link.href + ifmatch_header = atom.client.CustomHeaders( + **{'if-match': photo_link.etag}) + else: + uri = contact_entry_or_url + if isinstance(media, gdata.data.MediaSource): + payload = media + # If the media object is a file-like object, then use it as the file + # handle in the in the MediaSource. + elif hasattr(media, 'read'): + payload = gdata.data.MediaSource(file_handle=media, + content_type=content_type, content_length=content_length) + # Assume that the media object is a file name. 
+ else: + payload = gdata.data.MediaSource(content_type=content_type, + content_length=content_length, file_path=media) + return self.Put(uri=uri, data=payload, auth_token=auth_token, + ifmatch_header=ifmatch_header, **kwargs) + + ChangePhoto = change_photo + + def get_photo(self, contact_entry_or_url, auth_token=None, **kwargs): + """Retrives the binary data for the contact's profile photo as a string. + + Args: + contact_entry_or_url: a gdata.contacts.ContactEntry object or a string + containing the photo link's URL. If the contact entry does not + contain a photo link, the image will not be fetched and this method + will return None. + """ + # TODO: add the ability to write out the binary image data to a file, + # reading and writing a chunk at a time to avoid potentially using up + # large amounts of memory. + url = None + if isinstance(contact_entry_or_url, gdata.contacts.data.ContactEntry): + photo_link = contact_entry_or_url.GetPhotoLink() + if photo_link: + url = photo_link.href + else: + url = contact_entry_or_url + if url: + return self.Get(url, auth_token=auth_token, **kwargs).read() + else: + return None + + GetPhoto = get_photo + + def delete_photo(self, contact_entry_or_url, auth_token=None, **kwargs): + """Delete the contact's profile photo. + + Args: + contact_entry_or_url: a gdata.contacts.ContactEntry object or a string + containing the photo link's URL. + """ + uri = None + ifmatch_header = None + if isinstance(contact_entry_or_url, gdata.contacts.data.ContactEntry): + photo_link = contact_entry_or_url.GetPhotoLink() + if photo_link.etag: + uri = photo_link.href + ifmatch_header = atom.client.CustomHeaders( + **{'if-match': photo_link.etag}) + else: + # No etag means no photo has been assigned to this contact. + return + else: + uri = contact_entry_or_url + if uri: + self.Delete(entry_or_uri=uri, auth_token=auth_token, + ifmatch_header=ifmatch_header, **kwargs) + + DeletePhoto = delete_photo + + def get_profiles_feed(self, uri=None, auth_token=None, **kwargs): + """Retrieves a feed containing all domain's profiles. + + Args: + uri: string (optional) the URL to retrieve the profiles feed, + for example /m8/feeds/profiles/default/full + + Returns: + On success, a ProfilesFeed containing the profiles. + On failure, raises a RequestError. + """ + + uri = uri or self.GetFeedUri('profiles') + return self.get_feed(uri, auth_token=auth_token, + desired_class=gdata.contacts.data.ProfilesFeed, **kwargs) + + GetProfilesFeed = get_profiles_feed + + def get_profile(self, uri, auth_token=None, **kwargs): + """Retrieves a domain's profile for the user. + + Args: + uri: string the URL to retrieve the profiles feed, + for example /m8/feeds/profiles/default/full/username + + Returns: + On success, a ProfileEntry containing the profile for the user. + On failure, raises a RequestError + """ + return self.get_entry(uri, + desired_class=gdata.contacts.data.ProfileEntry, + auth_token=auth_token, **kwargs) + + GetProfile = get_profile + + def update_profile(self, updated_profile, auth_token=None, force=False, **kwargs): + """Updates an existing profile. + + Args: + updated_profile: atom.Entry or subclass containing + the Atom Entry which will replace the profile which is + stored at the edit_url. + auth_token: An object which sets the Authorization HTTP header in its + modify_request method. Recommended classes include + gdata.gauth.ClientLoginToken and gdata.gauth.AuthSubToken + among others. Represents the current user. 
Defaults to None + and if None, this method will look for a value in the + auth_token member of ContactsClient. + force: boolean stating whether an update should be forced. Defaults to + False. Normally, if a change has been made since the passed in + entry was obtained, the server will not overwrite the entry since + the changes were based on an obsolete version of the entry. + Setting force to True will cause the update to silently + overwrite whatever version is present. + url_params: dict (optional) Additional URL parameters to be included + in the insertion request. + escape_params: boolean (optional) If true, the url_parameters will be + escaped before they are included in the request. + + Returns: + On successful update, a httplib.HTTPResponse containing the server's + response to the PUT request. + On failure, raises a RequestError. + """ + return self.Update(updated_profile, auth_token=auth_token, force=force, **kwargs) + + UpdateProfile = update_profile + + def execute_batch(self, batch_feed, url=DEFAULT_BATCH_URL, desired_class=None, + auth_token=None, **kwargs): + """Sends a batch request feed to the server. + + Args: + batch_feed: gdata.contacts.ContactFeed A feed containing batch + request entries. Each entry contains the operation to be performed + on the data contained in the entry. For example an entry with an + operation type of insert will be used as if the individual entry + had been inserted. + url: str The batch URL to which these operations should be applied. + converter: Function (optional) The function used to convert the server's + response to an object. + + Returns: + The results of the batch request's execution on the server. If the + default converter is used, this is stored in a ContactsFeed. + """ + return self.Post(batch_feed, url, desired_class=desired_class, + auth_token=None, **kwargs) + + ExecuteBatch = execute_batch + + def execute_batch_profiles(self, batch_feed, url=None, + desired_class=gdata.contacts.data.ProfilesFeed, + auth_token=None, **kwargs): + """Sends a batch request feed to the server. + + Args: + batch_feed: gdata.profiles.ProfilesFeed A feed containing batch + request entries. Each entry contains the operation to be performed + on the data contained in the entry. For example an entry with an + operation type of insert will be used as if the individual entry + had been inserted. + url: string The batch URL to which these operations should be applied. + converter: Function (optional) The function used to convert the server's + response to an object. The default value is + gdata.profiles.ProfilesFeedFromString. + + Returns: + The results of the batch request's execution on the server. If the + default converter is used, this is stored in a ProfilesFeed. + """ + url = url or (DEFAULT_PROFILES_BATCH_URL % self.domain) + return self.Post(batch_feed, url, desired_class=desired_class, + auth_token=auth_token, **kwargs) + + ExecuteBatchProfiles = execute_batch_profiles + + def _CleanUri(self, uri): + """Sanitizes a feed URI. + + Args: + uri: The URI to sanitize, can be relative or absolute. + + Returns: + The given URI without its http://server prefix, if any. + Keeps the leading slash of the URI. 
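+
+    Example (illustrative, assuming self.server is 'www.google.com'):
+      'http://www.google.com/m8/feeds/contacts/default/full' is reduced to
+      '/m8/feeds/contacts/default/full', while a URI that does not start
+      with 'http://www.google.com' is returned unchanged.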
+ """ + url_prefix = 'http://%s' % self.server + if uri.startswith(url_prefix): + uri = uri[len(url_prefix):] + return uri + +class ContactsQuery(gdata.client.Query): + """ + Create a custom Contacts Query + + Full specs can be found at: U{Contacts query parameters reference + } + """ + + def __init__(self, feed=None, group=None, orderby=None, showdeleted=None, + sortorder=None, requirealldeleted=None, **kwargs): + """ + @param max_results: The maximum number of entries to return. If you want + to receive all of the contacts, rather than only the default maximum, you + can specify a very large number for max-results. + @param start-index: The 1-based index of the first result to be retrieved. + @param updated-min: The lower bound on entry update dates. + @param group: Constrains the results to only the contacts belonging to the + group specified. Value of this parameter specifies group ID + @param orderby: Sorting criterion. The only supported value is + lastmodified. + @param showdeleted: Include deleted contacts in the returned contacts feed + @pram sortorder: Sorting order direction. Can be either ascending or + descending. + @param requirealldeleted: Only relevant if showdeleted and updated-min + are also provided. It dictates the behavior of the server in case it + detects that placeholders of some entries deleted since the point in + time specified as updated-min may have been lost. + """ + gdata.client.Query.__init__(self, **kwargs) + self.group = group + self.orderby = orderby + self.sortorder = sortorder + self.showdeleted = showdeleted + + def modify_request(self, http_request): + if self.group: + gdata.client._add_query_param('group', self.group, http_request) + if self.orderby: + gdata.client._add_query_param('orderby', self.orderby, http_request) + if self.sortorder: + gdata.client._add_query_param('sortorder', self.sortorder, http_request) + if self.showdeleted: + gdata.client._add_query_param('showdeleted', self.showdeleted, http_request) + gdata.client.Query.modify_request(self, http_request) + + ModifyRequest = modify_request + + +class ProfilesQuery(gdata.client.Query): + """ + Create a custom Profiles Query + + Full specs can be found at: U{Profiless query parameters reference + } + """ + + def __init__(self, feed=None, start_key=None, **kwargs): + """ + @param start_key: Opaque key of the first element to retrieve. Present in + the next link of an earlier request, if further pages of response are + available. + """ + gdata.client.Query.__init__(self, **kwargs) + self.feed = feed or 'https://www.google.com/m8/feeds/profiles/default/full' + self.start_key = start_key + + def modify_request(self, http_request): + if self.start_key: + gdata.client._add_query_param('start-key', self.start_key, http_request) + gdata.client.Query.modify_request(self, http_request) + + ModifyRequest = modify_request diff --git a/gam/gdata/analytics/contacts/data.py b/gam/gdata/analytics/contacts/data.py new file mode 100755 index 00000000000..ce2385204a2 --- /dev/null +++ b/gam/gdata/analytics/contacts/data.py @@ -0,0 +1,493 @@ +#!/usr/bin/env python +# +# Copyright (C) 2009 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Data model classes for parsing and generating XML for the Contacts API.""" + + +__author__ = 'vinces1979@gmail.com (Vince Spicer)' + + +import atom.core +import gdata +import gdata.data + + +PHOTO_LINK_REL = 'http://schemas.google.com/contacts/2008/rel#photo' +PHOTO_EDIT_LINK_REL = 'http://schemas.google.com/contacts/2008/rel#edit-photo' + +EXTERNAL_ID_ORGANIZATION = 'organization' + +RELATION_MANAGER = 'manager' + +CONTACTS_NAMESPACE = 'http://schemas.google.com/contact/2008' +CONTACTS_TEMPLATE = '{%s}%%s' % CONTACTS_NAMESPACE + + +class BillingInformation(atom.core.XmlElement): + """ + gContact:billingInformation + Specifies billing information of the entity represented by the contact. The element cannot be repeated. + """ + + _qname = CONTACTS_TEMPLATE % 'billingInformation' + + +class Birthday(atom.core.XmlElement): + """ + Stores birthday date of the person represented by the contact. The element cannot be repeated. + """ + + _qname = CONTACTS_TEMPLATE % 'birthday' + when = 'when' + + +class ContactLink(atom.data.Link): + """ + Extends atom.data.Link to add gd:etag attribute for photo link. + """ + etag = gdata.data.GD_TEMPLATE % 'etag' + + +class CalendarLink(atom.core.XmlElement): + """ + Storage for URL of the contact's calendar. The element can be repeated. + """ + + _qname = CONTACTS_TEMPLATE % 'calendarLink' + rel = 'rel' + label = 'label' + primary = 'primary' + href = 'href' + + +class DirectoryServer(atom.core.XmlElement): + """ + A directory server associated with this contact. + May not be repeated. + """ + + _qname = CONTACTS_TEMPLATE % 'directoryServer' + + +class Event(atom.core.XmlElement): + """ + These elements describe events associated with a contact. + They may be repeated + """ + + _qname = CONTACTS_TEMPLATE % 'event' + label = 'label' + rel = 'rel' + when = gdata.data.When + + +class ExternalId(atom.core.XmlElement): + """ + Describes an ID of the contact in an external system of some kind. + This element may be repeated. + """ + + _qname = CONTACTS_TEMPLATE % 'externalId' + label = 'label' + rel = 'rel' + value = 'value' + + +def ExternalIdFromString(xml_string): + return atom.core.parse(ExternalId, xml_string) + + +class Gender(atom.core.XmlElement): + """ + Specifies the gender of the person represented by the contact. + The element cannot be repeated. + """ + + _qname = CONTACTS_TEMPLATE % 'directoryServer' + value = 'value' + + +class Hobby(atom.core.XmlElement): + """ + Describes an ID of the contact in an external system of some kind. + This element may be repeated. + """ + + _qname = CONTACTS_TEMPLATE % 'hobby' + + +class Initials(atom.core.XmlElement): + """ Specifies the initials of the person represented by the contact. The + element cannot be repeated. """ + + _qname = CONTACTS_TEMPLATE % 'initials' + + +class Jot(atom.core.XmlElement): + """ + Storage for arbitrary pieces of information about the contact. Each jot + has a type specified by the rel attribute and a text value. + The element can be repeated. 
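+
+  Example of the serialized element (illustrative only; the rel value and
+  the text content are assumed):
+
+    <gContact:jot rel='user'>Collects vintage keyboards.</gContact:jot>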
+ """ + + _qname = CONTACTS_TEMPLATE % 'jot' + rel = 'rel' + + +class Language(atom.core.XmlElement): + """ + Specifies the preferred languages of the contact. + The element can be repeated. + + The language must be specified using one of two mutually exclusive methods: + using the freeform @label attribute, or using the @code attribute, whose value + must conform to the IETF BCP 47 specification. + """ + + _qname = CONTACTS_TEMPLATE % 'language' + code = 'code' + label = 'label' + + +class MaidenName(atom.core.XmlElement): + """ + Specifies maiden name of the person represented by the contact. + The element cannot be repeated. + """ + + _qname = CONTACTS_TEMPLATE % 'maidenName' + + +class Mileage(atom.core.XmlElement): + """ + Specifies the mileage for the entity represented by the contact. + Can be used for example to document distance needed for reimbursement + purposes. The value is not interpreted. The element cannot be repeated. + """ + + _qname = CONTACTS_TEMPLATE % 'mileage' + + +class NickName(atom.core.XmlElement): + """ + Specifies the nickname of the person represented by the contact. + The element cannot be repeated. + """ + + _qname = CONTACTS_TEMPLATE % 'nickname' + + +class Occupation(atom.core.XmlElement): + """ + Specifies the occupation/profession of the person specified by the contact. + The element cannot be repeated. + """ + + _qname = CONTACTS_TEMPLATE % 'occupation' + + +class Priority(atom.core.XmlElement): + """ + Classifies importance of the contact into 3 categories: + * Low + * Normal + * High + + The priority element cannot be repeated. + """ + + _qname = CONTACTS_TEMPLATE % 'priority' + + +class Relation(atom.core.XmlElement): + """ + This element describe another entity (usually a person) that is in a + relation of some kind with the contact. + """ + + _qname = CONTACTS_TEMPLATE % 'relation' + rel = 'rel' + label = 'label' + + +class Sensitivity(atom.core.XmlElement): + """ + Classifies sensitivity of the contact into the following categories: + * Confidential + * Normal + * Personal + * Private + + The sensitivity element cannot be repeated. + """ + + _qname = CONTACTS_TEMPLATE % 'sensitivity' + rel = 'rel' + + +class UserDefinedField(atom.core.XmlElement): + """ + Represents an arbitrary key-value pair attached to the contact. + """ + + _qname = CONTACTS_TEMPLATE % 'userDefinedField' + key = 'key' + value = 'value' + + +def UserDefinedFieldFromString(xml_string): + return atom.core.parse(UserDefinedField, xml_string) + + +class Website(atom.core.XmlElement): + """ + Describes websites associated with the contact, including links. + May be repeated. + """ + + _qname = CONTACTS_TEMPLATE % 'website' + + href = 'href' + label = 'label' + primary = 'primary' + rel = 'rel' + + +def WebsiteFromString(xml_string): + return atom.core.parse(Website, xml_string) + + +class HouseName(atom.core.XmlElement): + """ + Used in places where houses or buildings have names (and + not necessarily numbers), eg. "The Pillars". + """ + + _qname = CONTACTS_TEMPLATE % 'housename' + + +class Street(atom.core.XmlElement): + """ + Can be street, avenue, road, etc. This element also includes the house + number and room/apartment/flat/floor number. + """ + + _qname = CONTACTS_TEMPLATE % 'street' + + +class POBox(atom.core.XmlElement): + """ + Covers actual P.O. boxes, drawers, locked bags, etc. 
This is usually but not + always mutually exclusive with street + """ + + _qname = CONTACTS_TEMPLATE % 'pobox' + + +class Neighborhood(atom.core.XmlElement): + """ + This is used to disambiguate a street address when a city contains more than + one street with the same name, or to specify a small place whose mail is + routed through a larger postal town. In China it could be a county or a + minor city. + """ + + _qname = CONTACTS_TEMPLATE % 'neighborhood' + + +class City(atom.core.XmlElement): + """ + Can be city, village, town, borough, etc. This is the postal town and not + necessarily the place of residence or place of business. + """ + + _qname = CONTACTS_TEMPLATE % 'city' + + +class SubRegion(atom.core.XmlElement): + """ + Handles administrative districts such as U.S. or U.K. counties that are not + used for mail addressing purposes. Subregion is not intended for + delivery addresses. + """ + + _qname = CONTACTS_TEMPLATE % 'subregion' + + +class Region(atom.core.XmlElement): + """ + A state, province, county (in Ireland), Land (in Germany), + departement (in France), etc. + """ + + _qname = CONTACTS_TEMPLATE % 'region' + + +class PostalCode(atom.core.XmlElement): + """ + Postal code. Usually country-wide, but sometimes specific to the + city (e.g. "2" in "Dublin 2, Ireland" addresses). + """ + + _qname = CONTACTS_TEMPLATE % 'postcode' + + +class Country(atom.core.XmlElement): + """ The name or code of the country. """ + + _qname = CONTACTS_TEMPLATE % 'country' + + +class Status(atom.core.XmlElement): + """Person's status element.""" + + _qname = CONTACTS_TEMPLATE % 'status' + indexed = 'indexed' + + +class PersonEntry(gdata.data.BatchEntry): + """Represents a google contact""" + + link = [ContactLink] + + billing_information = BillingInformation + birthday = Birthday + calendar_link = [CalendarLink] + directory_server = DirectoryServer + event = [Event] + external_id = [ExternalId] + gender = Gender + hobby = [Hobby] + initials = Initials + jot = [Jot] + language= [Language] + maiden_name = MaidenName + mileage = Mileage + nickname = NickName + occupation = Occupation + priority = Priority + relation = [Relation] + sensitivity = Sensitivity + user_defined_field = [UserDefinedField] + website = [Website] + + name = gdata.data.Name + phone_number = [gdata.data.PhoneNumber] + organization = gdata.data.Organization + postal_address = [gdata.data.PostalAddress] + email = [gdata.data.Email] + im = [gdata.data.Im] + structured_postal_address = [gdata.data.StructuredPostalAddress] + extended_property = [gdata.data.ExtendedProperty] + + status = Status + + +class Deleted(atom.core.XmlElement): + """If present, indicates that this contact has been deleted.""" + _qname = gdata.GDATA_TEMPLATE % 'deleted' + + +class GroupMembershipInfo(atom.core.XmlElement): + """ + Identifies the group to which the contact belongs or belonged. + The group is referenced by its id. 
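+
+  Example of the serialized element (illustrative only; the href shown is
+  an assumed group id URL):
+
+    <gContact:groupMembershipInfo deleted='false'
+        href='http://www.google.com/m8/feeds/groups/user%40example.com/base/6'/>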
+ """ + + _qname = CONTACTS_TEMPLATE % 'groupMembershipInfo' + + href = 'href' + deleted = 'deleted' + + +class ContactEntry(PersonEntry): + """A Google Contacts flavor of an Atom Entry.""" + + deleted = Deleted + group_membership_info = [GroupMembershipInfo] + organization = gdata.data.Organization + + def GetPhotoLink(self): + for a_link in self.link: + if a_link.rel == PHOTO_LINK_REL: + return a_link + return None + + def GetPhotoEditLink(self): + for a_link in self.link: + if a_link.rel == PHOTO_EDIT_LINK_REL: + return a_link + return None + + +class ContactsFeed(gdata.data.BatchFeed): + """A collection of Contacts.""" + entry = [ContactEntry] + + +class SystemGroup(atom.core.XmlElement): + """The contacts systemGroup element. + + When used within a contact group entry, indicates that the group in + question is one of the predefined system groups.""" + + _qname = CONTACTS_TEMPLATE % 'systemGroup' + id = 'id' + + +class GroupEntry(gdata.data.BatchEntry): + """Represents a contact group.""" + extended_property = [gdata.data.ExtendedProperty] + system_group = SystemGroup + + +class GroupsFeed(gdata.data.BatchFeed): + """A Google contact groups feed flavor of an Atom Feed.""" + entry = [GroupEntry] + + +class ProfileEntry(PersonEntry): + """A Google Profiles flavor of an Atom Entry.""" + + +def ProfileEntryFromString(xml_string): + """Converts an XML string into a ProfileEntry object. + + Args: + xml_string: string The XML describing a Profile entry. + + Returns: + A ProfileEntry object corresponding to the given XML. + """ + return atom.core.parse(ProfileEntry, xml_string) + + +class ProfilesFeed(gdata.data.BatchFeed): + """A Google Profiles feed flavor of an Atom Feed.""" + _qname = atom.data.ATOM_TEMPLATE % 'feed' + entry = [ProfileEntry] + + +def ProfilesFeedFromString(xml_string): + """Converts an XML string into a ProfilesFeed object. + + Args: + xml_string: string The XML describing a Profiles feed. + + Returns: + A ProfilesFeed object corresponding to the given XML. + """ + return atom.core.parse(ProfilesFeed, xml_string) diff --git a/gam/gdata/analytics/contacts/service.py b/gam/gdata/analytics/contacts/service.py new file mode 100755 index 00000000000..4b017c0451b --- /dev/null +++ b/gam/gdata/analytics/contacts/service.py @@ -0,0 +1,427 @@ +#!/usr/bin/env python +# +# Copyright 2009 Google Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""ContactsService extends the GDataService for Google Contacts operations. + + ContactsService: Provides methods to query feeds and manipulate items. + Extends GDataService. + + DictionaryToParamList: Function which converts a dictionary into a list of + URL arguments (represented as strings). This is a + utility function used in CRUD operations. 
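+
+  Example (an illustrative sketch; the credentials, application name and
+  the ProgrammaticLogin call are assumed):
+
+    client = ContactsService(email='user@example.com', password='password',
+                             source='example-app')
+    client.ProgrammaticLogin()
+    feed = client.GetContactsFeed()
+    for entry in feed.entry:
+      print entry.title.text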
+""" + +__author__ = 'dbrattli (Dag Brattli)' + + +import gdata +import gdata.calendar +import gdata.service + + +DEFAULT_BATCH_URL = ('http://www.google.com/m8/feeds/contacts/default/full' + '/batch') +DEFAULT_PROFILES_BATCH_URL = ('http://www.google.com' + '/m8/feeds/profiles/default/full/batch') + +GDATA_VER_HEADER = 'GData-Version' + + +class Error(Exception): + pass + + +class RequestError(Error): + pass + + +class ContactsService(gdata.service.GDataService): + """Client for the Google Contacts service.""" + + def __init__(self, email=None, password=None, source=None, + server='www.google.com', additional_headers=None, + contact_list='default', **kwargs): + """Creates a client for the Contacts service. + + Args: + email: string (optional) The user's email address, used for + authentication. + password: string (optional) The user's password. + source: string (optional) The name of the user's application. + server: string (optional) The name of the server to which a connection + will be opened. Default value: 'www.google.com'. + contact_list: string (optional) The name of the default contact list to + use when no URI is specified to the methods of the service. + Default value: 'default' (the logged in user's contact list). + **kwargs: The other parameters to pass to gdata.service.GDataService + constructor. + """ + + self.contact_list = contact_list + gdata.service.GDataService.__init__( + self, email=email, password=password, service='cp', source=source, + server=server, additional_headers=additional_headers, **kwargs) + + def GetFeedUri(self, kind='contacts', contact_list=None, projection='full', + scheme=None): + """Builds a feed URI. + + Args: + kind: The type of feed to return, typically 'groups' or 'contacts'. + Default value: 'contacts'. + contact_list: The contact list to return a feed for. + Default value: self.contact_list. + projection: The projection to apply to the feed contents, for example + 'full', 'base', 'base/12345', 'full/batch'. Default value: 'full'. + scheme: The URL scheme such as 'http' or 'https', None to return a + relative URI without hostname. + + Returns: + A feed URI using the given kind, contact list, and projection. + Example: '/m8/feeds/contacts/default/full'. + """ + contact_list = contact_list or self.contact_list + if kind == 'profiles': + contact_list = 'domain/%s' % contact_list + prefix = scheme and '%s://%s' % (scheme, self.server) or '' + return '%s/m8/feeds/%s/%s/%s' % (prefix, kind, contact_list, projection) + + def GetContactsFeed(self, uri=None): + uri = uri or self.GetFeedUri() + return self.Get(uri, converter=gdata.contacts.ContactsFeedFromString) + + def GetContact(self, uri): + return self.Get(uri, converter=gdata.contacts.ContactEntryFromString) + + def CreateContact(self, new_contact, insert_uri=None, url_params=None, + escape_params=True): + """Adds an new contact to Google Contacts. + + Args: + new_contact: atom.Entry or subclass A new contact which is to be added to + Google Contacts. + insert_uri: the URL to post new contacts to the feed + url_params: dict (optional) Additional URL parameters to be included + in the insertion request. + escape_params: boolean (optional) If true, the url_parameters will be + escaped before they are included in the request. 
+ + Returns: + On successful insert, an entry containing the contact created + On failure, a RequestError is raised of the form: + {'status': HTTP status code from server, + 'reason': HTTP reason from the server, + 'body': HTTP body of the server's response} + """ + insert_uri = insert_uri or self.GetFeedUri() + return self.Post(new_contact, insert_uri, url_params=url_params, + escape_params=escape_params, + converter=gdata.contacts.ContactEntryFromString) + + def UpdateContact(self, edit_uri, updated_contact, url_params=None, + escape_params=True): + """Updates an existing contact. + + Args: + edit_uri: string The edit link URI for the element being updated + updated_contact: string, atom.Entry or subclass containing + the Atom Entry which will replace the contact which is + stored at the edit_url + url_params: dict (optional) Additional URL parameters to be included + in the update request. + escape_params: boolean (optional) If true, the url_parameters will be + escaped before they are included in the request. + + Returns: + On successful update, a httplib.HTTPResponse containing the server's + response to the PUT request. + On failure, a RequestError is raised of the form: + {'status': HTTP status code from server, + 'reason': HTTP reason from the server, + 'body': HTTP body of the server's response} + """ + return self.Put(updated_contact, self._CleanUri(edit_uri), + url_params=url_params, + escape_params=escape_params, + converter=gdata.contacts.ContactEntryFromString) + + def DeleteContact(self, edit_uri, extra_headers=None, + url_params=None, escape_params=True): + """Removes an contact with the specified ID from Google Contacts. + + Args: + edit_uri: string The edit URL of the entry to be deleted. Example: + '/m8/feeds/contacts/default/full/xxx/yyy' + url_params: dict (optional) Additional URL parameters to be included + in the deletion request. + escape_params: boolean (optional) If true, the url_parameters will be + escaped before they are included in the request. + + Returns: + On successful delete, a httplib.HTTPResponse containing the server's + response to the DELETE request. + On failure, a RequestError is raised of the form: + {'status': HTTP status code from server, + 'reason': HTTP reason from the server, + 'body': HTTP body of the server's response} + """ + return self.Delete(self._CleanUri(edit_uri), + url_params=url_params, escape_params=escape_params) + + def GetGroupsFeed(self, uri=None): + uri = uri or self.GetFeedUri('groups') + return self.Get(uri, converter=gdata.contacts.GroupsFeedFromString) + + def CreateGroup(self, new_group, insert_uri=None, url_params=None, + escape_params=True): + insert_uri = insert_uri or self.GetFeedUri('groups') + return self.Post(new_group, insert_uri, url_params=url_params, + escape_params=escape_params, + converter=gdata.contacts.GroupEntryFromString) + + def UpdateGroup(self, edit_uri, updated_group, url_params=None, + escape_params=True): + return self.Put(updated_group, self._CleanUri(edit_uri), + url_params=url_params, + escape_params=escape_params, + converter=gdata.contacts.GroupEntryFromString) + + def DeleteGroup(self, edit_uri, extra_headers=None, + url_params=None, escape_params=True): + return self.Delete(self._CleanUri(edit_uri), + url_params=url_params, escape_params=escape_params) + + def ChangePhoto(self, media, contact_entry_or_url, content_type=None, + content_length=None): + """Change the photo for the contact by uploading a new photo. 
+ + Performs a PUT against the photo edit URL to send the binary data for the + photo. + + Args: + media: filename, file-like-object, or a gdata.MediaSource object to send. + contact_entry_or_url: ContactEntry or str If it is a ContactEntry, this + method will search for an edit photo link URL and + perform a PUT to the URL. + content_type: str (optional) the mime type for the photo data. This is + necessary if media is a file or file name, but if media + is a MediaSource object then the media object can contain + the mime type. If media_type is set, it will override the + mime type in the media object. + content_length: int or str (optional) Specifying the content length is + only required if media is a file-like object. If media + is a filename, the length is determined using + os.path.getsize. If media is a MediaSource object, it is + assumed that it already contains the content length. + """ + if isinstance(contact_entry_or_url, gdata.contacts.ContactEntry): + url = contact_entry_or_url.GetPhotoEditLink().href + else: + url = contact_entry_or_url + if isinstance(media, gdata.MediaSource): + payload = media + # If the media object is a file-like object, then use it as the file + # handle in the in the MediaSource. + elif hasattr(media, 'read'): + payload = gdata.MediaSource(file_handle=media, + content_type=content_type, content_length=content_length) + # Assume that the media object is a file name. + else: + payload = gdata.MediaSource(content_type=content_type, + content_length=content_length, file_path=media) + return self.Put(payload, url) + + def GetPhoto(self, contact_entry_or_url): + """Retrives the binary data for the contact's profile photo as a string. + + Args: + contact_entry_or_url: a gdata.contacts.ContactEntry objecr or a string + containing the photo link's URL. If the contact entry does not + contain a photo link, the image will not be fetched and this method + will return None. + """ + # TODO: add the ability to write out the binary image data to a file, + # reading and writing a chunk at a time to avoid potentially using up + # large amounts of memory. + url = None + if isinstance(contact_entry_or_url, gdata.contacts.ContactEntry): + photo_link = contact_entry_or_url.GetPhotoLink() + if photo_link: + url = photo_link.href + else: + url = contact_entry_or_url + if url: + return self.Get(url, converter=str) + else: + return None + + def DeletePhoto(self, contact_entry_or_url): + url = None + if isinstance(contact_entry_or_url, gdata.contacts.ContactEntry): + url = contact_entry_or_url.GetPhotoEditLink().href + else: + url = contact_entry_or_url + if url: + self.Delete(url) + + def GetProfilesFeed(self, uri=None): + """Retrieves a feed containing all domain's profiles. + + Args: + uri: string (optional) the URL to retrieve the profiles feed, + for example /m8/feeds/profiles/default/full + + Returns: + On success, a ProfilesFeed containing the profiles. + On failure, raises a RequestError. + """ + + uri = uri or self.GetFeedUri('profiles') + return self.Get(uri, + converter=gdata.contacts.ProfilesFeedFromString) + + def GetProfile(self, uri): + """Retrieves a domain's profile for the user. + + Args: + uri: string the URL to retrieve the profiles feed, + for example /m8/feeds/profiles/default/full/username + + Returns: + On success, a ProfileEntry containing the profile for the user. 
+ On failure, raises a RequestError + """ + return self.Get(uri, + converter=gdata.contacts.ProfileEntryFromString) + + def UpdateProfile(self, edit_uri, updated_profile, url_params=None, + escape_params=True): + """Updates an existing profile. + + Args: + edit_uri: string The edit link URI for the element being updated + updated_profile: string atom.Entry or subclass containing + the Atom Entry which will replace the profile which is + stored at the edit_url. + url_params: dict (optional) Additional URL parameters to be included + in the update request. + escape_params: boolean (optional) If true, the url_params will be + escaped before they are included in the request. + + Returns: + On successful update, a httplib.HTTPResponse containing the server's + response to the PUT request. + On failure, raises a RequestError. + """ + return self.Put(updated_profile, self._CleanUri(edit_uri), + url_params=url_params, escape_params=escape_params, + converter=gdata.contacts.ProfileEntryFromString) + + def ExecuteBatch(self, batch_feed, url, + converter=gdata.contacts.ContactsFeedFromString): + """Sends a batch request feed to the server. + + Args: + batch_feed: gdata.contacts.ContactFeed A feed containing batch + request entries. Each entry contains the operation to be performed + on the data contained in the entry. For example an entry with an + operation type of insert will be used as if the individual entry + had been inserted. + url: str The batch URL to which these operations should be applied. + converter: Function (optional) The function used to convert the server's + response to an object. The default value is ContactsFeedFromString. + + Returns: + The results of the batch request's execution on the server. If the + default converter is used, this is stored in a ContactsFeed. + """ + return self.Post(batch_feed, url, converter=converter) + + def ExecuteBatchProfiles(self, batch_feed, url, + converter=gdata.contacts.ProfilesFeedFromString): + """Sends a batch request feed to the server. + + Args: + batch_feed: gdata.profiles.ProfilesFeed A feed containing batch + request entries. Each entry contains the operation to be performed + on the data contained in the entry. For example an entry with an + operation type of insert will be used as if the individual entry + had been inserted. + url: string The batch URL to which these operations should be applied. + converter: Function (optional) The function used to convert the server's + response to an object. The default value is + gdata.profiles.ProfilesFeedFromString. + + Returns: + The results of the batch request's execution on the server. If the + default converter is used, this is stored in a ProfilesFeed. + """ + return self.Post(batch_feed, url, converter=converter) + + def _CleanUri(self, uri): + """Sanitizes a feed URI. + + Args: + uri: The URI to sanitize, can be relative or absolute. + + Returns: + The given URI without its http://server prefix, if any. + Keeps the leading slash of the URI. 
+ """ + url_prefix = 'http://%s' % self.server + if uri.startswith(url_prefix): + uri = uri[len(url_prefix):] + return uri + +class ContactsQuery(gdata.service.Query): + + def __init__(self, feed=None, text_query=None, params=None, + categories=None, group=None): + self.feed = feed or '/m8/feeds/contacts/default/full' + if group: + self._SetGroup(group) + gdata.service.Query.__init__(self, feed=self.feed, text_query=text_query, + params=params, categories=categories) + + def _GetGroup(self): + if 'group' in self: + return self['group'] + else: + return None + + def _SetGroup(self, group_id): + self['group'] = group_id + + group = property(_GetGroup, _SetGroup, + doc='The group query parameter to find only contacts in this group') + +class GroupsQuery(gdata.service.Query): + + def __init__(self, feed=None, text_query=None, params=None, + categories=None): + self.feed = feed or '/m8/feeds/groups/default/full' + gdata.service.Query.__init__(self, feed=self.feed, text_query=text_query, + params=params, categories=categories) + + +class ProfilesQuery(gdata.service.Query): + """Constructs a query object for the profiles feed.""" + + def __init__(self, feed=None, text_query=None, params=None, + categories=None): + self.feed = feed or '/m8/feeds/profiles/default/full' + gdata.service.Query.__init__(self, feed=self.feed, text_query=text_query, + params=params, categories=categories) diff --git a/gam/gdata/analytics/contacts/service.pyc b/gam/gdata/analytics/contacts/service.pyc new file mode 100755 index 0000000000000000000000000000000000000000..28d61b9a60b8284b82e0c18e67a07e0cd6d0ff0e GIT binary patch literal 18426 zcmeHP&vPADR=!V9mMvSh64_Rqj7d5ZTvCE9%`hPhssNEC*)iiNnwFdaPl`wDy_VGW zQ}>&`{cH=I4K}c%SW!h4#eyno_!n5PW5y|^Zwlhd3T|FH*Gr8gYRA`-@RfwuMWPODc`-y;cU)dykMo0ICysF&rJZ0uN|Ro{8x?Vo6!AW43J(8UbG@qf zNSa(t+H60{54N(6D3A8;4m*y1|w1d$DFktgY{eRm<)+DmMJpXlhS`bE-B3Jl-( zuWwOHk_Ic`z10D7+F3HaD)_k7N~8U_)hf=)&3@MDcX6ww z@W{xO{SK~HmOj@(-iUfVdjpIU?Z)}%yB%=p=YFW4TV>>h-a*LEe+7qSYZHxP3ctb_ z3}}n5)X|Uw30cXBbgPMq7IFA)P2<{&loOqjgs1UNNcijMgOZTw1R#3d{{UogEztBF z;4^MI_`wq(63!FmxuO%UPn+k!G+>l~5@8AtUnf!lnVtjI08x4id;;PDo5}*`q`*0O z0)#?6Q?<9}~%ivkTNy!iu#~ZS|n}3cn{3I z$lAwQ-g!awzL@{aX8pV!AN3JI03a;W;`#?6?+!K10mu25sY6Rr->3!v5wkRnMJd!q z_&DiygKf0ji_^HX5UeUr29Kg{AAOx0WP&+A76&#hs*W$y8^<{81Myf@PLb6*f-$3~ zaT=sqa4)<=HDY^lJK0GvD;NdY)hkg)rgHWxbvfL9*X~>}&+oh0?yhPZN8x&R-t}X9 z@4bhQq3!IF@Jfac^Poq~8W#Y1_JuRc-vYb~z8iJbTpJkLfQIb1i!9d`L{A9s3I2OF z5dm{`6@=P$ub7}H2;1nT%ziP0r{LN|S-Q0sM;&x*eV+P9Aih33(wcNMq{PbmdQY7W zO46i&(vc$LY&Guggls;ftcTQ1;W?f}g;4iNkCalCQuumm>J?P?0S-}<=Hg^ys_|w+ zI_5@V3-*b+yUjteU+n1$4*O%AfGoW!V$B0%z8Dj@X_$v&CZDHXz}s0oO;K*#@@jF8Xl zio+b|aPE3Aw;kEo|6cs=uYT|SU*lb6oy{GFBKg#J%eMF8eE<|mP4iUm1KWqO3!8L~ zcA-+1R~D78XDMdIPsD(Yn#ZudlSi!SPVhK^rQR=sz04K_WjD*A%wMTrmP;;x-hlZl z1lUC#X+R#hK7e%B#m=uN=LfN-)B()Cz+!b=p?KkJQcFGA$)M-b#cS45Os`p(!q;qp zOJgs|gR0_d7O(IlmS;!_7J(N24qwHs$QU~>FU1_Kx~%vkM3+l=06WQ(V+y{saXT(< zK}PQ7$vV4Dq%kwofPnLjw;F8%ArFeZh&#<$NN8``iTg81JOoyRnPx`A06f+9c-Cla z%4pXM(h8hOA1KZRD8p~zQPIQ+9SCXfgmeiDdL1h&2bTT1m1p~#;^8Ragi`#hpC`v1 zKFuezUT1bzCUSxv_dvs~T%~6=ml6MfgiRjU3uqTI2_#h8DiX zuW=m1?eNp;#sy4POW!mEy*ZVfWf>5CIHGrbIsv|ZI`N9;BaB5ngt6GH&F)xpQ-*2J zV>*^Q;!Ou>{J0c72>Xz~J_51vK?|Hgt7IvNifn&DXhTr8{q1&_s(yK(TD7NOpP+tN zPSC`H@Fm81YvfSd`XWi87){ zsOa0drQqYOtqu4byhE!Us6?QLcqwsyy`sPzTDDp0KhQfZO9Ije94x<`bq;)KUi*${ zNw_lbV0y4K<1eHU5x{a9Y(cOem_)@03ZlXw(P9G(Sb;_OHXbQZ3gXfBtBH!VP;qkj z4y*cQeuCpT(7~>%-{;U{9}mpVNI6sobM5AWK|Z93RpcVbPPJ& zt}9wG)yqI2CCWD&esm0kpMaKUH`!hwDuhlzhr!58a5)uTlD$#Aac^swApe`7szQzm 
zlf)|9=oezOwN|KXU>zQfO=lqgUMvgRR71j5wQ&;YbWh~iq0^tDHASb{E5N22I?-(u zbow?f96EWcW(b;|HO4mQ0Yt{~9LD?EBZZ?kCpst{XgOc<8@n73a{ zm~`DBRiNP?K7ZoMP;TJzKyDyC2ye5_CdJR@P9)(ADd_yBjX0;P%$PaN*k~8AN<=y6ifDb} zH@N@^78D>*j#t|WS2F}mYWoJokf_&OH*oe*OSBX)QO|fS0?)X*(M=x4H?nQUuCFtg z6S-{7CoH`wIh8;b=z3myhuGYpd0 zVXLX;tpzTVg;+DPZ#Ir{M6F3%x}bC=l{(I$#M+D0yNv^tls8y}LOMG#V>l!u$XXu} zM$=+WLb7%B*bU3>CrCa@#=_6A_JDIxF6$UMAY>E~cj0^`I~WyctMuR;knHkXr)3*K zbzf*?T|&rF^TV{DbW`egv(^T&WU7*MWNicETHsts_k;+NU5vpPptZ~aEbctVLLD|U zvbKssE?TBz)xHeV8H~7F>@6V_g>^N`Dzyysrld6w>ftC%PM{y5XCvDWf-FU*g>kTi z?ztyqMQ!1D$I{hskEl#YT}cdSRkj~=7`WR{kPp_}#u0U7cA*y)dked9VUs_Mt2VVy zhbL0VedPEMXvG2}LI6S!n3t%FxEMlRZjKQTMJIVv5liVVc?*^;sAf2*7I3QrCx*F7 z_k~P*L94(&(D90oDc^7`vi3k@TS)34ke9aON^44eD;g^n94MH$gaNcca9o4n-bDyV zy0eG$h;*k$1xfJkMHUO1@{me-p2|Tj5ZWTbQ3cHvb`kOD0R##^WMNuR;vNH&q`qEs zK>R%sD@D*=#_}&{M%G`M*Dg0^#wL)9m~2irrW-SIZZji8RmWq6W!pGG0U^r@g$2!0 zv)WAlA)i3IJwvKSbWnbiN(y=n<$rQhR320t^cU3GkHsj4f|JNUGG5GVplUhPOa2j7 z0wDyPBip82F+yVqr+|itjUtI9D5Q{vF2Cta68>0x(u zM3T1Nbj@Mqq$$IkM3UAepX+U1A-^z=Vx}lTC8v}y|KjwsT^M4DVFvIfy)42Tkzc7Z z15xp1alj&%fYC7h@~j{_{|HtX8kU;oFJudWV`T`^csPv1wFqv0r0ti+3xs6?sj@Y} zOg-6iU;@GfkCRBZ{VP~l;{XtlTDq;L4Ub?4xdw#~KW)?XfB_m0lC zd0hD>rQ599AKRn-r{tg60B4ssn9~F1wiu~cYTRmgtc;@7S zR1DFo#BHdIeNbZj#Vf>k;tr3}8X)Mnd=)GJzy9Xgm|II_PdAO(lpQirAR8%sl0o&q zi#}*xQ~6j>u5Mw=XRRG5)}XUyDxPv1h?SMyETmhU}CCs#USo#{i-$RmVxS82`?hL z)*P+~+;}QEEV@guPpBVtwez*A_9Cv1K>$Q+Kwi$hl&BP=P+O=#C&KAUP4A;#EU7Mg zg5}g$*4SlbvT75{s(noutS*eJZ5yh^D_H;Fb{)*I`$l?yD2E+jVst$U$qk@)VG`-8 zUkWZO%o!?JQEzu>wOx&MhRihf2lZn&kM-%x0X~GW89#~J*lC0rb(8AY(YacVg1#r^ z_}Lqn1z5ew03PogJc9|!5TEo@d#Azks?eZwWWhFZEaVkAFQ zMyxaUenG^$&a*EN@9;AS+{ppM~o|LVn5*3w^@ir+7=esh71yg zH+cFEPh^Df1D?Lm({J!}ho^O(KIQ3$JZ&CQm=&X@jTV<>@_~syLKf zeZ5^h>@YiLKf(!-xUoq@p{8cByXUp3@yRBxuQVG|Z!{azvs2@^o4_58>2ds?o4hhP zIrYlq1zf*2b7|(1tl#jK!VkHFx{X@p9|IOQamXgbST@;m>u3GPnCIA=j=X5YJVya6 zBp;KDQzo7;@wkamShA)tbFnKFJ0@9an{nzBnw$QqLOs!bswzY70=r--L* zoODYWmzPw0^7UARZT%U=g8>LLHVp=OrSW#7P0`>%=~%&Gui~U6e>h?COG49zd3}?V z6Y8<#dWhLK5{eZu(kn)wkJ(S98UiGe=>H3Lm~3I-Kb_I&GMgvTNM|Wu;1bSz!+bd0 zJ-pQ2--f!C!|RHC40y2n1(i56`mcF^jHm5{(f!9!KhtJrs-<$;3syNV@z|-TBKKy;u@h2P;Ovl; z(rYpyb`THf$=<|aiLwq4WGy&mgFgTRYzS{s%1Ng(z*S25GM)}edAVl8ZE60}^2|O| z@+`z0{A&WE1WY3YB6)(<9j%-rcR%&E>6? zk6Q?y6#P30{^>^e>8(3=*EVafsjhCXE^jTjK3fZ0AFnO1u7#o-)}FLu`3nP#@0pNH zC%dgf#`Z#y9uZ%HSx9~lIjobQkD%^{`bR^4i|H9rl@ntVKuG*=@@k?vJ3c#p^=D@D J>g?6Y{{~BRf205a literal 0 HcmV?d00001 diff --git a/gam/gdata/analytics/contentforshopping/__init__.py b/gam/gdata/analytics/contentforshopping/__init__.py new file mode 100755 index 00000000000..5a5ed8e1dd9 --- /dev/null +++ b/gam/gdata/analytics/contentforshopping/__init__.py @@ -0,0 +1,21 @@ +#!/usr/bin/python +# +# Copyright (C) 2010-2011 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ + +"""Support for the Content API for Shopping + +See: http://code.google.com/apis/shopping/content/index.html +""" diff --git a/gam/gdata/analytics/contentforshopping/client.py b/gam/gdata/analytics/contentforshopping/client.py new file mode 100755 index 00000000000..815201e87ae --- /dev/null +++ b/gam/gdata/analytics/contentforshopping/client.py @@ -0,0 +1,237 @@ +#!/usr/bin/python +# +# Copyright (C) 2010-2011 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +"""Extend the gdata client for the Content API for Shopping. + +TODO: + +1. Proper MCA Support. +2. Better datafeed Support. +""" + + +__author__ = 'afshar (Ali Afshar)' + + +import gdata.client +import atom.data + +from gdata.contentforshopping.data import (ProductEntry, ProductFeed, + DatafeedFeed, ClientAccountFeed, ClientAccount) + + +CFS_VERSION = 'v1' +CFS_HOST = 'content.googleapis.com' +CFS_URI = 'https://%s/content' % CFS_HOST +CFS_PROJECTION = 'generic' + + +class ContentForShoppingClient(gdata.client.GDClient): + """Client for Content for Shopping API. + + :param account_id: Merchant account ID. This value will be used by default + for all requests, but may be overridden on a + request-by-request basis. + :param api_version: The version of the API to target. Default value: 'v1'. + :param **kwargs: Pass all addtional keywords to the GDClient constructor. + """ + + api_version = '1.0' + + def __init__(self, account_id=None, api_version=CFS_VERSION, **kwargs): + self.cfs_account_id = account_id + self.cfs_api_version = api_version + gdata.client.GDClient.__init__(self, **kwargs) + + def _create_uri(self, account_id, resource, path=(), use_projection=True): + """Create a request uri from the given arguments. + + If arguments are None, use the default client attributes. + """ + account_id = account_id or self.cfs_account_id + if account_id is None: + raise ValueError('No Account ID set. ' + 'Either set for the client, or per request') + segments = [CFS_URI, self.cfs_api_version, account_id, resource] + if use_projection: + segments.append(CFS_PROJECTION) + segments.extend(path) + return '/'.join(segments) + + def _create_product_id(self, id, country, language): + return 'online:%s:%s:%s' % (language, country, id) + + def _create_batch_feed(self, entries, operation, feed=None): + if feed is None: + feed = ProductFeed() + for entry in entries: + entry.batch_operation = gdata.data.BatchOperation(type=operation) + feed.entry.append(entry) + return feed + + def get_products(self, start_index=None, max_results=None, account_id=None, + auth_token=None): + """Get a feed of products for the account. + + :param max_results: The maximum number of results to return (default 25, + maximum 250). + :param start_index: The starting index of the feed to return (default 1, + maximum 10000) + :param account_id: The Merchant Center Account ID. 
If ommitted the default + Account ID will be used for this client + """ + uri = self._create_uri(account_id, 'items/products') + return self.get_feed(uri, auth_token=auth_token, + desired_class=gdata.contentforshopping.data.ProductFeed) + + def get_product(self, id, country, language, account_id=None, + auth_token=None): + """Get a product by id, country and language. + + :param id: The product ID + :param country: The country (target_country) + :param language: The language (content_language) + """ + pid = self._create_product_id(id, country, language) + uri = self._create_uri(account_id, 'items/products', [pid]) + return self.get_entry(uri, desired_class=ProductEntry, + auth_token=auth_token) + + def insert_product(self, product, account_id=None, auth_token=None): + """Create a new product, by posting the product entry feed. + + :param product: A :class:`gdata.contentforshopping.data.ProductEntry` with + the required product data. + :param account_id: The Merchant Center Account ID. If ommitted the default + Account ID will be used for this client + """ + uri = self._create_uri(account_id, 'items/products') + return self.post(product, uri=uri, auth_token=auth_token) + + def insert_products(self, products, account_id=None, auth_token=None): + """Insert the products using a batch request + + :param products: A list of product entries + """ + feed = self._create_batch_feed(products, 'insert') + return self.batch(feed) + + def delete_products(self, products, account_id=None, auth_token=None): + """Delete the products using a batch request. + + :param products: A list of product entries + + .. note:: Entries must have the atom:id element set. + """ + feed = self._create_batch_feed(products, 'delete') + return self.batch(feed) + + def update_products(self, products, account_id=None, auth_token=None): + """Update the products using a batch request + + :param products: A list of product entries + + .. note:: Entries must have the atom:id element set. + """ + feed = self._create_batch_feed(products, 'update') + return self.batch(feed) + + def batch(self, feed, account_id=None, auth_token=None): + """Send a batch request. + + :param feed: The feed of batch entries to send. + :param account_id: The Merchant Center Account ID. If ommitted the default + Account ID will be used for this client + """ + uri = self._create_uri(account_id, 'items/products', ['batch']) + return self.post(feed, uri=uri, auth_token=auth_token, + desired_class=ProductFeed) + + def update_product(self, product, account_id=None, + auth_token=None): + """Update a product, by putting the product entry feed. + + :param product: A :class:`gdata.contentforshopping.data.ProductEntry` with + the required product data. + :param account_id: The Merchant Center Account ID. If ommitted the default + Account ID will be used for this client + """ + pid = self._create_product_id(product.id.text, product.target_country.text, + product.content_language.text) + uri = self._create_uri(account_id, 'items/products', [pid]) + return self.update(product, uri=uri, auth_token=auth_token) + + def get_datafeeds(self, account_id=None): + """Get the feed of datafeeds. + """ + uri = self._create_uri(account_id, 'datafeeds/products', + use_projection=False) + return self.get_feed(uri, desired_class=DatafeedFeed) + + def insert_datafeed(self, entry, account_id=None, auth_token=None): + """Insert a datafeed. 
+ """ + uri = self._create_uri(account_id, 'datafeeds/products', + use_projection=False) + return self.post(entry, uri=uri, auth_token=auth_token) + + def get_client_accounts(self, account_id=None, auth_token=None): + """Get the feed of managed accounts + + :param account_id: The Merchant Center Account ID. If ommitted the default + Account ID will be used for this client + """ + uri = self._create_uri(account_id, 'managedaccounts/products', + use_projection=False) + return self.get_feed(uri, desired_class=ClientAccountFeed, + auth_token=auth_token) + + def insert_client_account(self, entry, account_id=None, auth_token=None): + """Insert a client account entry + + :param entry: An entry of type ClientAccount + :param account_id: The Merchant Center Account ID. If ommitted the default + Account ID will be used for this client + """ + uri = self._create_uri(account_id, 'managedaccounts/products', + use_projection=False) + return self.post(entry, uri=uri, auth_token=auth_token) + + def update_client_account(self, entry, client_account_id, account_id=None, auth_token=None): + """Update a client account + + :param entry: An entry of type ClientAccount to update to + :param client_account_id: The client account ID + :param account_id: The Merchant Center Account ID. If ommitted the default + Account ID will be used for this client + """ + uri = self._create_uri(account_id, 'managedaccounts/products', + [client_account_id], use_projection=False) + return self.update(entry, uri=uri, auth_token=auth_token) + + def delete_client_account(self, client_account_id, account_id=None, + auth_token=None): + """Delete a client account + + :param client_account_id: The client account ID + :param account_id: The Merchant Center Account ID. If ommitted the default + Account ID will be used for this client + """ + + uri = self._create_uri(account_id, 'managedaccounts/products', + [client_account_id], use_projection=False) + return self.delete(uri, auth_token=auth_token) diff --git a/gam/gdata/analytics/contentforshopping/data.py b/gam/gdata/analytics/contentforshopping/data.py new file mode 100755 index 00000000000..2f09a42d280 --- /dev/null +++ b/gam/gdata/analytics/contentforshopping/data.py @@ -0,0 +1,1175 @@ +#!/usr/bin/python +# +# Copyright (C) 2010-2011 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +"""GData definitions for Content API for Shopping""" + + +__author__ = 'afshar (Ali Afshar)' + + +import atom.core +import atom.data +import gdata.data + + +SC_NAMESPACE_TEMPLATE = ('{http://schemas.google.com/' + 'structuredcontent/2009}%s') +SCP_NAMESPACE_TEMPLATE = ('{http://schemas.google.com/' + 'structuredcontent/2009/products}%s') + + +class ProductId(atom.core.XmlElement): + """sc:id element + + It is required that all inserted products are provided with a unique + alphanumeric ID, in this element. 
+ """ + _qname = SC_NAMESPACE_TEMPLATE % 'id' + + +class RequiredDestination(atom.core.XmlElement): + """sc:required_destination element + + This element defines the required destination for a product, namely + "ProductSearch", "ProductAds" or "CommerceSearch". It should be added to the + app:control element (ProductEntry's "control" attribute) to specify where the + product should appear in search APIs. + + By default, when omitted, the api attempts to upload to as many destinations + as possible. + """ + _qname = SC_NAMESPACE_TEMPLATE % 'required_destination' + dest = 'dest' + + +class ExcludedDestination(atom.core.XmlElement): + """sc:excluded_destination element + + This element defines the required destination for a product, namely + "ProductSearch", "ProductAds" or "CommerceSearch". It should be added to the + app:control element (ProductEntry's "control" attribute) to specify where the + product should not appear in search APIs. + + By default, when omitted, the api attempts to upload to as many destinations + as possible. + """ + _qname = SC_NAMESPACE_TEMPLATE % 'excluded_destination' + dest = 'dest' + + +class ProductControl(atom.data.Control): + """ + app:control element + + overridden to provide additional elements in the sc namespace. + """ + required_destination = RequiredDestination + excluded_destination = ExcludedDestination + + +class ContentLanguage(atom.core.XmlElement): + """ + sc:content_language element + + Language used in the item content for the product + """ + _qname = SC_NAMESPACE_TEMPLATE % 'content_language' + + +class TargetCountry(atom.core.XmlElement): + """ + sc:target_country element + + The target country of the product + """ + _qname = SC_NAMESPACE_TEMPLATE % 'target_country' + + +class ImageLink(atom.core.XmlElement): + """sc:image_link element + + This is the URL of an associated image for a product. Please use full size + images (400x400 pixels or larger), not thumbnails. + """ + _qname = SC_NAMESPACE_TEMPLATE % 'image_link' + + +class ExpirationDate(atom.core.XmlElement): + """sc:expiration_date + + This is the date when the product listing will expire. If omitted, this will + default to 30 days after the product was created. + """ + _qname = SC_NAMESPACE_TEMPLATE % 'expiration_date' + + +class Adult(atom.core.XmlElement): + """sc:adult element + + Indicates whether the content is targeted towards adults, with possible + values of "true" or "false". Defaults to "false". + """ + _qname = SC_NAMESPACE_TEMPLATE % 'adult' + + +class Author(atom.core.XmlElement): + """ + scp:author element + + Defines the author of the information, recommended for books. + """ + _qname = SCP_NAMESPACE_TEMPLATE % 'author' + + +class Availability(atom.core.XmlElement): + """ + scp:availability element + + The retailer's suggested label for product availability. Supported values + include: 'in stock', 'out of stock', 'limited availability'. + """ + _qname = SCP_NAMESPACE_TEMPLATE % 'availability' + + +class Brand(atom.core.XmlElement): + """ + scp:brand element + + The brand of the product + """ + _qname = SCP_NAMESPACE_TEMPLATE % 'brand' + + +class Color(atom.core.XmlElement): + """scp:color element + + The color of the product. + """ + _qname = SCP_NAMESPACE_TEMPLATE % 'color' + + +class Condition(atom.core.XmlElement): + """scp:condition element + + The condition of the product, one of "new", "used", "refurbished" + """ + _qname = SCP_NAMESPACE_TEMPLATE % 'condition' + + +class Edition(atom.core.XmlElement): + """scp:edition element + + The edition of the product. 
Recommended for products with multiple editions + such as collectors' editions etc, such as books. + """ + _qname = SCP_NAMESPACE_TEMPLATE % 'edition' + + +class Feature(atom.core.XmlElement): + """scp:feature element + + A product feature. A product may have multiple features, each being text, for + example a smartphone may have features: "wifi", "gps" etc. + """ + _qname = SCP_NAMESPACE_TEMPLATE % 'feature' + + +class FeaturedProduct(atom.core.XmlElement): + """scp:featured_product element + + Used to indicate that this item is a special, featured product; Supported + values are: "true", "false". + """ + _qname = SCP_NAMESPACE_TEMPLATE % 'featured_product' + + +class Genre(atom.core.XmlElement): + """scp:genre element + + Describes the genre of a product, eg "comedy". Strongly recommended for media. + """ + _qname = SCP_NAMESPACE_TEMPLATE % 'genre' + + +class Gtin(atom.core.XmlElement): + """scp:gtin element + + GTIN of the product (isbn/upc/ean) + """ + _qname = SCP_NAMESPACE_TEMPLATE % 'gtin' + + +class Manufacturer(atom.core.XmlElement): + """scp:manufacturer element + + Manufacturer of the product. + """ + _qname = SCP_NAMESPACE_TEMPLATE % 'manufacturer' + + +class Mpn(atom.core.XmlElement): + """scp:mpn element + + Manufacturer's Part Number. A unique code determined by the manufacturer for + the product. + """ + _qname = SCP_NAMESPACE_TEMPLATE % 'mpn' + + +class Price(atom.core.XmlElement): + """scp:price element + + The price of the product. The unit attribute must be set, and should represent + the currency. + + Note: Required Element + """ + _qname = SCP_NAMESPACE_TEMPLATE % 'price' + unit = 'unit' + + +class ProductType(atom.core.XmlElement): + """scp:product_type element + + Describes the type of product. A taxonomy of available product types is + listed at http://www.google.com/basepages/producttype/taxonomy.txt and the + entire line in the taxonomy should be included, for example "Electronics > + Video > Projectors". + """ + _qname = SCP_NAMESPACE_TEMPLATE % 'product_type' + + +class Quantity(atom.core.XmlElement): + """scp:quantity element + + The number of items available. A value of 0 indicates items that are + currently out of stock. + """ + _qname = SCP_NAMESPACE_TEMPLATE % 'quantity' + + +class ShippingCountry(atom.core.XmlElement): + """scp:shipping_country element + + The two-letter ISO 3166 country code for the country to which an item will + ship. + + This element should be placed inside the scp:shipping element. + """ + _qname = SCP_NAMESPACE_TEMPLATE % 'shipping_country' + + +class ShippingPrice(atom.core.XmlElement): + """scp:shipping_price element + + Fixed shipping price, represented as a number. Specify the currency as the + "unit" attribute". + + This element should be placed inside the scp:shipping element. + """ + _qname = SCP_NAMESPACE_TEMPLATE % 'shipping_price' + unit = 'unit' + + +class ShippingRegion(atom.core.XmlElement): + """scp:shipping_region element + + The geographic region to which a shipping rate applies, e.g., in the US, the + two-letter state abbreviation, ZIP code, or ZIP code range using * wildcard. + + This element should be placed inside the scp:shipping element. + """ + _qname = SCP_NAMESPACE_TEMPLATE % 'shipping_region' + + +class ShippingService(atom.core.XmlElement): + """scp:shipping_service element + + A free-form description of the service class or delivery speed. + + This element should be placed inside the scp:shipping element. 
+ """ + _qname = SCP_NAMESPACE_TEMPLATE % 'shipping_service' + + +class Shipping(atom.core.XmlElement): + """scp:shipping element + + Container for the shipping rules as provided by the shipping_country, + shipping_price, shipping_region and shipping_service tags. + """ + _qname = SCP_NAMESPACE_TEMPLATE % 'shipping' + shipping_price = ShippingPrice + shipping_country = ShippingCountry + shipping_service = ShippingService + shipping_region = ShippingRegion + + +class ShippingWeight(atom.core.XmlElement): + """scp:shipping_weight element + + The shipping weight of a product. Requires a value and a unit using the unit + attribute. Valid units include lb, pound, oz, ounce, g, gram, kg, kilogram. + """ + _qname = SCP_NAMESPACE_TEMPLATE % 'shipping_weight' + unit = 'unit' + + +class Size(atom.core.XmlElement): + """scp:size element + + Available sizes of an item. Appropriate values include: "small", "medium", + "large", etc. The product enttry may contain multiple sizes, to indicate the + available sizes. + """ + _qname = SCP_NAMESPACE_TEMPLATE % 'size' + + +class TaxRate(atom.core.XmlElement): + """scp:tax_rate element + + The tax rate as a percent of the item price, i.e., number, as a percentage. + + This element should be placed inside the scp:tax (Tax class) element. + """ + _qname = SCP_NAMESPACE_TEMPLATE % 'tax_rate' + + +class TaxCountry(atom.core.XmlElement): + """scp:tax_country element + + The country an item is taxed in (as a two-letter ISO 3166 country code). + + This element should be placed inside the scp:tax (Tax class) element. + """ + _qname = SCP_NAMESPACE_TEMPLATE % 'tax_country' + + +class TaxRegion(atom.core.XmlElement): + """scp:tax_region element + + The geographic region that a tax rate applies to, e.g., in the US, the + two-letter state abbreviation, ZIP code, or ZIP code range using * wildcard. + + This element should be placed inside the scp:tax (Tax class) element. + """ + _qname = SCP_NAMESPACE_TEMPLATE % 'tax_region' + + +class TaxShip(atom.core.XmlElement): + """scp:tax_ship element + + Whether tax is charged on shipping for this product. The default value is + "false". + + This element should be placed inside the scp:tax (Tax class) element. + """ + _qname = SCP_NAMESPACE_TEMPLATE % 'tax_ship' + + +class Tax(atom.core.XmlElement): + """scp:tax element + + Container for the tax rules for this product. Containing the tax_rate, + tax_country, tax_region, and tax_ship elements + """ + _qname = SCP_NAMESPACE_TEMPLATE % 'tax' + tax_rate = TaxRate + tax_country = TaxCountry + tax_region = TaxRegion + tax_ship = TaxShip + + +class Year(atom.core.XmlElement): + """scp:year element + + The year the product was produced. Expects four digits + """ + _qname = SCP_NAMESPACE_TEMPLATE % 'year' + + +class ProductEntry(gdata.data.BatchEntry): + """Product entry containing product information + + The elements of this entry that are used are made up of five different + namespaces. They are: + + atom: - Atom + app: - Atom Publishing Protocol + gd: - Google Data API + sc: - Content API for Shopping, general attributes + scp: - Content API for Shopping, product attributes + + Only the sc and scp namespace elements are defined here, but additional useful + elements are defined in superclasses, and are documented here because they are + part of the required Content for Shopping API. + + .. attribute:: title + + The title of the product. + + This should be a :class:`atom.data.Title` element, for example:: + + entry = ProductEntry() + entry.title = atom.data.Title(u'32GB MP3 Player') + + .. 
attribute:: author + + The author of the product. + + This should be a :class:`Author` element, for example:: + + entry = ProductEntry() + entry.author = atom.data.Author(u'Isaac Asimov') + + .. attribute:: availability + + The avilability of a product. + + This should be an :class:`Availability` instance, for example:: + + entry = ProductEntry() + entry.availability = Availability('in stock') + + .. attribute:: brand + + The brand of a product. + + This should be a :class:`Brand` element, for example:: + + entry = ProductEntry() + entry.brand = Brand(u'Sony') + + .. attribute:: color + + The color of a product. + + This should be a :class:`Color` element, for example:: + + entry = ProductEntry() + entry.color = Color(u'purple') + + .. attribute:: condition + + The condition of a product. + + This should be a :class:`Condition` element, for example:: + + entry = ProductEntry() + entry.condition = Condition(u'new') + + .. attribute:: content_language + + The language for the product. + + This should be a :class:`ContentLanguage` element, for example:: + + entry = ProductEntry() + entry.content_language = ContentLanguage('EN') + + .. attribute:: edition + + The edition of the product. + + This should be a :class:`Edition` element, for example:: + + entry = ProductEntry() + entry.edition = Edition('1') + + .. attribute:: expiration + + The expiration date of this product listing. + + This should be a :class:`ExpirationDate` element, for example:: + + entry = ProductEntry() + entry.expiration_date = ExpirationDate('2011-22-03') + + .. attribute:: feature + + A list of features for this product. + + Each feature should be a :class:`Feature` element, for example:: + + entry = ProductEntry() + entry.feature.append(Feature(u'wifi')) + entry.feature.append(Feature(u'gps')) + + .. attribute:: featured_product + + Whether the product is featured. + + This should be a :class:`FeaturedProduct` element, for example:: + + entry = ProductEntry() + entry.featured_product = FeaturedProduct('true') + + .. attribute:: genre + + The genre of the product. + + This should be a :class:`Genre` element, for example:: + + entry = ProductEntry() + entry.genre = Genre(u'comedy') + + .. attribute:: image_link + + A list of links to images of the product. Each link should be an + :class:`ImageLink` element, for example:: + + entry = ProductEntry() + entry.image_link.append(ImageLink('http://myshop/cdplayer.jpg')) + + .. attribute:: manufacturer + + The manufacturer of the product. + + This should be a :class:`Manufacturer` element, for example:: + + entry = ProductEntry() + entry.manufacturer = Manufacturer('Sony') + + .. attribute:: mpn + + The manufacturer's part number for this product. + + This should be a :class:`Mpn` element, for example:: + + entry = ProductEntry() + entry.mpn = Mpn('cd700199US') + + .. attribute:: price + + The price for this product. + + This should be a :class:`Price` element, including a unit argument to + indicate the currency, for example:: + + entry = ProductEntry() + entry.price = Price('20.00', unit='USD') + + .. attribute:: gtin + + The gtin for this product. + + This should be a :class:`Gtin` element, for example:: + + entry = ProductEntry() + entry.gtin = Gtin('A888998877997') + + .. attribute:: product_type + + The type of product. + + This should be a :class:`ProductType` element, for example:: + + entry = ProductEntry() + entry.product_type = ProductType("Electronics > Video > Projectors") + + .. attribute:: publisher + + The publisher of this product. 
+ + This should be a :class:`Publisher` element, for example:: + + entry = ProductEntry() + entry.publisher = Publisher(u'Oxford University Press') + + .. attribute:: quantity + + The quantity of product available in stock. + + This should be a :class:`Quantity` element, for example:: + + entry = ProductEntry() + entry.quantity = Quantity('100') + + .. attribute:: shipping + + The shipping rules for the product. + + This should be a :class:`Shipping` with the necessary rules embedded as + elements, for example:: + + entry = ProductEntry() + entry.shipping = Shipping() + entry.shipping.shipping_price = ShippingPrice('10.00', unit='USD') + + .. attribute:: shipping_weight + + The shipping weight for this product. + + This should be a :class:`ShippingWeight` element, including a unit parameter + for the unit of weight, for example:: + + entry = ProductEntry() + entry.shipping_weight = ShippingWeight('10.45', unit='kg') + + .. attribute:: size + + A list of the available sizes for this product. + + Each item in this list should be a :class:`Size` element, for example:: + + entry = ProductEntry() + entry.size.append(Size('Small')) + entry.size.append(Size('Medium')) + entry.size.append(Size('Large')) + + .. attribute:: target_country + + The target country for the product. + + This should be a :class:`TargetCountry` element, for example:: + + entry = ProductEntry() + entry.target_country = TargetCountry('US') + + .. attribute:: tax + + The tax rules for this product. + + This should be a :class:`Tax` element, with the tax rule elements embedded + within, for example:: + + entry = ProductEntry() + entry.tax = Tax() + entry.tax.tax_rate = TaxRate('17.5') + + .. attribute:: year + + The year the product was created. + + This should be a :class:`Year` element, for example:: + + entry = ProductEntry() + entry.year = Year('2001') + + + #TODO Document these atom elements which are part of the required API + + <link> + <entry> + <id> + <category> + <content> + <author> + <created> + <updated> +""" + + author = Author + product_id = ProductId + availability = Availability + brand = Brand + color = Color + condition = Condition + content_language = ContentLanguage + edition = Edition + expiration_date = ExpirationDate + feature = [Feature] + featured_product = FeaturedProduct + genre = Genre + image_link = [ImageLink] + manufacturer = Manufacturer + mpn = Mpn + price = Price + gtin = Gtin + product_type = ProductType + quantity = Quantity + shipping = Shipping + shipping_weight = ShippingWeight + size = [Size] + target_country = TargetCountry + tax = Tax + year = Year + control = ProductControl + + +# opensearch needs overriding for wrong version +# see http://code.google.com/p/gdata-python-client/issues/detail?id=483 +class TotalResults(gdata.data.TotalResults): + + _qname = gdata.data.TotalResults._qname[1] + + +class ItemsPerPage(gdata.data.ItemsPerPage): + + _qname = gdata.data.ItemsPerPage._qname[1] + + +class StartIndex(gdata.data.StartIndex): + + _qname = gdata.data.StartIndex._qname[1] + + +class ProductFeed(gdata.data.BatchFeed): + """Represents a feed of a merchant's products.""" + entry = [ProductEntry] + total_results = TotalResults + items_per_page = ItemsPerPage + start_index = StartIndex + + +def build_entry(product_id=None, title=None, content=None, link=None, condition=None, + target_country=None, content_language=None, price=None, + price_unit=None, tax_rate=None, shipping_price=None, + shipping_price_unit=None, image_links=(), expiration_date=None, + adult=None, author=None, 
brand=None, color=None, edition=None,
+                features=(), featured_product=None, genre=None,
+                manufacturer=None, mpn=None, gtin=None, product_type=None,
+                quantity=None, shipping_country=None, shipping_region=None,
+                shipping_service=None, shipping_weight=None,
+                shipping_weight_unit=None, sizes=(), tax_country=None,
+                tax_region=None, tax_ship=None, year=None, product=None):
+  """Create a new product with the required attributes.
+
+  This function exists as an alternative constructor to help alleviate the
+  boilerplate involved in creating product definitions. You may well want to
+  fine-tune your products after creating them.
+
+  Documentation of each attribute attempts to explain the "long-hand" way of
+  achieving the same goal.
+
+  :param product_id: The unique ID for this product.
+
+    This is equivalent to creating and setting a product_id element::
+
+      entry = ProductEntry()
+      entry.product_id = ProductId(product_id)
+
+  :param title: The title of this product.
+
+    This is equivalent to creating and setting a title element::
+
+      entry = ProductEntry()
+      entry.title = atom.data.Title(title)
+
+  :param content: The description of this product.
+
+    This is equivalent to creating and setting the content element::
+
+      entry.content = atom.data.Content(content)
+
+  :param link: The URI of the link to a page describing the product.
+
+    This is equivalent to creating and setting the link element::
+
+      entry.link = atom.data.Link(href=link, rel='alternate',
+                                  type='text/html')
+
+  :param condition: The condition of the product.
+
+    This is equivalent to creating and setting the condition element::
+
+      entry.condition = Condition(condition)
+
+  :param target_country: The target country of the product
+
+    This is equivalent to creating and setting the target_country element::
+
+      entry.target_country = TargetCountry(target_country)
+
+  :param content_language: The language of the content
+
+    This is equivalent to creating and setting the content_language element::
+
+      entry.content_language = ContentLanguage(content_language)
+
+  :param price: The price of the product
+
+    This is equivalent to creating and setting the price element, using the
+    price_unit parameter as the unit::
+
+      entry.price = Price(price, unit=price_unit)
+
+  :param price_unit: The price unit of the product
+
+    See price parameter.
+
+  :param tax_rate: The tax rate for this product
+
+    This is equivalent to creating and setting the tax element and its required
+    children::
+
+      entry.tax = Tax()
+      entry.tax.tax_rate = TaxRate(tax_rate)
+
+  :param shipping_price: The price of shipping for this product
+
+    This is equivalent to creating and setting the shipping element and its
+    required children. The unit for the price is taken from the
+    shipping_price_unit parameter::
+
+      entry.shipping = Shipping()
+      entry.shipping.shipping_price = ShippingPrice(shipping_price,
+                                                    unit=shipping_price_unit)
+
+  :param shipping_price_unit: The unit of the shipping price
+
+    See shipping_price.
+
+  :param image_links: A sequence of links for images for this product.
+ + This is equivalent to creating a single image_link element for each image:: + + for image_link in image_links: + entry.image_link.append(ImageLink(image_link)) + + :param expiration_date: The date that this product listing expires + + This is equivalent to creating and setting an expiration_date element:: + + entry.expiration_date = ExpirationDate(expiration_date) + + :param adult: Whether this product listing contains adult content + + This is equivalent to creating and setting the adult element:: + + entry.adult = Adult(adult) + + :param author: The author of the product + + This is equivalent to creating and setting the author element:: + + entry.author = Author(author) + + :param brand: The brand of the product + + This is equivalent to creating and setting the brand element:: + + entry.brand = Brand(brand) + + :param color: The color of the product + + This is equivalent to creating and setting the color element:: + + entry.color = Color(color) + + :param edition: The edition of the product + + This is equivalent to creating and setting the edition element:: + + entry.edition = Edition('1') + + :param features=(): Features for this product + + Each feature in the provided sequence will create a Feature element in the + entry, equivalent to:: + + for feature in features: + entry.feature.append(Feature(feature))) + + :param featured_product: Whether this product is featured + + This is equivalent to creating and setting the featured_product element:: + + entry.featured_product = FeaturedProduct(featured_product) + + :param genre: The genre of the product + + This is equivalent to creating and setting the genre element:: + + entry.genre = Genre(genre) + + :param manufacturer: The manufacturer of the product + + This is equivalent to creating and setting the manufacturer element:: + + entry.manufacturer = Manufacturer(manufacturer) + + :param mpn: The manufacturer's part number for a product + + This is equivalent to creating and setting the mpn element:: + + entry.mpn = Mpn(mpn) + + :param gtin: The gtin for a product + + This is equivalent to creating and setting the gtin element:: + + entry.gtin = Gtin(gtin) + + :param product_type: The type of a product + + This is equivalent to creating and setting the product_type element:: + + entry.product_type = ProductType(product_type) + + :param quantity: The quantity of the product in stock + + This is equivalent to creating and setting the quantity element:: + + entry.quantity = Quantity(quantity) + + :param shipping_country: The country that this product can be shipped to + + This is equivalent to creating a Shipping element, and creating and setting + the required element within:: + + entry.shipping = Shipping() + entry.shipping.shipping_country = ShippingCountry(shipping_country) + + :param shipping_region: The region that this product can be shipped to + + This is equivalent to creating a Shipping element, and creating and setting + the required element within:: + + entry.shipping = Shipping() + entry.shipping.shipping_region = ShippingRegion(shipping_region) + + :param shipping_service: The service for shipping. 
+
+    This is equivalent to creating a Shipping element, and creating and setting
+    the required element within::
+
+      entry.shipping = Shipping()
+      entry.shipping.shipping_service = ShippingService(shipping_service)
+
+  :param shipping_weight: The shipping weight of a product
+
+    Along with the shipping_weight_unit, this is equivalent to creating and
+    setting the shipping_weight element::
+
+      entry.shipping_weight = ShippingWeight(shipping_weight,
+                                             unit=shipping_weight_unit)
+
+  :param shipping_weight_unit: The unit of shipping weight
+
+    See shipping_weight.
+
+  :param sizes: The sizes that are available for this product.
+
+    Each size in the provided sequence will add a size element to the entry,
+    like so::
+
+      for size in sizes:
+        entry.size.append(Size(size))
+
+  :param tax_country: The country in which tax rules will apply
+
+    This is equivalent to creating a Tax element, and creating and setting the
+    required sub-element::
+
+      entry.tax = Tax()
+      entry.tax.tax_country = TaxCountry(tax_country)
+
+  :param tax_region: The region in which the tax rule applies
+
+    This is equivalent to creating a Tax element, and creating and setting the
+    required sub-element::
+
+      entry.tax = Tax()
+      entry.tax.tax_region = TaxRegion(tax_region)
+
+  :param tax_ship: Whether shipping cost is taxable
+
+    This is equivalent to creating a Tax element, and creating and setting the
+    required sub-element::
+
+      entry.tax = Tax()
+      entry.tax.tax_ship = TaxShip(tax_ship)
+
+  :param year: The year the product was created
+
+    This is equivalent to creating and setting a year element::
+
+      entry.year = Year('2001')
+  """
+
+  product = product or ProductEntry()
+  if product_id is not None:
+    product.product_id = ProductId(product_id)
+  if content is not None:
+    product.content = atom.data.Content(content)
+  if title is not None:
+    product.title = atom.data.Title(title)
+  if condition is not None:
+    product.condition = Condition(condition)
+  if price is not None:
+    product.price = Price(price, unit=price_unit)
+  if content_language is not None:
+    product.content_language = ContentLanguage(content_language)
+  if target_country is not None:
+    product.target_country = TargetCountry(target_country)
+  if tax_rate is not None:
+    product.tax = Tax()
+    product.tax.tax_rate = TaxRate(tax_rate)
+  if shipping_price is not None:
+    if shipping_price_unit is None:
+      raise ValueError('Must provide shipping_price_unit if '
+                       'shipping_price is provided')
+    product.shipping = Shipping()
+    product.shipping.shipping_price = ShippingPrice(shipping_price,
+                                                    unit=shipping_price_unit)
+  if link is not None:
+    product.link.append(atom.data.Link(href=link, type='text/html',
+                                       rel='alternate'))
+  for image_link in image_links:
+    product.image_link.append(ImageLink(image_link))
+  if expiration_date is not None:
+    product.expiration_date = ExpirationDate(expiration_date)
+  if adult is not None:
+    product.adult = Adult(adult)
+  if author is not None:
+    product.author = Author(author)
+  if brand is not None:
+    product.brand = Brand(brand)
+  if color is not None:
+    product.color = Color(color)
+  if edition is not None:
+    product.edition = Edition(edition)
+  for feature in features:
+    product.feature.append(Feature(feature))
+  if featured_product is not None:
+    product.featured_product = FeaturedProduct(featured_product)
+  if genre is not None:
+    product.genre = Genre(genre)
+  if manufacturer is not None:
+    product.manufacturer = Manufacturer(manufacturer)
+  if mpn is not None:
+    product.mpn = Mpn(mpn)
+  if gtin is not None:
+    product.gtin = Gtin(gtin)
+  if product_type is
not None: + product.product_type = ProductType(product_type) + if quantity is not None: + product.quantity = Quantity(quantity) + if shipping_country is not None: + product.shipping.shipping_country = ShippingCountry( + shipping_country) + if shipping_region is not None: + product.shipping.shipping_region = ShippingRegion(shipping_region) + if shipping_service is not None: + product.shipping.shipping_service = ShippingService( + shipping_service) + if shipping_weight is not None: + product.shipping_weight = ShippingWeight(shipping_weight) + if shipping_weight_unit is not None: + product.shipping_weight.unit = shipping_weight_unit + for size in sizes: + product.size.append(Size(size)) + if tax_country is not None: + product.tax.tax_country = TaxCountry(tax_country) + if tax_region is not None: + product.tax.tax_region = TaxRegion(tax_region) + if tax_ship is not None: + product.tax.tax_ship = TaxShip(tax_ship) + if year is not None: + product.year = Year(year) + return product + + +class Edited(atom.core.XmlElement): + """sc:edited element + """ + _qname = SC_NAMESPACE_TEMPLATE % 'edited' + + +class AttributeLanguage(atom.core.XmlElement): + """sc:attribute_language element + """ + _qname = SC_NAMESPACE_TEMPLATE % 'attribute_language' + + +class Channel(atom.core.XmlElement): + """sc:channel element + """ + _qname = SC_NAMESPACE_TEMPLATE % 'channel' + + +class FeedFileName(atom.core.XmlElement): + """sc:feed_file_name element + """ + _qname = SC_NAMESPACE_TEMPLATE % 'feed_file_name' + + +class FeedType(atom.core.XmlElement): + """sc:feed_type element + """ + _qname = SC_NAMESPACE_TEMPLATE % 'feed_type' + + +class UseQuotedFields(atom.core.XmlElement): + """sc:use_quoted_fields element + """ + _qname = SC_NAMESPACE_TEMPLATE % 'use_quoted_fields' + + +class FileFormat(atom.core.XmlElement): + """sc:file_format element + """ + _qname = SC_NAMESPACE_TEMPLATE % 'file_format' + use_quoted_fields = UseQuotedFields + format = 'format' + + +class ProcessingStatus(atom.core.XmlElement): + """sc:processing_status element + """ + _qname = SC_NAMESPACE_TEMPLATE % 'processing_status' + + +class DatafeedEntry(gdata.data.GDEntry): + """An entry for a Datafeed + """ + content_language = ContentLanguage + target_country = TargetCountry + feed_file_name = FeedFileName + file_format = FileFormat + attribute_language = AttributeLanguage + processing_status = ProcessingStatus + edited = Edited + feed_type = FeedType + + +class DatafeedFeed(gdata.data.GDFeed): + """A datafeed feed + """ + entry = [DatafeedEntry] + + +class AdultContent(atom.core.XmlElement): + """sc:adult_content element + """ + _qname = SC_NAMESPACE_TEMPLATE % 'adult_content' + + +class InternalId(atom.core.XmlElement): + """sc:internal_id element + """ + _qname = SC_NAMESPACE_TEMPLATE % 'internal_id' + + +class ReviewsUrl(atom.core.XmlElement): + """sc:reviews_url element + """ + _qname = SC_NAMESPACE_TEMPLATE % 'reviews_url' + + +class ClientAccount(gdata.data.GDEntry): + """A multiclient account entry + """ + adult_content = AdultContent + internal_id = InternalId + reviews_url = ReviewsUrl + + +class ClientAccountFeed(gdata.data.GDFeed): + """A multiclient account feed + """ + entry = [ClientAccount] diff --git a/gam/gdata/analytics/core.py b/gam/gdata/analytics/core.py new file mode 100755 index 00000000000..0661ec6a0a9 --- /dev/null +++ b/gam/gdata/analytics/core.py @@ -0,0 +1,279 @@ +#!/usr/bin/env python +# +# Copyright (C) 2010 Google Inc. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +# This module is used for version 2 of the Google Data APIs. + + +__author__ = 'j.s@google.com (Jeff Scudder)' + + +"""Provides classes and methods for working with JSON-C. + +This module is experimental and subject to backwards incompatible changes. + + Jsonc: Class which represents JSON-C data and provides pythonic member + access which is a bit cleaner than working with plain old dicts. + parse_json: Converts a JSON-C string into a Jsonc object. + jsonc_to_string: Converts a Jsonc object into a string of JSON-C. +""" + + +try: + import simplejson +except ImportError: + try: + # Try to import from django, should work on App Engine + from django.utils import simplejson + except ImportError: + # Should work for Python2.6 and higher. + import json as simplejson + + +def _convert_to_jsonc(x): + """Builds a Jsonc objects which wraps the argument's members.""" + + if isinstance(x, dict): + jsonc_obj = Jsonc() + # Recursively transform all members of the dict. + # When converting a dict, we do not convert _name items into private + # Jsonc members. + for key, value in x.iteritems(): + jsonc_obj._dict[key] = _convert_to_jsonc(value) + return jsonc_obj + elif isinstance(x, list): + # Recursively transform all members of the list. + members = [] + for item in x: + members.append(_convert_to_jsonc(item)) + return members + else: + # Return the base object. + return x + + +def parse_json(json_string): + """Converts a JSON-C string into a Jsonc object. + + Args: + json_string: str or unicode The JSON to be parsed. + + Returns: + A new Jsonc object. + """ + + return _convert_to_jsonc(simplejson.loads(json_string)) + + +def parse_json_file(json_file): + return _convert_to_jsonc(simplejson.load(json_file)) + + +def jsonc_to_string(jsonc_obj): + """Converts a Jsonc object into a string of JSON-C.""" + + return simplejson.dumps(_convert_to_object(jsonc_obj)) + + +def prettify_jsonc(jsonc_obj, indentation=2): + """Converts a Jsonc object to a pretified (intented) JSON string.""" + + return simplejson.dumps(_convert_to_object(jsonc_obj), indent=indentation) + + + +def _convert_to_object(jsonc_obj): + """Creates a new dict or list which has the data in the Jsonc object. + + Used to convert the Jsonc object to a plain old Python object to simplify + conversion to a JSON-C string. + + Args: + jsonc_obj: A Jsonc object to be converted into simple Python objects + (dicts, lists, etc.) + + Returns: + Either a dict, list, or other object with members converted from Jsonc + objects to the corresponding simple Python object. + """ + + if isinstance(jsonc_obj, Jsonc): + plain = {} + for key, value in jsonc_obj._dict.iteritems(): + plain[key] = _convert_to_object(value) + return plain + elif isinstance(jsonc_obj, list): + plain = [] + for item in jsonc_obj: + plain.append(_convert_to_object(item)) + return plain + else: + return jsonc_obj + + +def _to_jsonc_name(member_name): + """Converts a Python style member name to a JSON-C style name. 
+ + JSON-C uses camelCaseWithLower while Python tends to use + lower_with_underscores so this method converts as follows: + + spam becomes spam + spam_and_eggs becomes spamAndEggs + + Args: + member_name: str or unicode The Python syle name which should be + converted to JSON-C style. + + Returns: + The JSON-C style name as a str or unicode. + """ + + characters = [] + uppercase_next = False + for character in member_name: + if character == '_': + uppercase_next = True + elif uppercase_next: + characters.append(character.upper()) + uppercase_next = False + else: + characters.append(character) + return ''.join(characters) + + +class Jsonc(object): + """Represents JSON-C data in an easy to access object format. + + To access the members of a JSON structure which looks like this: + { + "data": { + "totalItems": 800, + "items": [ + { + "content": { + "1": "rtsp://v5.cache3.c.youtube.com/CiILENy.../0/0/0/video.3gp" + }, + "viewCount": 220101, + "commentCount": 22, + "favoriteCount": 201 + } + ] + }, + "apiVersion": "2.0" + } + + You would do the following: + x = gdata.core.parse_json(the_above_string) + # Gives you 800 + x.data.total_items + # Should be 22 + x.data.items[0].comment_count + # The apiVersion is '2.0' + x.api_version + + To create a Jsonc object which would produce the above JSON, you would do: + gdata.core.Jsonc( + api_version='2.0', + data=gdata.core.Jsonc( + total_items=800, + items=[ + gdata.core.Jsonc( + view_count=220101, + comment_count=22, + favorite_count=201, + content={ + '1': ('rtsp://v5.cache3.c.youtube.com' + '/CiILENy.../0/0/0/video.3gp')})])) + or + x = gdata.core.Jsonc() + x.api_version = '2.0' + x.data = gdata.core.Jsonc() + x.data.total_items = 800 + x.data.items = [] + # etc. + + How it works: + The JSON-C data is stored in an internal dictionary (._dict) and the + getattr, setattr, and delattr methods rewrite the name which you provide + to mirror the expected format in JSON-C. (For more details on name + conversion see _to_jsonc_name.) You may also access members using + getitem, setitem, delitem as you would for a dictionary. For example + x.data.total_items is equivalent to x['data']['totalItems'] + (Not all dict methods are supported so if you need something other than + the item operations, then you will want to use the ._dict member). + + You may need to use getitem or the _dict member to access certain + properties in cases where the JSON-C syntax does not map neatly to Python + objects. For example the YouTube Video feed has some JSON like this: + "content": {"1": "rtsp://v5.cache3.c.youtube.com..."...} + You cannot do x.content.1 in Python, so you would use the getitem as + follows: + x.content['1'] + or you could use the _dict member as follows: + x.content._dict['1'] + + If you need to create a new object with such a mapping you could use. 
+ + x.content = gdata.core.Jsonc(_dict={'1': 'rtsp://cache3.c.youtube.com...'}) + """ + + def __init__(self, _dict=None, **kwargs): + json = _dict or {} + for key, value in kwargs.iteritems(): + if key.startswith('_'): + object.__setattr__(self, key, value) + else: + json[_to_jsonc_name(key)] = _convert_to_jsonc(value) + + object.__setattr__(self, '_dict', json) + + def __setattr__(self, name, value): + if name.startswith('_'): + object.__setattr__(self, name, value) + else: + object.__getattribute__( + self, '_dict')[_to_jsonc_name(name)] = _convert_to_jsonc(value) + + def __getattr__(self, name): + if name.startswith('_'): + object.__getattribute__(self, name) + else: + try: + return object.__getattribute__(self, '_dict')[_to_jsonc_name(name)] + except KeyError: + raise AttributeError( + 'No member for %s or [\'%s\']' % (name, _to_jsonc_name(name))) + + + def __delattr__(self, name): + if name.startswith('_'): + object.__delattr__(self, name) + else: + try: + del object.__getattribute__(self, '_dict')[_to_jsonc_name(name)] + except KeyError: + raise AttributeError( + 'No member for %s (or [\'%s\'])' % (name, _to_jsonc_name(name))) + + # For container methods pass-through to the underlying dict. + def __getitem__(self, key): + return self._dict[key] + + def __setitem__(self, key, value): + self._dict[key] = value + + def __delitem__(self, key): + del self._dict[key] diff --git a/gam/gdata/analytics/data.py b/gam/gdata/analytics/data.py new file mode 100755 index 00000000000..3bf18502698 --- /dev/null +++ b/gam/gdata/analytics/data.py @@ -0,0 +1,1219 @@ +#!/usr/bin/env python +# +# Copyright (C) 2009 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +# This module is used for version 2 of the Google Data APIs. + + +"""Provides classes and constants for the XML in the Google Data namespace. + +Documentation for the raw XML which these classes represent can be found here: +http://code.google.com/apis/gdata/docs/2.0/elements.html +""" + + +__author__ = 'j.s@google.com (Jeff Scudder)' + + +import os +import atom.core +import atom.data + + +GDATA_TEMPLATE = '{http://schemas.google.com/g/2005}%s' +GD_TEMPLATE = GDATA_TEMPLATE +OPENSEARCH_TEMPLATE_V1 = '{http://a9.com/-/spec/opensearchrss/1.0/}%s' +OPENSEARCH_TEMPLATE_V2 = '{http://a9.com/-/spec/opensearch/1.1/}%s' +BATCH_TEMPLATE = '{http://schemas.google.com/gdata/batch}%s' + + +# Labels used in batch request entries to specify the desired CRUD operation. 
+BATCH_INSERT = 'insert' +BATCH_UPDATE = 'update' +BATCH_DELETE = 'delete' +BATCH_QUERY = 'query' + +EVENT_LOCATION = 'http://schemas.google.com/g/2005#event' +ALTERNATE_LOCATION = 'http://schemas.google.com/g/2005#event.alternate' +PARKING_LOCATION = 'http://schemas.google.com/g/2005#event.parking' + +CANCELED_EVENT = 'http://schemas.google.com/g/2005#event.canceled' +CONFIRMED_EVENT = 'http://schemas.google.com/g/2005#event.confirmed' +TENTATIVE_EVENT = 'http://schemas.google.com/g/2005#event.tentative' + +CONFIDENTIAL_EVENT = 'http://schemas.google.com/g/2005#event.confidential' +DEFAULT_EVENT = 'http://schemas.google.com/g/2005#event.default' +PRIVATE_EVENT = 'http://schemas.google.com/g/2005#event.private' +PUBLIC_EVENT = 'http://schemas.google.com/g/2005#event.public' + +OPAQUE_EVENT = 'http://schemas.google.com/g/2005#event.opaque' +TRANSPARENT_EVENT = 'http://schemas.google.com/g/2005#event.transparent' + +CHAT_MESSAGE = 'http://schemas.google.com/g/2005#message.chat' +INBOX_MESSAGE = 'http://schemas.google.com/g/2005#message.inbox' +SENT_MESSAGE = 'http://schemas.google.com/g/2005#message.sent' +SPAM_MESSAGE = 'http://schemas.google.com/g/2005#message.spam' +STARRED_MESSAGE = 'http://schemas.google.com/g/2005#message.starred' +UNREAD_MESSAGE = 'http://schemas.google.com/g/2005#message.unread' + +BCC_RECIPIENT = 'http://schemas.google.com/g/2005#message.bcc' +CC_RECIPIENT = 'http://schemas.google.com/g/2005#message.cc' +SENDER = 'http://schemas.google.com/g/2005#message.from' +REPLY_TO = 'http://schemas.google.com/g/2005#message.reply-to' +TO_RECIPIENT = 'http://schemas.google.com/g/2005#message.to' + +ASSISTANT_REL = 'http://schemas.google.com/g/2005#assistant' +CALLBACK_REL = 'http://schemas.google.com/g/2005#callback' +CAR_REL = 'http://schemas.google.com/g/2005#car' +COMPANY_MAIN_REL = 'http://schemas.google.com/g/2005#company_main' +FAX_REL = 'http://schemas.google.com/g/2005#fax' +HOME_REL = 'http://schemas.google.com/g/2005#home' +HOME_FAX_REL = 'http://schemas.google.com/g/2005#home_fax' +ISDN_REL = 'http://schemas.google.com/g/2005#isdn' +MAIN_REL = 'http://schemas.google.com/g/2005#main' +MOBILE_REL = 'http://schemas.google.com/g/2005#mobile' +OTHER_REL = 'http://schemas.google.com/g/2005#other' +OTHER_FAX_REL = 'http://schemas.google.com/g/2005#other_fax' +PAGER_REL = 'http://schemas.google.com/g/2005#pager' +RADIO_REL = 'http://schemas.google.com/g/2005#radio' +TELEX_REL = 'http://schemas.google.com/g/2005#telex' +TTL_TDD_REL = 'http://schemas.google.com/g/2005#tty_tdd' +WORK_REL = 'http://schemas.google.com/g/2005#work' +WORK_FAX_REL = 'http://schemas.google.com/g/2005#work_fax' +WORK_MOBILE_REL = 'http://schemas.google.com/g/2005#work_mobile' +WORK_PAGER_REL = 'http://schemas.google.com/g/2005#work_pager' +NETMEETING_REL = 'http://schemas.google.com/g/2005#netmeeting' +OVERALL_REL = 'http://schemas.google.com/g/2005#overall' +PRICE_REL = 'http://schemas.google.com/g/2005#price' +QUALITY_REL = 'http://schemas.google.com/g/2005#quality' +EVENT_REL = 'http://schemas.google.com/g/2005#event' +EVENT_ALTERNATE_REL = 'http://schemas.google.com/g/2005#event.alternate' +EVENT_PARKING_REL = 'http://schemas.google.com/g/2005#event.parking' + +AIM_PROTOCOL = 'http://schemas.google.com/g/2005#AIM' +MSN_PROTOCOL = 'http://schemas.google.com/g/2005#MSN' +YAHOO_MESSENGER_PROTOCOL = 'http://schemas.google.com/g/2005#YAHOO' +SKYPE_PROTOCOL = 'http://schemas.google.com/g/2005#SKYPE' +QQ_PROTOCOL = 'http://schemas.google.com/g/2005#QQ' +GOOGLE_TALK_PROTOCOL = 
'http://schemas.google.com/g/2005#GOOGLE_TALK' +ICQ_PROTOCOL = 'http://schemas.google.com/g/2005#ICQ' +JABBER_PROTOCOL = 'http://schemas.google.com/g/2005#JABBER' + +REGULAR_COMMENTS = 'http://schemas.google.com/g/2005#regular' +REVIEW_COMMENTS = 'http://schemas.google.com/g/2005#reviews' + +MAIL_BOTH = 'http://schemas.google.com/g/2005#both' +MAIL_LETTERS = 'http://schemas.google.com/g/2005#letters' +MAIL_PARCELS = 'http://schemas.google.com/g/2005#parcels' +MAIL_NEITHER = 'http://schemas.google.com/g/2005#neither' + +GENERAL_ADDRESS = 'http://schemas.google.com/g/2005#general' +LOCAL_ADDRESS = 'http://schemas.google.com/g/2005#local' + +OPTIONAL_ATENDEE = 'http://schemas.google.com/g/2005#event.optional' +REQUIRED_ATENDEE = 'http://schemas.google.com/g/2005#event.required' + +ATTENDEE_ACCEPTED = 'http://schemas.google.com/g/2005#event.accepted' +ATTENDEE_DECLINED = 'http://schemas.google.com/g/2005#event.declined' +ATTENDEE_INVITED = 'http://schemas.google.com/g/2005#event.invited' +ATTENDEE_TENTATIVE = 'http://schemas.google.com/g/2005#event.tentative' + +FULL_PROJECTION = 'full' +VALUES_PROJECTION = 'values' +BASIC_PROJECTION = 'basic' + +PRIVATE_VISIBILITY = 'private' +PUBLIC_VISIBILITY = 'public' + +OPAQUE_TRANSPARENCY = 'http://schemas.google.com/g/2005#event.opaque' +TRANSPARENT_TRANSPARENCY = 'http://schemas.google.com/g/2005#event.transparent' + +CONFIDENTIAL_EVENT_VISIBILITY = 'http://schemas.google.com/g/2005#event.confidential' +DEFAULT_EVENT_VISIBILITY = 'http://schemas.google.com/g/2005#event.default' +PRIVATE_EVENT_VISIBILITY = 'http://schemas.google.com/g/2005#event.private' +PUBLIC_EVENT_VISIBILITY = 'http://schemas.google.com/g/2005#event.public' + +CANCELED_EVENT_STATUS = 'http://schemas.google.com/g/2005#event.canceled' +CONFIRMED_EVENT_STATUS = 'http://schemas.google.com/g/2005#event.confirmed' +TENTATIVE_EVENT_STATUS = 'http://schemas.google.com/g/2005#event.tentative' + +ACL_REL = 'http://schemas.google.com/acl/2007#accessControlList' + + +class Error(Exception): + pass + + +class MissingRequiredParameters(Error): + pass + + +class LinkFinder(atom.data.LinkFinder): + """Mixin used in Feed and Entry classes to simplify link lookups by type. + + Provides lookup methods for edit, edit-media, post, ACL and other special + links which are common across Google Data APIs. + """ + + def find_html_link(self): + """Finds the first link with rel of alternate and type of text/html.""" + for link in self.link: + if link.rel == 'alternate' and link.type == 'text/html': + return link.href + return None + + FindHtmlLink = find_html_link + + def get_html_link(self): + for a_link in self.link: + if a_link.rel == 'alternate' and a_link.type == 'text/html': + return a_link + return None + + GetHtmlLink = get_html_link + + def find_post_link(self): + """Get the URL to which new entries should be POSTed. + + The POST target URL is used to insert new entries. + + Returns: + A str for the URL in the link with a rel matching the POST type. 
+ """ + return self.find_url('http://schemas.google.com/g/2005#post') + + FindPostLink = find_post_link + + def get_post_link(self): + return self.get_link('http://schemas.google.com/g/2005#post') + + GetPostLink = get_post_link + + def find_acl_link(self): + acl_link = self.get_acl_link() + if acl_link: + return acl_link.href + + return None + + FindAclLink = find_acl_link + + def get_acl_link(self): + """Searches for a link or feed_link (if present) with the rel for ACL.""" + + acl_link = self.get_link(ACL_REL) + if acl_link: + return acl_link + elif hasattr(self, 'feed_link'): + for a_feed_link in self.feed_link: + if a_feed_link.rel == ACL_REL: + return a_feed_link + + return None + + GetAclLink = get_acl_link + + def find_feed_link(self): + return self.find_url('http://schemas.google.com/g/2005#feed') + + FindFeedLink = find_feed_link + + def get_feed_link(self): + return self.get_link('http://schemas.google.com/g/2005#feed') + + GetFeedLink = get_feed_link + + def find_previous_link(self): + return self.find_url('previous') + + FindPreviousLink = find_previous_link + + def get_previous_link(self): + return self.get_link('previous') + + GetPreviousLink = get_previous_link + + +class TotalResults(atom.core.XmlElement): + """opensearch:TotalResults for a GData feed.""" + _qname = (OPENSEARCH_TEMPLATE_V1 % 'totalResults', + OPENSEARCH_TEMPLATE_V2 % 'totalResults') + + +class StartIndex(atom.core.XmlElement): + """The opensearch:startIndex element in GData feed.""" + _qname = (OPENSEARCH_TEMPLATE_V1 % 'startIndex', + OPENSEARCH_TEMPLATE_V2 % 'startIndex') + + +class ItemsPerPage(atom.core.XmlElement): + """The opensearch:itemsPerPage element in GData feed.""" + _qname = (OPENSEARCH_TEMPLATE_V1 % 'itemsPerPage', + OPENSEARCH_TEMPLATE_V2 % 'itemsPerPage') + + +class ExtendedProperty(atom.core.XmlElement): + """The Google Data extendedProperty element. + + Used to store arbitrary key-value information specific to your + application. The value can either be a text string stored as an XML + attribute (.value), or an XML node (XmlBlob) as a child element. + + This element is used in the Google Calendar data API and the Google + Contacts data API. + """ + _qname = GDATA_TEMPLATE % 'extendedProperty' + name = 'name' + value = 'value' + + def get_xml_blob(self): + """Returns the XML blob as an atom.core.XmlElement. + + Returns: + An XmlElement representing the blob's XML, or None if no + blob was set. + """ + if self._other_elements: + return self._other_elements[0] + else: + return None + + GetXmlBlob = get_xml_blob + + def set_xml_blob(self, blob): + """Sets the contents of the extendedProperty to XML as a child node. + + Since the extendedProperty is only allowed one child element as an XML + blob, setting the XML blob will erase any preexisting member elements + in this object. + + Args: + blob: str or atom.core.XmlElement representing the XML blob stored in + the extendedProperty. + """ + # Erase any existing extension_elements, clears the child nodes from the + # extendedProperty. 
+ if isinstance(blob, atom.core.XmlElement): + self._other_elements = [blob] + else: + self._other_elements = [atom.core.parse(str(blob))] + + SetXmlBlob = set_xml_blob + + +class GDEntry(atom.data.Entry, LinkFinder): + """Extends Atom Entry to provide data processing""" + etag = '{http://schemas.google.com/g/2005}etag' + + def get_id(self): + if self.id is not None and self.id.text is not None: + return self.id.text.strip() + return None + + GetId = get_id + + def is_media(self): + if self.find_edit_media_link(): + return True + return False + + IsMedia = is_media + + def find_media_link(self): + """Returns the URL to the media content, if the entry is a media entry. + Otherwise returns None. + """ + if self.is_media(): + return self.content.src + return None + + FindMediaLink = find_media_link + + +class GDFeed(atom.data.Feed, LinkFinder): + """A Feed from a GData service.""" + etag = '{http://schemas.google.com/g/2005}etag' + total_results = TotalResults + start_index = StartIndex + items_per_page = ItemsPerPage + entry = [GDEntry] + + def get_id(self): + if self.id is not None and self.id.text is not None: + return self.id.text.strip() + return None + + GetId = get_id + + def get_generator(self): + if self.generator and self.generator.text: + return self.generator.text.strip() + return None + + +class BatchId(atom.core.XmlElement): + """Identifies a single operation in a batch request.""" + _qname = BATCH_TEMPLATE % 'id' + + +class BatchOperation(atom.core.XmlElement): + """The CRUD operation which this batch entry represents.""" + _qname = BATCH_TEMPLATE % 'operation' + type = 'type' + + +class BatchStatus(atom.core.XmlElement): + """The batch:status element present in a batch response entry. + + A status element contains the code (HTTP response code) and + reason as elements. In a single request these fields would + be part of the HTTP response, but in a batch request each + Entry operation has a corresponding Entry in the response + feed which includes status information. + + See http://code.google.com/apis/gdata/batch.html#Handling_Errors + """ + _qname = BATCH_TEMPLATE % 'status' + code = 'code' + reason = 'reason' + content_type = 'content-type' + + +class BatchEntry(GDEntry): + """An atom:entry for use in batch requests. + + The BatchEntry contains additional members to specify the operation to be + performed on this entry and a batch ID so that the server can reference + individual operations in the response feed. For more information, see: + http://code.google.com/apis/gdata/batch.html + """ + batch_operation = BatchOperation + batch_id = BatchId + batch_status = BatchStatus + + +class BatchInterrupted(atom.core.XmlElement): + """The batch:interrupted element sent if batch request was interrupted. + + Only appears in a feed if some of the batch entries could not be processed. + See: http://code.google.com/apis/gdata/batch.html#Handling_Errors + """ + _qname = BATCH_TEMPLATE % 'interrupted' + reason = 'reason' + success = 'success' + failures = 'failures' + parsed = 'parsed' + + +class BatchFeed(GDFeed): + """A feed containing a list of batch request entries.""" + interrupted = BatchInterrupted + entry = [BatchEntry] + + def add_batch_entry(self, entry=None, id_url_string=None, + batch_id_string=None, operation_string=None): + """Logic for populating members of a BatchEntry and adding to the feed. + + If the entry is not a BatchEntry, it is converted to a BatchEntry so + that the batch specific members will be present. 
+
+    The id_url_string can be used in place of an entry if the batch operation
+    applies to a URL. For example query and delete operations require just
+    the URL of an entry, no body is sent in the HTTP request. If an
+    id_url_string is sent instead of an entry, a BatchEntry is created and
+    added to the feed.
+
+    This method also assigns the desired batch id to the entry so that it
+    can be referenced in the server's response. If the batch_id_string is
+    None, this method will assign a batch_id to be the index at which this
+    entry will be in the feed's entry list.
+
+    Args:
+      entry: BatchEntry, atom.data.Entry, or another Entry flavor (optional)
+          The entry which will be sent to the server as part of the batch
+          request. The item must have a valid atom id so that the server
+          knows which entry this request references.
+      id_url_string: str (optional) The URL of the entry to be acted on. You
+          can find this URL in the text member of the atom id for an entry.
+          If an entry is not sent, this id will be used to construct a new
+          BatchEntry which will be added to the request feed.
+      batch_id_string: str (optional) The batch ID to be used to reference
+          this batch operation in the results feed. If this parameter is None,
+          the current length of the feed's entry array will be used as a
+          count. Note that batch_ids should either always be specified or
+          never; mixing could potentially result in duplicate batch ids.
+      operation_string: str (optional) The desired batch operation which will
+          set the batch_operation.type member of the entry. Options are
+          'insert', 'update', 'delete', and 'query'
+
+    Raises:
+      MissingRequiredParameters: Raised if neither an id_url_string nor an
+          entry is provided in the request.
+
+    Returns:
+      The added entry.
+    """
+    if entry is None and id_url_string is None:
+      raise MissingRequiredParameters('supply either an entry or URL string')
+    if entry is None and id_url_string is not None:
+      entry = BatchEntry(id=atom.data.Id(text=id_url_string))
+    if batch_id_string is not None:
+      entry.batch_id = BatchId(text=batch_id_string)
+    elif entry.batch_id is None or entry.batch_id.text is None:
+      entry.batch_id = BatchId(text=str(len(self.entry)))
+    if operation_string is not None:
+      entry.batch_operation = BatchOperation(type=operation_string)
+    self.entry.append(entry)
+    return entry
+
+  AddBatchEntry = add_batch_entry
+
+  def add_insert(self, entry, batch_id_string=None):
+    """Add an insert request to the operations in this batch request feed.
+
+    If the entry doesn't yet have an operation or a batch id, these will
+    be set to the insert operation and a batch_id specified as a parameter.
+
+    Args:
+      entry: BatchEntry The entry which will be sent in the batch feed as an
+          insert request.
+      batch_id_string: str (optional) The batch ID to be used to reference
+          this batch operation in the results feed. If this parameter is None,
+          the current length of the feed's entry array will be used as a
+          count. Note that batch_ids should either always be specified or
+          never; mixing could potentially result in duplicate batch ids.
+    """
+    self.add_batch_entry(entry=entry, batch_id_string=batch_id_string,
+                         operation_string=BATCH_INSERT)
+
+  AddInsert = add_insert
+
+  def add_update(self, entry, batch_id_string=None):
+    """Add an update request to the list of batch operations in this feed.
+
+    Sets the operation type of the entry to update if it is not already set
+    and assigns the desired batch id to the entry so that it can be
+    referenced in the server's response.
+ + Args: + entry: BatchEntry The entry which will be sent to the server as an + update (HTTP PUT) request. The item must have a valid atom id + so that the server knows which entry to replace. + batch_id_string: str (optional) The batch ID to be used to reference + this batch operation in the results feed. If this parameter is None, + the current length of the feed's entry array will be used as a + count. See also comments for AddInsert. + """ + self.add_batch_entry(entry=entry, batch_id_string=batch_id_string, + operation_string=BATCH_UPDATE) + + AddUpdate = add_update + + def add_delete(self, url_string=None, entry=None, batch_id_string=None): + """Adds a delete request to the batch request feed. + + This method takes either the url_string which is the atom id of the item + to be deleted, or the entry itself. The atom id of the entry must be + present so that the server knows which entry should be deleted. + + Args: + url_string: str (optional) The URL of the entry to be deleted. You can + find this URL in the text member of the atom id for an entry. + entry: BatchEntry (optional) The entry to be deleted. + batch_id_string: str (optional) + + Raises: + MissingRequiredParameters: Raised if neither a url_string nor an entry + are provided in the request. + """ + self.add_batch_entry(entry=entry, id_url_string=url_string, + batch_id_string=batch_id_string, operation_string=BATCH_DELETE) + + AddDelete = add_delete + + def add_query(self, url_string=None, entry=None, batch_id_string=None): + """Adds a query request to the batch request feed. + + This method takes either the url_string which is the query URL + whose results will be added to the result feed. The query URL will + be encapsulated in a BatchEntry, and you may pass in the BatchEntry + with a query URL instead of sending a url_string. + + Args: + url_string: str (optional) + entry: BatchEntry (optional) + batch_id_string: str (optional) + + Raises: + MissingRequiredParameters + """ + self.add_batch_entry(entry=entry, id_url_string=url_string, + batch_id_string=batch_id_string, operation_string=BATCH_QUERY) + + AddQuery = add_query + + def find_batch_link(self): + return self.find_url('http://schemas.google.com/g/2005#batch') + + FindBatchLink = find_batch_link + + +class EntryLink(atom.core.XmlElement): + """The gd:entryLink element. + + Represents a logically nested entry. For example, a <gd:who> + representing a contact might have a nested entry from a contact feed. + """ + _qname = GDATA_TEMPLATE % 'entryLink' + entry = GDEntry + rel = 'rel' + read_only = 'readOnly' + href = 'href' + + +class FeedLink(atom.core.XmlElement): + """The gd:feedLink element. + + Represents a logically nested feed. For example, a calendar feed might + have a nested feed representing all comments on entries. + """ + _qname = GDATA_TEMPLATE % 'feedLink' + feed = GDFeed + rel = 'rel' + read_only = 'readOnly' + count_hint = 'countHint' + href = 'href' + + +class AdditionalName(atom.core.XmlElement): + """The gd:additionalName element. + + Specifies additional (eg. middle) name of the person. + Contains an attribute for the phonetic representaton of the name. + """ + _qname = GDATA_TEMPLATE % 'additionalName' + yomi = 'yomi' + + +class Comments(atom.core.XmlElement): + """The gd:comments element. + + Contains a comments feed for the enclosing entry (such as a calendar event). + """ + _qname = GDATA_TEMPLATE % 'comments' + rel = 'rel' + feed_link = FeedLink + + +class Country(atom.core.XmlElement): + """The gd:country element. 
+ + Country name along with optional country code. The country code is + given in accordance with ISO 3166-1 alpha-2: + http://www.iso.org/iso/iso-3166-1_decoding_table + """ + _qname = GDATA_TEMPLATE % 'country' + code = 'code' + + +class EmailImParent(atom.core.XmlElement): + address = 'address' + label = 'label' + rel = 'rel' + primary = 'primary' + + +class Email(EmailImParent): + """The gd:email element. + + An email address associated with the containing entity (which is + usually an entity representing a person or a location). + """ + _qname = GDATA_TEMPLATE % 'email' + display_name = 'displayName' + + +class FamilyName(atom.core.XmlElement): + """The gd:familyName element. + + Specifies family name of the person, eg. "Smith". + """ + _qname = GDATA_TEMPLATE % 'familyName' + yomi = 'yomi' + + +class Im(EmailImParent): + """The gd:im element. + + An instant messaging address associated with the containing entity. + """ + _qname = GDATA_TEMPLATE % 'im' + protocol = 'protocol' + + +class GivenName(atom.core.XmlElement): + """The gd:givenName element. + + Specifies given name of the person, eg. "John". + """ + _qname = GDATA_TEMPLATE % 'givenName' + yomi = 'yomi' + + +class NamePrefix(atom.core.XmlElement): + """The gd:namePrefix element. + + Honorific prefix, eg. 'Mr' or 'Mrs'. + """ + _qname = GDATA_TEMPLATE % 'namePrefix' + + +class NameSuffix(atom.core.XmlElement): + """The gd:nameSuffix element. + + Honorific suffix, eg. 'san' or 'III'. + """ + _qname = GDATA_TEMPLATE % 'nameSuffix' + + +class FullName(atom.core.XmlElement): + """The gd:fullName element. + + Unstructured representation of the name. + """ + _qname = GDATA_TEMPLATE % 'fullName' + + +class Name(atom.core.XmlElement): + """The gd:name element. + + Allows storing person's name in a structured way. Consists of + given name, additional name, family name, prefix, suffix and full name. + """ + _qname = GDATA_TEMPLATE % 'name' + given_name = GivenName + additional_name = AdditionalName + family_name = FamilyName + name_prefix = NamePrefix + name_suffix = NameSuffix + full_name = FullName + + +class OrgDepartment(atom.core.XmlElement): + """The gd:orgDepartment element. + + Describes a department within an organization. Must appear within a + gd:organization element. + """ + _qname = GDATA_TEMPLATE % 'orgDepartment' + + +class OrgJobDescription(atom.core.XmlElement): + """The gd:orgJobDescription element. + + Describes a job within an organization. Must appear within a + gd:organization element. + """ + _qname = GDATA_TEMPLATE % 'orgJobDescription' + + +class OrgName(atom.core.XmlElement): + """The gd:orgName element. + + The name of the organization. Must appear within a gd:organization + element. + + Contains a Yomigana attribute (Japanese reading aid) for the + organization name. + """ + _qname = GDATA_TEMPLATE % 'orgName' + yomi = 'yomi' + + +class OrgSymbol(atom.core.XmlElement): + """The gd:orgSymbol element. + + Provides a symbol of an organization. Must appear within a + gd:organization element. + """ + _qname = GDATA_TEMPLATE % 'orgSymbol' + + +class OrgTitle(atom.core.XmlElement): + """The gd:orgTitle element. + + The title of a person within an organization. Must appear within a + gd:organization element. + """ + _qname = GDATA_TEMPLATE % 'orgTitle' + + +class Organization(atom.core.XmlElement): + """The gd:organization element. + + An organization, typically associated with a contact. 
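+
+    Because this is a plain atom.core.XmlElement subclass, an instance can
+    be built by keyword (a minimal sketch; the text and rel values are
+    illustrative only):
+
+      org = Organization(
+          rel='http://schemas.google.com/g/2005#work',
+          name=OrgName(text='Example Corp'),
+          title=OrgTitle(text='Software Engineer'))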
+ """ + _qname = GDATA_TEMPLATE % 'organization' + label = 'label' + primary = 'primary' + rel = 'rel' + department = OrgDepartment + job_description = OrgJobDescription + name = OrgName + symbol = OrgSymbol + title = OrgTitle + + +class When(atom.core.XmlElement): + """The gd:when element. + + Represents a period of time or an instant. + """ + _qname = GDATA_TEMPLATE % 'when' + end = 'endTime' + start = 'startTime' + value = 'valueString' + + +class OriginalEvent(atom.core.XmlElement): + """The gd:originalEvent element. + + Equivalent to the Recurrence ID property specified in section 4.8.4.4 + of RFC 2445. Appears in every instance of a recurring event, to identify + the original event. + + Contains a <gd:when> element specifying the original start time of the + instance that has become an exception. + """ + _qname = GDATA_TEMPLATE % 'originalEvent' + id = 'id' + href = 'href' + when = When + + +class PhoneNumber(atom.core.XmlElement): + """The gd:phoneNumber element. + + A phone number associated with the containing entity (which is usually + an entity representing a person or a location). + """ + _qname = GDATA_TEMPLATE % 'phoneNumber' + label = 'label' + rel = 'rel' + uri = 'uri' + primary = 'primary' + + +class PostalAddress(atom.core.XmlElement): + """The gd:postalAddress element.""" + _qname = GDATA_TEMPLATE % 'postalAddress' + label = 'label' + rel = 'rel' + uri = 'uri' + primary = 'primary' + + +class Rating(atom.core.XmlElement): + """The gd:rating element. + + Represents a numeric rating of the enclosing entity, such as a + comment. Each rating supplies its own scale, although it may be + normalized by a service; for example, some services might convert all + ratings to a scale from 1 to 5. + """ + _qname = GDATA_TEMPLATE % 'rating' + average = 'average' + max = 'max' + min = 'min' + num_raters = 'numRaters' + rel = 'rel' + value = 'value' + + +class Recurrence(atom.core.XmlElement): + """The gd:recurrence element. + + Represents the dates and times when a recurring event takes place. + + The string that defines the recurrence consists of a set of properties, + each of which is defined in the iCalendar standard (RFC 2445). + + Specifically, the string usually begins with a DTSTART property that + indicates the starting time of the first instance of the event, and + often a DTEND property or a DURATION property to indicate when the + first instance ends. Next come RRULE, RDATE, EXRULE, and/or EXDATE + properties, which collectively define a recurring event and its + exceptions (but see below). (See section 4.8.5 of RFC 2445 for more + information about these recurrence component properties.) Last comes a + VTIMEZONE component, providing detailed timezone rules for any timezone + ID mentioned in the preceding properties. + + Google services like Google Calendar don't generally generate EXRULE + and EXDATE properties to represent exceptions to recurring events; + instead, they generate <gd:recurrenceException> elements. However, + Google services may include EXRULE and/or EXDATE properties anyway; + for example, users can import events and exceptions into Calendar, and + if those imported events contain EXRULE or EXDATE properties, then + Calendar will provide those properties when it sends a <gd:recurrence> + element. + + Note the the use of <gd:recurrenceException> means that you can't be + sure just from examining a <gd:recurrence> element whether there are + any exceptions to the recurrence description. 
To ensure that you find + all exceptions, look for <gd:recurrenceException> elements in the feed, + and use their <gd:originalEvent> elements to match them up with + <gd:recurrence> elements. + """ + _qname = GDATA_TEMPLATE % 'recurrence' + + +class RecurrenceException(atom.core.XmlElement): + """The gd:recurrenceException element. + + Represents an event that's an exception to a recurring event-that is, + an instance of a recurring event in which one or more aspects of the + recurring event (such as attendance list, time, or location) have been + changed. + + Contains a <gd:originalEvent> element that specifies the original + recurring event that this event is an exception to. + + When you change an instance of a recurring event, that instance becomes + an exception. Depending on what change you made to it, the exception + behaves in either of two different ways when the original recurring + event is changed: + + - If you add, change, or remove comments, attendees, or attendee + responses, then the exception remains tied to the original event, and + changes to the original event also change the exception. + - If you make any other changes to the exception (such as changing the + time or location) then the instance becomes "specialized," which means + that it's no longer as tightly tied to the original event. If you + change the original event, specialized exceptions don't change. But + see below. + + For example, say you have a meeting every Tuesday and Thursday at + 2:00 p.m. If you change the attendance list for this Thursday's meeting + (but not for the regularly scheduled meeting), then it becomes an + exception. If you change the time for this Thursday's meeting (but not + for the regularly scheduled meeting), then it becomes specialized. + + Regardless of whether an exception is specialized or not, if you do + something that deletes the instance that the exception was derived from, + then the exception is deleted. Note that changing the day or time of a + recurring event deletes all instances, and creates new ones. + + For example, after you've specialized this Thursday's meeting, say you + change the recurring meeting to happen on Monday, Wednesday, and Friday. + That change deletes all of the recurring instances of the + Tuesday/Thursday meeting, including the specialized one. + + If a particular instance of a recurring event is deleted, then that + instance appears as a <gd:recurrenceException> containing a + <gd:entryLink> that has its <gd:eventStatus> set to + "http://schemas.google.com/g/2005#event.canceled". (For more + information about canceled events, see RFC 2445.) + """ + _qname = GDATA_TEMPLATE % 'recurrenceException' + specialized = 'specialized' + entry_link = EntryLink + original_event = OriginalEvent + + +class Reminder(atom.core.XmlElement): + """The gd:reminder element. + + A time interval, indicating how long before the containing entity's start + time or due time attribute a reminder should be issued. Alternatively, + may specify an absolute time at which a reminder should be issued. Also + specifies a notification method, indicating what medium the system + should use to remind the user. + """ + _qname = GDATA_TEMPLATE % 'reminder' + absolute_time = 'absoluteTime' + method = 'method' + days = 'days' + hours = 'hours' + minutes = 'minutes' + + +class Transparency(atom.core.XmlElement): + """The gd:transparency element: + + Extensible enum corresponding to the TRANSP property defined in RFC 244. 
+ """ + _qname = GDATA_TEMPLATE % 'transparency' + value = 'value' + + +class Agent(atom.core.XmlElement): + """The gd:agent element. + + The agent who actually receives the mail. Used in work addresses. + Also for 'in care of' or 'c/o'. + """ + _qname = GDATA_TEMPLATE % 'agent' + + +class HouseName(atom.core.XmlElement): + """The gd:housename element. + + Used in places where houses or buildings have names (and not + necessarily numbers), eg. "The Pillars". + """ + _qname = GDATA_TEMPLATE % 'housename' + + +class Street(atom.core.XmlElement): + """The gd:street element. + + Can be street, avenue, road, etc. This element also includes the + house number and room/apartment/flat/floor number. + """ + _qname = GDATA_TEMPLATE % 'street' + + +class PoBox(atom.core.XmlElement): + """The gd:pobox element. + + Covers actual P.O. boxes, drawers, locked bags, etc. This is usually + but not always mutually exclusive with street. + """ + _qname = GDATA_TEMPLATE % 'pobox' + + +class Neighborhood(atom.core.XmlElement): + """The gd:neighborhood element. + + This is used to disambiguate a street address when a city contains more + than one street with the same name, or to specify a small place whose + mail is routed through a larger postal town. In China it could be a + county or a minor city. + """ + _qname = GDATA_TEMPLATE % 'neighborhood' + + +class City(atom.core.XmlElement): + """The gd:city element. + + Can be city, village, town, borough, etc. This is the postal town and + not necessarily the place of residence or place of business. + """ + _qname = GDATA_TEMPLATE % 'city' + + +class Subregion(atom.core.XmlElement): + """The gd:subregion element. + + Handles administrative districts such as U.S. or U.K. counties that are + not used for mail addressing purposes. Subregion is not intended for + delivery addresses. + """ + _qname = GDATA_TEMPLATE % 'subregion' + + +class Region(atom.core.XmlElement): + """The gd:region element. + + A state, province, county (in Ireland), Land (in Germany), + departement (in France), etc. + """ + _qname = GDATA_TEMPLATE % 'region' + + +class Postcode(atom.core.XmlElement): + """The gd:postcode element. + + Postal code. Usually country-wide, but sometimes specific to the + city (e.g. "2" in "Dublin 2, Ireland" addresses). + """ + _qname = GDATA_TEMPLATE % 'postcode' + + +class Country(atom.core.XmlElement): + """The gd:country element. + + The name or code of the country. + """ + _qname = GDATA_TEMPLATE % 'country' + + +class FormattedAddress(atom.core.XmlElement): + """The gd:formattedAddress element. + + The full, unstructured postal address. + """ + _qname = GDATA_TEMPLATE % 'formattedAddress' + + +class StructuredPostalAddress(atom.core.XmlElement): + """The gd:structuredPostalAddress element. + + Postal address split into components. It allows to store the address + in locale independent format. The fields can be interpreted and used + to generate formatted, locale dependent address. The following elements + reperesent parts of the address: agent, house name, street, P.O. box, + neighborhood, city, subregion, region, postal code, country. The + subregion element is not used for postal addresses, it is provided for + extended uses of addresses only. In order to store postal address in an + unstructured form formatted address field is provided. 
+ """ + _qname = GDATA_TEMPLATE % 'structuredPostalAddress' + rel = 'rel' + mail_class = 'mailClass' + usage = 'usage' + label = 'label' + primary = 'primary' + agent = Agent + house_name = HouseName + street = Street + po_box = PoBox + neighborhood = Neighborhood + city = City + subregion = Subregion + region = Region + postcode = Postcode + country = Country + formatted_address = FormattedAddress + + +class Where(atom.core.XmlElement): + """The gd:where element. + + A place (such as an event location) associated with the containing + entity. The type of the association is determined by the rel attribute; + the details of the location are contained in an embedded or linked-to + Contact entry. + + A <gd:where> element is more general than a <gd:geoPt> element. The + former identifies a place using a text description and/or a Contact + entry, while the latter identifies a place using a specific geographic + location. + """ + _qname = GDATA_TEMPLATE % 'where' + label = 'label' + rel = 'rel' + value = 'valueString' + entry_link = EntryLink + + +class AttendeeType(atom.core.XmlElement): + """The gd:attendeeType element.""" + _qname = GDATA_TEMPLATE % 'attendeeType' + value = 'value' + + +class AttendeeStatus(atom.core.XmlElement): + """The gd:attendeeStatus element.""" + _qname = GDATA_TEMPLATE % 'attendeeStatus' + value = 'value' + + +class EventStatus(atom.core.XmlElement): + """The gd:eventStatus element.""" + _qname = GDATA_TEMPLATE % 'eventStatus' + value = 'value' + + +class Visibility(atom.core.XmlElement): + """The gd:visibility element.""" + _qname = GDATA_TEMPLATE % 'visibility' + value = 'value' + + +class Who(atom.core.XmlElement): + """The gd:who element. + + A person associated with the containing entity. The type of the + association is determined by the rel attribute; the details about the + person are contained in an embedded or linked-to Contact entry. + + The <gd:who> element can be used to specify email senders and + recipients, calendar event organizers, and so on. + """ + _qname = GDATA_TEMPLATE % 'who' + email = 'email' + rel = 'rel' + value = 'valueString' + attendee_status = AttendeeStatus + attendee_type = AttendeeType + entry_link = EntryLink + + +class Deleted(atom.core.XmlElement): + """gd:deleted when present, indicates the containing entry is deleted.""" + _qname = GD_TEMPLATE % 'deleted' + + +class Money(atom.core.XmlElement): + """Describes money""" + _qname = GD_TEMPLATE % 'money' + amount = 'amount' + currency_code = 'currencyCode' + + +class MediaSource(object): + """GData Entries can refer to media sources, so this class provides a + place to store references to these objects along with some metadata. + """ + + def __init__(self, file_handle=None, content_type=None, content_length=None, + file_path=None, file_name=None): + """Creates an object of type MediaSource. + + Args: + file_handle: A file handle pointing to the file to be encapsulated in the + MediaSource. + content_type: string The MIME type of the file. Required if a file_handle + is given. + content_length: int The size of the file. Required if a file_handle is + given. + file_path: string (optional) A full path name to the file. Used in + place of a file_handle. + file_name: string The name of the file without any path information. + Required if a file_handle is given. 
+ """ + self.file_handle = file_handle + self.content_type = content_type + self.content_length = content_length + self.file_name = file_name + + if (file_handle is None and content_type is not None and + file_path is not None): + self.set_file_handle(file_path, content_type) + + def set_file_handle(self, file_name, content_type): + """A helper function which can create a file handle from a given filename + and set the content type and length all at once. + + Args: + file_name: string The path and file name to the file containing the media + content_type: string A MIME type representing the type of the media + """ + + self.file_handle = open(file_name, 'rb') + self.content_type = content_type + self.content_length = os.path.getsize(file_name) + self.file_name = os.path.basename(file_name) + + SetFileHandle = set_file_handle + + def modify_request(self, http_request): + http_request.add_body_part(self.file_handle, self.content_type, + self.content_length) + return http_request + + ModifyRequest = modify_request diff --git a/gam/gdata/analytics/docs/__init__.py b/gam/gdata/analytics/docs/__init__.py new file mode 100755 index 00000000000..8031bc9b7ac --- /dev/null +++ b/gam/gdata/analytics/docs/__init__.py @@ -0,0 +1,269 @@ +#!/usr/bin/python +# +# Copyright 2009 Google Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""Contains extensions to Atom objects used with Google Documents.""" + +__author__ = ('api.jfisher (Jeff Fisher), ' + 'api.eric@google.com (Eric Bidelman)') + +import atom +import gdata + + +DOCUMENTS_NAMESPACE = 'http://schemas.google.com/docs/2007' + + +class Scope(atom.AtomBase): + """The DocList ACL scope element""" + + _tag = 'scope' + _namespace = gdata.GACL_NAMESPACE + _children = atom.AtomBase._children.copy() + _attributes = atom.AtomBase._attributes.copy() + _attributes['value'] = 'value' + _attributes['type'] = 'type' + + def __init__(self, value=None, type=None, extension_elements=None, + extension_attributes=None, text=None): + self.value = value + self.type = type + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + +class Role(atom.AtomBase): + """The DocList ACL role element""" + + _tag = 'role' + _namespace = gdata.GACL_NAMESPACE + _children = atom.AtomBase._children.copy() + _attributes = atom.AtomBase._attributes.copy() + _attributes['value'] = 'value' + + def __init__(self, value=None, extension_elements=None, + extension_attributes=None, text=None): + self.value = value + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + +class FeedLink(atom.AtomBase): + """The DocList gd:feedLink element""" + + _tag = 'feedLink' + _namespace = gdata.GDATA_NAMESPACE + _attributes = atom.AtomBase._attributes.copy() + _attributes['rel'] = 'rel' + _attributes['href'] = 'href' + + def __init__(self, href=None, rel=None, text=None, extension_elements=None, + extension_attributes=None): + self.href = href + self.rel = rel + atom.AtomBase.__init__(self, extension_elements=extension_elements, + extension_attributes=extension_attributes, text=text) + + +class ResourceId(atom.AtomBase): + """The DocList gd:resourceId element""" + + _tag = 'resourceId' + _namespace = gdata.GDATA_NAMESPACE + _children = atom.AtomBase._children.copy() + _attributes = atom.AtomBase._attributes.copy() + _attributes['value'] = 'value' + + def __init__(self, value=None, extension_elements=None, + extension_attributes=None, text=None): + self.value = value + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + +class LastModifiedBy(atom.Person): + """The DocList gd:lastModifiedBy element""" + + _tag = 'lastModifiedBy' + _namespace = gdata.GDATA_NAMESPACE + + +class LastViewed(atom.Person): + """The DocList gd:lastViewed element""" + + _tag = 'lastViewed' + _namespace = gdata.GDATA_NAMESPACE + + +class WritersCanInvite(atom.AtomBase): + """The DocList docs:writersCanInvite element""" + + _tag = 'writersCanInvite' + _namespace = DOCUMENTS_NAMESPACE + _attributes = atom.AtomBase._attributes.copy() + _attributes['value'] = 'value' + + +class DocumentListEntry(gdata.GDataEntry): + """The Google Documents version of an Atom Entry""" + + _tag = gdata.GDataEntry._tag + _namespace = atom.ATOM_NAMESPACE + _children = gdata.GDataEntry._children.copy() + _attributes = gdata.GDataEntry._attributes.copy() + _children['{%s}feedLink' % gdata.GDATA_NAMESPACE] = ('feedLink', FeedLink) + _children['{%s}resourceId' % gdata.GDATA_NAMESPACE] = ('resourceId', + ResourceId) + _children['{%s}lastModifiedBy' % gdata.GDATA_NAMESPACE] = ('lastModifiedBy', + LastModifiedBy) + _children['{%s}lastViewed' % gdata.GDATA_NAMESPACE] = ('lastViewed', + LastViewed) + _children['{%s}writersCanInvite' % 
DOCUMENTS_NAMESPACE] = ( + 'writersCanInvite', WritersCanInvite) + + def __init__(self, resourceId=None, feedLink=None, lastViewed=None, + lastModifiedBy=None, writersCanInvite=None, author=None, + category=None, content=None, atom_id=None, link=None, + published=None, title=None, updated=None, text=None, + extension_elements=None, extension_attributes=None): + self.feedLink = feedLink + self.lastViewed = lastViewed + self.lastModifiedBy = lastModifiedBy + self.resourceId = resourceId + self.writersCanInvite = writersCanInvite + gdata.GDataEntry.__init__( + self, author=author, category=category, content=content, + atom_id=atom_id, link=link, published=published, title=title, + updated=updated, extension_elements=extension_elements, + extension_attributes=extension_attributes, text=text) + + def GetAclLink(self): + """Extracts the DocListEntry's <gd:feedLink>. + + Returns: + A FeedLink object. + """ + return self.feedLink + + def GetDocumentType(self): + """Extracts the type of document from the DocListEntry. + + This method returns the type of document the DocListEntry + represents. Possible values are document, presentation, + spreadsheet, folder, or pdf. + + Returns: + A string representing the type of document. + """ + if self.category: + for category in self.category: + if category.scheme == gdata.GDATA_NAMESPACE + '#kind': + return category.label + else: + return None + + +def DocumentListEntryFromString(xml_string): + """Converts an XML string into a DocumentListEntry object. + + Args: + xml_string: string The XML describing a Document List feed entry. + + Returns: + A DocumentListEntry object corresponding to the given XML. + """ + return atom.CreateClassFromXMLString(DocumentListEntry, xml_string) + + +class DocumentListAclEntry(gdata.GDataEntry): + """A DocList ACL Entry flavor of an Atom Entry""" + + _tag = gdata.GDataEntry._tag + _namespace = gdata.GDataEntry._namespace + _children = gdata.GDataEntry._children.copy() + _attributes = gdata.GDataEntry._attributes.copy() + _children['{%s}scope' % gdata.GACL_NAMESPACE] = ('scope', Scope) + _children['{%s}role' % gdata.GACL_NAMESPACE] = ('role', Role) + + def __init__(self, category=None, atom_id=None, link=None, + title=None, updated=None, scope=None, role=None, + extension_elements=None, extension_attributes=None, text=None): + gdata.GDataEntry.__init__(self, author=None, category=category, + content=None, atom_id=atom_id, link=link, + published=None, title=title, + updated=updated, text=None) + self.scope = scope + self.role = role + + +def DocumentListAclEntryFromString(xml_string): + """Converts an XML string into a DocumentListAclEntry object. + + Args: + xml_string: string The XML describing a Document List ACL feed entry. + + Returns: + A DocumentListAclEntry object corresponding to the given XML. + """ + return atom.CreateClassFromXMLString(DocumentListAclEntry, xml_string) + + +class DocumentListFeed(gdata.GDataFeed): + """A feed containing a list of Google Documents Items""" + + _tag = gdata.GDataFeed._tag + _namespace = atom.ATOM_NAMESPACE + _children = gdata.GDataFeed._children.copy() + _attributes = gdata.GDataFeed._attributes.copy() + _children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry', + [DocumentListEntry]) + + +def DocumentListFeedFromString(xml_string): + """Converts an XML string into a DocumentListFeed object. + + Args: + xml_string: string The XML describing a DocumentList feed. + + Returns: + A DocumentListFeed object corresponding to the given XML. 
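+
+    A typical use is to parse a previously fetched response body and walk
+    its entries (a sketch; xml_string is assumed to hold a Documents List
+    feed):
+
+      feed = DocumentListFeedFromString(xml_string)
+      for entry in feed.entry:
+        print entry.title.text, entry.GetDocumentType()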
+ """ + return atom.CreateClassFromXMLString(DocumentListFeed, xml_string) + + +class DocumentListAclFeed(gdata.GDataFeed): + """A DocList ACL feed flavor of a Atom feed""" + + _tag = gdata.GDataFeed._tag + _namespace = atom.ATOM_NAMESPACE + _children = gdata.GDataFeed._children.copy() + _attributes = gdata.GDataFeed._attributes.copy() + _children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry', + [DocumentListAclEntry]) + + +def DocumentListAclFeedFromString(xml_string): + """Converts an XML string into a DocumentListAclFeed object. + + Args: + xml_string: string The XML describing a DocumentList feed. + + Returns: + A DocumentListFeed object corresponding to the given XML. + """ + return atom.CreateClassFromXMLString(DocumentListAclFeed, xml_string) diff --git a/gam/gdata/analytics/docs/client.py b/gam/gdata/analytics/docs/client.py new file mode 100755 index 00000000000..8a84ccbd49d --- /dev/null +++ b/gam/gdata/analytics/docs/client.py @@ -0,0 +1,1027 @@ +#!/usr/bin/python +# +# Copyright 2011 Google Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""DocsClient simplifies interactions with the Documents List API.""" + +__author__ = 'vicfryzel@google.com (Vic Fryzel)' + +import copy +import mimetypes +import re +import urllib +import atom.data +import atom.http_core +import gdata.client +import gdata.docs.data +import gdata.gauth + + +# Feed URIs that are given by the API, but cannot be obtained without +# making a mostly unnecessary HTTP request. +RESOURCE_FEED_URI = '/feeds/default/private/full' +RESOURCE_UPLOAD_URI = '/feeds/upload/create-session/default/private/full' +COLLECTION_UPLOAD_URI_TEMPLATE = \ + '/feeds/upload/create-session/feeds/default/private/full/%s/contents' +ARCHIVE_FEED_URI = '/feeds/default/private/archive' +METADATA_URI = '/feeds/metadata/default' +CHANGE_FEED_URI = '/feeds/default/private/changes' + + +class DocsClient(gdata.client.GDClient): + """Client for all features of the Google Documents List API.""" + + host = 'docs.google.com' + api_version = '3.0' + auth_service = 'writely' + alt_auth_service = 'wise' + alt_auth_token = None + auth_scopes = gdata.gauth.AUTH_SCOPES['writely'] + ssl = True + + def request(self, method=None, uri=None, **kwargs): + """Add support for imitating other users via 2-Legged OAuth. + + Args: + uri: (optional) URI of the request in which to replace default with + self.xoauth_requestor_id. + Returns: + Result of super(DocsClient, self).request(). + """ + if self.xoauth_requestor_id is not None and uri is not None: + if isinstance(uri, (str, unicode)): + uri = atom.http_core.Uri.parse_uri(uri) + uri.path.replace('/default', '/%s' % self.xoauth_requestor_id) + return super(DocsClient, self).request(method=method, uri=uri, **kwargs) + + Request = request + + def get_metadata(self, **kwargs): + """Retrieves the metadata of a user account. + + Args: + kwargs: Other parameters to pass to self.get_entry(). + + Returns: + gdata.docs.data.Metadata representing metadata of user's account. 
+ """ + return self.get_entry( + METADATA_URI, desired_class=gdata.docs.data.Metadata, **kwargs) + + GetMetadata = get_metadata + + def get_changes(self, changestamp=None, max_results=None, **kwargs): + """Retrieves changes to a user's documents list. + + Args: + changestamp: (optional) String changestamp value to query since. + If provided, returned changes will have a changestamp larger than + the given one. + max_results: (optional) Number of results to fetch. API will limit + this number to 100 at most. + kwargs: Other parameters to pass to self.get_feed(). + + Returns: + gdata.docs.data.ChangeFeed. + """ + uri = atom.http_core.Uri.parse_uri(CHANGE_FEED_URI) + + if changestamp is not None: + uri.query['start-index'] = changestamp + if max_results is not None: + uri.query['max-results'] = max_results + + return self.get_feed( + uri, desired_class=gdata.docs.data.ChangeFeed, **kwargs) + + GetChanges = get_changes + + def get_resources(self, uri=None, limit=None, **kwargs): + """Retrieves the resources in a user's docslist, or the given URI. + + Args: + uri: (optional) URI to query for resources. If None, then + gdata.docs.client.DocsClient.RESOURCE_FEED_URI is used, which will + query for all non-collections. + limit: int (optional) A maximum cap for the number of results to + return in the feed. By default, the API returns a maximum of 100 + per page. Thus, if you set limit=5000, you will get <= 5000 + documents (guarenteed no more than 5000), and will need to follow the + feed's next links (feed.GetNextLink()) to the rest. See + get_everything(). Similarly, if you set limit=50, only <= 50 + documents are returned. Note: if the max-results parameter is set in + the uri parameter, it is chosen over a value set for limit. + kwargs: Other parameters to pass to self.get_feed(). + + Returns: + gdata.docs.data.ResourceFeed feed. + """ + if uri is None: + uri = RESOURCE_FEED_URI + + if isinstance(uri, basestring): + uri = atom.http_core.Uri.parse_uri(uri) + + # Add max-results param if it wasn't included in the uri. + if limit is not None and not 'max-results' in uri.query: + uri.query['max-results'] = limit + + return self.get_feed(uri, desired_class=gdata.docs.data.ResourceFeed, + **kwargs) + + GetResources = get_resources + + def get_all_resources(self, uri=None, **kwargs): + """Retrieves all of a user's non-collections or everything at the given URI. + + Folders are not included in this by default. Pass in a custom URI to + include collections in your query. The DocsQuery class is an easy way to + generate such a URI. + + This method makes multiple HTTP requests (by following the feed's next + links) in order to fetch the user's entire document list. + + Args: + uri: (optional) URI to query the doclist feed with. If None, then use + DocsClient.RESOURCE_FEED_URI, which will retrieve all + non-collections. + kwargs: Other parameters to pass to self.GetResources(). + + Returns: + List of gdata.docs.data.Resource objects representing the retrieved + entries. + """ + if uri is None: + uri = RESOURCE_FEED_URI + + feed = self.GetResources(uri=uri, **kwargs) + entries = feed.entry + + while feed.GetNextLink() is not None: + feed = self.GetResources(feed.GetNextLink().href, **kwargs) + entries.extend(feed.entry) + + return entries + + GetAllResources = get_all_resources + + def get_resource(self, entry, **kwargs): + """Retrieves a resource again given its entry. + + Args: + entry: gdata.docs.data.Resource to fetch and return. + kwargs: Other args to pass to GetResourceBySelfLink(). 
+ Returns: + gdata.docs.data.Resource representing retrieved resource. + """ + + return self.GetResourceBySelfLink(entry.GetSelfLink().href, **kwargs) + + GetResource = get_resource + + def get_resource_by_self_link(self, self_link, etag=None, **kwargs): + """Retrieves a particular resource by its self link. + + Args: + self_link: URI at which to query for given resource. This can be found + using entry.GetSelfLink(). + etag: str (optional) The document/item's etag value to be used in a + conditional GET. See http://code.google.com/apis/documents/docs/3.0/ + developers_guide_protocol.html#RetrievingCached. + kwargs: Other parameters to pass to self.get_entry(). + + Returns: + gdata.docs.data.Resource representing the retrieved resource. + """ + if isinstance(self_link, atom.data.Link): + self_link = self_link.href + + return self.get_entry( + self_link, etag=etag, desired_class=gdata.docs.data.Resource, **kwargs) + + GetResourceBySelfLink = get_resource_by_self_link + + def get_resource_acl(self, entry, **kwargs): + """Retrieves the ACL sharing permissions for the given entry. + + Args: + entry: gdata.docs.data.Resource for which to get ACL. + kwargs: Other parameters to pass to self.get_feed(). + + Returns: + gdata.docs.data.AclFeed representing the resource's ACL. + """ + self._check_entry_is_resource(entry) + return self.get_feed(entry.GetAclFeedLink().href, + desired_class=gdata.docs.data.AclFeed, **kwargs) + + GetResourceAcl = get_resource_acl + + def create_resource(self, entry, media=None, collection=None, + create_uri=None, **kwargs): + """Creates new entries in Google Docs, and uploads their contents. + + Args: + entry: gdata.docs.data.Resource representing initial version + of entry being created. If media is also provided, the entry will + first be created with the given metadata and content. + media: (optional) gdata.data.MediaSource containing the file to be + uploaded. + collection: (optional) gdata.docs.data.Resource representing a collection + in which this new entry should be created. If provided along + with create_uri, create_uri will win (e.g. entry will be created at + create_uri, not necessarily in given collection). + create_uri: (optional) String URI at which to create the given entry. If + collection, media and create_uri are None, use + gdata.docs.client.RESOURCE_FEED_URI. If collection and create_uri are + None, use gdata.docs.client.RESOURCE_UPLOAD_URI. If collection and + media are not None, + gdata.docs.client.COLLECTION_UPLOAD_URI_TEMPLATE is used, + with the collection's resource ID substituted in. + kwargs: Other parameters to pass to self.post() and self.update(). + + Returns: + gdata.docs.data.Resource containing information about new entry. 
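+
+    For example, uploading a plain text file as a new document might look
+    like this (a sketch; it assumes an authenticated client and the
+    MediaSource class from gdata.data):
+
+      new_doc = gdata.docs.data.Resource(type='document',
+                                         title='Meeting notes')
+      media = gdata.data.MediaSource(file_path='notes.txt',
+                                     content_type='text/plain')
+      created = client.CreateResource(new_doc, media=media)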
+ """ + if media is not None: + if create_uri is None and collection is not None: + create_uri = COLLECTION_UPLOAD_URI_TEMPLATE % \ + collection.resource_id.text + elif create_uri is None: + create_uri = RESOURCE_UPLOAD_URI + uploader = gdata.client.ResumableUploader( + self, media.file_handle, media.content_type, media.content_length, + desired_class=gdata.docs.data.Resource) + return uploader.upload_file(create_uri, entry, **kwargs) + else: + if create_uri is None and collection is not None: + create_uri = collection.content.src + elif create_uri is None: + create_uri = RESOURCE_FEED_URI + return self.post( + entry, create_uri, desired_class=gdata.docs.data.Resource, **kwargs) + + CreateResource = create_resource + + def update_resource(self, entry, media=None, update_metadata=True, + new_revision=False, **kwargs): + """Updates an entry in Google Docs with new metadata and/or new data. + + Args: + entry: Entry to update. Make any metadata changes to this entry. + media: (optional) gdata.data.MediaSource object containing the file with + which to replace the entry's data. + update_metadata: (optional) True to update the metadata from the entry + itself. You might set this to False to only update an entry's + file content, and not its metadata. + new_revision: (optional) True to create a new revision with this update, + False otherwise. + kwargs: Other parameters to pass to self.post(). + + Returns: + gdata.docs.data.Resource representing the updated entry. + """ + + uri_params = {} + if new_revision: + uri_params['new-revision'] = 'true' + + if update_metadata and media is None: + uri = atom.http_core.parse_uri(entry.GetEditLink().href) + uri.query.update(uri_params) + return super(DocsClient, self).update(entry, **kwargs) + else: + uploader = gdata.client.ResumableUploader( + self, media.file_handle, media.content_type, media.content_length, + desired_class=gdata.docs.data.Resource) + return uploader.UpdateFile(entry_or_resumable_edit_link=entry, + update_metadata=update_metadata, + uri_params=uri_params, **kwargs) + + UpdateResource = update_resource + + def download_resource(self, entry, file_path, extra_params=None, **kwargs): + """Downloads the contents of the given entry to disk. + + Note: to download a file in memory, use the DownloadResourceToMemory() + method. + + Args: + entry: gdata.docs.data.Resource whose contents to fetch. + file_path: str Full path to which to save file. + extra_params: dict (optional) A map of any further parameters to control + how the document is downloaded/exported. For example, exporting a + spreadsheet as a .csv: extra_params={'gid': 0, 'exportFormat': 'csv'} + kwargs: Other parameters to pass to self._download_file(). + + Raises: + gdata.client.RequestError if the download URL is malformed or the server's + response was not successful. + """ + self._check_entry_is_not_collection(entry) + uri = self._get_download_uri(entry.content.src, extra_params) + self._download_file(uri, file_path, **kwargs) + + DownloadResource = download_resource + + def download_resource_to_memory(self, entry, extra_params=None, **kwargs): + """Returns the contents of the given entry. + + Args: + entry: gdata.docs.data.Resource whose contents to fetch. + extra_params: dict (optional) A map of any further parameters to control + how the document is downloaded/exported. For example, exporting a + spreadsheet as a .csv: extra_params={'gid': 0, 'exportFormat': 'csv'} + kwargs: Other parameters to pass to self._get_content(). 
+ + Returns: + Content of given resource after being downloaded. + + Raises: + gdata.client.RequestError if the download URL is malformed or the server's + response was not successful. + """ + self._check_entry_is_not_collection(entry) + uri = self._get_download_uri(entry.content.src, extra_params) + return self._get_content(uri, **kwargs) + + DownloadResourceToMemory = download_resource_to_memory + + def _get_download_uri(self, base_uri, extra_params=None): + uri = base_uri.replace('&', '&') + if extra_params is not None: + if 'exportFormat' in extra_params and '/Export?' not in uri: + raise gdata.client.Error, ('This entry type cannot be exported ' + 'as a different format.') + + if 'gid' in extra_params and uri.find('spreadsheets') == -1: + raise gdata.client.Error, 'gid param is not valid for this resource type.' + + uri += '&' + urllib.urlencode(extra_params) + return uri + + def _get_content(self, uri, extra_params=None, auth_token=None, **kwargs): + """Fetches the given resource's content. + + This method is useful for downloading/exporting a file within enviornments + like Google App Engine, where the user may not have the ability to write + the file to a local disk. + + Be warned, this method will use as much memory as needed to store the + fetched content. This could cause issues in your environment or app. This + is only different from Download() in that you will probably retain an + open reference to the data returned from this method, where as the data + from Download() will be immediately written to disk and the memory + freed. This client library currently doesn't support reading server + responses into a buffer or yielding an open file pointer to responses. + + Args: + entry: Resource to fetch. + extra_params: dict (optional) A map of any further parameters to control + how the document is downloaded/exported. For example, exporting a + spreadsheet as a .csv: extra_params={'gid': 0, 'exportFormat': 'csv'} + auth_token: (optional) gdata.gauth.ClientLoginToken, AuthSubToken, or + OAuthToken which authorizes this client to edit the user's data. + kwargs: Other parameters to pass to self.request(). + + Returns: + The binary file content. + + Raises: + gdata.client.RequestError: on error response from server. + """ + server_response = None + token = auth_token + if 'spreadsheets' in uri and token is None \ + and self.alt_auth_token is not None: + token = self.alt_auth_token + server_response = self.request( + 'GET', uri, auth_token=token, **kwargs) + if server_response.status != 200: + raise gdata.client.RequestError, {'status': server_response.status, + 'reason': server_response.reason, + 'body': server_response.read()} + return server_response.read() + + def _download_file(self, uri, file_path, **kwargs): + """Downloads a file to disk from the specified URI. + + Note: to download a file in memory, use the GetContent() method. + + Args: + uri: str The full URL to download the file from. + file_path: str The full path to save the file to. + kwargs: Other parameters to pass to self.get_content(). + + Raises: + gdata.client.RequestError: on error response from server. + """ + f = open(file_path, 'wb') + try: + f.write(self._get_content(uri, **kwargs)) + except gdata.client.RequestError, e: + f.close() + raise e + f.flush() + f.close() + + _DownloadFile = _download_file + + def copy_resource(self, entry, title, **kwargs): + """Copies the given entry to a new entry with the given title. + + Note: Files do not support this feature. + + Args: + entry: gdata.docs.data.Resource to copy. 
+ title: String title for the new entry. + kwargs: Other parameters to pass to self.post(). + + Returns: + gdata.docs.data.Resource representing duplicated resource. + """ + self._check_entry_is_resource(entry) + new_entry = gdata.docs.data.Resource( + title=atom.data.Title(text=title), + id=atom.data.Id(text=entry.GetSelfLink().href)) + return self.post(new_entry, RESOURCE_FEED_URI, **kwargs) + + CopyResource = copy_resource + + def move_resource(self, entry, collection=None, keep_in_collections=False, + **kwargs): + """Moves an item into a different collection (or out of all collections). + + Args: + entry: gdata.docs.data.Resource to move. + collection: gdata.docs.data.Resource (optional) An object representing + the destination collection. If None, set keep_in_collections to + False to remove the item from all collections. + keep_in_collections: boolean (optional) If True, the given entry + is not removed from any existing collections it is already in. + kwargs: Other parameters to pass to self.post(). + + Returns: + gdata.docs.data.Resource of the moved entry. + """ + self._check_entry_is_resource(entry) + + # Remove the item from any collections it is already in. + if not keep_in_collections: + for collection in entry.InCollections(): + uri = '%s/contents/%s' % ( + collection.href, + urllib.quote(entry.resource_id.text)) + self.delete(uri, force=True) + + if collection is not None: + self._check_entry_is_collection(collection) + entry = self.post(entry, collection.content.src, **kwargs) + return entry + + MoveResource = move_resource + + def delete_resource(self, entry, permanent=False, **kwargs): + """Trashes or deletes the given entry. + + Args: + entry: gdata.docs.data.Resource to trash or delete. + permanent: True to skip the trash and delete the entry forever. + kwargs: Other args to pass to gdata.client.GDClient.Delete() + + Returns: + Result of delete request. + """ + uri = entry.GetEditLink().href + if permanent: + uri += '?delete=true' + return super(DocsClient, self).delete(uri, **kwargs) + + DeleteResource = delete_resource + + def _check_entry_is_resource(self, entry): + """Ensures given entry is a gdata.docs.data.Resource. + + Args: + entry: Entry to test. + Raises: + ValueError: If given entry is not a resource. + """ + if not isinstance(entry, gdata.docs.data.Resource): + raise ValueError('%s is not a gdata.docs.data.Resource' % str(entry)) + + def _check_entry_is_collection(self, entry): + """Ensures given entry is a collection. + + Args: + entry: Entry to test. + Raises: + ValueError: If given entry is a collection. + """ + self._check_entry_is_resource(entry) + if entry.get_resource_type() != gdata.docs.data.COLLECTION_LABEL: + raise ValueError('%s is not a collection' % str(entry)) + + def _check_entry_is_not_collection(self, entry): + """Ensures given entry is not a collection. + + Args: + entry: Entry to test. + Raises: + ValueError: If given entry is a collection. + """ + try: + self._check_entry_is_resource(entry) + except ValueError: + return + if entry.get_resource_type() == gdata.docs.data.COLLECTION_LABEL: + raise ValueError( + '%s is a collection, which is not valid in this method' % str(entry)) + + def get_acl_entry(self, entry, **kwargs): + """Retrieves an AclEntry again. + + This is useful if you need to poll for an ACL changing. + + Args: + entry: gdata.docs.data.AclEntry to fetch and return. + kwargs: Other args to pass to GetAclEntryBySelfLink(). + Returns: + gdata.docs.data.AclEntry representing retrieved entry. 
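+
+    For example, a newly added ACL entry can be re-fetched later to check
+    its current state (a sketch; client, resource and the email address are
+    placeholders, and AclScope/AclRole come from gdata.acl.data):
+
+      acl = gdata.docs.data.AclEntry(
+          scope=gdata.acl.data.AclScope(value='colleague@example.com',
+                                        type='user'),
+          role=gdata.acl.data.AclRole(value='reader'))
+      created = client.AddAclEntry(resource, acl, send_notifications=True)
+      latest = client.GetAclEntry(created)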
+ """ + + return self.GetAclEntryBySelfLink(entry.GetSelfLink().href, **kwargs) + + GetAclEntry = get_acl_entry + + def get_acl_entry_by_self_link(self, self_link, **kwargs): + """Retrieves a particular AclEntry by its self link. + + Args: + self_link: URI at which to query for given ACL entry. This can be found + using entry.GetSelfLink(). + kwargs: Other parameters to pass to self.get_entry(). + + Returns: + gdata.docs.data.AclEntry representing the retrieved entry. + """ + if isinstance(self_link, atom.data.Link): + self_link = self_link.href + + return self.get_entry(self_link, desired_class=gdata.docs.data.AclEntry, + **kwargs) + + GetAclEntryBySelfLink = get_acl_entry_by_self_link + + def add_acl_entry(self, resource, acl_entry, send_notifications=None, + **kwargs): + """Adds the given AclEntry to the given Resource. + + Args: + resource: gdata.docs.data.Resource to which to add AclEntry. + acl_entry: gdata.docs.data.AclEntry representing ACL entry to add. + send_notifications: True if users should be notified by email when + this AclEntry is added. + kwargs: Other parameters to pass to self.post(). + + Returns: + gdata.docs.data.AclEntry containing information about new entry. + Raises: + ValueError: If given resource has no ACL link. + """ + uri = resource.GetAclLink().href + if uri is None: + raise ValueError(('Given resource has no ACL link. Did you fetch this' + 'resource from the API?')) + if send_notifications is not None: + if send_notifications: + uri += '?send-notification-emails=true' + + return self.post(acl_entry, uri, desired_class=gdata.docs.data.AclEntry, + **kwargs) + + AddAclEntry = add_acl_entry + + def update_acl_entry(self, entry, send_notifications=None, **kwargs): + """Updates the given AclEntry with new metadata. + + Args: + entry: AclEntry to update. Make any metadata changes to this entry. + send_notifications: True if users should be notified by email when + this AclEntry is updated. + kwargs: Other parameters to pass to super(DocsClient, self).update(). + + Returns: + gdata.docs.data.AclEntry representing the updated ACL entry. + """ + uri = entry.GetEditLink().href + if send_notifications: + uri += '?send-notification-emails=true' + return super(DocsClient, self).update(entry, uri=uri, force=True, **kwargs) + + UpdateAclEntry = update_acl_entry + + def delete_acl_entry(self, entry, **kwargs): + """Deletes the given AclEntry. + + Args: + entry: gdata.docs.data.AclEntry to delete. + kwargs: Other args to pass to gdata.client.GDClient.Delete() + + Returns: + Result of delete request. + """ + return super(DocsClient, self).delete(entry.GetEditLink().href, force=True, + **kwargs) + + DeleteAclEntry = delete_acl_entry + + def batch_process_acl_entries(self, resource, entries, **kwargs): + """Applies the specified operation of each entry in a single request. + + To use this, simply set acl_entry.batch_operation to one of + ['query', 'insert', 'update', 'delete'], and optionally set + acl_entry.batch_id to a string of your choice. + + Then, put all of your modified AclEntry objects into a list and pass + that list as the entries parameter. + + Args: + resource: gdata.docs.data.Resource to which the given entries belong. + entries: [gdata.docs.data.AclEntry] to modify in some way. + kwargs: Other args to pass to gdata.client.GDClient.post() + + Returns: + Resulting gdata.docs.data.AclFeed of changes. 
+ """ + feed = gdata.docs.data.AclFeed() + feed.entry = entries + return super(DocsClient, self).post( + feed, uri=resource.GetAclLink().href + '/acl', force=True, **kwargs) + + BatchProcessAclEntries = batch_process_acl_entries + + def get_revisions(self, entry, **kwargs): + """Retrieves the revision history for a resource. + + Args: + entry: gdata.docs.data.Resource for which to get revisions. + kwargs: Other parameters to pass to self.get_feed(). + + Returns: + gdata.docs.data.RevisionFeed representing the resource's revisions. + """ + self._check_entry_is_resource(entry) + return self.get_feed( + entry.GetRevisionsFeedLink().href, + desired_class=gdata.docs.data.RevisionFeed, **kwargs) + + GetRevisions = get_revisions + + def get_revision(self, entry, **kwargs): + """Retrieves a revision again given its entry. + + Args: + entry: gdata.docs.data.Revision to fetch and return. + kwargs: Other args to pass to GetRevisionBySelfLink(). + Returns: + gdata.docs.data.Revision representing retrieved revision. + """ + return self.GetRevisionBySelfLink(entry.GetSelfLink().href, **kwargs) + + GetRevision = get_revision + + def get_revision_by_self_link(self, self_link, **kwargs): + """Retrieves a particular reivision by its self link. + + Args: + self_link: URI at which to query for given revision. This can be found + using entry.GetSelfLink(). + kwargs: Other parameters to pass to self.get_entry(). + + Returns: + gdata.docs.data.Revision representing the retrieved revision. + """ + if isinstance(self_link, atom.data.Link): + self_link = self_link.href + + return self.get_entry(self_link, desired_class=gdata.docs.data.Revision, + **kwargs) + + GetRevisionBySelfLink = get_revision_by_self_link + + def download_revision(self, entry, file_path, extra_params=None, **kwargs): + """Downloads the contents of the given revision to disk. + + Note: to download a revision in memory, use the DownloadRevisionToMemory() + method. + + Args: + entry: gdata.docs.data.Revision whose contents to fetch. + file_path: str Full path to which to save file. + extra_params: dict (optional) A map of any further parameters to control + how the document is downloaded. + kwargs: Other parameters to pass to self._download_file(). + + Raises: + gdata.client.RequestError if the download URL is malformed or the server's + response was not successful. + """ + uri = self._get_download_uri(entry.content.src, extra_params) + self._download_file(uri, file_path, **kwargs) + + DownloadRevision = download_revision + + def download_revision_to_memory(self, entry, extra_params=None, **kwargs): + """Returns the contents of the given revision. + + Args: + entry: gdata.docs.data.Revision whose contents to fetch. + extra_params: dict (optional) A map of any further parameters to control + how the document is downloaded/exported. + kwargs: Other parameters to pass to self._get_content(). + + Returns: + Content of given revision after being downloaded. + + Raises: + gdata.client.RequestError if the download URL is malformed or the server's + response was not successful. + """ + self._check_entry_is_not_collection(entry) + uri = self._get_download_uri(entry.content.src, extra_params) + return self._get_content(uri, **kwargs) + + DownloadRevisionToMemory = download_revision_to_memory + + def publish_revision(self, entry, publish_auto=None, + publish_outside_domain=False, **kwargs): + """Publishes the given revision. + + This method can only be used for document revisions. + + Args: + entry: Revision to update. 
+ publish_auto: True to automatically publish future revisions of the + document. False to not automatically publish future revisions. + None to take no action and use the default value. + publish_outside_domain: True to make the published revision available + outside of a Google Apps domain. False to not publish outside + the domain. None to use the default value. + kwargs: Other parameters to pass to super(DocsClient, self).update(). + + Returns: + gdata.docs.data.Revision representing the updated revision. + """ + entry.publish = gdata.docs.data.Publish(value='true') + if publish_auto == True: + entry.publish_auto = gdata.docs.data.PublishAuto(value='true') + elif publish_auto == False: + entry.publish_auto = gdata.docs.data.PublishAuto(value='false') + if publish_outside_domain == True: + entry.publish_outside_domain = \ + gdata.docs.data.PublishOutsideDomain(value='true') + elif publish_outside_domain == False: + entry.publish_outside_domain = \ + gdata.docs.data.PublishOutsideDomain(value='false') + return super(DocsClient, self).update(entry, force=True, **kwargs) + + PublishRevision = publish_revision + + def unpublish_revision(self, entry, **kwargs): + """Unpublishes the given revision. + + This method can only be used for document revisions. + + Args: + entry: Revision to update. + kwargs: Other parameters to pass to super(DocsClient, self).update(). + + Returns: + gdata.docs.data.Revision representing the updated revision. + """ + entry.publish = gdata.docs.data.Publish(value='false') + return super(DocsClient, self).update(entry, force=True, **kwargs) + + UnpublishRevision = unpublish_revision + + def delete_revision(self, entry, **kwargs): + """Deletes the given Revision. + + Args: + entry: gdata.docs.data.Revision to delete. + kwargs: Other args to pass to gdata.client.GDClient.Delete() + + Returns: + Result of delete request. + """ + return super(DocsClient, self).delete(entry, force=True, **kwargs) + + DeleteRevision = delete_revision + + def get_archive(self, entry, **kwargs): + """Retrieves an archive again given its entry. + + This is useful if you need to poll for an archive completing. + + Args: + entry: gdata.docs.data.Archive to fetch and return. + kwargs: Other args to pass to GetArchiveBySelfLink(). + Returns: + gdata.docs.data.Archive representing retrieved archive. + """ + + return self.GetArchiveBySelfLink(entry.GetSelfLink().href, **kwargs) + + GetArchive = get_archive + + def get_archive_by_self_link(self, self_link, **kwargs): + """Retrieves a particular archive by its self link. + + Args: + self_link: URI at which to query for given archive. This can be found + using entry.GetSelfLink(). + kwargs: Other parameters to pass to self.get_entry(). + + Returns: + gdata.docs.data.Archive representing the retrieved archive. + """ + if isinstance(self_link, atom.data.Link): + self_link = self_link.href + + return self.get_entry(self_link, desired_class=gdata.docs.data.Archive, + **kwargs) + + GetArchiveBySelfLink = get_archive_by_self_link + + def create_archive(self, entry, **kwargs): + """Creates a new archive of resources. + + Args: + entry: gdata.docs.data.Archive representing metadata of archive to + create. + kwargs: Other parameters to pass to self.post(). + + Returns: + gdata.docs.data.Archive containing information about new archive. 
+ """ + return self.post(entry, ARCHIVE_FEED_URI, + desired_class=gdata.docs.data.Archive, **kwargs) + + CreateArchive = create_archive + + def update_archive(self, entry, **kwargs): + """Updates the given Archive with new metadata. + + This method is really only useful for updating the notification email + address of an archive that is being processed. + + Args: + entry: Archive to update. Make any metadata changes to this entry. + kwargs: Other parameters to pass to super(DocsClient, self).update(). + + Returns: + gdata.docs.data.Archive representing the updated archive. + """ + return super(DocsClient, self).update(entry, **kwargs) + + UpdateArchive = update_archive + + download_archive = DownloadResource + DownloadArchive = download_archive + download_archive_to_memory = DownloadResourceToMemory + DownloadArchiveToMemory = download_archive_to_memory + + def delete_archive(self, entry, **kwargs): + """Aborts the given Archive operation, or deletes the Archive. + + Args: + entry: gdata.docs.data.Archive to delete. + kwargs: Other args to pass to gdata.client.GDClient.Delete() + + Returns: + Result of delete request. + """ + return super(DocsClient, self).delete(entry, force=True, **kwargs) + + DeleteArchive = delete_archive + + +class DocsQuery(gdata.client.Query): + + def __init__(self, title=None, title_exact=None, opened_min=None, + opened_max=None, edited_min=None, edited_max=None, owner=None, + writer=None, reader=None, show_collections=None, show_root=None, + show_deleted=None, ocr=None, target_language=None, + source_language=None, convert=None, query=None, **kwargs): + """Constructs a query URL for the Google Documents List API. + + Args: + title: str (optional) Specifies the search terms for the title of a + document. This parameter used without title_exact will only + submit partial queries, not exact queries. + title_exact: str (optional) Meaningless without title. Possible values + are 'true' and 'false'. Note: Matches are case-insensitive. + opened_min: str (optional) Lower bound on the last time a document was + opened by the current user. Use the RFC 3339 timestamp + format. For example: opened_min='2005-08-09T09:57:00-08:00'. + opened_max: str (optional) Upper bound on the last time a document was + opened by the current user. (See also opened_min.) + edited_min: str (optional) Lower bound on the last time a document was + edited by the current user. This value corresponds to the edited.text + value in the doc's entry object, which represents changes to the + document's content or metadata. Use the RFC 3339 timestamp format. + For example: edited_min='2005-08-09T09:57:00-08:00' + edited_max: str (optional) Upper bound on the last time a document was + edited by the user. (See also edited_min.) + owner: str (optional) Searches for documents with a specific owner. Use + the email address of the owner. For example: owner='user@gmail.com' + writer: str (optional) Searches for documents which can be written to + by specific users. Use a single email address or a comma separated list + of email addresses. For example: writer='user1@gmail.com,user@example.com' + reader: str (optional) Searches for documents which can be read by + specific users. (See also writer.) + show_collections: str (optional) Specifies whether the query should return + collections as well as documents and files. Possible values are 'true' + and 'false'. Default is 'false'. + show_root: (optional) 'true' to specify when an item is in the root + collection. 
Default is 'false' + show_deleted: str (optional) Specifies whether the query should return + documents which are in the trash as well as other documents. + Possible values are 'true' and 'false'. Default is false. + ocr: str (optional) Specifies whether to attempt OCR on a .jpg, .png, or + .gif upload. Possible values are 'true' and 'false'. Default is + false. See OCR in the Protocol Guide: + http://code.google.com/apis/documents/docs/3.0/developers_guide_protocol.html#OCR + target_language: str (optional) Specifies the language to translate a + document into. See Document Translation in the Protocol Guide for a + table of possible values: + http://code.google.com/apis/documents/docs/3.0/developers_guide_protocol.html#DocumentTranslation + source_language: str (optional) Specifies the source language of the + original document. Optional when using the translation service. + If not provided, Google will attempt to auto-detect the source + language. See Document Translation in the Protocol Guide for a table of + possible values (link in target_language). + convert: str (optional) Used when uploading files specify if document uploads + should convert to a native Google Docs format. + Possible values are 'true' and 'false'. The default is 'true'. + query: str (optional) Full-text query to use. See the 'q' parameter in + the documentation. + """ + gdata.client.Query.__init__(self, **kwargs) + self.convert = convert + self.title = title + self.title_exact = title_exact + self.opened_min = opened_min + self.opened_max = opened_max + self.edited_min = edited_min + self.edited_max = edited_max + self.owner = owner + self.writer = writer + self.reader = reader + self.show_collections = show_collections + self.show_root = show_root + self.show_deleted = show_deleted + self.ocr = ocr + self.target_language = target_language + self.source_language = source_language + self.query = query + + def modify_request(self, http_request): + gdata.client._add_query_param('convert', self.convert, http_request) + gdata.client._add_query_param('title', self.title, http_request) + gdata.client._add_query_param('title-exact', self.title_exact, + http_request) + gdata.client._add_query_param('opened-min', self.opened_min, http_request) + gdata.client._add_query_param('opened-max', self.opened_max, http_request) + gdata.client._add_query_param('edited-min', self.edited_min, http_request) + gdata.client._add_query_param('edited-max', self.edited_max, http_request) + gdata.client._add_query_param('owner', self.owner, http_request) + gdata.client._add_query_param('writer', self.writer, http_request) + gdata.client._add_query_param('reader', self.reader, http_request) + gdata.client._add_query_param('query', self.query, http_request) + gdata.client._add_query_param('showfolders', self.show_collections, + http_request) + gdata.client._add_query_param('showroot', self.show_root, http_request) + gdata.client._add_query_param('showdeleted', self.show_deleted, + http_request) + gdata.client._add_query_param('ocr', self.ocr, http_request) + gdata.client._add_query_param('targetLanguage', self.target_language, + http_request) + gdata.client._add_query_param('sourceLanguage', self.source_language, + http_request) + gdata.client.Query.modify_request(self, http_request) + + ModifyRequest = modify_request diff --git a/gam/gdata/analytics/docs/data.py b/gam/gdata/analytics/docs/data.py new file mode 100755 index 00000000000..826fd98232a --- /dev/null +++ b/gam/gdata/analytics/docs/data.py @@ -0,0 +1,654 @@ +#!/usr/bin/python +# +# 
Copyright 2011 Google Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Data model classes for representing elements of the Documents List API.""" + +__author__ = 'vicfryzel@google.com (Vic Fryzel)' + +import re +import atom.core +import atom.data +import gdata.acl.data +import gdata.data + + +DOCUMENTS_NS = 'http://schemas.google.com/docs/2007' +LABELS_NS = 'http://schemas.google.com/g/2005/labels' +DOCUMENTS_TEMPLATE = '{http://schemas.google.com/docs/2007}%s' +ACL_FEEDLINK_REL = 'http://schemas.google.com/acl/2007#accessControlList' +REVISION_FEEDLINK_REL = DOCUMENTS_NS + '/revisions' +PARENT_LINK_REL = DOCUMENTS_NS + '#parent' +PUBLISH_LINK_REL = DOCUMENTS_NS + '#publish' +DATA_KIND_SCHEME = 'http://schemas.google.com/g/2005#kind' +LABELS_SCHEME = LABELS_NS +DOCUMENT_LABEL = 'document' +SPREADSHEET_LABEL = 'spreadsheet' +DRAWING_LABEL = 'drawing' +PRESENTATION_LABEL = 'presentation' +FILE_LABEL = 'file' +PDF_LABEL = 'pdf' +FORM_LABEL = 'form' +COLLECTION_LABEL = 'folder' +STARRED_LABEL = 'starred' +VIEWED_LABEL = 'viewed' +HIDDEN_LABEL = 'hidden' +TRASHED_LABEL = 'trashed' +MINE_LABEL = 'mine' +PRIVATE_LABEL = 'private' +SHAREDWITHDOMAIN_LABEL = 'shared-with-domain' +RESTRICTEDDOWNLOAD_LABEL = 'restricted-download' + + +class ResourceId(atom.core.XmlElement): + """The DocList gd:resourceId element.""" + _qname = gdata.data.GDATA_TEMPLATE % 'resourceId' + + +class LastModifiedBy(atom.data.Person): + """The DocList gd:lastModifiedBy element.""" + _qname = gdata.data.GDATA_TEMPLATE % 'lastModifiedBy' + + +class LastViewed(atom.data.Person): + """The DocList gd:lastViewed element.""" + _qname = gdata.data.GDATA_TEMPLATE % 'lastViewed' + + +class WritersCanInvite(atom.core.XmlElement): + """The DocList docs:writersCanInvite element.""" + _qname = DOCUMENTS_TEMPLATE % 'writersCanInvite' + value = 'value' + + +class Deleted(atom.core.XmlElement): + """The DocList gd:deleted element.""" + _qname = gdata.data.GDATA_TEMPLATE % 'deleted' + + +class QuotaBytesUsed(atom.core.XmlElement): + """The DocList gd:quotaBytesUsed element.""" + _qname = gdata.data.GDATA_TEMPLATE % 'quotaBytesUsed' + + +class Publish(atom.core.XmlElement): + """The DocList docs:publish element.""" + _qname = DOCUMENTS_TEMPLATE % 'publish' + value = 'value' + + +class PublishAuto(atom.core.XmlElement): + """The DocList docs:publishAuto element.""" + _qname = DOCUMENTS_TEMPLATE % 'publishAuto' + value = 'value' + + +class PublishOutsideDomain(atom.core.XmlElement): + """The DocList docs:publishOutsideDomain element.""" + _qname = DOCUMENTS_TEMPLATE % 'publishOutsideDomain' + value = 'value' + + +class Filename(atom.core.XmlElement): + """The DocList docs:filename element.""" + _qname = DOCUMENTS_TEMPLATE % 'filename' + + +class SuggestedFilename(atom.core.XmlElement): + """The DocList docs:suggestedFilename element.""" + _qname = DOCUMENTS_TEMPLATE % 'suggestedFilename' + + +class CategoryFinder(object): + """Mixin to provide category finding functionality. 
+ + Analogous to atom.data.LinkFinder, but a simpler API, specialized for + DocList categories. + """ + + def add_category(self, scheme, term, label): + """Add a category for a scheme, term and label. + + Args: + scheme: The scheme for the category. + term: The term for the category. + label: The label for the category + + Returns: + The newly created atom.data.Category. + """ + category = atom.data.Category(scheme=scheme, term=term, label=label) + self.category.append(category) + return category + + AddCategory = add_category + + def get_categories(self, scheme): + """Fetch the category elements for a scheme. + + Args: + scheme: The scheme to fetch the elements for. + + Returns: + Generator of atom.data.Category elements. + """ + for category in self.category: + if category.scheme == scheme: + yield category + + GetCategories = get_categories + + def remove_categories(self, scheme): + """Remove category elements for a scheme. + + Args: + scheme: The scheme of category to remove. + """ + for category in list(self.get_categories(scheme)): + self.category.remove(category) + + RemoveCategories = remove_categories + + def get_first_category(self, scheme): + """Fetch the first category element for a scheme. + + Args: + scheme: The scheme of category to return. + + Returns: + atom.data.Category if found or None. + """ + try: + return self.get_categories(scheme).next() + except StopIteration, e: + # The entry doesn't have the category + return None + + GetFirstCategory = get_first_category + + def set_resource_type(self, label): + """Set the document type for an entry, by building the appropriate + atom.data.Category + + Args: + label: str The value for the category entry. If None is passed the + category is removed and not set. + + Returns: + An atom.data.Category or None if label is None. + """ + self.remove_categories(DATA_KIND_SCHEME) + if label is not None: + return self.add_category(scheme=DATA_KIND_SCHEME, + term='%s#%s' % (DOCUMENTS_NS, label), + label=label) + else: + return None + + SetResourceType = set_resource_type + + def get_resource_type(self): + """Extracts the type of document this Resource is. + + This method returns the type of document the Resource represents. Possible + values are document, presentation, drawing, spreadsheet, file, folder, + form, or pdf. + + 'folder' is a possible return value of this method because, for legacy + support, we have not yet renamed the folder keyword to collection in + the API itself. + + Returns: + String representing the type of document. + """ + category = self.get_first_category(DATA_KIND_SCHEME) + if category is not None: + return category.label + else: + return None + + GetResourceType = get_resource_type + + def get_labels(self): + """Extracts the labels for this Resource. + + This method returns the labels as a set, for example: 'hidden', 'starred', + 'viewed'. + + Returns: + Set of string labels. + """ + return set(category.label for category in + self.get_categories(LABELS_SCHEME)) + + GetLabels = get_labels + + def has_label(self, label): + """Whether this Resource has a label. + + Args: + label: The str label to test for + + Returns: + Boolean value indicating presence of label. + """ + return label in self.get_labels() + + HasLabel = has_label + + def add_label(self, label): + """Add a label, if it is not present. 
+ + Args: + label: The str label to set + """ + if not self.has_label(label): + self.add_category(scheme=LABELS_SCHEME, + term='%s#%s' % (LABELS_NS, label), + label=label) + AddLabel = add_label + + def remove_label(self, label): + """Remove a label, if it is present. + + Args: + label: The str label to remove + """ + for category in self.get_categories(LABELS_SCHEME): + if category.label == label: + self.category.remove(category) + + RemoveLabel = remove_label + + def is_starred(self): + """Whether this Resource is starred. + + Returns: + Boolean value indicating that the resource is starred. + """ + return self.has_label(STARRED_LABEL) + + IsStarred = is_starred + + def is_hidden(self): + """Whether this Resource is hidden. + + Returns: + Boolean value indicating that the resource is hidden. + """ + return self.has_label(HIDDEN_LABEL) + + IsHidden = is_hidden + + def is_viewed(self): + """Whether this Resource is viewed. + + Returns: + Boolean value indicating that the resource is viewed. + """ + return self.has_label(VIEWED_LABEL) + + IsViewed = is_viewed + + def is_trashed(self): + """Whether this resource is trashed. + + Returns: + Boolean value indicating that the resource is trashed. + """ + return self.has_label(TRASHED_LABEL) + + IsTrashed = is_trashed + + def is_mine(self): + """Whether this resource is marked as mine. + + Returns: + Boolean value indicating that the resource is marked as mine. + """ + return self.has_label(MINE_LABEL) + + IsMine = is_mine + + def is_private(self): + """Whether this resource is private. + + Returns: + Boolean value indicating that the resource is private. + """ + return self.has_label(PRIVATE_LABEL) + + IsPrivate = is_private + + def is_shared_with_domain(self): + """Whether this resource is shared with the domain. + + Returns: + Boolean value indicating that the resource is shared with the domain. + """ + return self.has_label(SHAREDWITHDOMAIN_LABEL) + + IsSharedWithDomain = is_shared_with_domain + + def is_restricted_download(self): + """Whether downloads of this resource are restricted. + + Returns: + Boolean value indicating whether downloads of the resource are restricted.
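+ + Example (hypothetical usage of this and the other label helpers, assuming + 'resource' is a previously fetched gdata.docs.data.Resource): + + if resource.IsRestrictedDownload(): + print 'Downloads of this resource are restricted.'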
+ """ + return self.has_label(RESTRICTEDDOWNLOAD_LABEL) + + IsRestrictedDownload = is_restricted_download + + +class AclEntry(gdata.acl.data.AclEntry, gdata.data.BatchEntry): + """Resource ACL entry.""" + @staticmethod + def get_instance(role=None, scope_type=None, scope_value=None, key=False): + entry = AclEntry() + + if role is not None: + if isinstance(role, basestring): + role = gdata.acl.data.AclRole(value=role) + + if key: + entry.with_key = gdata.acl.data.AclWithKey(key='', role=role) + else: + entry.role = role + + if scope_type is not None: + if scope_value is not None: + entry.scope = gdata.acl.data.AclScope(type=scope_type, + value=scope_value) + else: + entry.scope = gdata.acl.data.AclScope(type=scope_type) + return entry + + GetInstance = get_instance + + +class AclFeed(gdata.acl.data.AclFeed): + """Resource ACL feed.""" + entry = [AclEntry] + + +class Resource(gdata.data.GDEntry, CategoryFinder): + """DocList version of an Atom Entry.""" + + last_viewed = LastViewed + last_modified_by = LastModifiedBy + resource_id = ResourceId + deleted = Deleted + writers_can_invite = WritersCanInvite + quota_bytes_used = QuotaBytesUsed + feed_link = [gdata.data.FeedLink] + filename = Filename + suggested_filename = SuggestedFilename + # Only populated if you request /feeds/default/private/expandAcl + acl_feed = AclFeed + + def __init__(self, type=None, title=None, **kwargs): + super(Resource, self).__init__(**kwargs) + if isinstance(type, basestring): + self.set_resource_type(type) + + if title is not None: + if isinstance(title, basestring): + self.title = atom.data.Title(text=title) + else: + self.title = title + + def get_acl_feed_link(self): + """Extracts the Resource's ACL feed <gd:feedLink>. + + Returns: + A gdata.data.FeedLink object. + """ + for feed_link in self.feed_link: + if feed_link.rel == ACL_FEEDLINK_REL: + return feed_link + return None + + GetAclFeedLink = get_acl_feed_link + + def get_revisions_feed_link(self): + """Extracts the Resource's revisions feed <gd:feedLink>. + + Returns: + A gdata.data.FeedLink object. + """ + for feed_link in self.feed_link: + if feed_link.rel == REVISION_FEEDLINK_REL: + return feed_link + return None + + GetRevisionsFeedLink = get_revisions_feed_link + + def get_resumable_edit_media_link(self): + """Extracts the Resource's resumable update link. + + Returns: + A gdata.data.FeedLink object. + """ + for feed_link in self.feed_link: + if feed_link.rel == RESUMABLE_EDIT_MEDIA_LINK_REL: + return feed_link + return None + + GetRevisionsFeedLink = get_revisions_feed_link + + def in_collections(self): + """Returns the parents link(s) (collections) of this entry.""" + links = [] + for link in self.link: + if link.rel == PARENT_LINK_REL and link.href: + links.append(link) + return links + + InCollections = in_collections + + +class ResourceFeed(gdata.data.GDFeed): + """Main feed containing a list of resources.""" + entry = [Resource] + + +class Revision(gdata.data.GDEntry): + """Resource Revision entry.""" + publish = Publish + publish_auto = PublishAuto + publish_outside_domain = PublishOutsideDomain + + def find_publish_link(self): + """Get the link that points to the published resource on the web. + + Returns: + A str for the URL in the link with a rel ending in #publish. + """ + return self.find_url(PUBLISH_LINK_REL) + + FindPublishLink = find_publish_link + + def get_publish_link(self): + """Get the link that points to the published resource on the web. + + Returns: + A gdata.data.Link for the link with a rel ending in #publish. 
+ """ + return self.get_link(PUBLISH_LINK_REL) + + GetPublishLink = get_publish_link + + +class RevisionFeed(gdata.data.GDFeed): + """A DocList Revision feed.""" + entry = [Revision] + + +class ArchiveResourceId(atom.core.XmlElement): + """The DocList docs:removed element.""" + _qname = DOCUMENTS_TEMPLATE % 'archiveResourceId' + + +class ArchiveFailure(atom.core.XmlElement): + """The DocList docs:archiveFailure element.""" + _qname = DOCUMENTS_TEMPLATE % 'archiveFailure' + + +class ArchiveComplete(atom.core.XmlElement): + """The DocList docs:archiveComplete element.""" + _qname = DOCUMENTS_TEMPLATE % 'archiveComplete' + + +class ArchiveTotal(atom.core.XmlElement): + """The DocList docs:archiveTotal element.""" + _qname = DOCUMENTS_TEMPLATE % 'archiveTotal' + + +class ArchiveTotalComplete(atom.core.XmlElement): + """The DocList docs:archiveTotalComplete element.""" + _qname = DOCUMENTS_TEMPLATE % 'archiveTotalComplete' + + +class ArchiveTotalFailure(atom.core.XmlElement): + """The DocList docs:archiveTotalFailure element.""" + _qname = DOCUMENTS_TEMPLATE % 'archiveTotalFailure' + + +class ArchiveConversion(atom.core.XmlElement): + """The DocList docs:removed element.""" + _qname = DOCUMENTS_TEMPLATE % 'archiveConversion' + source = 'source' + target = 'target' + + +class ArchiveNotify(atom.core.XmlElement): + """The DocList docs:archiveNotify element.""" + _qname = DOCUMENTS_TEMPLATE % 'archiveNotify' + + +class ArchiveStatus(atom.core.XmlElement): + """The DocList docs:archiveStatus element.""" + _qname = DOCUMENTS_TEMPLATE % 'archiveStatus' + + +class ArchiveNotifyStatus(atom.core.XmlElement): + """The DocList docs:archiveNotifyStatus element.""" + _qname = DOCUMENTS_TEMPLATE % 'archiveNotifyStatus' + + +class Archive(gdata.data.GDEntry): + """Archive entry.""" + archive_resource_ids = [ArchiveResourceId] + status = ArchiveStatus + date_completed = ArchiveComplete + num_resources = ArchiveTotal + num_complete_resources = ArchiveTotalComplete + num_failed_resources = ArchiveTotalFailure + failed_resource_ids = [ArchiveFailure] + notify_status = ArchiveNotifyStatus + conversions = [ArchiveConversion] + notification_email = ArchiveNotify + size = QuotaBytesUsed + + @staticmethod + def from_resource_list(resources): + resource_ids = [] + for resource in resources: + id = ArchiveResourceId(text=resource.resource_id.text) + resource_ids.append(id) + return Archive(archive_resource_ids=resource_ids) + + FromResourceList = from_resource_list + + +class Removed(atom.core.XmlElement): + """The DocList docs:removed element.""" + _qname = DOCUMENTS_TEMPLATE % 'removed' + + +class Changestamp(atom.core.XmlElement): + """The DocList docs:changestamp element.""" + _qname = DOCUMENTS_TEMPLATE % 'changestamp' + value = 'value' + + +class Change(Resource): + """Change feed entry.""" + changestamp = Changestamp + removed = Removed + + +class ChangeFeed(gdata.data.GDFeed): + """DocList Changes feed.""" + entry = [Change] + + +class QuotaBytesTotal(atom.core.XmlElement): + """The DocList gd:quotaBytesTotal element.""" + _qname = gdata.data.GDATA_TEMPLATE % 'quotaBytesTotal' + + +class QuotaBytesUsedInTrash(atom.core.XmlElement): + """The DocList docs:quotaBytesUsedInTrash element.""" + _qname = DOCUMENTS_TEMPLATE % 'quotaBytesUsedInTrash' + + +class ImportFormat(atom.core.XmlElement): + """The DocList docs:importFormat element.""" + _qname = DOCUMENTS_TEMPLATE % 'importFormat' + source = 'source' + target = 'target' + + +class ExportFormat(atom.core.XmlElement): + """The DocList docs:exportFormat element.""" 
+ _qname = DOCUMENTS_TEMPLATE % 'exportFormat' + source = 'source' + target = 'target' + + +class FeatureName(atom.core.XmlElement): + """The DocList docs:featureName element.""" + _qname = DOCUMENTS_TEMPLATE % 'featureName' + + +class FeatureRate(atom.core.XmlElement): + """The DocList docs:featureRate element.""" + _qname = DOCUMENTS_TEMPLATE % 'featureRate' + + +class Feature(atom.core.XmlElement): + """The DocList docs:feature element.""" + _qname = DOCUMENTS_TEMPLATE % 'feature' + name = FeatureName + rate = FeatureRate + + +class MaxUploadSize(atom.core.XmlElement): + """The DocList docs:maxUploadSize element.""" + _qname = DOCUMENTS_TEMPLATE % 'maxUploadSize' + kind = 'kind' + + +class Metadata(gdata.data.GDEntry): + """Metadata entry for a user.""" + quota_bytes_total = QuotaBytesTotal + quota_bytes_used = QuotaBytesUsed + quota_bytes_used_in_trash = QuotaBytesUsedInTrash + import_formats = [ImportFormat] + export_formats = [ExportFormat] + features = [Feature] + max_upload_sizes = [MaxUploadSize] diff --git a/gam/gdata/analytics/docs/service.py b/gam/gdata/analytics/docs/service.py new file mode 100755 index 00000000000..b6f39f6a4a2 --- /dev/null +++ b/gam/gdata/analytics/docs/service.py @@ -0,0 +1,618 @@ +#!/usr/bin/python +# +# Copyright 2009 Google Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""DocsService extends the GDataService to streamline Google Documents + operations. + + DocsService: Provides methods to query feeds and manipulate items. + Extends GDataService. + + DocumentQuery: Queries a Google Document list feed. + + DocumentAclQuery: Queries a Google Document Acl feed. +""" + + +__author__ = ('api.jfisher (Jeff Fisher), ' + 'e.bidelman (Eric Bidelman)') + +import re +import atom +import gdata.service +import gdata.docs +import urllib + +# XML Namespaces used in Google Documents entities. +DATA_KIND_SCHEME = gdata.GDATA_NAMESPACE + '#kind' +DOCUMENT_LABEL = 'document' +SPREADSHEET_LABEL = 'spreadsheet' +PRESENTATION_LABEL = 'presentation' +FOLDER_LABEL = 'folder' +PDF_LABEL = 'pdf' + +LABEL_SCHEME = gdata.GDATA_NAMESPACE + '/labels' +STARRED_LABEL_TERM = LABEL_SCHEME + '#starred' +TRASHED_LABEL_TERM = LABEL_SCHEME + '#trashed' +HIDDEN_LABEL_TERM = LABEL_SCHEME + '#hidden' +MINE_LABEL_TERM = LABEL_SCHEME + '#mine' +PRIVATE_LABEL_TERM = LABEL_SCHEME + '#private' +SHARED_WITH_DOMAIN_LABEL_TERM = LABEL_SCHEME + '#shared-with-domain' +VIEWED_LABEL_TERM = LABEL_SCHEME + '#viewed' + +FOLDERS_SCHEME_PREFIX = gdata.docs.DOCUMENTS_NAMESPACE + '/folders/' + +# File extensions of documents that are permitted to be uploaded or downloaded. +SUPPORTED_FILETYPES = { + 'CSV': 'text/csv', + 'TSV': 'text/tab-separated-values', + 'TAB': 'text/tab-separated-values', + 'DOC': 'application/msword', + 'DOCX': ('application/vnd.openxmlformats-officedocument.' 
+ 'wordprocessingml.document'), + 'ODS': 'application/x-vnd.oasis.opendocument.spreadsheet', + 'ODT': 'application/vnd.oasis.opendocument.text', + 'RTF': 'application/rtf', + 'SXW': 'application/vnd.sun.xml.writer', + 'TXT': 'text/plain', + 'XLS': 'application/vnd.ms-excel', + 'XLSX': 'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet', + 'PDF': 'application/pdf', + 'PNG': 'image/png', + 'PPT': 'application/vnd.ms-powerpoint', + 'PPS': 'application/vnd.ms-powerpoint', + 'HTM': 'text/html', + 'HTML': 'text/html', + 'ZIP': 'application/zip', + 'SWF': 'application/x-shockwave-flash' + } + + +class DocsService(gdata.service.GDataService): + + """Client extension for the Google Documents service Document List feed.""" + + __FILE_EXT_PATTERN = re.compile('.*\.([a-zA-Z]{3,}$)') + __RESOURCE_ID_PATTERN = re.compile('^([a-z]*)(:|%3A)([\w-]*)$') + + def __init__(self, email=None, password=None, source=None, + server='docs.google.com', additional_headers=None, **kwargs): + """Creates a client for the Google Documents service. + + Args: + email: string (optional) The user's email address, used for + authentication. + password: string (optional) The user's password. + source: string (optional) The name of the user's application. + server: string (optional) The name of the server to which a connection + will be opened. Default value: 'docs.google.com'. + **kwargs: The other parameters to pass to gdata.service.GDataService + constructor. + """ + gdata.service.GDataService.__init__( + self, email=email, password=password, service='writely', source=source, + server=server, additional_headers=additional_headers, **kwargs) + self.ssl = True + + def _MakeKindCategory(self, label): + if label is None: + return None + return atom.Category(scheme=DATA_KIND_SCHEME, + term=gdata.docs.DOCUMENTS_NAMESPACE + '#' + label, label=label) + + def _MakeContentLinkFromId(self, resource_id): + match = self.__RESOURCE_ID_PATTERN.match(resource_id) + label = match.group(1) + doc_id = match.group(3) + if label == DOCUMENT_LABEL: + return '/feeds/download/documents/Export?docId=%s' % doc_id + if label == PRESENTATION_LABEL: + return '/feeds/download/presentations/Export?docId=%s' % doc_id + if label == SPREADSHEET_LABEL: + return ('https://spreadsheets.google.com/feeds/download/spreadsheets/' + 'Export?key=%s' % doc_id) + raise ValueError, 'Invalid resource id: %s' % resource_id + + def _UploadFile(self, media_source, title, category, folder_or_uri=None): + """Uploads a file to the Document List feed. + + Args: + media_source: A gdata.MediaSource object containing the file to be + uploaded. + title: string The title of the document on the server after being + uploaded. + category: An atom.Category object specifying the appropriate document + type. + folder_or_uri: DocumentListEntry or string (optional) An object with a + link to a folder or a uri to a folder to upload to. + Note: A valid uri for a folder is of the form: + /feeds/folders/private/full/folder%3Afolder_id + + Returns: + A DocumentListEntry containing information about the document created on + the Google Documents service. 
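+ + Example (a hypothetical sketch of internal usage; in practice callers go + through the public Upload() method, and 'client' is an assumed + authenticated DocsService instance): + + ms = gdata.MediaSource(file_path='/tmp/notes.txt', + content_type='text/plain') + entry = client._UploadFile(ms, 'Notes', + client._MakeKindCategory(DOCUMENT_LABEL))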
+ """ + if folder_or_uri: + try: + uri = folder_or_uri.content.src + except AttributeError: + uri = folder_or_uri + else: + uri = '/feeds/documents/private/full' + + entry = gdata.docs.DocumentListEntry() + entry.title = atom.Title(text=title) + if category is not None: + entry.category.append(category) + entry = self.Post(entry, uri, media_source=media_source, + extra_headers={'Slug': media_source.file_name}, + converter=gdata.docs.DocumentListEntryFromString) + return entry + + def _DownloadFile(self, uri, file_path): + """Downloads a file. + + Args: + uri: string The full Export URL to download the file from. + file_path: string The full path to save the file to. + + Raises: + RequestError: on error response from server. + """ + server_response = self.request('GET', uri) + response_body = server_response.read() + timeout = 5 + while server_response.status == 302 and timeout > 0: + server_response = self.request('GET', + server_response.getheader('Location')) + response_body = server_response.read() + timeout -= 1 + if server_response.status != 200: + raise gdata.service.RequestError, {'status': server_response.status, + 'reason': server_response.reason, + 'body': response_body} + f = open(file_path, 'wb') + f.write(response_body) + f.flush() + f.close() + + def MoveIntoFolder(self, source_entry, folder_entry): + """Moves a document into a folder in the Document List Feed. + + Args: + source_entry: DocumentListEntry An object representing the source + document/folder. + folder_entry: DocumentListEntry An object with a link to the destination + folder. + + Returns: + A DocumentListEntry containing information about the document created on + the Google Documents service. + """ + entry = gdata.docs.DocumentListEntry() + entry.id = source_entry.id + entry = self.Post(entry, folder_entry.content.src, + converter=gdata.docs.DocumentListEntryFromString) + return entry + + def Query(self, uri, converter=gdata.docs.DocumentListFeedFromString): + """Queries the Document List feed and returns the resulting feed of + entries. + + Args: + uri: string The full URI to be queried. This can contain query + parameters, a hostname, or simply the relative path to a Document + List feed. The DocumentQuery object is useful when constructing + query parameters. + converter: func (optional) A function which will be executed on the + retrieved item, generally to render it into a Python object. + By default the DocumentListFeedFromString function is used to + return a DocumentListFeed object. This is because most feed + queries will result in a feed and not a single entry. + """ + return self.Get(uri, converter=converter) + + def QueryDocumentListFeed(self, uri): + """Retrieves a DocumentListFeed by retrieving a URI based off the Document + List feed, including any query parameters. A DocumentQuery object can + be used to construct these parameters. + + Args: + uri: string The URI of the feed being retrieved possibly with query + parameters. + + Returns: + A DocumentListFeed object representing the feed returned by the server. + """ + return self.Get(uri, converter=gdata.docs.DocumentListFeedFromString) + + def GetDocumentListEntry(self, uri): + """Retrieves a particular DocumentListEntry by its unique URI. + + Args: + uri: string The unique URI of an entry in a Document List feed. + + Returns: + A DocumentListEntry object representing the retrieved entry. 
+ """ + return self.Get(uri, converter=gdata.docs.DocumentListEntryFromString) + + def GetDocumentListFeed(self, uri=None): + """Retrieves a feed containing all of a user's documents. + + Args: + uri: string A full URI to query the Document List feed. + """ + if not uri: + uri = gdata.docs.service.DocumentQuery().ToUri() + return self.QueryDocumentListFeed(uri) + + def GetDocumentListAclEntry(self, uri): + """Retrieves a particular DocumentListAclEntry by its unique URI. + + Args: + uri: string The unique URI of an entry in a Document List feed. + + Returns: + A DocumentListAclEntry object representing the retrieved entry. + """ + return self.Get(uri, converter=gdata.docs.DocumentListAclEntryFromString) + + def GetDocumentListAclFeed(self, uri): + """Retrieves a feed containing all of a user's documents. + + Args: + uri: string The URI of a document's Acl feed to retrieve. + + Returns: + A DocumentListAclFeed object representing the ACL feed + returned by the server. + """ + return self.Get(uri, converter=gdata.docs.DocumentListAclFeedFromString) + + def Upload(self, media_source, title, folder_or_uri=None, label=None): + """Uploads a document inside of a MediaSource object to the Document List + feed with the given title. + + Args: + media_source: MediaSource The gdata.MediaSource object containing a + document file to be uploaded. + title: string The title of the document on the server after being + uploaded. + folder_or_uri: DocumentListEntry or string (optional) An object with a + link to a folder or a uri to a folder to upload to. + Note: A valid uri for a folder is of the form: + /feeds/folders/private/full/folder%3Afolder_id + label: optional label describing the type of the document to be created. + + Returns: + A DocumentListEntry containing information about the document created + on the Google Documents service. + """ + + return self._UploadFile(media_source, title, self._MakeKindCategory(label), + folder_or_uri) + + def Download(self, entry_or_id_or_url, file_path, export_format=None, + gid=None, extra_params=None): + """Downloads a document from the Document List. + + Args: + entry_or_id_or_url: a DocumentListEntry, or the resource id of an entry, + or a url to download from (such as the content src). + file_path: string The full path to save the file to. + export_format: the format to convert to, if conversion is required. + gid: grid id, for downloading a single grid of a spreadsheet + extra_params: a map of any further parameters to control how the document + is downloaded + + Raises: + RequestError if the service does not respond with success + """ + + if isinstance(entry_or_id_or_url, gdata.docs.DocumentListEntry): + url = entry_or_id_or_url.content.src + else: + if self.__RESOURCE_ID_PATTERN.match(entry_or_id_or_url): + url = self._MakeContentLinkFromId(entry_or_id_or_url) + else: + url = entry_or_id_or_url + + if export_format is not None: + if url.find('/Export?') == -1: + raise gdata.service.Error, ('This entry cannot be exported ' + 'as a different format') + url += '&exportFormat=%s' % export_format + + if gid is not None: + if url.find('spreadsheets') == -1: + raise gdata.service.Error, 'grid id param is not valid for this entry' + url += '&gid=%s' % gid + + if extra_params: + url += '&' + urllib.urlencode(extra_params) + + self._DownloadFile(url, file_path) + + def Export(self, entry_or_id_or_url, file_path, gid=None, extra_params=None): + """Downloads a document from the Document List in a different format. 
+ + Args: + entry_or_id_or_url: a DocumentListEntry, or the resource id of an entry, + or a url to download from (such as the content src). + file_path: string The full path to save the file to. The export + format is inferred from the the file extension. + gid: grid id, for downloading a single grid of a spreadsheet + extra_params: a map of any further parameters to control how the document + is downloaded + + Raises: + RequestError if the service does not respond with success + """ + ext = None + match = self.__FILE_EXT_PATTERN.match(file_path) + if match: + ext = match.group(1) + self.Download(entry_or_id_or_url, file_path, ext, gid, extra_params) + + def CreateFolder(self, title, folder_or_uri=None): + """Creates a folder in the Document List feed. + + Args: + title: string The title of the folder on the server after being created. + folder_or_uri: DocumentListEntry or string (optional) An object with a + link to a folder or a uri to a folder to upload to. + Note: A valid uri for a folder is of the form: + /feeds/folders/private/full/folder%3Afolder_id + + Returns: + A DocumentListEntry containing information about the folder created on + the Google Documents service. + """ + if folder_or_uri: + try: + uri = folder_or_uri.content.src + except AttributeError: + uri = folder_or_uri + else: + uri = '/feeds/documents/private/full' + + folder_entry = gdata.docs.DocumentListEntry() + folder_entry.title = atom.Title(text=title) + folder_entry.category.append(self._MakeKindCategory(FOLDER_LABEL)) + folder_entry = self.Post(folder_entry, uri, + converter=gdata.docs.DocumentListEntryFromString) + + return folder_entry + + + def MoveOutOfFolder(self, source_entry): + """Moves a document into a folder in the Document List Feed. + + Args: + source_entry: DocumentListEntry An object representing the source + document/folder. + + Returns: + True if the entry was moved out. + """ + return self.Delete(source_entry.GetEditLink().href) + + # Deprecated methods + + #@atom.deprecated('Please use Upload instead') + def UploadPresentation(self, media_source, title, folder_or_uri=None): + """Uploads a presentation inside of a MediaSource object to the Document + List feed with the given title. + + This method is deprecated, use Upload instead. + + Args: + media_source: MediaSource The MediaSource object containing a + presentation file to be uploaded. + title: string The title of the presentation on the server after being + uploaded. + folder_or_uri: DocumentListEntry or string (optional) An object with a + link to a folder or a uri to a folder to upload to. + Note: A valid uri for a folder is of the form: + /feeds/folders/private/full/folder%3Afolder_id + + Returns: + A DocumentListEntry containing information about the presentation created + on the Google Documents service. + """ + return self._UploadFile( + media_source, title, self._MakeKindCategory(PRESENTATION_LABEL), + folder_or_uri=folder_or_uri) + + UploadPresentation = atom.deprecated('Please use Upload instead')( + UploadPresentation) + + #@atom.deprecated('Please use Upload instead') + def UploadSpreadsheet(self, media_source, title, folder_or_uri=None): + """Uploads a spreadsheet inside of a MediaSource object to the Document + List feed with the given title. + + This method is deprecated, use Upload instead. + + Args: + media_source: MediaSource The MediaSource object containing a spreadsheet + file to be uploaded. + title: string The title of the spreadsheet on the server after being + uploaded. 
+ folder_or_uri: DocumentListEntry or string (optional) An object with a + link to a folder or a uri to a folder to upload to. + Note: A valid uri for a folder is of the form: + /feeds/folders/private/full/folder%3Afolder_id + + Returns: + A DocumentListEntry containing information about the spreadsheet created + on the Google Documents service. + """ + return self._UploadFile( + media_source, title, self._MakeKindCategory(SPREADSHEET_LABEL), + folder_or_uri=folder_or_uri) + + UploadSpreadsheet = atom.deprecated('Please use Upload instead')( + UploadSpreadsheet) + + #@atom.deprecated('Please use Upload instead') + def UploadDocument(self, media_source, title, folder_or_uri=None): + """Uploads a document inside of a MediaSource object to the Document List + feed with the given title. + + This method is deprecated, use Upload instead. + + Args: + media_source: MediaSource The gdata.MediaSource object containing a + document file to be uploaded. + title: string The title of the document on the server after being + uploaded. + folder_or_uri: DocumentListEntry or string (optional) An object with a + link to a folder or a uri to a folder to upload to. + Note: A valid uri for a folder is of the form: + /feeds/folders/private/full/folder%3Afolder_id + + Returns: + A DocumentListEntry containing information about the document created + on the Google Documents service. + """ + return self._UploadFile( + media_source, title, self._MakeKindCategory(DOCUMENT_LABEL), + folder_or_uri=folder_or_uri) + + UploadDocument = atom.deprecated('Please use Upload instead')( + UploadDocument) + + """Calling any of these functions is the same as calling Export""" + DownloadDocument = atom.deprecated('Please use Export instead')(Export) + DownloadPresentation = atom.deprecated('Please use Export instead')(Export) + DownloadSpreadsheet = atom.deprecated('Please use Export instead')(Export) + + """Calling any of these functions is the same as calling MoveIntoFolder""" + MoveDocumentIntoFolder = atom.deprecated( + 'Please use MoveIntoFolder instead')(MoveIntoFolder) + MovePresentationIntoFolder = atom.deprecated( + 'Please use MoveIntoFolder instead')(MoveIntoFolder) + MoveSpreadsheetIntoFolder = atom.deprecated( + 'Please use MoveIntoFolder instead')(MoveIntoFolder) + MoveFolderIntoFolder = atom.deprecated( + 'Please use MoveIntoFolder instead')(MoveIntoFolder) + + +class DocumentQuery(gdata.service.Query): + + """Object used to construct a URI to query the Google Document List feed""" + + def __init__(self, feed='/feeds/documents', visibility='private', + projection='full', text_query=None, params=None, + categories=None): + """Constructor for Document List Query + + Args: + feed: string (optional) The path for the feed. (e.g. '/feeds/documents') + visibility: string (optional) The visibility chosen for the current feed. + projection: string (optional) The projection chosen for the current feed. + text_query: string (optional) The contents of the q query parameter. This + string is URL escaped upon conversion to a URI. + params: dict (optional) Parameter value string pairs which become URL + params when translated to a URI. These parameters are added to + the query's items. + categories: list (optional) List of category strings which should be + included as query categories. See gdata.service.Query for + additional documentation. + + Yields: + A DocumentQuery object used to construct a URI based on the Document + List feed. 
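+ + Example (hypothetical usage, assuming an authenticated DocsService named + 'client'): + + q = DocumentQuery(text_query='budget') + q['max-results'] = '25' + feed = client.QueryDocumentListFeed(q.ToUri())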
+ """ + self.visibility = visibility + self.projection = projection + gdata.service.Query.__init__(self, feed, text_query, params, categories) + + def ToUri(self): + """Generates a URI from the query parameters set in the object. + + Returns: + A string containing the URI used to retrieve entries from the Document + List feed. + """ + old_feed = self.feed + self.feed = '/'.join([old_feed, self.visibility, self.projection]) + new_feed = gdata.service.Query.ToUri(self) + self.feed = old_feed + return new_feed + + def AddNamedFolder(self, email, folder_name): + """Adds a named folder category, qualified by a schema. + + This function lets you query for documents that are contained inside a + named folder without fear of collision with other categories. + + Args: + email: string The email of the user who owns the folder. + folder_name: string The name of the folder. + + Returns: + The string of the category that was added to the object. + """ + + category = '{%s%s}%s' % (FOLDERS_SCHEME_PREFIX, email, folder_name) + self.categories.append(category) + return category + + def RemoveNamedFolder(self, email, folder_name): + """Removes a named folder category, qualified by a schema. + + Args: + email: string The email of the user who owns the folder. + folder_name: string The name of the folder. + + Returns: + The string of the category that was removed to the object. + """ + category = '{%s%s}%s' % (FOLDERS_SCHEME_PREFIX, email, folder_name) + self.categories.remove(category) + return category + + +class DocumentAclQuery(gdata.service.Query): + + """Object used to construct a URI to query a Document's ACL feed""" + + def __init__(self, resource_id, feed='/feeds/acl/private/full'): + """Constructor for Document ACL Query + + Args: + resource_id: string The resource id. (e.g. 'document%3Adocument_id', + 'spreadsheet%3Aspreadsheet_id', etc.) + feed: string (optional) The path for the feed. + (e.g. '/feeds/acl/private/full') + + Yields: + A DocumentAclQuery object used to construct a URI based on the Document + ACL feed. + """ + self.resource_id = resource_id + gdata.service.Query.__init__(self, feed) + + def ToUri(self): + """Generates a URI from the query parameters set in the object. + + Returns: + A string containing the URI used to retrieve entries from the Document + ACL feed. + """ + return '%s/%s' % (gdata.service.Query.ToUri(self), self.resource_id) diff --git a/gam/gdata/analytics/dublincore/__init__.py b/gam/gdata/analytics/dublincore/__init__.py new file mode 100755 index 00000000000..22071f7a11e --- /dev/null +++ b/gam/gdata/analytics/dublincore/__init__.py @@ -0,0 +1,15 @@ +#!/usr/bin/python +# +# Copyright (C) 2009 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/gam/gdata/analytics/dublincore/data.py b/gam/gdata/analytics/dublincore/data.py new file mode 100755 index 00000000000..c6345c16fba --- /dev/null +++ b/gam/gdata/analytics/dublincore/data.py @@ -0,0 +1,78 @@ +#!/usr/bin/python +# +# Copyright (C) 2009 Google Inc. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Contains the data classes of the Dublin Core Metadata Initiative (DCMI) Extension""" + + +__author__ = 'j.s@google.com (Jeff Scudder)' + + +import atom.core + + +DC_TEMPLATE = '{http://purl.org/dc/terms/}%s' + + +class Creator(atom.core.XmlElement): + """Entity primarily responsible for making the resource.""" + _qname = DC_TEMPLATE % 'creator' + + +class Date(atom.core.XmlElement): + """Point or period of time associated with an event in the lifecycle of the resource.""" + _qname = DC_TEMPLATE % 'date' + + +class Description(atom.core.XmlElement): + """Account of the resource.""" + _qname = DC_TEMPLATE % 'description' + + +class Format(atom.core.XmlElement): + """File format, physical medium, or dimensions of the resource.""" + _qname = DC_TEMPLATE % 'format' + + +class Identifier(atom.core.XmlElement): + """An unambiguous reference to the resource within a given context.""" + _qname = DC_TEMPLATE % 'identifier' + + +class Language(atom.core.XmlElement): + """Language of the resource.""" + _qname = DC_TEMPLATE % 'language' + + +class Publisher(atom.core.XmlElement): + """Entity responsible for making the resource available.""" + _qname = DC_TEMPLATE % 'publisher' + + +class Rights(atom.core.XmlElement): + """Information about rights held in and over the resource.""" + _qname = DC_TEMPLATE % 'rights' + + +class Subject(atom.core.XmlElement): + """Topic of the resource.""" + _qname = DC_TEMPLATE % 'subject' + + +class Title(atom.core.XmlElement): + """Name given to the resource.""" + _qname = DC_TEMPLATE % 'title' + + diff --git a/gam/gdata/analytics/exif/__init__.py b/gam/gdata/analytics/exif/__init__.py new file mode 100755 index 00000000000..7f1f9c2abd8 --- /dev/null +++ b/gam/gdata/analytics/exif/__init__.py @@ -0,0 +1,217 @@ +# -*-*- encoding: utf-8 -*-*- +# +# This is gdata.photos.exif, implementing the exif namespace in gdata +# +# $Id: __init__.py 81 2007-10-03 14:41:42Z havard.gulldahl $ +# +# Copyright 2007 HÃ¥vard Gulldahl +# Portions copyright 2007 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""This module maps elements from the {EXIF} namespace[1] to GData objects. +These elements describe image data, using exif attributes[2]. + +Picasa Web Albums uses the exif namespace to represent Exif data encoded +in a photo [3]. 
+ +Picasa Web Albums uses the following exif elements: +exif:distance +exif:exposure +exif:flash +exif:focallength +exif:fstop +exif:imageUniqueID +exif:iso +exif:make +exif:model +exif:tags +exif:time + +[1]: http://schemas.google.com/photos/exif/2007. +[2]: http://en.wikipedia.org/wiki/Exif +[3]: http://code.google.com/apis/picasaweb/reference.html#exif_reference +""" + + +__author__ = u'havard@gulldahl.no'# (HÃ¥vard Gulldahl)' #BUG: pydoc chokes on non-ascii chars in __author__ +__license__ = 'Apache License v2' + + +import atom +import gdata + +EXIF_NAMESPACE = 'http://schemas.google.com/photos/exif/2007' + +class ExifBaseElement(atom.AtomBase): + """Base class for elements in the EXIF_NAMESPACE (%s). To add new elements, you only need to add the element tag name to self._tag + """ % EXIF_NAMESPACE + + _tag = '' + _namespace = EXIF_NAMESPACE + _children = atom.AtomBase._children.copy() + _attributes = atom.AtomBase._attributes.copy() + + def __init__(self, name=None, extension_elements=None, + extension_attributes=None, text=None): + self.name = name + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + +class Distance(ExifBaseElement): + "(float) The distance to the subject, e.g. 0.0" + + _tag = 'distance' +def DistanceFromString(xml_string): + return atom.CreateClassFromXMLString(Distance, xml_string) + +class Exposure(ExifBaseElement): + "(float) The exposure time used, e.g. 0.025 or 8.0E4" + + _tag = 'exposure' +def ExposureFromString(xml_string): + return atom.CreateClassFromXMLString(Exposure, xml_string) + +class Flash(ExifBaseElement): + """(string) Boolean value indicating whether the flash was used. + The .text attribute will either be `true' or `false' + + As a convenience, this object's .bool method will return what you want, + so you can say: + + flash_used = bool(Flash) + + """ + + _tag = 'flash' + def __bool__(self): + if self.text.lower() in ('true','false'): + return self.text.lower() == 'true' +def FlashFromString(xml_string): + return atom.CreateClassFromXMLString(Flash, xml_string) + +class Focallength(ExifBaseElement): + "(float) The focal length used, e.g. 23.7" + + _tag = 'focallength' +def FocallengthFromString(xml_string): + return atom.CreateClassFromXMLString(Focallength, xml_string) + +class Fstop(ExifBaseElement): + "(float) The fstop value used, e.g. 5.0" + + _tag = 'fstop' +def FstopFromString(xml_string): + return atom.CreateClassFromXMLString(Fstop, xml_string) + +class ImageUniqueID(ExifBaseElement): + "(string) The unique image ID for the photo. Generated by Google Photo servers" + + _tag = 'imageUniqueID' +def ImageUniqueIDFromString(xml_string): + return atom.CreateClassFromXMLString(ImageUniqueID, xml_string) + +class Iso(ExifBaseElement): + "(int) The iso equivalent value used, e.g. 200" + + _tag = 'iso' +def IsoFromString(xml_string): + return atom.CreateClassFromXMLString(Iso, xml_string) + +class Make(ExifBaseElement): + "(string) The make of the camera used, e.g. Fictitious Camera Company" + + _tag = 'make' +def MakeFromString(xml_string): + return atom.CreateClassFromXMLString(Make, xml_string) + +class Model(ExifBaseElement): + "(string) The model of the camera used,e.g AMAZING-100D" + + _tag = 'model' +def ModelFromString(xml_string): + return atom.CreateClassFromXMLString(Model, xml_string) + +class Time(ExifBaseElement): + """(int) The date/time the photo was taken, e.g. 1180294337000. + Represented as the number of milliseconds since January 1st, 1970. 
+ + The value of this element will always be identical to the value + of the <gphoto:timestamp>. + + Look at this object's .isoformat() for a human friendly datetime string: + + photo_epoch = Time.text # 1180294337000 + photo_isostring = Time.isoformat() # '2007-05-27T19:32:17.000Z' + + Alternatively: + photo_datetime = Time.datetime() # (requires python >= 2.3) + """ + + _tag = 'time' + def isoformat(self): + """(string) Return the timestamp as a ISO 8601 formatted string, + e.g. '2007-05-27T19:32:17.000Z' + """ + import time + epoch = float(self.text)/1000 + return time.strftime('%Y-%m-%dT%H:%M:%S.000Z', time.gmtime(epoch)) + + def datetime(self): + """(datetime.datetime) Return the timestamp as a datetime.datetime object + + Requires python 2.3 + """ + import datetime + epoch = float(self.text)/1000 + return datetime.datetime.fromtimestamp(epoch) + +def TimeFromString(xml_string): + return atom.CreateClassFromXMLString(Time, xml_string) + +class Tags(ExifBaseElement): + """The container for all exif elements. + The <exif:tags> element can appear as a child of a photo entry. + """ + + _tag = 'tags' + _children = atom.AtomBase._children.copy() + _children['{%s}fstop' % EXIF_NAMESPACE] = ('fstop', Fstop) + _children['{%s}make' % EXIF_NAMESPACE] = ('make', Make) + _children['{%s}model' % EXIF_NAMESPACE] = ('model', Model) + _children['{%s}distance' % EXIF_NAMESPACE] = ('distance', Distance) + _children['{%s}exposure' % EXIF_NAMESPACE] = ('exposure', Exposure) + _children['{%s}flash' % EXIF_NAMESPACE] = ('flash', Flash) + _children['{%s}focallength' % EXIF_NAMESPACE] = ('focallength', Focallength) + _children['{%s}iso' % EXIF_NAMESPACE] = ('iso', Iso) + _children['{%s}time' % EXIF_NAMESPACE] = ('time', Time) + _children['{%s}imageUniqueID' % EXIF_NAMESPACE] = ('imageUniqueID', ImageUniqueID) + + def __init__(self, extension_elements=None, extension_attributes=None, text=None): + ExifBaseElement.__init__(self, extension_elements=extension_elements, + extension_attributes=extension_attributes, + text=text) + self.fstop=None + self.make=None + self.model=None + self.distance=None + self.exposure=None + self.flash=None + self.focallength=None + self.iso=None + self.time=None + self.imageUniqueID=None +def TagsFromString(xml_string): + return atom.CreateClassFromXMLString(Tags, xml_string) + diff --git a/gam/gdata/analytics/finance/__init__.py b/gam/gdata/analytics/finance/__init__.py new file mode 100755 index 00000000000..28ab898d06a --- /dev/null +++ b/gam/gdata/analytics/finance/__init__.py @@ -0,0 +1,486 @@ +#!/usr/bin/python +# +# Copyright (C) 2009 Tan Swee Heng +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ + +"""Contains extensions to Atom objects used with Google Finance.""" + + +__author__ = 'thesweeheng@gmail.com' + + +import atom +import gdata + + +GD_NAMESPACE = 'http://schemas.google.com/g/2005' +GF_NAMESPACE = 'http://schemas.google.com/finance/2007' + + +class Money(atom.AtomBase): + """The <gd:money> element.""" + _tag = 'money' + _namespace = GD_NAMESPACE + _attributes = atom.AtomBase._attributes.copy() + _attributes['amount'] = 'amount' + _attributes['currencyCode'] = 'currency_code' + + def __init__(self, amount=None, currency_code=None, **kwargs): + self.amount = amount + self.currency_code = currency_code + atom.AtomBase.__init__(self, **kwargs) + + def __str__(self): + return "%s %s" % (self.amount, self.currency_code) + + +def MoneyFromString(xml_string): + return atom.CreateClassFromXMLString(Money, xml_string) + + +class _Monies(atom.AtomBase): + """An element containing multiple <gd:money> in multiple currencies.""" + _namespace = GF_NAMESPACE + _children = atom.AtomBase._children.copy() + _children['{%s}money' % GD_NAMESPACE] = ('money', [Money]) + + def __init__(self, money=None, **kwargs): + self.money = money or [] + atom.AtomBase.__init__(self, **kwargs) + + def __str__(self): + return " / ".join(["%s" % i for i in self.money]) + + +class CostBasis(_Monies): + """The <gf:costBasis> element.""" + _tag = 'costBasis' + + +def CostBasisFromString(xml_string): + return atom.CreateClassFromXMLString(CostBasis, xml_string) + + +class DaysGain(_Monies): + """The <gf:daysGain> element.""" + _tag = 'daysGain' + + +def DaysGainFromString(xml_string): + return atom.CreateClassFromXMLString(DaysGain, xml_string) + + +class Gain(_Monies): + """The <gf:gain> element.""" + _tag = 'gain' + + +def GainFromString(xml_string): + return atom.CreateClassFromXMLString(Gain, xml_string) + + +class MarketValue(_Monies): + """The <gf:marketValue> element.""" + _tag = 'gain' + _tag = 'marketValue' + + +def MarketValueFromString(xml_string): + return atom.CreateClassFromXMLString(MarketValue, xml_string) + + +class Commission(_Monies): + """The <gf:commission> element.""" + _tag = 'commission' + + +def CommissionFromString(xml_string): + return atom.CreateClassFromXMLString(Commission, xml_string) + + +class Price(_Monies): + """The <gf:price> element.""" + _tag = 'price' + + +def PriceFromString(xml_string): + return atom.CreateClassFromXMLString(Price, xml_string) + + +class Symbol(atom.AtomBase): + """The <gf:symbol> element.""" + _tag = 'symbol' + _namespace = GF_NAMESPACE + _attributes = atom.AtomBase._attributes.copy() + _attributes['fullName'] = 'full_name' + _attributes['exchange'] = 'exchange' + _attributes['symbol'] = 'symbol' + + def __init__(self, full_name=None, exchange=None, symbol=None, **kwargs): + self.full_name = full_name + self.exchange = exchange + self.symbol = symbol + atom.AtomBase.__init__(self, **kwargs) + + def __str__(self): + return "%s:%s (%s)" % (self.exchange, self.symbol, self.full_name) + + +def SymbolFromString(xml_string): + return atom.CreateClassFromXMLString(Symbol, xml_string) + + +class TransactionData(atom.AtomBase): + """The <gf:transactionData> element.""" + _tag = 'transactionData' + _namespace = GF_NAMESPACE + _attributes = atom.AtomBase._attributes.copy() + _attributes['type'] = 'type' + _attributes['date'] = 'date' + _attributes['shares'] = 'shares' + _attributes['notes'] = 'notes' + _children = atom.AtomBase._children.copy() + _children['{%s}commission' % GF_NAMESPACE] = ('commission', Commission) + _children['{%s}price' % GF_NAMESPACE] = 
('price', Price) + + def __init__(self, type=None, date=None, shares=None, + notes=None, commission=None, price=None, **kwargs): + self.type = type + self.date = date + self.shares = shares + self.notes = notes + self.commission = commission + self.price = price + atom.AtomBase.__init__(self, **kwargs) + + +def TransactionDataFromString(xml_string): + return atom.CreateClassFromXMLString(TransactionData, xml_string) + + +class TransactionEntry(gdata.GDataEntry): + """An entry of the transaction feed. + + A TransactionEntry contains TransactionData such as the transaction + type (Buy, Sell, Sell Short, or Buy to Cover), the number of units, + the date, the price, any commission, and any notes. + """ + _tag = 'entry' + _namespace = atom.ATOM_NAMESPACE + _children = gdata.GDataEntry._children.copy() + _children['{%s}transactionData' % GF_NAMESPACE] = ( + 'transaction_data', TransactionData) + + def __init__(self, transaction_data=None, **kwargs): + self.transaction_data = transaction_data + gdata.GDataEntry.__init__(self, **kwargs) + + def transaction_id(self): + return self.id.text.split("/")[-1] + + transaction_id = property(transaction_id, doc='The transaction ID.') + + +def TransactionEntryFromString(xml_string): + return atom.CreateClassFromXMLString(TransactionEntry, xml_string) + + +class TransactionFeed(gdata.GDataFeed): + """A feed that lists all of the transactions that have been recorded for + a particular position. + + A transaction is a collection of information about an instance of + buying or selling a particular security. The TransactionFeed lists all + of the transactions that have been recorded for a particular position + as a list of TransactionEntries. + """ + _tag = 'feed' + _namespace = atom.ATOM_NAMESPACE + _children = gdata.GDataFeed._children.copy() + _children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry', [TransactionEntry]) + + +def TransactionFeedFromString(xml_string): + return atom.CreateClassFromXMLString(TransactionFeed, xml_string) + + +class TransactionFeedLink(atom.AtomBase): + """Link to TransactionFeed embedded in PositionEntry. + + If a PositionFeed is queried with transactions='true', TransactionFeeds + are inlined in the returned PositionEntries. These TransactionFeeds are + accessible via TransactionFeedLink's feed attribute. 
+ """ + _tag = 'feedLink' + _namespace = GD_NAMESPACE + _attributes = atom.AtomBase._attributes.copy() + _attributes['href'] = 'href' + _children = atom.AtomBase._children.copy() + _children['{%s}feed' % atom.ATOM_NAMESPACE] = ( + 'feed', TransactionFeed) + + def __init__(self, href=None, feed=None, **kwargs): + self.href = href + self.feed = feed + atom.AtomBase.__init__(self, **kwargs) + + +class PositionData(atom.AtomBase): + """The <gf:positionData> element.""" + _tag = 'positionData' + _namespace = GF_NAMESPACE + _attributes = atom.AtomBase._attributes.copy() + _attributes['gainPercentage'] = 'gain_percentage' + _attributes['return1w'] = 'return1w' + _attributes['return4w'] = 'return4w' + _attributes['return3m'] = 'return3m' + _attributes['returnYTD'] = 'returnYTD' + _attributes['return1y'] = 'return1y' + _attributes['return3y'] = 'return3y' + _attributes['return5y'] = 'return5y' + _attributes['returnOverall'] = 'return_overall' + _attributes['shares'] = 'shares' + _children = atom.AtomBase._children.copy() + _children['{%s}costBasis' % GF_NAMESPACE] = ('cost_basis', CostBasis) + _children['{%s}daysGain' % GF_NAMESPACE] = ('days_gain', DaysGain) + _children['{%s}gain' % GF_NAMESPACE] = ('gain', Gain) + _children['{%s}marketValue' % GF_NAMESPACE] = ('market_value', MarketValue) + + def __init__(self, gain_percentage=None, + return1w=None, return4w=None, return3m=None, returnYTD=None, + return1y=None, return3y=None, return5y=None, return_overall=None, + shares=None, cost_basis=None, days_gain=None, + gain=None, market_value=None, **kwargs): + self.gain_percentage = gain_percentage + self.return1w = return1w + self.return4w = return4w + self.return3m = return3m + self.returnYTD = returnYTD + self.return1y = return1y + self.return3y = return3y + self.return5y = return5y + self.return_overall = return_overall + self.shares = shares + self.cost_basis = cost_basis + self.days_gain = days_gain + self.gain = gain + self.market_value = market_value + atom.AtomBase.__init__(self, **kwargs) + + +def PositionDataFromString(xml_string): + return atom.CreateClassFromXMLString(PositionData, xml_string) + + +class PositionEntry(gdata.GDataEntry): + """An entry of the position feed. + + A PositionEntry contains the ticker exchange and Symbol for a stock, + mutual fund, or other security, along with PositionData such as the + number of units of that security that the user holds, and performance + statistics. + """ + _tag = 'entry' + _namespace = atom.ATOM_NAMESPACE + _children = gdata.GDataEntry._children.copy() + _children['{%s}positionData' % GF_NAMESPACE] = ( + 'position_data', PositionData) + _children['{%s}symbol' % GF_NAMESPACE] = ('symbol', Symbol) + _children['{%s}feedLink' % GD_NAMESPACE] = ( + 'feed_link', TransactionFeedLink) + + def __init__(self, position_data=None, symbol=None, feed_link=None, + **kwargs): + self.position_data = position_data + self.symbol = symbol + self.feed_link = feed_link + gdata.GDataEntry.__init__(self, **kwargs) + + def position_title(self): + return self.title.text + + position_title = property(position_title, + doc='The position title as a string (i.e. 
position.title.text).') + + def ticker_id(self): + return self.id.text.split("/")[-1] + + ticker_id = property(ticker_id, doc='The position TICKER ID.') + + def transactions(self): + if self.feed_link.feed: + return self.feed_link.feed.entry + else: + return None + + transactions = property(transactions, doc=""" + Inlined TransactionEntries are returned if PositionFeed is queried + with transactions='true'.""") + + +def PositionEntryFromString(xml_string): + return atom.CreateClassFromXMLString(PositionEntry, xml_string) + + +class PositionFeed(gdata.GDataFeed): + """A feed that lists all of the positions in a particular portfolio. + + A position is a collection of information about a security that the + user holds. The PositionFeed lists all of the positions in a particular + portfolio as a list of PositionEntries. + """ + _tag = 'feed' + _namespace = atom.ATOM_NAMESPACE + _children = gdata.GDataFeed._children.copy() + _children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry', [PositionEntry]) + + +def PositionFeedFromString(xml_string): + return atom.CreateClassFromXMLString(PositionFeed, xml_string) + + +class PositionFeedLink(atom.AtomBase): + """Link to PositionFeed embedded in PortfolioEntry. + + If a PortfolioFeed is queried with positions='true', the PositionFeeds + are inlined in the returned PortfolioEntries. These PositionFeeds are + accessible via PositionFeedLink's feed attribute. + """ + _tag = 'feedLink' + _namespace = GD_NAMESPACE + _attributes = atom.AtomBase._attributes.copy() + _attributes['href'] = 'href' + _children = atom.AtomBase._children.copy() + _children['{%s}feed' % atom.ATOM_NAMESPACE] = ( + 'feed', PositionFeed) + + def __init__(self, href=None, feed=None, **kwargs): + self.href = href + self.feed = feed + atom.AtomBase.__init__(self, **kwargs) + + +class PortfolioData(atom.AtomBase): + """The <gf:portfolioData> element.""" + _tag = 'portfolioData' + _namespace = GF_NAMESPACE + _attributes = atom.AtomBase._attributes.copy() + _attributes['currencyCode'] = 'currency_code' + _attributes['gainPercentage'] = 'gain_percentage' + _attributes['return1w'] = 'return1w' + _attributes['return4w'] = 'return4w' + _attributes['return3m'] = 'return3m' + _attributes['returnYTD'] = 'returnYTD' + _attributes['return1y'] = 'return1y' + _attributes['return3y'] = 'return3y' + _attributes['return5y'] = 'return5y' + _attributes['returnOverall'] = 'return_overall' + _children = atom.AtomBase._children.copy() + _children['{%s}costBasis' % GF_NAMESPACE] = ('cost_basis', CostBasis) + _children['{%s}daysGain' % GF_NAMESPACE] = ('days_gain', DaysGain) + _children['{%s}gain' % GF_NAMESPACE] = ('gain', Gain) + _children['{%s}marketValue' % GF_NAMESPACE] = ('market_value', MarketValue) + + def __init__(self, currency_code=None, gain_percentage=None, + return1w=None, return4w=None, return3m=None, returnYTD=None, + return1y=None, return3y=None, return5y=None, return_overall=None, + cost_basis=None, days_gain=None, gain=None, market_value=None, **kwargs): + self.currency_code = currency_code + self.gain_percentage = gain_percentage + self.return1w = return1w + self.return4w = return4w + self.return3m = return3m + self.returnYTD = returnYTD + self.return1y = return1y + self.return3y = return3y + self.return5y = return5y + self.return_overall = return_overall + self.cost_basis = cost_basis + self.days_gain = days_gain + self.gain = gain + self.market_value = market_value + atom.AtomBase.__init__(self, **kwargs) + + +def PortfolioDataFromString(xml_string): + return 
atom.CreateClassFromXMLString(PortfolioData, xml_string) + + +class PortfolioEntry(gdata.GDataEntry): + """An entry of the PortfolioFeed. + + A PortfolioEntry contains the portfolio's title along with PortfolioData + such as currency, total market value, and overall performance statistics. + """ + _tag = 'entry' + _namespace = atom.ATOM_NAMESPACE + _children = gdata.GDataEntry._children.copy() + _children['{%s}portfolioData' % GF_NAMESPACE] = ( + 'portfolio_data', PortfolioData) + _children['{%s}feedLink' % GD_NAMESPACE] = ( + 'feed_link', PositionFeedLink) + + def __init__(self, portfolio_data=None, feed_link=None, **kwargs): + self.portfolio_data = portfolio_data + self.feed_link = feed_link + gdata.GDataEntry.__init__(self, **kwargs) + + def portfolio_title(self): + return self.title.text + + def set_portfolio_title(self, portfolio_title): + self.title = atom.Title(text=portfolio_title, title_type='text') + + portfolio_title = property(portfolio_title, set_portfolio_title, + doc='The portfolio title as a string (i.e. portfolio.title.text).') + + def portfolio_id(self): + return self.id.text.split("/")[-1] + + portfolio_id = property(portfolio_id, + doc='The portfolio ID. Do not confuse with portfolio.id.') + + def positions(self): + if self.feed_link.feed: + return self.feed_link.feed.entry + else: + return None + + positions = property(positions, doc=""" + Inlined PositionEntries are returned if PortfolioFeed was queried + with positions='true'.""") + + +def PortfolioEntryFromString(xml_string): + return atom.CreateClassFromXMLString(PortfolioEntry, xml_string) + + +class PortfolioFeed(gdata.GDataFeed): + """A feed that lists all of the user's portfolios. + + A portfolio is a collection of positions that the user holds in various + securities, plus metadata. The PortfolioFeed lists all of the user's + portfolios as a list of PortfolioEntries. + """ + _tag = 'feed' + _namespace = atom.ATOM_NAMESPACE + _children = gdata.GDataFeed._children.copy() + _children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry', [PortfolioEntry]) + + +def PortfolioFeedFromString(xml_string): + return atom.CreateClassFromXMLString(PortfolioFeed, xml_string) + + diff --git a/gam/gdata/analytics/finance/data.py b/gam/gdata/analytics/finance/data.py new file mode 100755 index 00000000000..5e0caa8920b --- /dev/null +++ b/gam/gdata/analytics/finance/data.py @@ -0,0 +1,156 @@ +#!/usr/bin/python +# +# Copyright (C) 2009 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
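The transaction classes above can be built up directly and serialized to the XML the Finance feeds expect. A minimal construction sketch, assuming the module is importable as gdata.finance (its upstream name); the date, share count, and price are made-up illustrative values:

    import gdata.finance  # assumed import path for the module above

    txn = gdata.finance.TransactionEntry(
        transaction_data=gdata.finance.TransactionData(
            type='Buy', date='2009-06-25T00:00:00.000', shares='10',
            price=gdata.finance.Price(money=[
                gdata.finance.Money(amount='25.0', currency_code='USD')])))
    print txn.ToString()  # serialized <entry> containing <gf:transactionData>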
+ + +"""Contains the data classes of the Google Finance Portfolio Data API""" + + +__author__ = 'j.s@google.com (Jeff Scudder)' + + +import atom.core +import atom.data +import gdata.data +import gdata.opensearch.data + + +GF_TEMPLATE = '{http://schemas.google.com/finance/2007/}%s' + + +class Commission(atom.core.XmlElement): + """Commission for the transaction""" + _qname = GF_TEMPLATE % 'commission' + money = [gdata.data.Money] + + +class CostBasis(atom.core.XmlElement): + """Cost basis for the portfolio or position""" + _qname = GF_TEMPLATE % 'costBasis' + money = [gdata.data.Money] + + +class DaysGain(atom.core.XmlElement): + """Today's gain for the portfolio or position""" + _qname = GF_TEMPLATE % 'daysGain' + money = [gdata.data.Money] + + +class Gain(atom.core.XmlElement): + """Total gain for the portfolio or position""" + _qname = GF_TEMPLATE % 'gain' + money = [gdata.data.Money] + + +class MarketValue(atom.core.XmlElement): + """Market value for the portfolio or position""" + _qname = GF_TEMPLATE % 'marketValue' + money = [gdata.data.Money] + + +class PortfolioData(atom.core.XmlElement): + """Data for the portfolio""" + _qname = GF_TEMPLATE % 'portfolioData' + return_overall = 'returnOverall' + currency_code = 'currencyCode' + return3y = 'return3y' + return4w = 'return4w' + market_value = MarketValue + return_y_t_d = 'returnYTD' + cost_basis = CostBasis + gain_percentage = 'gainPercentage' + days_gain = DaysGain + return3m = 'return3m' + return5y = 'return5y' + return1w = 'return1w' + gain = Gain + return1y = 'return1y' + + +class PortfolioEntry(gdata.data.GDEntry): + """Describes an entry in a feed of Finance portfolios""" + portfolio_data = PortfolioData + + +class PortfolioFeed(gdata.data.GDFeed): + """Describes a Finance portfolio feed""" + entry = [PortfolioEntry] + + +class PositionData(atom.core.XmlElement): + """Data for the position""" + _qname = GF_TEMPLATE % 'positionData' + return_y_t_d = 'returnYTD' + return5y = 'return5y' + return_overall = 'returnOverall' + cost_basis = CostBasis + return3y = 'return3y' + return1y = 'return1y' + return4w = 'return4w' + shares = 'shares' + days_gain = DaysGain + gain_percentage = 'gainPercentage' + market_value = MarketValue + gain = Gain + return3m = 'return3m' + return1w = 'return1w' + + +class Price(atom.core.XmlElement): + """Price of the transaction""" + _qname = GF_TEMPLATE % 'price' + money = [gdata.data.Money] + + +class Symbol(atom.core.XmlElement): + """Stock symbol for the company""" + _qname = GF_TEMPLATE % 'symbol' + symbol = 'symbol' + exchange = 'exchange' + full_name = 'fullName' + + +class PositionEntry(gdata.data.GDEntry): + """Describes an entry in a feed of Finance positions""" + symbol = Symbol + position_data = PositionData + + +class PositionFeed(gdata.data.GDFeed): + """Describes a Finance position feed""" + entry = [PositionEntry] + + +class TransactionData(atom.core.XmlElement): + """Data for the transction""" + _qname = GF_TEMPLATE % 'transactionData' + shares = 'shares' + notes = 'notes' + date = 'date' + type = 'type' + commission = Commission + price = Price + + +class TransactionEntry(gdata.data.GDEntry): + """Describes an entry in a feed of Finance transactions""" + transaction_data = TransactionData + + +class TransactionFeed(gdata.data.GDFeed): + """Describes a Finance transaction feed""" + entry = [TransactionEntry] + + diff --git a/gam/gdata/analytics/finance/service.py b/gam/gdata/analytics/finance/service.py new file mode 100755 index 00000000000..6e3eb86d476 --- /dev/null +++ 
b/gam/gdata/analytics/finance/service.py @@ -0,0 +1,243 @@ +#!/usr/bin/python +# +# Copyright (C) 2009 Tan Swee Heng +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +"""Classes to interact with the Google Finance server.""" + + +__author__ = 'thesweeheng@gmail.com' + + +import gdata.service +import gdata.finance +import atom + + +class PortfolioQuery(gdata.service.Query): + """A query object for the list of a user's portfolios.""" + + def returns(self): + return self.get('returns', False) + + def set_returns(self, value): + if value is 'true' or value is True: + self['returns'] = 'true' + + returns = property(returns, set_returns, doc="The returns query parameter") + + def positions(self): + return self.get('positions', False) + + def set_positions(self, value): + if value is 'true' or value is True: + self['positions'] = 'true' + + positions = property(positions, set_positions, + doc="The positions query parameter") + + +class PositionQuery(gdata.service.Query): + """A query object for the list of a user's positions in a portfolio.""" + + def returns(self): + return self.get('returns', False) + + def set_returns(self, value): + if value is 'true' or value is True: + self['returns'] = 'true' + + returns = property(returns, set_returns, + doc="The returns query parameter") + + def transactions(self): + return self.get('transactions', False) + + def set_transactions(self, value): + if value is 'true' or value is True: + self['transactions'] = 'true' + + transactions = property(transactions, set_transactions, + doc="The transactions query parameter") + + +class FinanceService(gdata.service.GDataService): + + def __init__(self, email=None, password=None, source=None, + server='finance.google.com', **kwargs): + """Creates a client for the Finance service. + + Args: + email: string (optional) The user's email address, used for + authentication. + password: string (optional) The user's password. + source: string (optional) The name of the user's application. + server: string (optional) The name of the server to which a connection + will be opened. Default value: 'finance.google.com'. + **kwargs: The other parameters to pass to gdata.service.GDataService + constructor. + """ + gdata.service.GDataService.__init__(self, + email=email, password=password, service='finance', server=server, + **kwargs) + + def GetPortfolioFeed(self, query=None): + uri = '/finance/feeds/default/portfolios' + if query: + uri = PortfolioQuery(feed=uri, params=query).ToUri() + return self.Get(uri, converter=gdata.finance.PortfolioFeedFromString) + + def GetPositionFeed(self, portfolio_entry=None, portfolio_id=None, + query=None): + """ + Args: + portfolio_entry: PortfolioEntry (optional; see Notes) + portfolio_id: string (optional; see Notes) This may be obtained + from a PortfolioEntry's portfolio_id attribute. + query: PortfolioQuery (optional) + + Notes: + Either a PortfolioEntry OR a portfolio ID must be provided. 
+ """ + if portfolio_entry: + uri = portfolio_entry.GetSelfLink().href + '/positions' + elif portfolio_id: + uri = '/finance/feeds/default/portfolios/%s/positions' % portfolio_id + if query: + uri = PositionQuery(feed=uri, params=query).ToUri() + return self.Get(uri, converter=gdata.finance.PositionFeedFromString) + + def GetTransactionFeed(self, position_entry=None, + portfolio_id=None, ticker_id=None): + """ + Args: + position_entry: PositionEntry (optional; see Notes) + portfolio_id: string (optional; see Notes) This may be obtained + from a PortfolioEntry's portfolio_id attribute. + ticker_id: string (optional; see Notes) This may be obtained from + a PositionEntry's ticker_id attribute. Alternatively it can + be constructed using the security's exchange and symbol, + e.g. 'NASDAQ:GOOG' + + Notes: + Either a PositionEntry OR (a portfolio ID AND ticker ID) must + be provided. + """ + if position_entry: + uri = position_entry.GetSelfLink().href + '/transactions' + elif portfolio_id and ticker_id: + uri = '/finance/feeds/default/portfolios/%s/positions/%s/transactions' \ + % (portfolio_id, ticker_id) + return self.Get(uri, converter=gdata.finance.TransactionFeedFromString) + + def GetPortfolio(self, portfolio_id=None, query=None): + uri = '/finance/feeds/default/portfolios/%s' % portfolio_id + if query: + uri = PortfolioQuery(feed=uri, params=query).ToUri() + return self.Get(uri, converter=gdata.finance.PortfolioEntryFromString) + + def AddPortfolio(self, portfolio_entry=None): + uri = '/finance/feeds/default/portfolios' + return self.Post(portfolio_entry, uri, + converter=gdata.finance.PortfolioEntryFromString) + + def UpdatePortfolio(self, portfolio_entry=None): + uri = portfolio_entry.GetEditLink().href + return self.Put(portfolio_entry, uri, + converter=gdata.finance.PortfolioEntryFromString) + + def DeletePortfolio(self, portfolio_entry=None): + uri = portfolio_entry.GetEditLink().href + return self.Delete(uri) + + def GetPosition(self, portfolio_id=None, ticker_id=None, query=None): + uri = '/finance/feeds/default/portfolios/%s/positions/%s' \ + % (portfolio_id, ticker_id) + if query: + uri = PositionQuery(feed=uri, params=query).ToUri() + return self.Get(uri, converter=gdata.finance.PositionEntryFromString) + + def DeletePosition(self, position_entry=None, + portfolio_id=None, ticker_id=None, transaction_feed=None): + """A position is deleted by deleting all its transactions. + + Args: + position_entry: PositionEntry (optional; see Notes) + portfolio_id: string (optional; see Notes) This may be obtained + from a PortfolioEntry's portfolio_id attribute. + ticker_id: string (optional; see Notes) This may be obtained from + a PositionEntry's ticker_id attribute. Alternatively it can + be constructed using the security's exchange and symbol, + e.g. 'NASDAQ:GOOG' + transaction_feed: TransactionFeed (optional; see Notes) + + Notes: + Either a PositionEntry OR (a portfolio ID AND ticker ID) OR + a TransactionFeed must be provided. 
+ """ + if transaction_feed: + feed = transaction_feed + else: + if position_entry: + feed = self.GetTransactionFeed(position_entry=position_entry) + elif portfolio_id and ticker_id: + feed = self.GetTransactionFeed( + portfolio_id=portfolio_id, ticker_id=ticker_id) + for txn in feed.entry: + self.DeleteTransaction(txn) + return True + + def GetTransaction(self, portfolio_id=None, ticker_id=None, + transaction_id=None): + uri = '/finance/feeds/default/portfolios/%s/positions/%s/transactions/%s' \ + % (portfolio_id, ticker_id, transaction_id) + return self.Get(uri, converter=gdata.finance.TransactionEntryFromString) + + def AddTransaction(self, transaction_entry=None, transaction_feed = None, + position_entry=None, portfolio_id=None, ticker_id=None): + """ + Args: + transaction_entry: TransactionEntry (required) + transaction_feed: TransactionFeed (optional; see Notes) + position_entry: PositionEntry (optional; see Notes) + portfolio_id: string (optional; see Notes) This may be obtained + from a PortfolioEntry's portfolio_id attribute. + ticker_id: string (optional; see Notes) This may be obtained from + a PositionEntry's ticker_id attribute. Alternatively it can + be constructed using the security's exchange and symbol, + e.g. 'NASDAQ:GOOG' + + Notes: + Either a TransactionFeed OR a PositionEntry OR (a portfolio ID AND + ticker ID) must be provided. + """ + if transaction_feed: + uri = transaction_feed.GetPostLink().href + elif position_entry: + uri = position_entry.GetSelfLink().href + '/transactions' + elif portfolio_id and ticker_id: + uri = '/finance/feeds/default/portfolios/%s/positions/%s/transactions' \ + % (portfolio_id, ticker_id) + return self.Post(transaction_entry, uri, + converter=gdata.finance.TransactionEntryFromString) + + def UpdateTransaction(self, transaction_entry=None): + uri = transaction_entry.GetEditLink().href + return self.Put(transaction_entry, uri, + converter=gdata.finance.TransactionEntryFromString) + + def DeleteTransaction(self, transaction_entry=None): + uri = transaction_entry.GetEditLink().href + return self.Delete(uri) diff --git a/gam/gdata/analytics/gauth.py b/gam/gdata/analytics/gauth.py new file mode 100755 index 00000000000..4f2f6d1412e --- /dev/null +++ b/gam/gdata/analytics/gauth.py @@ -0,0 +1,1552 @@ +#!/usr/bin/env python +# +# Copyright (C) 2009 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +# This module is used for version 2 of the Google Data APIs. + + +"""Provides auth related token classes and functions for Google Data APIs. + +Token classes represent a user's authorization of this app to access their +data. Usually these are not created directly but by a GDClient object. 
+ +ClientLoginToken +AuthSubToken +SecureAuthSubToken +OAuthHmacToken +OAuthRsaToken +TwoLeggedOAuthHmacToken +TwoLeggedOAuthRsaToken + +Functions which are often used in application code (as opposed to just within +the gdata-python-client library) are the following: + +generate_auth_sub_url +authorize_request_token + +The following are helper functions which are used to save and load auth token +objects in the App Engine datastore. These should only be used if you are using +this library within App Engine: + +ae_load +ae_save +""" + + +import datetime +import time +import random +import urllib +import urlparse +import atom.http_core + +try: + import simplejson +except ImportError: + try: + # Try to import from django, should work on App Engine + from django.utils import simplejson + except ImportError: + # Should work for Python2.6 and higher. + import json as simplejson + +try: + from urlparse import parse_qsl +except ImportError: + from cgi import parse_qsl + + +__author__ = 'j.s@google.com (Jeff Scudder)' + + +PROGRAMMATIC_AUTH_LABEL = 'GoogleLogin auth=' +AUTHSUB_AUTH_LABEL = 'AuthSub token=' +OAUTH2_AUTH_LABEL = 'OAuth ' + + +# This dict provides the AuthSub and OAuth scopes for all services by service +# name. The service name (key) is used in ClientLogin requests. +AUTH_SCOPES = { + 'cl': ( # Google Calendar API + 'https://www.google.com/calendar/feeds/', + 'http://www.google.com/calendar/feeds/'), + 'gbase': ( # Google Base API + 'http://base.google.com/base/feeds/', + 'http://www.google.com/base/feeds/'), + 'blogger': ( # Blogger API + 'http://www.blogger.com/feeds/',), + 'codesearch': ( # Google Code Search API + 'http://www.google.com/codesearch/feeds/',), + 'cp': ( # Contacts API + 'https://www.google.com/m8/feeds/', + 'http://www.google.com/m8/feeds/'), + 'finance': ( # Google Finance API + 'http://finance.google.com/finance/feeds/',), + 'health': ( # Google Health API + 'https://www.google.com/health/feeds/',), + 'writely': ( # Documents List API + 'https://docs.google.com/feeds/', + 'https://spreadsheets.google.com/feeds/', + 'https://docs.googleusercontent.com/'), + 'lh2': ( # Picasa Web Albums API + 'http://picasaweb.google.com/data/',), + 'apps': ( # Google Apps Provisioning API + 'http://www.google.com/a/feeds/', + 'https://www.google.com/a/feeds/', + 'http://apps-apis.google.com/a/feeds/', + 'https://apps-apis.google.com/a/feeds/'), + 'weaver': ( # Health H9 Sandbox + 'https://www.google.com/h9/feeds/',), + 'wise': ( # Spreadsheets Data API + 'https://spreadsheets.google.com/feeds/'), + 'sitemaps': ( # Google Webmaster Tools API + 'https://www.google.com/webmasters/tools/feeds/',), + 'youtube': ( # YouTube API + 'http://gdata.youtube.com/feeds/api/', + 'http://uploads.gdata.youtube.com/feeds/api', + 'http://gdata.youtube.com/action/GetUploadToken'), + 'books': ( # Google Books API + 'http://www.google.com/books/feeds/',), + 'analytics': ( # Google Analytics API + 'https://www.google.com/analytics/feeds/',), + 'jotspot': ( # Google Sites API + 'http://sites.google.com/feeds/', + 'https://sites.google.com/feeds/'), + 'local': ( # Google Maps Data API + 'http://maps.google.com/maps/feeds/',), + 'code': ( # Project Hosting Data API + 'http://code.google.com/feeds/issues',)} + + + +class Error(Exception): + pass + + +class UnsupportedTokenType(Error): + """Raised when token to or from blob is unable to convert the token.""" + pass + + +class OAuth2AccessTokenError(Error): + """Raised when an OAuth2 error occurs.""" + def __init__(self, error_message): + self.error_message = 
error_message + + +# ClientLogin functions and classes. +def generate_client_login_request_body(email, password, service, source, + account_type='HOSTED_OR_GOOGLE', captcha_token=None, + captcha_response=None): + """Creates the body of the autentication request + + See http://code.google.com/apis/accounts/AuthForInstalledApps.html#Request + for more details. + + Args: + email: str + password: str + service: str + source: str + account_type: str (optional) Defaul is 'HOSTED_OR_GOOGLE', other valid + values are 'GOOGLE' and 'HOSTED' + captcha_token: str (optional) + captcha_response: str (optional) + + Returns: + The HTTP body to send in a request for a client login token. + """ + # Create a POST body containing the user's credentials. + request_fields = {'Email': email, + 'Passwd': password, + 'accountType': account_type, + 'service': service, + 'source': source} + if captcha_token and captcha_response: + # Send the captcha token and response as part of the POST body if the + # user is responding to a captch challenge. + request_fields['logintoken'] = captcha_token + request_fields['logincaptcha'] = captcha_response + return urllib.urlencode(request_fields) + + +GenerateClientLoginRequestBody = generate_client_login_request_body + + +def get_client_login_token_string(http_body): + """Returns the token value for a ClientLoginToken. + + Reads the token from the server's response to a Client Login request and + creates the token value string to use in requests. + + Args: + http_body: str The body of the server's HTTP response to a Client Login + request + + Returns: + The token value string for a ClientLoginToken. + """ + for response_line in http_body.splitlines(): + if response_line.startswith('Auth='): + # Strip off the leading Auth= and return the Authorization value. + return response_line[5:] + return None + + +GetClientLoginTokenString = get_client_login_token_string + + +def get_captcha_challenge(http_body, + captcha_base_url='http://www.google.com/accounts/'): + """Returns the URL and token for a CAPTCHA challenge issued by the server. + + Args: + http_body: str The body of the HTTP response from the server which + contains the CAPTCHA challenge. + captcha_base_url: str This function returns a full URL for viewing the + challenge image which is built from the server's response. This + base_url is used as the beginning of the URL because the server + only provides the end of the URL. For example the server provides + 'Captcha?ctoken=Hi...N' and the URL for the image is + 'http://www.google.com/accounts/Captcha?ctoken=Hi...N' + + Returns: + A dictionary containing the information needed to repond to the CAPTCHA + challenge, the image URL and the ID token of the challenge. The + dictionary is in the form: + {'token': string identifying the CAPTCHA image, + 'url': string containing the URL of the image} + Returns None if there was no CAPTCHA challenge in the response. 
+ """ + contains_captcha_challenge = False + captcha_parameters = {} + for response_line in http_body.splitlines(): + if response_line.startswith('Error=CaptchaRequired'): + contains_captcha_challenge = True + elif response_line.startswith('CaptchaToken='): + # Strip off the leading CaptchaToken= + captcha_parameters['token'] = response_line[13:] + elif response_line.startswith('CaptchaUrl='): + captcha_parameters['url'] = '%s%s' % (captcha_base_url, + response_line[11:]) + if contains_captcha_challenge: + return captcha_parameters + else: + return None + + +GetCaptchaChallenge = get_captcha_challenge + + +class ClientLoginToken(object): + + def __init__(self, token_string): + self.token_string = token_string + + def modify_request(self, http_request): + http_request.headers['Authorization'] = '%s%s' % (PROGRAMMATIC_AUTH_LABEL, + self.token_string) + + ModifyRequest = modify_request + + +# AuthSub functions and classes. +def _to_uri(str_or_uri): + if isinstance(str_or_uri, (str, unicode)): + return atom.http_core.Uri.parse_uri(str_or_uri) + return str_or_uri + + +def generate_auth_sub_url(next, scopes, secure=False, session=True, + request_url=atom.http_core.parse_uri( + 'https://www.google.com/accounts/AuthSubRequest'), + domain='default', scopes_param_prefix='auth_sub_scopes'): + """Constructs a URI for requesting a multiscope AuthSub token. + + The generated token will contain a URL parameter to pass along the + requested scopes to the next URL. When the Google Accounts page + redirects the broswser to the 'next' URL, it appends the single use + AuthSub token value to the URL as a URL parameter with the key 'token'. + However, the information about which scopes were requested is not + included by Google Accounts. This method adds the scopes to the next + URL before making the request so that the redirect will be sent to + a page, and both the token value and the list of scopes for which the token + was requested. + + Args: + next: atom.http_core.Uri or string The URL user will be sent to after + authorizing this web application to access their data. + scopes: list containint strings or atom.http_core.Uri objects. The URLs + of the services to be accessed. Could also be a single string + or single atom.http_core.Uri for requesting just one scope. + secure: boolean (optional) Determines whether or not the issued token + is a secure token. + session: boolean (optional) Determines whether or not the issued token + can be upgraded to a session token. + request_url: atom.http_core.Uri or str The beginning of the request URL. + This is normally + 'http://www.google.com/accounts/AuthSubRequest' or + '/accounts/AuthSubRequest' + domain: The domain which the account is part of. This is used for Google + Apps accounts, the default value is 'default' which means that + the requested account is a Google Account (@gmail.com for + example) + scopes_param_prefix: str (optional) The requested scopes are added as a + URL parameter to the next URL so that the page at + the 'next' URL can extract the token value and the + valid scopes from the URL. The key for the URL + parameter defaults to 'auth_sub_scopes' + + Returns: + An atom.http_core.Uri which the user's browser should be directed to in + order to authorize this application to access their information. + """ + if isinstance(next, (str, unicode)): + next = atom.http_core.Uri.parse_uri(next) + # If the user passed in a string instead of a list for scopes, convert to + # a single item tuple. 
+ if isinstance(scopes, (str, unicode, atom.http_core.Uri)): + scopes = (scopes,) + scopes_string = ' '.join([str(scope) for scope in scopes]) + next.query[scopes_param_prefix] = scopes_string + + if isinstance(request_url, (str, unicode)): + request_url = atom.http_core.Uri.parse_uri(request_url) + request_url.query['next'] = str(next) + request_url.query['scope'] = scopes_string + if session: + request_url.query['session'] = '1' + else: + request_url.query['session'] = '0' + if secure: + request_url.query['secure'] = '1' + else: + request_url.query['secure'] = '0' + request_url.query['hd'] = domain + return request_url + + +def auth_sub_string_from_url(url, scopes_param_prefix='auth_sub_scopes'): + """Finds the token string (and scopes) after the browser is redirected. + + After the Google Accounts AuthSub pages redirect the user's broswer back to + the web application (using the 'next' URL from the request) the web app must + extract the token from the current page's URL. The token is provided as a + URL parameter named 'token' and if generate_auth_sub_url was used to create + the request, the token's valid scopes are included in a URL parameter whose + name is specified in scopes_param_prefix. + + Args: + url: atom.url.Url or str representing the current URL. The token value + and valid scopes should be included as URL parameters. + scopes_param_prefix: str (optional) The URL parameter key which maps to + the list of valid scopes for the token. + + Returns: + A tuple containing the token value as a string, and a tuple of scopes + (as atom.http_core.Uri objects) which are URL prefixes under which this + token grants permission to read and write user data. + (token_string, (scope_uri, scope_uri, scope_uri, ...)) + If no scopes were included in the URL, the second value in the tuple is + None. If there was no token param in the url, the tuple returned is + (None, None) + """ + if isinstance(url, (str, unicode)): + url = atom.http_core.Uri.parse_uri(url) + if 'token' not in url.query: + return (None, None) + token = url.query['token'] + # TODO: decide whether no scopes should be None or (). + scopes = None # Default to None for no scopes. + if scopes_param_prefix in url.query: + scopes = tuple(url.query[scopes_param_prefix].split(' ')) + return (token, scopes) + + +AuthSubStringFromUrl = auth_sub_string_from_url + + +def auth_sub_string_from_body(http_body): + """Extracts the AuthSub token from an HTTP body string. + + Used to find the new session token after making a request to upgrade a + single use AuthSub token. + + Args: + http_body: str The repsonse from the server which contains the AuthSub + key. For example, this function would find the new session token + from the server's response to an upgrade token request. + + Returns: + The raw token value string to use in an AuthSubToken object. + """ + for response_line in http_body.splitlines(): + if response_line.startswith('Token='): + # Strip off Token= and return the token value string. + return response_line[6:] + return None + + +class AuthSubToken(object): + + def __init__(self, token_string, scopes=None): + self.token_string = token_string + self.scopes = scopes or [] + + def modify_request(self, http_request): + """Sets Authorization header, allows app to act on the user's behalf.""" + http_request.headers['Authorization'] = '%s%s' % (AUTHSUB_AUTH_LABEL, + self.token_string) + + ModifyRequest = modify_request + + def from_url(str_or_uri): + """Creates a new AuthSubToken using information in the URL. 
+ + Uses auth_sub_string_from_url. + + Args: + str_or_uri: The current page's URL (as a str or atom.http_core.Uri) + which should contain a token query parameter since the + Google auth server redirected the user's browser to this + URL. + """ + token_and_scopes = auth_sub_string_from_url(str_or_uri) + return AuthSubToken(token_and_scopes[0], token_and_scopes[1]) + + from_url = staticmethod(from_url) + FromUrl = from_url + + def _upgrade_token(self, http_body): + """Replaces the token value with a session token from the auth server. + + Uses the response of a token upgrade request to modify this token. Uses + auth_sub_string_from_body. + """ + self.token_string = auth_sub_string_from_body(http_body) + + +# Functions and classes for Secure-mode AuthSub +def build_auth_sub_data(http_request, timestamp, nonce): + """Creates the data string which must be RSA-signed in secure requests. + + For more details see the documenation on secure AuthSub requests: + http://code.google.com/apis/accounts/docs/AuthSub.html#signingrequests + + Args: + http_request: The request being made to the server. The Request's URL + must be complete before this signature is calculated as any changes + to the URL will invalidate the signature. + nonce: str Random 64-bit, unsigned number encoded as an ASCII string in + decimal format. The nonce/timestamp pair should always be unique to + prevent replay attacks. + timestamp: Integer representing the time the request is sent. The + timestamp should be expressed in number of seconds after January 1, + 1970 00:00:00 GMT. + """ + return '%s %s %s %s' % (http_request.method, str(http_request.uri), + str(timestamp), nonce) + + +def generate_signature(data, rsa_key): + """Signs the data string for a secure AuthSub request.""" + import base64 + try: + from tlslite.utils import keyfactory + except ImportError: + from gdata.tlslite.utils import keyfactory + private_key = keyfactory.parsePrivateKey(rsa_key) + signed = private_key.hashAndSign(data) + # Python2.3 and lower does not have the base64.b64encode function. + if hasattr(base64, 'b64encode'): + return base64.b64encode(signed) + else: + return base64.encodestring(signed).replace('\n', '') + + +class SecureAuthSubToken(AuthSubToken): + + def __init__(self, token_string, rsa_private_key, scopes=None): + self.token_string = token_string + self.scopes = scopes or [] + self.rsa_private_key = rsa_private_key + + def from_url(str_or_uri, rsa_private_key): + """Creates a new SecureAuthSubToken using information in the URL. + + Uses auth_sub_string_from_url. + + Args: + str_or_uri: The current page's URL (as a str or atom.http_core.Uri) + which should contain a token query parameter since the Google auth + server redirected the user's browser to this URL. + rsa_private_key: str the private RSA key cert used to sign all requests + made with this token. + """ + token_and_scopes = auth_sub_string_from_url(str_or_uri) + return SecureAuthSubToken(token_and_scopes[0], rsa_private_key, + token_and_scopes[1]) + + from_url = staticmethod(from_url) + FromUrl = from_url + + def modify_request(self, http_request): + """Sets the Authorization header and includes a digital signature. + + Calculates a digital signature using the private RSA key, a timestamp + (uses now at the time this method is called) and a random nonce. + + Args: + http_request: The atom.http_core.HttpRequest which contains all of the + information needed to send a request to the remote server. 
The + URL and the method of the request must be already set and cannot be + changed after this token signs the request, or the signature will + not be valid. + """ + timestamp = str(int(time.time())) + nonce = ''.join([str(random.randint(0, 9)) for i in xrange(15)]) + data = build_auth_sub_data(http_request, timestamp, nonce) + signature = generate_signature(data, self.rsa_private_key) + http_request.headers['Authorization'] = ( + '%s%s sigalg="rsa-sha1" data="%s" sig="%s"' % (AUTHSUB_AUTH_LABEL, + self.token_string, data, signature)) + + ModifyRequest = modify_request + + +# OAuth functions and classes. +RSA_SHA1 = 'RSA-SHA1' +HMAC_SHA1 = 'HMAC-SHA1' + + +def build_oauth_base_string(http_request, consumer_key, nonce, signaure_type, + timestamp, version, next='oob', token=None, + verifier=None): + """Generates the base string to be signed in the OAuth request. + + Args: + http_request: The request being made to the server. The Request's URL + must be complete before this signature is calculated as any changes + to the URL will invalidate the signature. + consumer_key: Domain identifying the third-party web application. This is + the domain used when registering the application with Google. It + identifies who is making the request on behalf of the user. + nonce: Random 64-bit, unsigned number encoded as an ASCII string in decimal + format. The nonce/timestamp pair should always be unique to prevent + replay attacks. + signaure_type: either RSA_SHA1 or HMAC_SHA1 + timestamp: Integer representing the time the request is sent. The + timestamp should be expressed in number of seconds after January 1, + 1970 00:00:00 GMT. + version: The OAuth version used by the requesting web application. This + value must be '1.0' or '1.0a'. If not provided, Google assumes version + 1.0 is in use. + next: The URL the user should be redirected to after granting access + to a Google service(s). It can include url-encoded query parameters. + The default value is 'oob'. (This is the oauth_callback.) + token: The string for the OAuth request token or OAuth access token. + verifier: str Sent as the oauth_verifier and required when upgrading a + request token to an access token. + """ + # First we must build the canonical base string for the request. + params = http_request.uri.query.copy() + params['oauth_consumer_key'] = consumer_key + params['oauth_nonce'] = nonce + params['oauth_signature_method'] = signaure_type + params['oauth_timestamp'] = str(timestamp) + if next is not None: + params['oauth_callback'] = str(next) + if token is not None: + params['oauth_token'] = token + if version is not None: + params['oauth_version'] = version + if verifier is not None: + params['oauth_verifier'] = verifier + # We need to get the key value pairs in lexigraphically sorted order. 
+ sorted_keys = None + try: + sorted_keys = sorted(params.keys()) + # The sorted function is not available in Python2.3 and lower + except NameError: + sorted_keys = params.keys() + sorted_keys.sort() + pairs = [] + for key in sorted_keys: + pairs.append('%s=%s' % (urllib.quote(key, safe='~'), + urllib.quote(params[key], safe='~'))) + # We want to escape /'s too, so use safe='~' + all_parameters = urllib.quote('&'.join(pairs), safe='~') + normailzed_host = http_request.uri.host.lower() + normalized_scheme = (http_request.uri.scheme or 'http').lower() + non_default_port = None + if (http_request.uri.port is not None + and ((normalized_scheme == 'https' and http_request.uri.port != 443) + or (normalized_scheme == 'http' and http_request.uri.port != 80))): + non_default_port = http_request.uri.port + path = http_request.uri.path or '/' + request_path = None + if not path.startswith('/'): + path = '/%s' % path + if non_default_port is not None: + # Set the only safe char in url encoding to ~ since we want to escape / + # as well. + request_path = urllib.quote('%s://%s:%s%s' % ( + normalized_scheme, normailzed_host, non_default_port, path), safe='~') + else: + # Set the only safe char in url encoding to ~ since we want to escape / + # as well. + request_path = urllib.quote('%s://%s%s' % ( + normalized_scheme, normailzed_host, path), safe='~') + # TODO: ensure that token escaping logic is correct, not sure if the token + # value should be double escaped instead of single. + base_string = '&'.join((http_request.method.upper(), request_path, + all_parameters)) + # Now we have the base string, we can calculate the oauth_signature. + return base_string + + +def generate_hmac_signature(http_request, consumer_key, consumer_secret, + timestamp, nonce, version, next='oob', + token=None, token_secret=None, verifier=None): + import hmac + import base64 + base_string = build_oauth_base_string( + http_request, consumer_key, nonce, HMAC_SHA1, timestamp, version, + next, token, verifier=verifier) + hash_key = None + hashed = None + if token_secret is not None: + hash_key = '%s&%s' % (urllib.quote(consumer_secret, safe='~'), + urllib.quote(token_secret, safe='~')) + else: + hash_key = '%s&' % urllib.quote(consumer_secret, safe='~') + try: + import hashlib + hashed = hmac.new(hash_key, base_string, hashlib.sha1) + except ImportError: + import sha + hashed = hmac.new(hash_key, base_string, sha) + # Python2.3 does not have base64.b64encode. + if hasattr(base64, 'b64encode'): + return base64.b64encode(hashed.digest()) + else: + return base64.encodestring(hashed.digest()).replace('\n', '') + + +def generate_rsa_signature(http_request, consumer_key, rsa_key, + timestamp, nonce, version, next='oob', + token=None, token_secret=None, verifier=None): + import base64 + try: + from tlslite.utils import keyfactory + except ImportError: + from gdata.tlslite.utils import keyfactory + base_string = build_oauth_base_string( + http_request, consumer_key, nonce, RSA_SHA1, timestamp, version, + next, token, verifier=verifier) + private_key = keyfactory.parsePrivateKey(rsa_key) + # Sign using the key + signed = private_key.hashAndSign(base_string) + # Python2.3 does not have base64.b64encode. + if hasattr(base64, 'b64encode'): + return base64.b64encode(signed) + else: + return base64.encodestring(signed).replace('\n', '') + + +def generate_auth_header(consumer_key, timestamp, nonce, signature_type, + signature, version='1.0', next=None, token=None, + verifier=None): + """Builds the Authorization header to be sent in the request. 
+ + Args: + consumer_key: Identifies the application making the request (str). + timestamp: + nonce: + signature_type: One of either HMAC_SHA1 or RSA_SHA1 + signature: The HMAC or RSA signature for the request as a base64 + encoded string. + version: The version of the OAuth protocol that this request is using. + Default is '1.0' + next: The URL of the page that the user's browser should be sent to + after they authorize the token. (Optional) + token: str The OAuth token value to be used in the oauth_token parameter + of the header. + verifier: str The OAuth verifier which must be included when you are + upgrading a request token to an access token. + """ + params = { + 'oauth_consumer_key': consumer_key, + 'oauth_version': version, + 'oauth_nonce': nonce, + 'oauth_timestamp': str(timestamp), + 'oauth_signature_method': signature_type, + 'oauth_signature': signature} + if next is not None: + params['oauth_callback'] = str(next) + if token is not None: + params['oauth_token'] = token + if verifier is not None: + params['oauth_verifier'] = verifier + pairs = [ + '%s="%s"' % ( + k, urllib.quote(v, safe='~')) for k, v in params.iteritems()] + return 'OAuth %s' % (', '.join(pairs)) + + +REQUEST_TOKEN_URL = 'https://www.google.com/accounts/OAuthGetRequestToken' +ACCESS_TOKEN_URL = 'https://www.google.com/accounts/OAuthGetAccessToken' + + +def generate_request_for_request_token( + consumer_key, signature_type, scopes, rsa_key=None, consumer_secret=None, + auth_server_url=REQUEST_TOKEN_URL, next='oob', version='1.0'): + """Creates request to be sent to auth server to get an OAuth request token. + + Args: + consumer_key: + signature_type: either RSA_SHA1 or HMAC_SHA1. The rsa_key must be + provided if the signature type is RSA but if the signature method + is HMAC, the consumer_secret must be used. + scopes: List of URL prefixes for the data which we want to access. For + example, to request access to the user's Blogger and Google Calendar + data, we would request + ['http://www.blogger.com/feeds/', + 'https://www.google.com/calendar/feeds/', + 'http://www.google.com/calendar/feeds/'] + rsa_key: Only used if the signature method is RSA_SHA1. + consumer_secret: Only used if the signature method is HMAC_SHA1. + auth_server_url: The URL to which the token request should be directed. + Defaults to 'https://www.google.com/accounts/OAuthGetRequestToken'. + next: The URL of the page that the user's browser should be sent to + after they authorize the token. (Optional) + version: The OAuth version used by the requesting web application. + Defaults to '1.0a' + + Returns: + An atom.http_core.HttpRequest object with the URL, Authorization header + and body filled in. + """ + request = atom.http_core.HttpRequest(auth_server_url, 'POST') + # Add the requested auth scopes to the Auth request URL. 
+ if scopes: + request.uri.query['scope'] = ' '.join(scopes) + + timestamp = str(int(time.time())) + nonce = ''.join([str(random.randint(0, 9)) for i in xrange(15)]) + signature = None + if signature_type == HMAC_SHA1: + signature = generate_hmac_signature( + request, consumer_key, consumer_secret, timestamp, nonce, version, + next=next) + elif signature_type == RSA_SHA1: + signature = generate_rsa_signature( + request, consumer_key, rsa_key, timestamp, nonce, version, next=next) + else: + return None + + request.headers['Authorization'] = generate_auth_header( + consumer_key, timestamp, nonce, signature_type, signature, version, + next) + request.headers['Content-Length'] = '0' + return request + + +def generate_request_for_access_token( + request_token, auth_server_url=ACCESS_TOKEN_URL): + """Creates a request to ask the OAuth server for an access token. + + Requires a request token which the user has authorized. See the + documentation on OAuth with Google Data for more details: + http://code.google.com/apis/accounts/docs/OAuth.html#AccessToken + + Args: + request_token: An OAuthHmacToken or OAuthRsaToken which the user has + approved using their browser. + auth_server_url: (optional) The URL at which the OAuth access token is + requested. Defaults to + https://www.google.com/accounts/OAuthGetAccessToken + + Returns: + A new HttpRequest object which can be sent to the OAuth server to + request an OAuth Access Token. + """ + http_request = atom.http_core.HttpRequest(auth_server_url, 'POST') + http_request.headers['Content-Length'] = '0' + return request_token.modify_request(http_request) + + +def oauth_token_info_from_body(http_body): + """Exracts an OAuth request token from the server's response. + + Returns: + A tuple of strings containing the OAuth token and token secret. If + neither of these are present in the body, returns (None, None) + """ + token = None + token_secret = None + for pair in http_body.split('&'): + if pair.startswith('oauth_token='): + token = urllib.unquote(pair[len('oauth_token='):]) + if pair.startswith('oauth_token_secret='): + token_secret = urllib.unquote(pair[len('oauth_token_secret='):]) + return (token, token_secret) + + +def hmac_token_from_body(http_body, consumer_key, consumer_secret, + auth_state): + token_value, token_secret = oauth_token_info_from_body(http_body) + token = OAuthHmacToken(consumer_key, consumer_secret, token_value, + token_secret, auth_state) + return token + + +def rsa_token_from_body(http_body, consumer_key, rsa_private_key, + auth_state): + token_value, token_secret = oauth_token_info_from_body(http_body) + token = OAuthRsaToken(consumer_key, rsa_private_key, token_value, + token_secret, auth_state) + return token + + +DEFAULT_DOMAIN = 'default' +OAUTH_AUTHORIZE_URL = 'https://www.google.com/accounts/OAuthAuthorizeToken' + + +def generate_oauth_authorization_url( + token, next=None, hd=DEFAULT_DOMAIN, hl=None, btmpl=None, + auth_server=OAUTH_AUTHORIZE_URL): + """Creates a URL for the page where the request token can be authorized. + + Args: + token: str The request token from the OAuth server. + next: str (optional) URL the user should be redirected to after granting + access to a Google service(s). It can include url-encoded query + parameters. + hd: str (optional) Identifies a particular hosted domain account to be + accessed (for example, 'mycollege.edu'). Uses 'default' to specify a + regular Google account ('username@gmail.com'). 
+    hl: str (optional) An ISO 639 language code identifying what language the
+        approval page should be translated into (for example, 'hl=en' for
+        English). The default is the user's selected language.
+    btmpl: str (optional) Forces a mobile version of the approval page. The
+        only accepted value is 'mobile'.
+    auth_server: str (optional) The start of the token authorization web
+        page. Defaults to
+        'https://www.google.com/accounts/OAuthAuthorizeToken'
+
+  Returns:
+    An atom.http_core.Uri pointing to the token authorization page where the
+    user may allow or deny this app to access their Google data.
+  """
+  uri = atom.http_core.Uri.parse_uri(auth_server)
+  uri.query['oauth_token'] = token
+  uri.query['hd'] = hd
+  if next is not None:
+    uri.query['oauth_callback'] = str(next)
+  if hl is not None:
+    uri.query['hl'] = hl
+  if btmpl is not None:
+    uri.query['btmpl'] = btmpl
+  return uri
+
+
+def oauth_token_info_from_url(url):
+  """Extracts an OAuth access token from the redirected page's URL.
+
+  Returns:
+    A tuple of strings containing the OAuth token and the OAuth verifier which
+    need to be sent when upgrading a request token to an access token.
+  """
+  if isinstance(url, (str, unicode)):
+    url = atom.http_core.Uri.parse_uri(url)
+  token = None
+  verifier = None
+  if 'oauth_token' in url.query:
+    token = urllib.unquote(url.query['oauth_token'])
+  if 'oauth_verifier' in url.query:
+    verifier = urllib.unquote(url.query['oauth_verifier'])
+  return (token, verifier)
+
+
+def authorize_request_token(request_token, url):
+  """Adds information to request token to allow it to become an access token.
+
+  Modifies the request_token object passed in by setting and unsetting the
+  necessary fields to allow this token to form a valid upgrade request.
+
+  Args:
+    request_token: The OAuth request token which has been authorized by the
+        user. In order for this token to be upgraded to an access token,
+        certain fields must be extracted from the URL and added to the token
+        so that they can be passed in an upgrade-token request.
+    url: The URL of the current page which the user's browser was redirected
+        to after they authorized access for the app. This function extracts
+        information from the URL which is needed to upgrade the token from
+        a request token to an access token.
+
+  Returns:
+    The same token object which was passed in.
+  """
+  token, verifier = oauth_token_info_from_url(url)
+  request_token.token = token
+  request_token.verifier = verifier
+  request_token.auth_state = AUTHORIZED_REQUEST_TOKEN
+  return request_token
+
+
+AuthorizeRequestToken = authorize_request_token
+
+
+def upgrade_to_access_token(request_token, server_response_body):
+  """Extracts access token information from response to an upgrade request.
+
+  Once the server has responded with the new token info for the OAuth
+  access token, this method modifies the request_token to set and unset
+  necessary fields to create valid OAuth authorization headers for requests.
+
+  Args:
+    request_token: An OAuth token which this function modifies to allow it
+        to be used as an access token.
+    server_response_body: str The server's response to an OAuthAuthorizeToken
+        request. This should contain the new token and token_secret which
+        are used to generate the signature and parameters of the Authorization
+        header in subsequent requests to Google Data APIs.
+
+  Returns:
+    The same token object which was passed in.
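+
+  Example (illustrative sketch only; assumes `client' is an
+  atom.http_core.HttpClient and `request_token' has already been
+  authorized by the user):
+
+    upgrade_request = generate_request_for_access_token(request_token)
+    response = client.request(upgrade_request)
+    access_token = upgrade_to_access_token(request_token, response.read())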
+ """ + token, token_secret = oauth_token_info_from_body(server_response_body) + request_token.token = token + request_token.token_secret = token_secret + request_token.auth_state = ACCESS_TOKEN + request_token.next = None + request_token.verifier = None + return request_token + + +UpgradeToAccessToken = upgrade_to_access_token + + +REQUEST_TOKEN = 1 +AUTHORIZED_REQUEST_TOKEN = 2 +ACCESS_TOKEN = 3 + + +class OAuthHmacToken(object): + SIGNATURE_METHOD = HMAC_SHA1 + + def __init__(self, consumer_key, consumer_secret, token, token_secret, + auth_state, next=None, verifier=None): + self.consumer_key = consumer_key + self.consumer_secret = consumer_secret + self.token = token + self.token_secret = token_secret + self.auth_state = auth_state + self.next = next + self.verifier = verifier # Used to convert request token to access token. + + def generate_authorization_url( + self, google_apps_domain=DEFAULT_DOMAIN, language=None, btmpl=None, + auth_server=OAUTH_AUTHORIZE_URL): + """Creates the URL at which the user can authorize this app to access. + + Args: + google_apps_domain: str (optional) If the user should be signing in + using an account under a known Google Apps domain, provide the + domain name ('example.com') here. If not provided, 'default' + will be used, and the user will be prompted to select an account + if they are signed in with a Google Account and Google Apps + accounts. + language: str (optional) An ISO 639 country code identifying what + language the approval page should be translated in (for example, + 'en' for English). The default is the user's selected language. + btmpl: str (optional) Forces a mobile version of the approval page. The + only accepted value is 'mobile'. + auth_server: str (optional) The start of the token authorization web + page. Defaults to + 'https://www.google.com/accounts/OAuthAuthorizeToken' + """ + return generate_oauth_authorization_url( + self.token, hd=google_apps_domain, hl=language, btmpl=btmpl, + auth_server=auth_server) + + GenerateAuthorizationUrl = generate_authorization_url + + def modify_request(self, http_request): + """Sets the Authorization header in the HTTP request using the token. + + Calculates an HMAC signature using the information in the token to + indicate that the request came from this application and that this + application has permission to access a particular user's data. + + Returns: + The same HTTP request object which was passed in. + """ + timestamp = str(int(time.time())) + nonce = ''.join([str(random.randint(0, 9)) for i in xrange(15)]) + signature = generate_hmac_signature( + http_request, self.consumer_key, self.consumer_secret, timestamp, + nonce, version='1.0', next=self.next, token=self.token, + token_secret=self.token_secret, verifier=self.verifier) + http_request.headers['Authorization'] = generate_auth_header( + self.consumer_key, timestamp, nonce, HMAC_SHA1, signature, + version='1.0', next=self.next, token=self.token, + verifier=self.verifier) + return http_request + + ModifyRequest = modify_request + + +class OAuthRsaToken(OAuthHmacToken): + SIGNATURE_METHOD = RSA_SHA1 + + def __init__(self, consumer_key, rsa_private_key, token, token_secret, + auth_state, next=None, verifier=None): + self.consumer_key = consumer_key + self.rsa_private_key = rsa_private_key + self.token = token + self.token_secret = token_secret + self.auth_state = auth_state + self.next = next + self.verifier = verifier # Used to convert request token to access token. 
+ + def modify_request(self, http_request): + """Sets the Authorization header in the HTTP request using the token. + + Calculates an RSA signature using the information in the token to + indicate that the request came from this application and that this + application has permission to access a particular user's data. + + Returns: + The same HTTP request object which was passed in. + """ + timestamp = str(int(time.time())) + nonce = ''.join([str(random.randint(0, 9)) for i in xrange(15)]) + signature = generate_rsa_signature( + http_request, self.consumer_key, self.rsa_private_key, timestamp, + nonce, version='1.0', next=self.next, token=self.token, + token_secret=self.token_secret, verifier=self.verifier) + http_request.headers['Authorization'] = generate_auth_header( + self.consumer_key, timestamp, nonce, RSA_SHA1, signature, + version='1.0', next=self.next, token=self.token, + verifier=self.verifier) + return http_request + + ModifyRequest = modify_request + + +class TwoLeggedOAuthHmacToken(OAuthHmacToken): + + def __init__(self, consumer_key, consumer_secret, requestor_id): + self.requestor_id = requestor_id + OAuthHmacToken.__init__( + self, consumer_key, consumer_secret, None, None, ACCESS_TOKEN, + next=None, verifier=None) + + def modify_request(self, http_request): + """Sets the Authorization header in the HTTP request using the token. + + Calculates an HMAC signature using the information in the token to + indicate that the request came from this application and that this + application has permission to access a particular user's data using 2LO. + + Returns: + The same HTTP request object which was passed in. + """ + http_request.uri.query['xoauth_requestor_id'] = self.requestor_id + return OAuthHmacToken.modify_request(self, http_request) + + ModifyRequest = modify_request + + +class TwoLeggedOAuthRsaToken(OAuthRsaToken): + + def __init__(self, consumer_key, rsa_private_key, requestor_id): + self.requestor_id = requestor_id + OAuthRsaToken.__init__( + self, consumer_key, rsa_private_key, None, None, ACCESS_TOKEN, + next=None, verifier=None) + + def modify_request(self, http_request): + """Sets the Authorization header in the HTTP request using the token. + + Calculates an RSA signature using the information in the token to + indicate that the request came from this application and that this + application has permission to access a particular user's data using 2LO. + + Returns: + The same HTTP request object which was passed in. + """ + http_request.uri.query['xoauth_requestor_id'] = self.requestor_id + return OAuthRsaToken.modify_request(self, http_request) + + ModifyRequest = modify_request + + +class OAuth2Token(object): + """Token object for OAuth 2.0 as described on + <http://code.google.com/apis/accounts/docs/OAuth2.html>. + + Token can be applied to a gdata.client.GDClient object using the authorize() + method, which then signs each request from that object with the OAuth 2.0 + access token. + This class supports 3 flows of OAuth 2.0: + Client-side web flow: call generate_authorize_url with `response_type='token'' + and the registered `redirect_uri'. + Server-side web flow: call generate_authorize_url with the registered + `redirect_url'. + Native applications flow: call generate_authorize_url as it is. You will have + to ask the user to go to the generated url and pass in the authorization + code to your application. 
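+
+  Illustrative sketch of the native applications flow (the client id,
+  client secret and scope below are placeholder values):
+
+    token = OAuth2Token(client_id='your-client-id',
+                        client_secret='your-client-secret',
+                        scope='https://www.google.com/calendar/feeds/',
+                        user_agent='your-application/1.0')
+    print token.generate_authorize_url()  # Ask the user to visit this URL.
+    token.get_access_token(raw_input('Authorization code: '))
+    client = gdata.client.GDClient(source='your-application/1.0')
+    token.authorize(client)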
+  """
+
+  def __init__(self, client_id, client_secret, scope, user_agent,
+      auth_uri='https://accounts.google.com/o/oauth2/auth',
+      token_uri='https://accounts.google.com/o/oauth2/token',
+      access_token=None, refresh_token=None):
+    """Creates an instance of OAuth2Token.
+
+    This constructor is not usually called by the user; instead,
+    OAuth2Credentials objects are instantiated by the OAuth2WebServerFlow.
+
+    Args:
+      client_id: string, client identifier.
+      client_secret: string, client secret.
+      scope: string, scope of the credentials being requested.
+      user_agent: string, HTTP User-Agent to provide for this application.
+      auth_uri: string, URI for authorization endpoint. For convenience
+        defaults to Google's endpoints but any OAuth 2.0 provider can be used.
+      token_uri: string, URI for token endpoint. For convenience
+        defaults to Google's endpoints but any OAuth 2.0 provider can be used.
+      access_token: string, access token.
+      refresh_token: string, refresh token.
+    """
+    self.client_id = client_id
+    self.client_secret = client_secret
+    self.scope = scope
+    self.user_agent = user_agent
+    self.auth_uri = auth_uri
+    self.token_uri = token_uri
+    self.access_token = access_token
+    self.refresh_token = refresh_token
+
+    # True if the credentials have been revoked or expired and can't be
+    # refreshed.
+    self._invalid = False
+
+  @property
+  def invalid(self):
+    """True if the credentials are invalid, such as being revoked."""
+    return getattr(self, '_invalid', False)
+
+  def _refresh(self, request):
+    """Refreshes the access_token using the refresh_token.
+
+    Args:
+      request: A callable which takes an atom.http_core.HttpRequest and
+          performs the HTTP request, such as an atom.http_core.HttpClient's
+          request method, or something that acts like it.
+    """
+    body = urllib.urlencode({
+        'grant_type': 'refresh_token',
+        'client_id': self.client_id,
+        'client_secret': self.client_secret,
+        'refresh_token': self.refresh_token
+        })
+    headers = {
+        'user-agent': self.user_agent,
+    }
+
+    http_request = atom.http_core.HttpRequest(uri=self.token_uri, method='POST', headers=headers)
+    http_request.add_body_part(body, mime_type='application/x-www-form-urlencoded')
+    response = request(http_request)
+    body = response.read()
+    if response.status == 200:
+      self._extract_tokens(body)
+    else:
+      self._invalid = True
+    return response
+
+  def _extract_tokens(self, body):
+    d = simplejson.loads(body)
+    self.access_token = d['access_token']
+    self.refresh_token = d.get('refresh_token', self.refresh_token)
+    if 'expires_in' in d:
+      self.token_expiry = datetime.timedelta(
+          seconds=int(d['expires_in'])) + datetime.datetime.now()
+    else:
+      self.token_expiry = None
+
+  def generate_authorize_url(self, redirect_uri='oob', response_type='code', **kwargs):
+    """Returns a URI to redirect to the provider.
+
+    Args:
+      redirect_uri: string, Either the string 'oob' for a non-web-based
+                    application, or a URI that handles the callback from
+                    the authorization server.
+      response_type: string, Either the string 'code' for a server-side or
+                    native application, or the string 'token' for a client-
+                    side application.
+
+      If redirect_uri is 'oob' then pass in the
+      generated verification code to get_access_token,
+      otherwise pass in the query parameters received
+      at the callback uri to get_access_token.
+ If the response_type is 'token', no need to call + get_access_token as the API will return it within + the query parameters received at the callback: + oauth2_token.access_token = YOUR_ACCESS_TOKEN + """ + self.redirect_uri = redirect_uri + query = { + 'response_type': response_type, + 'client_id': self.client_id, + 'redirect_uri': redirect_uri, + 'scope': self.scope, + } + query.update(kwargs) + parts = list(urlparse.urlparse(self.auth_uri)) + query.update(dict(parse_qsl(parts[4]))) # 4 is the index of the query part + parts[4] = urllib.urlencode(query) + return urlparse.urlunparse(parts) + + def get_access_token(self, code): + """Exhanges a code for an access token. + + Args: + code: string or dict, either the code as a string, or a dictionary + of the query parameters to the redirect_uri, which contains + the code. + """ + + if not (isinstance(code, str) or isinstance(code, unicode)): + code = code['code'] + + body = urllib.urlencode({ + 'grant_type': 'authorization_code', + 'client_id': self.client_id, + 'client_secret': self.client_secret, + 'code': code, + 'redirect_uri': self.redirect_uri, + 'scope': self.scope + }) + headers = { + 'user-agent': self.user_agent, + } + http_client = atom.http_core.HttpClient() + http_request = atom.http_core.HttpRequest(uri=self.token_uri, method='POST', + headers=headers) + http_request.add_body_part(data=body, mime_type='application/x-www-form-urlencoded') + response = http_client.request(http_request) + body = response.read() + if response.status == 200: + self._extract_tokens(body) + return self + else: + error_msg = 'Invalid response %s.' % response.status + try: + d = simplejson.loads(body) + if 'error' in d: + error_msg = d['error'] + except: + pass + raise OAuth2AccessTokenError(error_msg) + + def authorize(self, client): + """Authorize a gdata.client.GDClient instance with these credentials. + + Args: + client: An instance of gdata.client.GDClient + or something that acts like it. + + Returns: + A modified instance of client that was passed in. + + Example: + + c = gdata.client.GDClient(source='user-agent') + c = token.authorize(c) + """ + client.auth_token = self + request_orig = client.http_client.request + + def new_request(http_request): + response = request_orig(http_request) + if response.status == 401: + refresh_response = self._refresh(request_orig) + if self._invalid: + return refresh_response + else: + self.modify_request(http_request) + return request_orig(http_request) + else: + return response + + client.http_client.request = new_request + return client + + def modify_request(self, http_request): + """Sets the Authorization header in the HTTP request using the token. + + Returns: + The same HTTP request object which was passed in. + """ + http_request.headers['Authorization'] = '%s%s' % (OAUTH2_AUTH_LABEL, self.access_token) + return http_request + + ModifyRequest = modify_request + + +def _join_token_parts(*args): + """"Escapes and combines all strings passed in. + + Used to convert a token object's members into a string instead of + using pickle. + + Note: A None value will be converted to an empty string. + + Returns: + A string in the form 1x|member1|member2|member3... + """ + return '|'.join([urllib.quote_plus(a or '') for a in args]) + + +def _split_token_parts(blob): + """Extracts and unescapes fields from the provided binary string. + + Reverses the packing performed by _join_token_parts. Used to extract + the members of a token object. + + Note: An empty string from the blob will be interpreted as None. 
+ + Args: + blob: str A string of the form 1x|member1|member2|member3 as created + by _join_token_parts + + Returns: + A list of unescaped strings. + """ + return [urllib.unquote_plus(part) or None for part in blob.split('|')] + + +def token_to_blob(token): + """Serializes the token data as a string for storage in a datastore. + + Supported token classes: ClientLoginToken, AuthSubToken, SecureAuthSubToken, + OAuthRsaToken, and OAuthHmacToken, TwoLeggedOAuthRsaToken, + TwoLeggedOAuthHmacToken and OAuth2Token. + + Args: + token: A token object which must be of one of the supported token classes. + + Raises: + UnsupportedTokenType if the token is not one of the supported token + classes listed above. + + Returns: + A string represenging this token. The string can be converted back into + an equivalent token object using token_from_blob. Note that any members + which are set to '' will be set to None when the token is deserialized + by token_from_blob. + """ + if isinstance(token, ClientLoginToken): + return _join_token_parts('1c', token.token_string) + # Check for secure auth sub type first since it is a subclass of + # AuthSubToken. + elif isinstance(token, SecureAuthSubToken): + return _join_token_parts('1s', token.token_string, token.rsa_private_key, + *token.scopes) + elif isinstance(token, AuthSubToken): + return _join_token_parts('1a', token.token_string, *token.scopes) + elif isinstance(token, TwoLeggedOAuthRsaToken): + return _join_token_parts( + '1rtl', token.consumer_key, token.rsa_private_key, token.requestor_id) + elif isinstance(token, TwoLeggedOAuthHmacToken): + return _join_token_parts( + '1htl', token.consumer_key, token.consumer_secret, token.requestor_id) + # Check RSA OAuth token first since the OAuthRsaToken is a subclass of + # OAuthHmacToken. + elif isinstance(token, OAuthRsaToken): + return _join_token_parts( + '1r', token.consumer_key, token.rsa_private_key, token.token, + token.token_secret, str(token.auth_state), token.next, + token.verifier) + elif isinstance(token, OAuthHmacToken): + return _join_token_parts( + '1h', token.consumer_key, token.consumer_secret, token.token, + token.token_secret, str(token.auth_state), token.next, + token.verifier) + elif isinstance(token, OAuth2Token): + return _join_token_parts( + '2o', token.client_id, token.client_secret, token.scope, + token.user_agent, token.auth_uri, token.token_uri, + token.access_token, token.refresh_token) + else: + raise UnsupportedTokenType( + 'Unable to serialize token of type %s' % type(token)) + + +TokenToBlob = token_to_blob + + +def token_from_blob(blob): + """Deserializes a token string from the datastore back into a token object. + + Supported token classes: ClientLoginToken, AuthSubToken, SecureAuthSubToken, + OAuthRsaToken, and OAuthHmacToken, TwoLeggedOAuthRsaToken, + TwoLeggedOAuthHmacToken and OAuth2Token. + + Args: + blob: string created by token_to_blob. + + Raises: + UnsupportedTokenType if the token is not one of the supported token + classes listed above. + + Returns: + A new token object with members set to the values serialized in the + blob string. Note that any members which were set to '' in the original + token will now be None. 
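+
+  Example (illustrative sketch; `token' may be any of the supported token
+  objects listed above):
+
+    blob = token_to_blob(token)       # Store this string in your datastore.
+    restored = token_from_blob(blob)  # Later, rebuild an equivalent token.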
+ """ + parts = _split_token_parts(blob) + if parts[0] == '1c': + return ClientLoginToken(parts[1]) + elif parts[0] == '1a': + return AuthSubToken(parts[1], parts[2:]) + elif parts[0] == '1s': + return SecureAuthSubToken(parts[1], parts[2], parts[3:]) + elif parts[0] == '1rtl': + return TwoLeggedOAuthRsaToken(parts[1], parts[2], parts[3]) + elif parts[0] == '1htl': + return TwoLeggedOAuthHmacToken(parts[1], parts[2], parts[3]) + elif parts[0] == '1r': + auth_state = int(parts[5]) + return OAuthRsaToken(parts[1], parts[2], parts[3], parts[4], auth_state, + parts[6], parts[7]) + elif parts[0] == '1h': + auth_state = int(parts[5]) + return OAuthHmacToken(parts[1], parts[2], parts[3], parts[4], auth_state, + parts[6], parts[7]) + elif parts[0] == '2o': + return OAuth2Token(parts[1], parts[2], parts[3], parts[4], parts[5], + parts[6], parts[7], parts[8]) + else: + raise UnsupportedTokenType( + 'Unable to deserialize token with type marker of %s' % parts[0]) + + +TokenFromBlob = token_from_blob + + +def dump_tokens(tokens): + return ','.join([token_to_blob(t) for t in tokens]) + + +def load_tokens(blob): + return [token_from_blob(s) for s in blob.split(',')] + + +def find_scopes_for_services(service_names=None): + """Creates a combined list of scope URLs for the desired services. + + This method searches the AUTH_SCOPES dictionary. + + Args: + service_names: list of strings (optional) Each name must be a key in the + AUTH_SCOPES dictionary. If no list is provided (None) then + the resulting list will contain all scope URLs in the + AUTH_SCOPES dict. + + Returns: + A list of URL strings which are the scopes needed to access these services + when requesting a token using AuthSub or OAuth. + """ + result_scopes = [] + if service_names is None: + for service_name, scopes in AUTH_SCOPES.iteritems(): + result_scopes.extend(scopes) + else: + for service_name in service_names: + result_scopes.extend(AUTH_SCOPES[service_name]) + return result_scopes + + +FindScopesForServices = find_scopes_for_services + + +def ae_save(token, token_key): + """Stores an auth token in the App Engine datastore. + + This is a convenience method for using the library with App Engine. + Recommended usage is to associate the auth token with the current_user. + If a user is signed in to the app using the App Engine users API, you + can use + gdata.gauth.ae_save(some_token, users.get_current_user().user_id()) + If you are not using the Users API you are free to choose whatever + string you would like for a token_string. + + Args: + token: an auth token object. Must be one of ClientLoginToken, + AuthSubToken, SecureAuthSubToken, OAuthRsaToken, or OAuthHmacToken + (see token_to_blob). + token_key: str A unique identified to be used when you want to retrieve + the token. If the user is signed in to App Engine using the + users API, I recommend using the user ID for the token_key: + users.get_current_user().user_id() + """ + import gdata.alt.app_engine + key_name = ''.join(('gd_auth_token', token_key)) + return gdata.alt.app_engine.set_token(key_name, token_to_blob(token)) + + +AeSave = ae_save + + +def ae_load(token_key): + """Retrieves a token object from the App Engine datastore. + + This is a convenience method for using the library with App Engine. + See also ae_save. + + Args: + token_key: str The unique key associated with the desired token when it + was saved using ae_save. + + Returns: + A token object if there was a token associated with the token_key or None + if the key could not be found. 
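+
+  Example (illustrative sketch; assumes `users' is the App Engine users API
+  module and the user is signed in):
+
+    user_id = users.get_current_user().user_id()
+    gdata.gauth.ae_save(token, user_id)
+    # ...later, while handling another request from the same user...
+    token = gdata.gauth.ae_load(user_id)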
+ """ + import gdata.alt.app_engine + key_name = ''.join(('gd_auth_token', token_key)) + token_string = gdata.alt.app_engine.get_token(key_name) + if token_string is not None: + return token_from_blob(token_string) + else: + return None + + +AeLoad = ae_load + + +def ae_delete(token_key): + """Removes the token object from the App Engine datastore.""" + import gdata.alt.app_engine + key_name = ''.join(('gd_auth_token', token_key)) + gdata.alt.app_engine.delete_token(key_name) + + +AeDelete = ae_delete diff --git a/gam/gdata/analytics/gauth.pyc b/gam/gdata/analytics/gauth.pyc new file mode 100755 index 0000000000000000000000000000000000000000..2440c18d4fed0a7b1ef16604d4ad1dd6dcf6df26 GIT binary patch literal 56592 zcmeIb3y@sdc^-CpW-!1E-dF&;;9__0f{UF22@DqC5+uO|$-&^s#SWHt1_U|gs@2o? z0@FZG_iW#u9Y9=ABC!%lJ*=0Vl;tX0sfrazm8w)qvXwY?BC+HOMd#rvNsbenw&hgf zq~wT696M#jlJkAvf6l$Pdm6jI>{5@ivD;_v<J|N7&;LIE8UE+n$3MKkHd}Z8-zffm z3cu_pMx0BWTR>WI$&hPS+yXA_ddMvdh3jFrFdVK&+`>q>-sBcGh3iqbu-V<+6>hh< zg)Q!S>%1FD$K364*ZC{1QmNeCtM|jMIqsH~rE>lCpUd<72h8K^-^v2;?sKh*`<(x7 z#5Fg$<!x?Zo2y)JJ)3{`?eHBkY!4auU4j<wa0%MB(<PhT0-A?fce}=8F8Pv6wz^Zv z_q${V{``PTcHz&LU9ty%zT%R7_;cSSkK@l<E;)cdKj@OD@aKnI@(lib)g{m2&(~b? zJpMfCk_r6zx=W7W&kwug82<c-OOE5ukGkXp{`{CrUc{e2>ylIW^W!f0s5`Z=$0Z+g z>0|B&2K}^q%eni`eSQI>^l_K&)zg>qr~6!TCOrK_{&c@fKFP`B|DSSiVXB^R=@V|@ zfJ>&CY2ke?eV<!E1JAl-#=V6(KIqbeZs94HoO9{Zs^olL+%qn@pwA+gKI_s$F5T>I zz2Dt>&LtP!eYEnDOD?<Q)9x({?O|QNtU{l6>GN)3!X;Pq`J_u;38l~Gr5|z0RlWXz zOJ3E*2VL?RmmX0o;ym9`m(00^V=kF@3sWw+7CQJ@b?|uU33`3qy#=6rq4(kE^zejB zJ|DhC&ILU@>5@O=7Cz*XFSvyl{o53`s0Eo{Q_nu^QoQ;e_3V^Os%q;;T=KoT_^3;2 zy7(B!W8t(*7Tv<fSyYN^RD=uM)>YI?E=iR6j7w5oe8Q!la0{5V8@m0JOF!inrd@K= zEntq8+`^1Y8gAj7OKycByv>|~jq@&Py7asTY&m~=!6mKmw4Fb_=#mweUQpiG^QV}> zPAE6apJGzGyjl3POICI9vP<63#TA!caSN}wWKFlTE}eA?S6y;Px39Y7t}Z^Ku797# zEOu$^7Uo>?MZKDLDP|G_{HAU{>(bA<h3hW4r`ylD^mA@O6E!#a6U0n^f5^F4JMA|b zNt#8q)$UT%Nt?B9nnd09?X(ruo3$+Ct5y=-SZ&q2jdm-GZnQhm#diB<GmXyGy0vKf z)yvt``1pL!C!KVqlV)kF8`YxKEbUBK1?^7b`)VvEYTt;uOAS0-SwU@4tzJ*F43E-A zXFNf%Q_;0-wbpFj<+Ch}YMnG{wYyQhld6SDqm$OVc)hsVjTY~snv3UVnhn<2Uc7}@ zXp-GuY2R$L)VlF$^l@%=(Qf9_`f4Zbd32R`mzHaF%c0vitJ&TBTKh_R^Jbd#<?4N& zXCJ?ikJ#E$qrRjbv~P4Vt{9LcYP8t*W}|N7QEw+{G+E1{_R30|?=dL1R<mxj*1$Nm z#xV-fO%BYlmAl=gcI#N(I@@e4c50owM^rpB-Do$P?X^bh=IQbAn`tZUV7RKB+-kPE zSY7Qj$9cnvNUNRn_0=@%R=b=#Okw$R6}Xf(SJF;t@{6vh6KG^sdjqor2x+!!i9wMS zXVZ|eqiokSCi;8}Q;|mOM%Hb2(y0hlVsf*k_G&YU+AYp!>c{6sbho{#R;*^|4n_dr z;=AidrBo<-TT82K1OKv7<BflVf3uSnfO>bF@ZMNnX{NWbcFUfP;n_;9lcm+yvu5@b z?ru$GpSmf~o~pN(qsdRFH*Q38_0=RvJ4dqJ$YFplkWydIWIOQG1CcG8GZ{(0fF#PE z$K_JDyONzgetd0hZK^2!c)iw4TS={R{6?B4+41bK&-#uzyI8ia-pmdaA1v0gv|Kjt zL%Giu+4@>g%GKS3TsQeZ1HmTyb}TmA0QwFG#FyCJjJ*DFHu1pH3DH?v>(rORcbtUc zX%_iU2aL_~M?>qMd0_c@9vhF9Y_iCDqtU9h>ODi^ABDEC5B^d1+=Cmkl-8QvyaSwA zdy+j}`X*`Dv-0Ts2`$fk26R|UvZXZb4$Aue!KD#%*4r)MjIPFCBfr*ZbkpYDtTeAH zz#>^~EnO_Pf~)L!cNir%mrmyGT@PKg{-GW)A9<cIR44}6D?3(OY4r3b&lsSYg}rUg z?g(1Q9B&5?OCJmK#wl1!vAjF_aIFD|ohpr9ABba2mVs;1PIkQ8Za1?~%oco-VH}rh zs5p#1nkp1K1;o=`T}(?bkA5D{KH%#?zSR|?8gyN>YGD4nSp$o$iJBh2n0BwJDA5?r zuP>~;y!$$}*lyp>d%}gQ2l-MQG=HsH^KQ3M&%Yw>tA&SU;NYP2%%}(Eunt-H1M;Z# zx7yuo1-}w<;ux0!llNgB*!pH0sIWW<#1Z8WtO<=QBe5d5FoQ7G`A(<ZspE<M5!0W= zFXQZ{V6rMMo+DOplSG+aKCV_<wdJ&06-B64m)pr|6StO2S(ykK&z$~(p=&@nKz$kG z1w{A-P5BpYl8R2P+>Q4k`&ao1s~D|x_u|jBR<;T<-T^D6d7ZzzlCEzhDTlQ(L0gB| zR`wv9MiSQm*h#du1a?8>l>7h~uNxiECE(u02*hx;Ra*oXhBrVGToEF)%04y8g&*%{ z@;DOBO|=S4^=|t51WNi5{4!3$Xa&7x1tN7Pr^UgkyAPNZwxir%h@fww<HPJY2TveS z?s%;gS=mv_d5+q3aBSIBoteXLh97Q6@@B=oiRqQBZ3F2^KVRzm>M~eNOrZe-yO5>L z8}XC4D5n46XWoNTqt)nEt3Sok*d|w*WXZ+I#|L;z?xLtfR}WuP{62gwjy)guU3hZo 
z>fHSKbJeSH_2Sj57q6VJlcsIKkGV81pT|XG6UPN(eF`H4k?GW%BQ6=n9|%w*_yd*0 zCU@%IX7}bM*SX@}994>sHdCU13HQ9(>|SaCiohaam^V{IottD;+5ZBNoe@ti?mb#; zCwGmx2UoaC$`v?V&uos5N6}oGMxMp!U+W|&#~}^0S6kieI2o%8?at*^)`cvPCev7m zQ%l|D=KEt~T~Qe2k!3KG5pY7S(WERAMbn*|*=hWZBB1z2^K_JTJGxzgNVC@NB*hcZ z*f$#WwDh38+9}`r)>OM##mW^;wpS>`)S5@4bLoxRYLlyRqJKUnjz(>8zMbfeTC<U; zb~Xriz++-;Cv4&-#6SD*CzP>XTj|!9YQ~xOSK5;UV%7@8vUM%vD8y-ZwbSwgLGJ$2 z{QRpnMdaMUeo731*e5waLs^5^Cou|qA6a*b22&`@@GS?kOB(zt$1&+{#ibt~A~~TM zKcLE%X)J|oRi29LZo^l)_1&};$zma&fW_~UJmE5J5j+HFs$O%cbr+^FK7^d{`;n;7 z@>F%X@O#Fw%T*JegmLQ<4F^H!Mk8$|VkB*~KKvT(vLU&=WkU#Y1VflhSzH?bK5}Jy zkhuNhmCgEZys~Y0yt2JgC-Nr>A?DWdJ&B7uALR1ACAGx4*Pe3su>LAsg;>#8g`MrZ z#j<{8?)B~NIu`bv@K1IfrTOVEsDi<spXPq)SnDPxms<Kj^oHUaCsks67&nrgT4aSp zR|G2oj35mHU`$dk7beK(N`yGa&%xdj{`_bC3zrC@$}uzWC-|0;OWq*C^n+Wa#lm&x zs~Na11eShueEW!2LjTk~kv6V`j7$qL(;47a=VnD}O~Qmc-)%y~&Fr>Y>vS^;@Zwr# z+bvIf2;O=31gD|>ReoI%ibsGyQka`*x7q`G>Q~iz^f!?~oRQo9SY_wXwn`RwI<Rzo z9A=JyMQhnh{sfeSV9<ekL#}~z*xh+U019Yy?%Zoj?%s$4g}DXlVgULP2N?vpUq29@ z4+(Iu^A_;>3@Olvd#QN8N$(Gk{E(<4*Jk(3jUl(6IkzY$GG7M)>-@OGv$<r<ev^{& zzct(o`Pbqr!ml>Rep;trou9ch9RbCHth8<d4#{06)Fc(q-bYFD85<0cy>QsKaKOny z2b9Aig}Pb;$~0Cs)87~Z8P<;{0Tjxqw+Xd^)C6T5U|I+sfO=!K*^Jm#cJPfxN(~T8 zFRCoMx(p`B8brFaxC)eRjAhYv=<5PVQc2Uu@^V6R#DJ1Vh0coKEvCSAE!Ha4UaH0{ zru7<Ec~Nj)j+8wsLHo!IBuB-!Q_%(VB)wCk(xy~E6u!wznwYVX`ea>GeCASPYHDiM zh$_nsqr&Tk`d;eJgy%yZv;YQ|y$*>?M@d5x9dvQz#yPuw27ux)C(EYhYeCmz$~|aE z+vqURAlv0(Kz+)A@~B=}lXECW=F8`dEQis`Kaf66Wt3WjDfTKjHp7^rFHUHvCInM# zCse@T)Nb4jZ4NN0%8%MmPXGvuFZw#k_W8-vH}~{Spg=@@QG+Vq6pY#$K&I6WeJ(FT zQx)crhcYtCR}v`B_^F7&tU&pji0k*y=E<d;X^`TJQBdzPGABO6wNCSlq?W_k;jF<I z(u|--vy((^`sxD2&@?`d+xP`OBOg8ASxw^;xKOs3B)Nx;2+N^M9z87UYTd8hd}g|& z`Qa&)j*vmAl!!mXlJR?Fc7wwrbl+doZ{bD8#<=~I(17YIV?g)2@prUx0KbtTb)cX8 z*ZB$mkqtSEUlt)rB}FIBy(JMf7uix+z(#`L_J{%*xj#*iCrsl0;)8e^e*(!yhC}id zTkd7ByhWe|Mz*N0L+e4g$WB$9=m{7pnUP*X2j@h_XLBGCStgawZ5Br7DaF1TU%ePl zzw*lT{N<VI^tJg*)hp9y&tFmV#R5Y~2$gYG^i15T2#9AxHquM6s?b<qod%|In0+Rk zI?SwrXfE?>P~sW9Q(s=uhg>~iPm1bw_3(T~d<NzHA%4<wj}DCukBx|PCD^bbtS>u? z#D=<yF(2Y+-v_y#<f^*`?qHL<1>z2-w&LdSC&=p*V8uB#pz(*u2NkZCDHs?H>uReZ z)eq~bb=%956Ev!I2!P6Z4cbw%SEkrs?KDdI|8A$+?tqjwirznb$4!IMZS(uB9CeB- zuCfm+EZ6i6(!7;LS=gTZ_^Y%;vSYWqlidNN$wHIW+bd}%S)TpK`K}jG=YSzSHeg8a zIcLH;*e4tReAsUcYa+mWVF^?yO&}-M#jVZmrgSLhUfbiK48Uh5=6?@3w}kU_Z;N{q z|6}nWg<^nh8S%yY>HW`p+cKihM${G*r`aZJxP|h{ODxQ8hH*3OI^Q81=pSu@x?#H* zK+A%AvU__fMHXr#%2{v^4Tk?7Tq3UKCOLxHuWSKSLPYL%R_k2|-e3qWi%s%Nku<Ls zEw47a4Gn(8RgbAMA}c0v9#l67cfmTk)&R%lF}@1D66J`UWSt^vRBN_#KFn7K@^5p@ zOr-s-^iG#d%2ae+>OMR&GgCC}fd^&XBqs-|C$p#LxH|1@4PsCzXMzPyu%x3=1BTa? 
z6>wCRl?F6$QDDVTN>BH^(BqpTF2jbX+#8B8YU=IuZsb`9GO3r^Ybm5c&!?2woZ2EZ z=$^&&on0g4FNOeI3Cy`DqtU82SIHBB_3rDoah?%K<x(4bW)eE16%u;QPNNvMd@%)w zlSa$6+kuVo^+g#+x@P*(Fo>`rVgg}ql$pBQMj<r~^ihcmi_qPu&mvWRm}uwZlr<Z$ z%!1wVeM7arvwW!!P#W2syu*F61Y3PNA`XK(3vw>;*A#FUb-Z2xCCsr`Gy?UvHLAgo zYzi%k3TU|}RMv3}E({+%_GL^kgu2|uE*q@L8P<o>)~B4S>-uKEAPqTqKs#k*&%|-E znT3GB=6Ft!GCEXifWbBg+8U51Q_+m<y<oTOdF0{KdXAwh{&74Ecn36s<L*b89hbeF zoTj?udnYUZX{Zj`%@hjd9@QPjaGBCNCJ!)%VMcj$GT<zFfiO&F(IO0P4KPFJ0h{Vj zQI<g`_+Hio5wOK^TV1)?0Y4#OfmN%ba@~FmOiCOuP8`H;hsJgk*C(chC4xD$jubt` z6-7TvdM!#Y@W$ww8|8c_0m@^W*!YeK4Do<6dNsZUnKWf+(uUd*Bg^*Nb!oMERRtRf z^vhN0(FPu1>A-UZy*I?tQ!!P^gh(lx4TE`l?AJQJpYTs6e0|G+AR^QnF;4N1;Npi7 z^_3a~rxbIPnYMbO$xq#+29<_IwyUQUFX|rALtB(p5tJ%y$2S^xO#UyyFNdS3)}tc& z$pBNb9q3+91x15#Ma<GeuGon{iF)uQX%8^KgNx#q1?>^g0=so!;d~JOd~jJ(mFH-c z#x9ml*wpNk{%}71!v~jBbj?qpaL@$NWQog~STD!JD`!2jGp`p`N@NV($YQ50%Ih6h zuttqA+Zu(t4#yf+1|!Y^Ia7@Rr~+Zm-G2Jfa!FUf)d)7sxk!R2=SEVhCR#5}h>P_2 zKu=yUrMr-m+#;3|@j&r;loMZIa*@d;CYPCfn#s#ZRLQM&qoqn9By{fDr=Q_VGP?$0 z@n`wPJReew_5euKqfE9wyN~7hdtf+@`5nb{58#IJH^h&d1U$gOX*ap^=TL_`y07xY z@Y9uTmC4HXAzXu3-wrux*HB2Ib*ILi@<cAX{0MFyiO~l8OtyHlyS>?UK1D8;G7awW z;s7+CP;x>~N>!%if`#qN4nm6E;`xDF<7A1!WRuSxCc_MF7d$ce>NyE@*>9tH7aD<` zGnT~j36o?>Y&hWsM!rDHBY-FXE^s`cyaf2^JWnrn%5u(-OTPF_#T9^`fs>0CYxUcp zKm<{&ihgEeQXX4+RYDB8O4P&5k@C0Tzo9gTxo#ci$Ug(c?a;rEt-~73k%El|`sp=; zIqK`hakTp<fqNB;2ZdB%>U%uo#JP!fV0oGu!*|rT+{lIMMMHXU7V&S+(n9Y>X`_s3 z*OuVjhtk*@(yR=g0S%a)WQp^OqKx(X_(m)<&o<%;3}cfl3u$}qa2Z(NFj{@XBpVFs z_x}hxJ?$?rTHXr~lAb=mP{XeWG<G0~>iISz1~+s{HxL7%(3rlzgsi}5_p~uk08M}f z$m<zRxj7wm;hvKQ*(?l*fr<Jr+UOZv<MwO*o1CXt7wL~VfE;+w#v{HXoN{$iWEu-+ zwME1qh=x}-TlHY%VAqb|RJjZWla8<eF}j8cAm!R+48Qz}tP%Rs>&lKslNxtwA&%M< zth<Ev`-pK7mv2zdY7>P;+6S7Rh#D}1p`|+7hR5rRZ5Vy~sluI#P$E7<L0*Szy%N9% zEi(YquMf?FnJ5U~CL&C-pra8}HZuM&&~5xGl38iXW@jf!Rvv<2#q@5FYSvV~tj!1{ zt#Iaraf(@9W1?!X2V+C(#V#FR3f@5m%qsB`$=+v4Zm8aM2S{ReRz^T<M)CKV_t>QS zU!$V)h91d;!lkyXr!Wf+1ct)KZv;XM<eGOWhRdT#u&s2hSCa6g!wb(f0Hyf^06x9^ zA$GFRB=lC_ho>ae_8M=2q2PK8V^EIJDIVqv#Y>(&Jrn#BD*$Ws*n<dQK%TiBY)$gy zgFBZO)K_=O%*BO>(F=Vw!1jZ%9>>gM_&c>Vd_+fiTQ!~@bHL27jpF<i%`01QOb6~H zKc$Cf8oV?ca$$Nfivw8`SsXTQCZO}tK)~hPe2I_?<(P9j%pxo;b-BrPzT`hPMMeR# zkKQN%!z}Cq*y#eZthsWHxD<Otuvuxyy#;&0D7UqwqvUFMjr~DT{SE8Y7;OjCZPtg8 zgx?IVvx7*CS%Nbc_@I?NQcc3Q3yuzju2_RZdqJ#K%_0gLf<42dN42X+Xa46oE?~Hz z-W@Xrk(IFqHSirw2{1^T3kJt)4*tFZg4~J_nQZ`Mhkp8!1_XMLmPic%SAo?^VX?h2 zl^Z1DPog~UY(R8FyTjbIv;FpmcpC2t!$ZtgY=pm-;ovXiBLR$7$Bh7iL!*&+78k}U zlX0y$YK`TYa)5t^+yN;4&90Kw@Nw`Ht3l9Q>s=bgFqWjBKuyTLkZFa)3PDGDE)(E- zk+9EGxP4)b<?4RZ90Z1_U>~*Lq9*nUT-16(Z$y-J7ypc^doHQa{u&;T)Kn6yfh_A3 z`U)$2QIO}oxj2D{g$N5iy$24wtO>kAq?e<zq9~0KZyT5o*Wff604(Av#+vcBS8yfU zpITc5?^iY6U?cD$H>^Mde-?GxirKoYSTS5Rf)h%EgcAuk5PP_iVfj%bvtP!`I9+Mh zq$mXZ+gKV-mF5{!RMvSF1f(>0Wgy`q3N~b_BEj4V{|4M%B5h_()4*CL?mJ3tkROOQ zjsG=%C9bpi#6zVucapdGJu3E*fv@VDX}*8|DMywA=|*Se-%|1{rCGW#^2O(P3oDMf z!ohq7*Gto#OHXhC{0anycnLX*#_8(etzd$tpJtorHa9x7ZDec{jDS0~RYr$$RG!<c zo<|-`GzDF>$}S}58lnKw0DiwQ;`Vz`*g9Mw2~HxJV9%xpp@CbXpl*rhrjKQfn=LOl znKstXt2vTgz>+Szg#w1O6rn+^uP&!8zYVR0=ThZ*vKROsIZcv_fpgT``>1mFha6R* zwlv>VHVjfnXpUs=+#gk(k$+`~DS+c*N>ODQnqCqF_TTV=slH;bK$S3-BLPay91kcI z$^KHm3+ad*rEH0I)Zic_K@&dq_0^{K&QYpq!A)hU2G6Ggq4-KE%E&a*XkiUDFb6hn zd1(gztu}C*34Ady2{d)5UOcwg=pKdm<7cL|y1WSf&b&BHDhEe2H*@)Nm{hMRBkoEd z2Sde7^&Wg>tk0_Bc((z^?JoR)fLR-mc5Sm*t+`ga3r!s8Wvc<ii!sYb1q#zQs3`!; z*{or2V7Ci(g%?}%(oRR0v9ksq{mce*K?fMK)kVys2?$fe!NhyDDz7CkDKrUfN$;>I zGTV@$eE%s^O1i?ZOZD>UT5FYlLMOaq4SIj#V;^}Tdf|oB`ain($~*@-qa<`Vi^Bir zfIxoHmbDamvU<~|jkV)M?PdSY8z+%S16eHkhhOhf<k3ScDKsQpmH$PK@OSSsgwJN> z3;!?T1`8i7KvS-Q#g6ofTOOj0%XXHd@!YjO6Lywk%lb&jz@6pLfx%b6=!SPJ41NRm 
zHgQt}@CWp5^ey-juW6f>``{dJpkmVv)oWl%!H9w#L7V0#H=}(SeQ)NlL`>u;;~FX$ zfOrNXhN=E-g^*2dK-b;w+||G@o_f&$KEn{XagaTrTTZ0ZqjVQK^`bPO-DU<kHJw`R z!f8ETVGd@yyv&Uw@_5qV+YY0i%yJlj%$=pLb{cPBeZHLD)wfHvY-zfcuv0?$xCiWa z;@4R?-EMr##5*a6R!BXa<m^cU%we!MS*?n$#Tnidty{4Q`Nnj@s47i11X<)^bM;$} zT>cILYcF0oc&H9n_CjI17qaxup`DPb<wZ58Y4PZKo`jtwSpP^T&*GPn9(yST!pV9m zWfKJziX|qoj0TAX;tJvyY@u*MCb!OnKd!AA{IqZYj^ts$&kXknCjdD<2SgT93O^Kg zAc;g1*TKM+hS3xi2lISi;nhYJ72*c|35SpqO7Uf<txmQhU!Bk4hU}O;GN7O)HAbtU zh-TMtMb4dK48O{L3B~nt?(1g&G7E9%pJ4udV|;tC-;ou}@jejqBk=3eQ~PiGrrxM0 z3B890vNh6>*14Gra-dAUTpN@YbJnEP5^)ROJp)8ae6G>wfwI7tF`-5%C=0}bwHthf z-=i|CVm1G14r|-CaSsDmAdxG7S};F@3&0vc4E88^+uaw?QCt+n-5REF56lCY!^3m0 zJx$`k59A^SZGoY8yrU=52W24{5#|@D!pjGcN?IVy50D1d2b#Z!C5?yU;yt&L4!~!L zoF*Xr);8^If>*iU!!&_bOHk)j5V7<N>t-!5M->op82fHEupy~nu)Op!6L_qFnfxLd zFyBxHi02TOquGT81=4P-y$0q*oQP*hFvs2<23KI|uqO&eAL#U;urn@9q<(-nyq@pu z6L>Fy+4W4OXSQ>d9D&bVI!d5dUhIWivo;m=Xb;)nPP*I%8(HFKO9hePWEYsxcg`vc z0597RIlWqws`9%i2kuSy0{#(n*H|pe*T&UiK)jVJtO`w*k$TV7VSARD%5vf(zPHpe zE8GkuwyNR|;JHDEJzHO6xXO+op<gkps5Nh%IfQk7EL*CbI3x+?%%Q{CA->avSL4tI zTJUeMx{CU8mgCRim5A#dOeuVPiOOwqjWDC<$RD>^!VfUfG6uQxlx7-N;&4nKc9t)e zkzfs?*)gn(F2+Kk2@;J5a*>R%Arvr!5`Ld3fXs#4{zT<;Wlv=fbVajfJ1%@t3t1KG z-NhWe7($_wnn|K-(EwdNJdYZG8Rh)r&<e<;o5prg9v#~!<t9bNxl7Y0GIsaUE7LQ& z6WiQwFV^{#{04i#?qoj$Y(PAM|K%Ti9~>+=NuZlC(nvZY@`^5uu<GIp{);G$n8@;A zGYgOnLL1#4bDb)EB5~{A(XkQ5!!~TM@9>(1z@w5cz^2-ZpA^o*3)^`KZ%O~+7v+YD z7w~b!7kGMa+}+(zgo$$CR*UwcGCaKAns6fk8FJX)A%~s6$ICaz$t)vpg?u*V<la_y z=OQ1Ud+n3%-ZuB<R-#gDtpO6f1q=$L2{w8w%C7h)7?f@9K>m~-)u_1g`X9LKmLu6l zT#JJ6IL~$uJJxJ;x4T<A*n)dI!jqkD<@+m1{(Pr;w=29wSScHu->tlfS3BHou>JpK z#g~S+JKgPJm;D31-LALDX?5Z4F23!Qp4{6dOS@&*?N)xjlI(W(cB|4o?#<oq7UD+j zX5+JGh7vv5kJp^0yeV@-zN?QpwCPRs1kK$oN>lWBk82(B!<g(1`H*SY?JsBY!-e)d z=5Fof$h?V4w=*9n6g4~sip3?B{W^HLi$1uE;WP>sL8^uz4`j)eZE`3P>s8XVfKWvq z3SH@X(h%g{KV2#b(5nopoergw)6qG3jjh|+0?kM#IR@#e3qy3TlO)A^kc*Ozl_Tdp z&cuyrKYj-;bPOpGv;}2<9<gGk+J&){4h{Ct@2_cqsKWz*hW`Oxs?h&0O6ec`CsAjo zN89plG%Z2HlDFwyXj;IqO2d+uHAvGU(gN+1-#DGZi5SWjpyMjhF^Mm+v2IjOuCn-( zubIIvZfx(6yXSnqxcnpI+x<pDzmNzj1A#3cKvQ=Ib>75@sTb%f8S$bvA-g-7k>KHS zG#5;;Z4uhB(44$Dd`2LH5(+TnK?trjrf?*~MaU+2m2_8S^8;+5;-BQNz}Ca75qhn> ze4v=zxshBQynH9KBLsOeWvEw9-4V>=$AbE=&p6`Maa<l8!u7tFAkOIcq+gP1aGP=% zHdi&9^go(1`;25B>zKE|Qhmez_24{{jX0+E*g8~}m@t3z#Oat=Hb(^_G^H%a>J9J3 zdU9g93k}N|24GX!Yn<{S3OLkk4;(N^aoz=+S_{sK6dJx)CFCV#jy)DTuP_^SpYv;o zW<j@jsTAy0zJ6pz@TK~(DwU}ANCRbAJlK{#3LVu(=fl~V!xnWVtKCS&Lw-r=^Ws=Z zJEUUdMb@BRut9GQWjUS=<MR07O#I<tZjV7iR$0uLe{eXPouo5g++;$p4wVcq|GVb0 zNZ!%3O$w=^w?Ofk7}!}jQkk}likb{Ll+|`W#Lx3nDn4Fcg#co#HS@s>%e*OJgN3Y3 z7CWmir5IDbr3TI}YTYI61&nXwu?j+9m$WlUj?t8@<QvO`NBp(9IFhvv_|G_Sip74s z<Ik`Nx~0gH7RFpnkv~nQIb(;?gcL1P^;CgpVqU3&+kww9!{%eD9fg7id22VC--i#? 
z(cQ>q9^8ofMx&p4x=T-6RloGA>~;JfvO(;V2VWG8?dw!blEVl)_Hg^C>Vhse&Y33J z;I35O<6yb7&$7^`_f*F9-!6D<K8D}+@Pkk>T-guL&25z(@ZsEEd9re7=xK->PvCA( z<!Su46MyM=gFM4ym1joADhG!5RkjV)$5ArJfZdqEFPj0FQa_C_6yQI>*V;9>tgX~j zfN3PZ4?P2Tcl<By0hxBO{`a?3z#rpVu(C|S^np8fV)wsBaR3v=?QKwSK(9d!bj3eG zw(x}9GQMwVoBR3?`0nmxXxC<U56q=FcJPDXy1{Ew&jHQH4zT$r!RC*P&Hsa-NJq!E zIxx6BAMNsAkmJW3gE5D6LEU*&A-9i`cX*Z@{N@}ViKR6z9gl8-?L`-)V3|XQ=VXwa zl|nrJ1&%9M+TrZ^!&&^(xQhQApAiM%nUv6`6PFM@$qCUSTSCA^Q#1cE9|bW@jDZ|s zw2I@u%$Hn1)N(UO2n&WIfy$t?9j`J6pW-gQi@&D$=HiCN1~#Pl`}pvSNL0HPaQt() z)vC!?F%<vsTGNU(Y5C{*S+QQZJcLE7J}3Jg|5YZ^mj4ppZ$ZMA=Bjeuq)GfM%=Xuj z6!PxFi@I7wxzd~iSIr3m>?+Pemke*iA{nM6j^!|l>wT4jLkI9=Tjih@)B$MIAG2ja zRYtxnzJZ(avhZdb#Vn@TX6pmYHppOR8)Oi0VZr1}2rHqygm4W2(h?$s$%I4k2G9q= z4_dr!l6H0w5CFa2y7N~A$q-v1F%mXSCe;nqd4+X{P)Q&$sMO|9SuX^i>tIk?7|mH1 z-&jUMQTzrIfg|TJ@T~C~Z|*S3!K(yQ9LoZxVr;`S#mgWdivK3x3UE|E?LRRAC;k$X z??<vB0HJu80|@oaIedJb@bNypc7TsySurSqbw&0StyQp&Qi^bF*m)t$zl#fas$o_E zAE&6MU?eGEI7E`d&;eUwS}<23o)mB=R2rwKIDskzf;m(n*JO0aRUuxn0jz4TTR|0K ziVfmad-2tv@fefF1G)+-GNejA_u2>D7XX;!giXlOfD0f1>@WssL018Z8pxNZmdfKl zN3{mD3(y?Us3<st=K;xJ_J<gpvov!W!#7X~?8R#Gh2BBd)5tz?I>#dtLoZuo1xkKo zCCkF%01<E!EbI~E*K>Bxyqd)b+GReaUb4E1EpO16dI2jJs>tgG$Q4(be`nk>Gx|JI z7MQC*V4ef}N@szHwEEPGIcpnOdgHgPxt^`DqWw*fV~yj6@X~G9+f9svl-iI_LM6DA z1<8t{OmNQjMpKB9Fr>O|*CfRSiZEVDto=#(?oP@y@ahb^9}0JN+V8>b=Y0tJVfdp{ zs!%{&&5$rV1dppB#+N?PQ<Dr>zx7BDz4xa<2dbCpzmOU%k2pFqXnFm<_2vzj4`E3F zDXIy)A!H2(5)MLQ?w~Ib#0SgG%P)x}`7m2s28$x`k6<0dKgr}rnfzHMKZc|vVdMZ5 z|2W_N1e2dak`d!VP@tl_MEQw5Y@opKV}Uuieaq5ZV(UrmVFhPQ)8(un^y1Vfg`P>D z3tB9eH*bd)K>T&)_<1HdjW*2si+CnM<hIf`;{O;AHiTC9;=>%RN(!}!`%6Ts5nj4| z!#<wZcKk(5uY!9W8m@f2lD&vLl9Apf+L8tfAwgsF^_i!bC^v<N{Sf;MA*JiPI!B)a z$PpF}KLVx~xZ1SZMBzZ%z?~quU;z<z9rig8XW8L^d@W1^@Vf&4jgs{OPh&b`;Chm5 zsE4sV-z<2zmal~@c)-H=Kr|Xuz?e*YKqS1VFNK<^WQJMLG`0Vg!_1;u@(n=Wpc~LR zgErs^D8?u`?$(RS52AqfeutinKEGIs0r^3Lwz;|PZWU@Jf~!ZZU^-AJQgjG9UeFcN z4!hY3?fn)o5ga;gy9&1NfU8PmfGde=Rcb(>O2IA6HI(xW>WeQV?qg{dI@cZ|p+(yZ z)TaYF4;Y-7I!Ep19!2sF8WRvgzVw5Z$Vc2MA$Nu_BuYi03Y?A|P{dSu)!zP=mxQdW z)An=ZO*XDKR4aX#Au#hMJsn-~+vAC)p((`XRZzb`SmbD9m~4&vNQ9QXW<*Z@dF_5j zENbT-8QYGKYf#j1kL}MQ&>kJRXcP2)wJ^mhx^`X|t33)OTx5F%e0llnKU3bvygugj zMB%CxDd&JtAJ9vwN*~*H-6!i8e*YJ*sX2aHPe)f758Wur;Cb>>DM`s@pl^;IvWx<* z)Bxc(gK2JXDJc!xo&*tSjF^OZDF|)Y3A>J#g8^zkaPTm%7V>it4xF&E{us%B4|ij$ zJMcJ~pm$dH=8)z&8ygL&cj+M7yMt}rF62uk<GgJnfvp;thZBKSc66ZIf=wpYgyJUN zK)4GrCmc)(&95SWthQchTkas!wh>frJczFmA5z~mV*$yJT|wNu?$Ye!V|;sw$z>!` z6_r{mLSAV0jPl=S?zfoyAd??r@>M1$nEW*+zsls-n0$lDH<?T$De(k<i_aFANYeTn zxDv0@2fo&apXXb$DIQ>rqxc%XhC$DXhnU30BdfIz;hUT@@P$`_>q3MkbL0LN`Gn_L z{!{y)$vQYhQ#nQRVF>3Yu0K080#0F1Wfw&BI<oifiaCx41@VTgWyD*~TA<mW!X(w8 zo)msW!~HIle-A|k64v9FQOmG{H)mx%2_uqTr>7QpO5IIaAgp*>|Axp}4sme=B}c(d zBhUvU{o*A`@NU=iO6Tz(fku`LW_FwiTTu4frpQC~=i&UYwKUBOyTl9idSHi1QU^{) z)4nPBxgWWWAofRpl#pKNBpPM^AkQI&A=XvC<ryx@UivVs)d!BJ%OObSZTZs4rx-{> zFFBzD?*KfXSLj>MC@LjPlE2;nr??*dXn#&=5&I)md>d)tp?%o|s`W7Bg{%N4IXh$% z5{WzI(8Rw1n0^pmu~=r8n1g5~{uUD=06C19Cp1~Kp2>{=HZz<>vZ1B?EIu5#lx;3- zC39JtcjaUIxKb<lXaqm1KE-Aen}ILJb||Jp?#?p#&pcx9x!1m+Z~#zKXz_wC?z|GT z7SbJq5y!)4@U!q-P@ggDFrRh~_GZ;XWd(12kpEw&=2{WgeCUARM0?J|l}NF#!m<mK zV~hB~yCS99=%r1iozn6nh96pQyi0>$D_MSB+I(Y7BQ2SN)t1**m>3M-1@5BuZumjH zLb7!f_{L(K_HP{i^XQlCO5Xa#TFzvoMx_T{Lwm-{ouV%_WHF--5weKFYD@MZDUZR# znNRLX=KC-c>gq%>X?@ucXi@K0ZWf*AYzHwr`2=B#dMLM<3fuj#VI0ISs?-Ttq*|l+ z3A(0m0XhJP0yKO9;u~NLj*ggfO*>^L=(YeBo9F`xF(<^<BSnb`3zDEUM>)nbZ!z`v z=a@~TltvT7UBKC?=(^^#DpR%TFF86J8jrpBQ0i+dQv1j_5=S@^Tkz5i?Gau4u8aT? 
zhejY^`2GR-3NpmM&4h~2ciC5hBzgM2uAb=c>cdO=(?MZ1PoS?8I0z&t(17H}aFH_$ z6zV0K6CMCLqht}lKFE>HZs^MfbTuk9?Pn;w;}-Dc^O_C-K;?4)p`lFs6wYqopG@6L zCGXcIzS}$R>_!-5^RgcVG#9K0g~K2dN{0tEmUb@XQH{Ka;pq)wQCd15#U$TRrEx7( za}PjmhLw!XDT@=(VU{T}Ta|M&g;4W$z7aX?%}I8&A>W9+oLpJxeI%Cbw1RRoOIl?H zt?OW`A2xS^|AJi`%!v0{LW3w&QSK`F$XYXzYm&pC3)vIPccGkZVn@_enygO1nd2IL zkc${tD8-_cKx3M>70ytqIBaJPfvzUVDKW@N5%CH|>a!&u%DNQF3Ov~5xvSBs4}A>8 z1T9t&W$c41hb3O4*0>3e*i{CkGN!W3gTWtLU{p*RVo<fRrnbC6Vc*x=rRJHmH9-i> z`-0=i5N~7&Jttr0&Ek}?13*Brj3YyExEuyAw8Z+f*v0-kgTYd_F5qw)3?YI`wig?S zND@RwulqIL(YITb=1oy_Y);e)>oVscG-SdG^73$rvs_m-djNNG1w>Q(wMuf{+_-SK zB;_@7>;|)x57Kvd=*o_3Y4zfyhmEYY!hln9q_Z85gId{(9MoX3;mh3G$4&HP*kNU1 z=yr?|MwI|>P|mJ{A@|Tp9gsLrX|AsWCrOcQk+Pcduh2x!8a=qg4B|dN&8wH0&^^V( z)C;(>eBu$LdRCGD6SMq%B%+z6Ns<`6A#xd^w45_FJ|dt)%_P+{XGl*-SKa%_n9@;V zKYnzi*kx=g=>r*x8T_)JMq-Qvm<NmitYmZrIOh4wa)@4t*TC^$K*`f01v3iT0G@V> zyNzI0HJWO%$IhPM)q$LKrNcy?@9Vw!xXB1C?vNa6#k`;iUTFPCXxuuUHO#2k$(&^^ zVP*mfmMBp8d&0J21sT!6vXx!N<R1YbH#`u^Bn0)lfc1&U1jhBQUxoGL;UnINeH8>g zj{%DR4iekVbBebD!(ZSt{2p14aRA>8=1>7!+}dq1ZZ<Q<4Vl!P2qw;-R276OGMIua zwvvz~7J@7){+D3^pn#qK<Q=q44%{{`z_Et?a@l85&otw_mN%DR{Ca6R2UP1GQEqXK zLk8N|ue)xHF;R+n9Zau1qGAwf8KoCf3HksP9`=<88BuD%E@poR6oR5!DNQTfwrhvP z2*;=u!a9by&>9ew<)~(RM)GJfzUn?haZiIx%W44E+g;NPP_wd_LKhGgK3Z*BM*$=$ zA+aI^ENE90d63k-w{4KxeQ*H*Z{LU&DsFB+L&VL{(_nD)vDr{@zWT^6akMPtB)U&h zsj<jgRf@ffJ?E$sttdtv_r6KTYzXtAQjf~oXHPZDaXd;|6L!92__g4>K^L7jhM*I| z-<1r%=zw`<+L!^DR#r%mK~C}*L%#QgCZ-IdJp*DBhHr~)QN^%_@zEIO?dn&nv7P-m z1DSgyaglh2k+XpdYGK)k+5LQwN%Bme@(-~z3p}B)RtzI^HNL!nlf>flpSgB^ZoWEy z_2u)k5`Rn({_ptJdw^mdM>#q4_UcDhiC`#ky2til*DC*8k64IZ9tp7+nX1ABDSB0f z#gC|lm@4}Nq<J*09@4UIUh`NYHY<KE15(nWzUl+L0ENgD$kl3}F%)aAs%P9An=Mw7 zi$7ZRn51$uVD%#S`3-T3Q!*Hlp;2K!oNyM(8HB6g(#A**3hChKT9^xC2K|`@YLs7u z7D=NpTJ|BV>7Y%rE(Hx{dC?T@E^sy)X#dc1VkF{Us*U^MotbPn0V{UMS>Jh3_7<x? zycbgTt(p$<;yf9KywZSQY;}8K!E3ODy^1-?l~-PiRHFZE8#PvHJo*usGnG)7Q&~bG z%>|E@&@zpC-Y^?~Qle~iF{8N?l3SPvcFZ%e=fK2<L$}`zMwr#PYElW6v|RLUoSvCE zKQ~uG1IF-*!xeu)avu{b=(Nj0b5xYLA!dY_!Y?zZO{%soOo8%K#1uR5-t7sPg4jY) zj~ZOr4P<cfZy~AUr+?JVp2aV_h$PjKCh3skmdR|AAV2_a5flKJLQfC{04U=v1T)*> z(k*TQ-VGMgY}}>eZec4t0xdwm78J-ID<*o%m{1i5#s(K+f6F`%^>cUy&POhiWPYTT zsp3}HiO4FjLtYHYRdU6knr^C?5H`Vfdp<;F<b71`P;mwRGYqVa{E%U2kEc1h{1JU? z3=ZSML<`e;Q@4bXkPe$#By^gaB}cr1`ZrK+Pz#ZL5KmDyh>*04a|j5F;y<uVkEACw zT=uYtqMHi7T}8GGC!bmj@ji8r#j_OWRN4u{1x%r|6rP6MmG*ZCOG&F@VLofoZEVwS zh1EIDu;NzP(Xd^INFiV5QBck(0_^03*S|0%_{0$ehvTW}XjSiSpIix3DkdCXKoo=x z<iIo>JdIU&1=G~B6bPabR9tv0%l4OwGB*(LL%?Y4I}kF^kf>I!$(r_>9aiKq5&K#y z1*??h<{Nv0hGd;?0(9e{v^}M=Ls{x0CKNWeN7DC{ikk3hAD%|PQFTug{jn%~KyyIU zK#j;_tQ%JGgMXYV9}>m5R|OLP1K<LSjP`9@34<9cdXPwB50dq(RKD|J#-&NIVvzY! 
z5tokT3dC6<jPlqH)R}yPC~cJG!hP>aV>x+CTZ~ZI@8ANc%v2=ia}1@0=`BDZyz?9M z>9W~9V)zPaMnDc2s0jEGrO1yR{&IW>-*>sgVe>US)c8bOuz9f^=ABJ6`@m%mxANe_ z=kR`@Ti=5UF%1pB9RWt>W)Awg!JB@PL_Y(Wa&rt<Ni@>KqWF-N#0h-o4>~k|tlaEv ze67dA@V>HOV`A&%Xk0Kw#V2J>+ZS%Eq&FcWSS?QvCc5`wFg!m5Y5=*%3tdJD@{sz5 z3B8B0);;h={+5h+Z}er)boZ#yiABLyn!ZqF^hjQ|WiewLdIsa|MKCOm4hFg}dW0NT zgX4Y4ft5B#^J1m@jqO*tz-RZFyv2lpBKkdOewEJ(CO|BL6hlRM-((g!*-Ycg9C8ox z8Cj(F$}dJ{`fqc2?MJC@oc=EKZh0F1F7$a}EdE&mUMZj<8>Il1bC)m9PS0P9&sSeL z5ApPzF`!TJHCexN=PyiOyE0!rclDL&%d-lTbydf(<-%+gOho*@GCxB%U-Vn$d!YIn zVj*&kzsukgWHkRO`^knuF*dw&e5|tVafF}PAOAf(s^h1B$YV_CQC##ep<s;ge_%q% zcv|pW1kBw`PLp5Co%fG~)BFl*duUGc?K7jKM@87;H2+l?PuI(7lFR<L6+M5?cMUz$ z8Tj8}=RXqc{0*)xVP`Fz2V&=6W4`Y;b{-yi2kb1VlIT08JZ|2Y|CCoJm=GJk7YLL{ z@?UbnvpONrv9bNy=s3UDzLLVRJCS}Y_g1MRpZ;M*7)Fh7VHlNI^zBY6MVDm)%eOp| zHhPPM*gWufV5um<3nimGIQ{@7<HC^i{n4QcZv1hfMzEz6p$J^wISm)kMj}0g4V4D% zcaW(R@vk7%|BNs2+Yr8d8r?Fl8E8UAL0zRgCAK8#n87c5KN9ka>YHr=L9ZCNhz_HA z3iF@WjZ&HN?^r?es3@_5UIlRS%2m(iz9&qf*q1wIdh_h9K*z)+!bQn_n%stYKyr4A z0ACvd9RkL41ReaqqCNKD17zPNkMz4J{vazN@djr!I<{RQEqWJtaK@^$HT?G{Z+*v_ z4q~i#Skpr9?`ln-dXLxi(syl5|7qqQzB@}g{yzXA1J|!WhwH8sE&eQT@q1*{&vIV+ zqgcPchl*b8Tfd~BqRS^`U&$9O$sl`_L2XhH<poevX@R-_8vhSVxsK?&P*#kPWJ9Hb zGeV%dz%`^EcpCBod%(2^5DJjDprMBveq7HLiH;exk&qYLT)NFIY^S(HK`i?=@S_?Q z4E?n1nsxmoVk1Edkl>W%PGb?e&Ni(>FFoQWgOl22@Ci`@t0?#>Qrm!1p>wT87QMNS z2Z$bi@thsH9-34rLUP6SBx4$x%@3Q^WXgrg1tuYAWwR))Vf$}Tfq3l)6dghB5H<wm z-9%YlyCJm-?ZC2W*PHNqfSGB9Az!lSL(vT#MFInR{;BOWxAr2^K78>Qzl!-zL-~Su zpzD-sDHUp_H2&UTkJJ9#GycS2JIoVJ@~Y|(M-5|z56o1LG8*2yZBAOwN3NS1s-k4{ znE%S<H+}iDFu%PaOMg%f!Px(f{R7~@>!g8$Z$IB&MQ<?@OSLzGdY^Odx7A1ykNcbO zi7>Z_Fk<j6!9Z~oXrT{UwEzNTqaY6+;MdsOjcqxF7K;<9sH0)GN3MD|Wh#vc$J@un zQ=B}`lCuwBQ9M*`L5c!EjNuckBI6&g;euT8yRd87Ze1ARq2FaWSj|*EvWo1<;a~%z zg(4jOPJ?L)$8e)cfZ%YRH&!+Df3%jYW~+cpx|nF6o8OVgX~1#VIQ<)uWA(ii2!oRX zGvEwjzkZ;qg!#j&?l$B#>dUY%uBVFzk{19Z1FTK9$ieeVF%MhO(a0VJFII#W<HYfw z&ue=kLjI85@09RawWl^@@khP|Lo#goc_%DXqel)r>CtLp|EuteYa#ZVg=;u!I?HQ# zacr6|2w{H9u=hyl)u@J|2uEC>MM2>p^&Z&8c@K%)I_<<RnAUL?1MpuxED~72EVzTV zK$<NH%GU51+?;TBI1i}Ekx^)#SpmJzKxmc#c5u7&Dlhnts6B>p%~)LwRX-l)>|kd! z5am*TR{t2&0>1_8r5*jE&`3F$DaKe2AZZ&Xw%x;ap3k~$DPiVRKt5g)R?Dqbi9L;q zKZ3$kMAe@LVsU*B;&J?cGWoxl#7zEgCjXqt|HI^8F!`5AHpJRcx$xQu5CbhlZeQ8n zjO&p)MFakj#qbj?HsCmbSAbMtu0d=pk6J#CH}f4F5aKtxmlut#c=^O&X{~~~0>6iq z^ajR1nTr2kd@m%WokG~)uOk&WFcw{iW3iDwsyIKY-_NcR5xQZC?v#_bWr7@D_UlN{ zH3;nuQ$;ZJ4(`EHA^HxlWK-9bGMqB|6r^~*ltt(iJTSpiUA%<79k({qWaO<(K*9>? 
z#Q+Gy?!FuL$86vh<%bYD6{3CxIEK{;HYV7l?j!OUbS1b}P#wl(_!ZTMKu$7-Ra%H0 zECW?lb}7%8{U!7!Ho!Myt{m@d4R~(cNO);&48XRwCzP8v%fx~d4xzzLPF=#qlt+dh z6AB-KTx4(-h5G=^`?4x&UtF_s8}QM!#U`lLGE($Qi!qV|yS&u6zK#j2X5@q8^B3at zbC)j9UVPSM_1k>-8%(B=XxS|zw6}`Np2edQF+YCi*xK6KG4Q9$$G{l)*gB1GB9~$l zSx}S-7M3?cO|%tvi7zfQ`4$tdJ*{JSWXfBLu>mcZA!Da2ydaN;!nr{VS5@_1jW+&S z{85%6h$je|e&N4ikz8oppk;e7&+^e0B!SV}&;noqaP87Wzr__0;ep#crT{AAIHq~H zvVC|{<vE2?*#>XpT`&h|eSljiHe3S8{rUiu8t4;td7K5xm*WXnh!lHJc0tX>=U6Fn zN~Q;U+lTB+U_*uRRSS#G>Gz!?_W{NgD*JJMH|7A>i7*X07-|A3^2`<pPoRT?fDj%u z1KQL&2TjNEs+pZ_itMZqG5XrIB-4l5c?jHMu*ZRc?J;EtD8t#fNYZAvChMOv!|6l_ z+|aqIyDP=4d4wNC5iwCDa3KoY$o$0r24DUx=Y<lL!}xE9*=`$DjL#3nKa3~wUL<r4 zmr;ftdKo|MEXY|!pe&mVp(lxer|=SEA8;6ghIyQW`2Y$+2ys9gLn6vWiV@9iT5+pz zGZ;{Ekiu)4CB3d-ptuRa!m#V*ODsf(KcIv;89K9<(Oj>#i+DEkWpY13`rtK@kLQNK zt_gP(4v?hTg`iSU_wyFpnBN+c6-_XFt=K7?cGzwmgY<NaQM8N)HQua9OXxtS68M(U z>WxFXlBN$HM%Gp*Z&UUwqqV#wwxE|A1EzwrNMM%2dKD&m+xAoTunt%WCWw`GOXYR) zg2$=#3RA`D3fd){AG}RD+_;uw1A72b7eY^IK>Q@YC#pR5<oa!l(dAZIl2WOKtYw=w z>JqE4xy%GAth64dLUVm@qo~eW1Dlb{f?0puVl<<kqO&%W-Y+4a8Sux1qW%hO{Lu$a zP|qZ&BU;20M*%7F8D|MB(RZb{tz=PH%Ji$3P5$g?gEYlbdTe>0HRgC=%fq1}q17hr zN0AAPMhm9OoMCh(`ux>vakcCjs>XSi(R=4IypV{Lw}TW*MJyt40chGbB=L4&B_n6# z9YvK^SA_DIho^pvynr5pd}XQlO3@%Vgmhz~w1O-e$J=SHwv@Nz(4bnj5ebr--CnC< z&!`0$CbA-7w1Anyj*k$9lQyvcVYyS0^dG-SbjjCl?-=|X7zu1M4z42Nd<tYt#EWt@ zG5idE*>$!~qBWUToOb(UxYd5F?1PlLNy{u2fZ1yhiIK)Ak3*$G6E>9sP3V<D8o~QX zk)|s#e7%gJSU)m`7(k#&@NxIS(*gC`EwpszcC^DE-hg0Qo<%es5N9xm*F!u-L~6Y6 zhAPC*h{~1kHU_VW^An6w#5F=~;SdHeiX%Ff0S6iwqllKy;h+WXDBgppxSQSGqx3Ak zwa;fjh_VDHA=qxSmnyix?E&O#IYhtR%$T=|I=Y`ev_SP=!H7KugirF6am)MpJ9g9| zFupJVanL$<bHQ^N)iY~Ak=kMi&^sE!a&djBEUd;_E?Ki&oDNiHj1z1XQjtWDe(TD* z1?lNc8L<VmpLa<w94tePo=Iir<UiMg?yGE5{QG!UVivUwxX$``*Fn6hPKJs9KW2Uw zNeK_X4WFuQhnMZR$Q-*xhqEbhIccZU?#xa;&PJVQa)Ak5v5Pf4sI`6kt(H0Y`Wb0c z<DA`%|22yumm2>8lRsoa-i2Lu@g}UWnDG}%qALea+>!C;<l!kgRE;p1BUFVelWSG& zuP$dd<E?nK5l%`zP5mU{ZxdCJ-*#h914RU`mncJ=FCwx&30{lAAjtb}uZ#eV4C`(; z{*Dx!Fa^h9{4xrqUxTDpk@Gox{P6#f`?_;qt+=l`Zt(;r`5{}6;DVKb405i9`#d^0 z9rjHCln1M?bBQbLYXQY9N!0_mCQtDdY$yL3i(+8;`KsZ<c?Wq-dHc+;(KPm3C)}|~ zH)aSs+?#{SY;sNN>)RitV$R>e8Kcegv@v~9(t+v?Y~W-DeQZ5Fqdo2>22O;tqWO_2 zZo;}uC+qgIj=|MDalRN7BI8z0&X=9oqlvpE1qPAiXWZ9-!7I!FoCaB&w|M>$V#PV$ zX#XVMRq`_CT41*^n+e4dRx{i9Ny03Ns;!G5+)BVRD>A{R1n$(L1O-swQVL6zr|NS2 ztiq4>rq;qE&`qv0Y_ADhzczykTi6pp_)b7OVSM|Lh4&dpZVsaHBk=*YoU6PLa75Pq znthu6#9MD!dy2Ua;`R}m$Hkt{+P~oPWE0#@`11|z!TENg-eIxLu<WOiP~RkR5Oc*) z!(3KaXckk$pP|Tr9PgDwMIW@!gAhHsCu;Ah@;2VP(MY*bo-Cg!FFoEgUKMb#;Rs8D zx&54V0;C&#A6!5wTb5ivxedASFA@_7X$&5_-MjI>;yVGbu=_qv@smuRWx@%0mwpQ0 zK@nFt?@!=jbm!>sFcI!EJ2#CT92?*H?AQm!o*f&j<DKjx<S9wTGR9^D6kzGz6d;^? z?MZHx2SW!y06451A~;~7G5{+4uZeEC@UmgF`_TEUURz<52pEK5w^?kocmunVNs>ML zIDx02x0h{EdUb?%s2RHPMhxiDms6g)OuG`ei&~IJi3p+p0UksdW_Vm_)NeO&I1C=l z;&2QgXZ%GdX^S=y_K^5`yg{=SrPxt|ozfEO^lcA3P8an_NTF6MI&tSsYt#w<_oV;( zp{XfyGg48!X{Q2kT+Ffgd0dI+D>PGerMa4!8kVp_PF|nYY|Z1*qpWm-di54!PZ(~I zjx0Mwz~u6FTkDt@%`xkm!B2B+vuhX{lh{Aac(gLfndUU_3UmC!D5}_JiaB1z{MwBB zjpX^>VXt<;zU~IO$KFAP9LsPl<N{Z4_zm@!SQQ*!ObrZG>$;=(Edmv<<Ksx<>i{31 zh`q2=hJ@aE4xaXDgvh9iCUwFJ0|-kyh}p#uC18UIJSwALqM2PRG+@gsMm#J04uHY( zz*5i(hi2QqL7gP<_5mP;N0R$6&WW#C`;y}4Fdef-4{MlLaiF4WJ_MNH%=Bmk1=D+? z7rLOtxFLAkgQBV#RMXuuz`c+I+)e=;fsPbM0Qc8%XLQ8`!_G5|(g8FBZUK}ZqT-x? 
zEtO{H(|Rr4rZEP&`s_!C^Q?B<2h^P?N!)|aI_iG3C=a?$uo*w2O!5un{uESd>bP-t ze<UvtW&mXUTgqy$!%h``(32H-4_S9qShco1_LOQHS#~O@GrrWRdn^KCAJWHgU_@Rg zPR2t)*Hw`1lquKw2nFp^K|ia4Y^MzbNYwVY(kGN2Q2IWlPbxjA^eK0L3@v(^GIIAB zKEmqXj~`{vajbo^I@vb?LURZnQfuPe9b3EN4Yjd^AYKcrf)Z(lGsIz2z_P+~Zav`x zTTyf7sv#r{qaavWcn+l5>1al>(3SShMr)qtLJI^tx4P)>=GGmSO;wi+tHIsQ_73V% z?qYj18tj_Kk9*(bpNM{$rCfFQVFLA=Bt{px%Sul`dn{VgACDotdrE(wjpo##rOQlh zbq$C9$&n>ZRPnrGT$`rNDub$!4gwD@TPVhlX+-=%gtmyVo>rL!Ahg<YJcbkRz#$_B zu33^$hbpt!Q6AUTp=1D_EJ&!7w<OG}5pQ{l80f88;W@M3m&dyXaY}udK;WP$(P<N4 zXdm~OTxFTB5bc=^1^~)GF_GhG|3t)Hv1&*L@}YqrQ~*)Kwe))Es~A;a-8y%oZhtcS zQxjG^(djnrVhI<@*0DcJ`g5|KJ%?|vwQ7rqfrX~!_4!6{!Z~R?2rNQQ(-NHF)g-W> zouB??-hPG20+WQv?=kr~CjX4d?=txtOyp)q*KP~lcY#;G&Ezvoeu2qvF(JcOQaw;% z@IX6uOr}u%WMkPdznu{mCV4!9sKE$?HptlCPAYkpwBRKcwHJ1-VIE~Q4q2bl|F9zM zAjMPLI>k4=R|p#nYniCXFv`fO$Un)c2yhd4QgEXnAqT*uP{jNIc~m|cRxGBu@ODIR zuM(~MxBRmSF-busLMB8DL=j(Pyr`Z`JVYMG<!5Mt4|$=wL*D<0!#60ePMZ&T$CURs zX#sl2yg2m=<xW%}2r}>15K@X(JnI(!Y|_E@Xp@?TGixD0^Z9mrumfM=EO0*I(AfC` zUtC`%Xv9X#G_~iqXrp@DGEhGH(AzfN?M4$x0pa%uif9cD)F#(-elM2ZxnePsy4WK3 z*eK^SbJ~J)UFx@kpkR3o{|N8>=_fil`$-(_*+~UW5_<D1?~k3J6ah!dj+}u__sE+l zTY?!w5PFUxoHQc>AkN&2WrOKNWHbWj9hQlN-*-!O4x|n<IT<KO0^||&AYsPaM^S?q zDn16RqJSImab6MS$5Xr_?ulQ(ReXZUNj?K&qvHw^3e1ho%<>_=eUWds;5Ivnq#zF^ zg5&|%^o%4C%eBsJ1cLy@Ah95m6(40;A{L?vU*<Cr29bmNycIbp5IzyPDUm+FZ}&1G zb=VMVPcI6?<Xm6E<zFH$e*w?kV~oAG1HXOv9W4EJRSqfCd-(f&<v_sS98Hed41Nk3 z53E#DE$s2C1z-W7^pZw_-^~J_<Cq;&)qD<@@hLnov6-W<hMM5KF+qkSo~$mfm>84& zC+sOJc1C-6$*IoUzD{Ib-+W5SYJ2?A(AKzyGUAWqx11*RY-8p+Hkz&$zR|zb*Jw64 zZ*+<qG@7JRN}bzVT<4SS&K0lC0XUs|?SKL=TU0yDP42ev`ZF*`9ddUh51f1LaXCmS z0~!ySLuZA`>lYNF+dHT6<jWb!xkT4q{e$45UWS$PCj+9Cj!#K5ojVIGz*BcEM3cO_ zeW<7`t#xp~r#BKKx>t2>=IX2G=Sp@dp}ay$_eE71kpwbDUbzqNT?%M=o(|{=ToPi! zA^ZtWpfMyOJQSpL|5w|x*EhgIoLE(0opa5Hf_xvLo}x9VG-5$;-K$N7=~F|6rM$7s z)Mhw1RzYNfYxSUl`kOU~(GTOmcr=Dsd3ZNZ;3AvNrr3Emr7&;7%M-&-9BVVA`o(v9 zhcr^7K%0%xr_BXI8_gXOp{Nz}T8a|@NCgrtH97uuKIDRiY6pI^(r3sN-!qlLIFI4N znXed?Oqz~z9x>A!!h1xbHxQt~m#iZ%LXUriD~yU=x07dF9H{JtTHo(V>{)F@+dyw% zN1L)rff0ok9d1~_ZVX5Yo)xPL$6M%x3+_}@gnSdX$&AYPD*KDWkh3W)V-Z1k#$bBF z;fdSy$_k^sK(pE>S8Lq>{<!pVH)X&7Le4wI77e%+cBqsSEh-A+OvMa9xr}*$*tnWW zgr_tJ0Y7Wk=}9hNDK#tD%_1Mmuu6YImIMcp8j1jgL!*jFJSdCc24if=L$YXtzR2hh za+F&IP!0u86oV@>VBKWpQ#G95i@gdkU(prO)CH_8o1$&9+?L5BQ+xqu_Q@kOUZGw7 zd^zuGZ$dAY89$dRN_Y=%J_&jCrFI*OfKi)qR6K($dp49A?F=S)T~hPm7lS`txDPw2 zp{M&M*0ZV6EAV3^f8u5Pb?OjQoB#8RY~DIPKr_;>7YVB|*Kw3fo6OWt3h17Z4CgkA zxzyQ@=y5a+MPvgevtk#3bh2nOM%W)8t|9RUVL_eRX<)?4b+HQs01bu0L-CF?v<^;s zpu*2me^LC<gUe;0QxCi-FU+b+zI@K_2JrRrsIlNEl#i*3dRy>8<vJjER{DLRq??JH zW()-7t^;omkX_0pQ?+Jy3V>dfX2qY_jpgm|T-Jc%os9EfkF6ZzbckIPyh1NEUOIpm z2+7KqMF~9-vD*;hBLnMK(r*&a5e>V&JY=^HHbtBS+dG3_HjkvhZ@_Fhev>tx_)Vf{ zf!}hZCy!M|)CcPU=@uGa+I4~Q_%pQk<ERhHZ-Gl8?Rg<0m-b2!kqdrWV($&|;|V|5 zL<4MR*k#b_73gsNWBVE4*46|0h8#ndVq}8-7lWh81O9+gtWgeDCfK9>p16z%SLC!* zd7nMN%JHMpkX{hvn*{_KJg5~p=#VB;`q3h?&&Bc})>$(VHpEiCfxu_la#I5J_#F9H zi96>xQDVn0@aiHG!6(tKDl&_w7+yi}WoG*{lb4x%0$}uRV3o}8cL=N7P>|a}Smmix z<3oa2LTe7OAH~fEAhuFKC7n>>bQ#*yvc~(NpmN>laoNU$MvonVLo`fJ9*eL8aqqBg zXvE|LkXEhiF|2J{-$NWTi767v=q&WkCjd$x$gJ;p@}HG_2z~I`7i(EMRocwQdw;H= z{IUj@TFNE;WcE~1BK8<eK}W!I616?>1|pbe$1U4LZ?;msbSE=`!q;vsjr<1##$frQ zp`Fh>u+%({RnLj?9Rf#5cb#(A{i9Gn)`OEinO_X3A7NCR-KCJ5U?DG!kQ>L4f!$GH z$kH(MOlk(jKK$~RQUpUAkoEn(wXC~T8Yv;g5N`AZMfD^5Y|%BC+iO`3e&FRYiII<s zea~CD9$0GqBRZhld7kHr8bK<vW3?4*RxNcn&v>}hkGIWflN6P*6P8LvE`2Nvw<c%m z!8y_UWtg?VejM+%+s!O|r)dfUivciJ0Blirbulf?6FPG|-^z-7t1H?<G4(e2v(TJ@ z#bWmlTs4nhOuN@q)I6Ct>u_l`_VstM-M*bSU0n_fXwbw!sdg8~v*aH@;x9lFjmmKJ zanB6d%FM#@VVMuev#z1bwUwTtcpt`C4K0l^mv(s;t=dMGt)|(eY%KD{*^KWj+}sbL 
z;j)*<yYNhb*lOMOGWNmKa}v^*odStmE?z|<VKcdf6I<F-tKCL3lkwzM)^5pxTf125 zH$mHxx9Z)ftJU~Z{61xJh*f`rbrCC{zr)i|FkA6HzNGvqw@#y-oMo6}i&$?ZQ6RRn zW6yH?(u>T;j$z76Sv2o+Qh3C)!YC*Uv(#3)^`+X(5;qmMK(EQ|87(uN<14t=E=k)u zb(yy>GpRCJL?YhS%N|z41t<!yA^KdMSrR5~CO^RB%S?Wa$zNmg%S?Wi$v2pM6A8{U z##K2~JA>WH#kB06uLyYmEwldjOy~w3{|`+5M<)LplmDH`x0(DyCjS^omt)Ju>ZBz< za2lNWf9C7GNUp75UtE&Tx2LsrPjAK7_cSX0N5~rg4wK(v^7oiXCHs54lI4x?Yx-NN zDW&C5Y4h!~Fo{V4sDky7qo&6n=T~Q#%<wC=_yQCWbJB2JXm{qk-X*@m*PO8FbPl{? zJX_pf0Y?<S%KI&Pe@?sv3DL$FEtc{UyiQk}=_d$^*}q0II?R(j{eMvJ@?_89p}lZ$ z+>1N>t?V5h9p+&gqnn0@TxGL<n}^DO`IPtkk5}O?T=DPt$o`T2oAz&kuI1U$;jz7= zBio)I9p1lvY;;udpmv_!b{Nn1?VlXmH@0j4hfv=L@?Jwc#wYQ^P6g!GvppZ)_W0QD zu`OdL3#C-HJvX*z?0xw5N#D-l{nPuO-~R;u`@3U1@a|)HH-+EG*vPg6_{-np_-+3G E0jafiQ2+n{ literal 0 HcmV?d00001 diff --git a/gam/gdata/analytics/geo/__init__.py b/gam/gdata/analytics/geo/__init__.py new file mode 100755 index 00000000000..1fcf604c2c2 --- /dev/null +++ b/gam/gdata/analytics/geo/__init__.py @@ -0,0 +1,185 @@ +# -*-*- encoding: utf-8 -*-*- +# +# This is gdata.photos.geo, implementing geological positioning in gdata structures +# +# $Id: __init__.py 81 2007-10-03 14:41:42Z havard.gulldahl $ +# +# Copyright 2007 HÃ¥vard Gulldahl +# Portions copyright 2007 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Picasa Web Albums uses the georss and gml namespaces for +elements defined in the GeoRSS and Geography Markup Language specifications. + +Specifically, Picasa Web Albums uses the following elements: + +georss:where +gml:Point +gml:pos + +http://code.google.com/apis/picasaweb/reference.html#georss_reference + + +Picasa Web Albums also accepts geographic-location data in two other formats: +W3C format and plain-GeoRSS (without GML) format. +""" +# +#Over the wire, the Picasa Web Albums only accepts and sends the +#elements mentioned above, but this module will let you seamlessly convert +#between the different formats (TODO 2007-10-18 hg) + +__author__ = u'havard@gulldahl.no'# (HÃ¥vard Gulldahl)' #BUG: api chokes on non-ascii chars in __author__ +__license__ = 'Apache License v2' + + +import atom +import gdata + +GEO_NAMESPACE = 'http://www.w3.org/2003/01/geo/wgs84_pos#' +GML_NAMESPACE = 'http://www.opengis.net/gml' +GEORSS_NAMESPACE = 'http://www.georss.org/georss' + +class GeoBaseElement(atom.AtomBase): + """Base class for elements. + + To add new elements, you only need to add the element tag name to self._tag + and the namespace to self._namespace + """ + + _tag = '' + _namespace = GML_NAMESPACE + _children = atom.AtomBase._children.copy() + _attributes = atom.AtomBase._attributes.copy() + + def __init__(self, name=None, extension_elements=None, + extension_attributes=None, text=None): + self.name = name + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + +class Pos(GeoBaseElement): + """(string) Specifies a latitude and longitude, separated by a space, + e.g. 
`35.669998 139.770004'""" + + _tag = 'pos' +def PosFromString(xml_string): + return atom.CreateClassFromXMLString(Pos, xml_string) + +class Point(GeoBaseElement): + """(container) Specifies a particular geographical point, by means of + a <gml:pos> element.""" + + _tag = 'Point' + _children = atom.AtomBase._children.copy() + _children['{%s}pos' % GML_NAMESPACE] = ('pos', Pos) + def __init__(self, pos=None, extension_elements=None, extension_attributes=None, text=None): + GeoBaseElement.__init__(self, extension_elements=extension_elements, + extension_attributes=extension_attributes, + text=text) + if pos is None: + pos = Pos() + self.pos=pos +def PointFromString(xml_string): + return atom.CreateClassFromXMLString(Point, xml_string) + +class Where(GeoBaseElement): + """(container) Specifies a geographical location or region. + A container element, containing a single <gml:Point> element. + (Not to be confused with <gd:where>.) + + Note that the (only) child attribute, .Point, is title-cased. + This reflects the names of elements in the xml stream + (principle of least surprise). + + As a convenience, you can get a tuple of (lat, lon) with Where.location(), + and set the same data with Where.setLocation( (lat, lon) ). + + Similarly, there are methods to set and get only latitude and longitude. + """ + + _tag = 'where' + _namespace = GEORSS_NAMESPACE + _children = atom.AtomBase._children.copy() + _children['{%s}Point' % GML_NAMESPACE] = ('Point', Point) + def __init__(self, point=None, extension_elements=None, extension_attributes=None, text=None): + GeoBaseElement.__init__(self, extension_elements=extension_elements, + extension_attributes=extension_attributes, + text=text) + if point is None: + point = Point() + self.Point=point + def location(self): + "(float, float) Return Where.Point.pos.text as a (lat,lon) tuple" + try: + return tuple([float(z) for z in self.Point.pos.text.split(' ')]) + except AttributeError: + return tuple() + def set_location(self, latlon): + """(bool) Set Where.Point.pos.text from a (lat,lon) tuple. + + Arguments: + lat (float): The latitude in degrees, from -90.0 to 90.0 + lon (float): The longitude in degrees, from -180.0 to 180.0 + + Returns True on success. + + """ + + assert(isinstance(latlon[0], float)) + assert(isinstance(latlon[1], float)) + try: + self.Point.pos.text = "%s %s" % (latlon[0], latlon[1]) + return True + except AttributeError: + return False + def latitude(self): + "(float) Get the latitude value of the geo-tag. See also .location()" + lat, lon = self.location() + return lat + + def longitude(self): + "(float) Get the longtitude value of the geo-tag. See also .location()" + lat, lon = self.location() + return lon + + longtitude = longitude + + def set_latitude(self, lat): + """(bool) Set the latitude value of the geo-tag. + + Args: + lat (float): The new latitude value + + See also .set_location() + """ + _lat, lon = self.location() + return self.set_location(lat, lon) + + def set_longitude(self, lon): + """(bool) Set the longtitude value of the geo-tag. 
+ + Args: + lat (float): The new latitude value + + See also .set_location() + """ + lat, _lon = self.location() + return self.set_location(lat, lon) + + set_longtitude = set_longitude + +def WhereFromString(xml_string): + return atom.CreateClassFromXMLString(Where, xml_string) + diff --git a/gam/gdata/analytics/geo/data.py b/gam/gdata/analytics/geo/data.py new file mode 100755 index 00000000000..2aec9112bbd --- /dev/null +++ b/gam/gdata/analytics/geo/data.py @@ -0,0 +1,92 @@ +#!/usr/bin/python +# +# Copyright (C) 2009 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Contains the data classes of the Geography Extension""" + + +__author__ = 'j.s@google.com (Jeff Scudder)' + + +import atom.core + + +GEORSS_TEMPLATE = '{http://www.georss.org/georss/}%s' +GML_TEMPLATE = '{http://www.opengis.net/gml/}%s' +GEO_TEMPLATE = '{http://www.w3.org/2003/01/geo/wgs84_pos#/}%s' + + +class GeoLat(atom.core.XmlElement): + """Describes a W3C latitude.""" + _qname = GEO_TEMPLATE % 'lat' + + +class GeoLong(atom.core.XmlElement): + """Describes a W3C longitude.""" + _qname = GEO_TEMPLATE % 'long' + + +class GeoRssBox(atom.core.XmlElement): + """Describes a geographical region.""" + _qname = GEORSS_TEMPLATE % 'box' + + +class GeoRssPoint(atom.core.XmlElement): + """Describes a geographical location.""" + _qname = GEORSS_TEMPLATE % 'point' + + +class GmlLowerCorner(atom.core.XmlElement): + """Describes a lower corner of a region.""" + _qname = GML_TEMPLATE % 'lowerCorner' + + +class GmlPos(atom.core.XmlElement): + """Describes a latitude and longitude.""" + _qname = GML_TEMPLATE % 'pos' + + +class GmlPoint(atom.core.XmlElement): + """Describes a particular geographical point.""" + _qname = GML_TEMPLATE % 'Point' + pos = GmlPos + + +class GmlUpperCorner(atom.core.XmlElement): + """Describes an upper corner of a region.""" + _qname = GML_TEMPLATE % 'upperCorner' + + +class GmlEnvelope(atom.core.XmlElement): + """Describes a Gml geographical region.""" + _qname = GML_TEMPLATE % 'Envelope' + lower_corner = GmlLowerCorner + upper_corner = GmlUpperCorner + + +class GeoRssWhere(atom.core.XmlElement): + """Describes a geographical location or region.""" + _qname = GEORSS_TEMPLATE % 'where' + Point = GmlPoint + Envelope = GmlEnvelope + + +class W3CPoint(atom.core.XmlElement): + """Describes a W3C geographical location.""" + _qname = GEO_TEMPLATE % 'Point' + long = GeoLong + lat = GeoLat + + diff --git a/gam/gdata/analytics/health/__init__.py b/gam/gdata/analytics/health/__init__.py new file mode 100755 index 00000000000..1904ecdea66 --- /dev/null +++ b/gam/gdata/analytics/health/__init__.py @@ -0,0 +1,229 @@ +#!/usr/bin/python +# +# Copyright 2009 Google Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
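# --- Editor's illustrative sketch (not part of the patch): rough usage of the
# --- georss/gml classes defined in geo/__init__.py above. The import path is
# --- assumed to be gdata.geo, where this module normally lives (the vendored
# --- copy sits under gam/gdata/analytics/geo); Python 2, matching the library.
import gdata.geo

where = gdata.geo.Where()                      # <georss:where><gml:Point><gml:pos/>
where.set_location((35.669998, 139.770004))    # a single (lat, lon) tuple of floats
print where.location()                         # -> (35.669998, 139.770004)
print where.latitude(), where.longitude()

xml = where.ToString()                         # serialize to an XML string ...
same = gdata.geo.WhereFromString(xml)          # ... and parse it back
# --- End editor's sketch ---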
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Contains extensions to Atom objects used with Google Health.""" + +__author__ = 'api.eric@google.com (Eric Bidelman)' + +import atom +import gdata + + +CCR_NAMESPACE = 'urn:astm-org:CCR' +METADATA_NAMESPACE = 'http://schemas.google.com/health/metadata' + + +class Ccr(atom.AtomBase): + """Represents a Google Health <ContinuityOfCareRecord>.""" + + _tag = 'ContinuityOfCareRecord' + _namespace = CCR_NAMESPACE + _children = atom.AtomBase._children.copy() + + def __init__(self, extension_elements=None, + extension_attributes=None, text=None): + atom.AtomBase.__init__(self, extension_elements=extension_elements, + extension_attributes=extension_attributes, text=text) + + def GetAlerts(self): + """Helper for extracting Alert/Allergy data from the CCR. + + Returns: + A list of ExtensionElements (one for each allergy found) or None if + no allergies where found in this CCR. + """ + try: + body = self.FindExtensions('Body')[0] + return body.FindChildren('Alerts')[0].FindChildren('Alert') + except: + return None + + def GetAllergies(self): + """Alias for GetAlerts().""" + return self.GetAlerts() + + def GetProblems(self): + """Helper for extracting Problem/Condition data from the CCR. + + Returns: + A list of ExtensionElements (one for each problem found) or None if + no problems where found in this CCR. + """ + try: + body = self.FindExtensions('Body')[0] + return body.FindChildren('Problems')[0].FindChildren('Problem') + except: + return None + + def GetConditions(self): + """Alias for GetProblems().""" + return self.GetProblems() + + def GetProcedures(self): + """Helper for extracting Procedure data from the CCR. + + Returns: + A list of ExtensionElements (one for each procedure found) or None if + no procedures where found in this CCR. + """ + try: + body = self.FindExtensions('Body')[0] + return body.FindChildren('Procedures')[0].FindChildren('Procedure') + except: + return None + + def GetImmunizations(self): + """Helper for extracting Immunization data from the CCR. + + Returns: + A list of ExtensionElements (one for each immunization found) or None if + no immunizations where found in this CCR. + """ + try: + body = self.FindExtensions('Body')[0] + return body.FindChildren('Immunizations')[0].FindChildren('Immunization') + except: + return None + + def GetMedications(self): + """Helper for extracting Medication data from the CCR. + + Returns: + A list of ExtensionElements (one for each medication found) or None if + no medications where found in this CCR. + """ + try: + body = self.FindExtensions('Body')[0] + return body.FindChildren('Medications')[0].FindChildren('Medication') + except: + return None + + def GetResults(self): + """Helper for extracting Results/Labresults data from the CCR. + + Returns: + A list of ExtensionElements (one for each result found) or None if + no results where found in this CCR. 
+ """ + try: + body = self.FindExtensions('Body')[0] + return body.FindChildren('Results')[0].FindChildren('Result') + except: + return None + + +class ProfileEntry(gdata.GDataEntry): + """The Google Health version of an Atom Entry.""" + + _tag = gdata.GDataEntry._tag + _namespace = atom.ATOM_NAMESPACE + _children = gdata.GDataEntry._children.copy() + _attributes = gdata.GDataEntry._attributes.copy() + _children['{%s}ContinuityOfCareRecord' % CCR_NAMESPACE] = ('ccr', Ccr) + + def __init__(self, ccr=None, author=None, category=None, content=None, + atom_id=None, link=None, published=None, title=None, + updated=None, text=None, extension_elements=None, + extension_attributes=None): + self.ccr = ccr + gdata.GDataEntry.__init__( + self, author=author, category=category, content=content, + atom_id=atom_id, link=link, published=published, title=title, + updated=updated, extension_elements=extension_elements, + extension_attributes=extension_attributes, text=text) + + +class ProfileFeed(gdata.GDataFeed): + """A feed containing a list of Google Health profile entries.""" + + _tag = gdata.GDataFeed._tag + _namespace = atom.ATOM_NAMESPACE + _children = gdata.GDataFeed._children.copy() + _attributes = gdata.GDataFeed._attributes.copy() + _children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry', [ProfileEntry]) + + +class ProfileListEntry(gdata.GDataEntry): + """The Atom Entry in the Google Health profile list feed.""" + + _tag = gdata.GDataEntry._tag + _namespace = atom.ATOM_NAMESPACE + _children = gdata.GDataEntry._children.copy() + _attributes = gdata.GDataEntry._attributes.copy() + + def GetProfileId(self): + return self.content.text + + def GetProfileName(self): + return self.title.text + + +class ProfileListFeed(gdata.GDataFeed): + """A feed containing a list of Google Health profile list entries.""" + + _tag = gdata.GDataFeed._tag + _namespace = atom.ATOM_NAMESPACE + _children = gdata.GDataFeed._children.copy() + _attributes = gdata.GDataFeed._attributes.copy() + _children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry', [ProfileListEntry]) + + +def ProfileEntryFromString(xml_string): + """Converts an XML string into a ProfileEntry object. + + Args: + xml_string: string The XML describing a Health profile feed entry. + + Returns: + A ProfileEntry object corresponding to the given XML. + """ + return atom.CreateClassFromXMLString(ProfileEntry, xml_string) + + +def ProfileListEntryFromString(xml_string): + """Converts an XML string into a ProfileListEntry object. + + Args: + xml_string: string The XML describing a Health profile list feed entry. + + Returns: + A ProfileListEntry object corresponding to the given XML. + """ + return atom.CreateClassFromXMLString(ProfileListEntry, xml_string) + + +def ProfileFeedFromString(xml_string): + """Converts an XML string into a ProfileFeed object. + + Args: + xml_string: string The XML describing a ProfileFeed feed. + + Returns: + A ProfileFeed object corresponding to the given XML. + """ + return atom.CreateClassFromXMLString(ProfileFeed, xml_string) + + +def ProfileListFeedFromString(xml_string): + """Converts an XML string into a ProfileListFeed object. + + Args: + xml_string: string The XML describing a ProfileListFeed feed. + + Returns: + A ProfileListFeed object corresponding to the given XML. 
+ """ + return atom.CreateClassFromXMLString(ProfileListFeed, xml_string) diff --git a/gam/gdata/analytics/health/service.py b/gam/gdata/analytics/health/service.py new file mode 100755 index 00000000000..3d38411ebe3 --- /dev/null +++ b/gam/gdata/analytics/health/service.py @@ -0,0 +1,263 @@ +#!/usr/bin/python +# +# Copyright 2009 Google Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""HealthService extends GDataService to streamline Google Health API access. + + HealthService: Provides methods to interact with the profile, profile list, + and register/notices feeds. Extends GDataService. + + HealthProfileQuery: Queries the Google Health Profile feed. + + HealthProfileListQuery: Queries the Google Health Profile list feed. +""" + +__author__ = 'api.eric@google.com (Eric Bidelman)' + + +import atom +import gdata.health +import gdata.service + + +class HealthService(gdata.service.GDataService): + + """Client extension for the Google Health service Document List feed.""" + + def __init__(self, email=None, password=None, source=None, + use_h9_sandbox=False, server='www.google.com', + additional_headers=None, **kwargs): + """Creates a client for the Google Health service. + + Args: + email: string (optional) The user's email address, used for + authentication. + password: string (optional) The user's password. + source: string (optional) The name of the user's application. + use_h9_sandbox: boolean (optional) True to issue requests against the + /h9 developer's sandbox. + server: string (optional) The name of the server to which a connection + will be opened. + additional_headers: dictionary (optional) Any additional headers which + should be included with CRUD operations. + kwargs: The other parameters to pass to gdata.service.GDataService + constructor. + """ + service = use_h9_sandbox and 'weaver' or 'health' + gdata.service.GDataService.__init__( + self, email=email, password=password, service=service, source=source, + server=server, additional_headers=additional_headers, **kwargs) + self.ssl = True + self.use_h9_sandbox = use_h9_sandbox + + def __get_service(self): + return self.use_h9_sandbox and 'h9' or 'health' + + def GetProfileFeed(self, query=None, profile_id=None): + """Fetches the users Google Health profile feed. + + Args: + query: HealthProfileQuery or string (optional) A query to use on the + profile feed. If None, a HealthProfileQuery is constructed. + profile_id: string (optional) The profile id to query the profile feed + with when using ClientLogin. Note: this parameter is ignored if + query is set. + + Returns: + A gdata.health.ProfileFeed object containing the user's Health profile. 
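# --- Editor's illustrative sketch (not part of the patch): reading CCR data
# --- out of a parsed profile feed, using the converters and helpers defined
# --- in health/__init__.py above. `profile_xml` is a hypothetical Atom feed
# --- string returned by the Health API; import path assumed to be gdata.health.
import gdata.health

def print_medications(profile_xml):
    feed = gdata.health.ProfileFeedFromString(profile_xml)
    for entry in feed.entry:
        ccr = entry.ccr                         # the <ContinuityOfCareRecord> payload
        for med in ccr.GetMedications() or []:  # helpers return None when a section is absent
            print med.ToString()                # each item is an atom ExtensionElement
# --- End editor's sketch ---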
+ """ + if query is None: + projection = profile_id and 'ui' or 'default' + uri = HealthProfileQuery( + service=self.__get_service(), projection=projection, + profile_id=profile_id).ToUri() + elif isinstance(query, HealthProfileQuery): + uri = query.ToUri() + else: + uri = query + + return self.GetFeed(uri, converter=gdata.health.ProfileFeedFromString) + + def GetProfileListFeed(self, query=None): + """Fetches the users Google Health profile feed. + + Args: + query: HealthProfileListQuery or string (optional) A query to use + on the profile list feed. If None, a HealthProfileListQuery is + constructed to /health/feeds/profile/list or /h9/feeds/profile/list. + + Returns: + A gdata.health.ProfileListFeed object containing the user's list + of profiles. + """ + if not query: + uri = HealthProfileListQuery(service=self.__get_service()).ToUri() + elif isinstance(query, HealthProfileListQuery): + uri = query.ToUri() + else: + uri = query + + return self.GetFeed(uri, converter=gdata.health.ProfileListFeedFromString) + + def SendNotice(self, subject, body=None, content_type='html', + ccr=None, profile_id=None): + """Sends (posts) a notice to the user's Google Health profile. + + Args: + subject: A string representing the message's subject line. + body: string (optional) The message body. + content_type: string (optional) The content type of the notice message + body. This parameter is only honored when a message body is + specified. + ccr: string (optional) The CCR XML document to reconcile into the + user's profile. + profile_id: string (optional) The profile id to work with when using + ClientLogin. Note: this parameter is ignored if query is set. + + Returns: + A gdata.health.ProfileEntry object of the posted entry. + """ + if body: + content = atom.Content(content_type=content_type, text=body) + else: + content = body + + entry = gdata.GDataEntry( + title=atom.Title(text=subject), content=content, + extension_elements=[atom.ExtensionElementFromString(ccr)]) + + projection = profile_id and 'ui' or 'default' + query = HealthRegisterQuery(service=self.__get_service(), + projection=projection, profile_id=profile_id) + return self.Post(entry, query.ToUri(), + converter=gdata.health.ProfileEntryFromString) + + +class HealthProfileQuery(gdata.service.Query): + + """Object used to construct a URI to query the Google Health profile feed.""" + + def __init__(self, service='health', feed='feeds/profile', + projection='default', profile_id=None, text_query=None, + params=None, categories=None): + """Constructor for Health profile feed query. + + Args: + service: string (optional) The service to query. Either 'health' or 'h9'. + feed: string (optional) The path for the feed. The default value is + 'feeds/profile'. + projection: string (optional) The visibility of the data. Possible values + are 'default' for AuthSub and 'ui' for ClientLogin. If this value + is set to 'ui', the profile_id parameter should also be set. + profile_id: string (optional) The profile id to query. This should only + be used when using ClientLogin. + text_query: str (optional) The contents of the q query parameter. The + contents of the text_query are URL escaped upon conversion to a URI. + Note: this parameter can only be used on the register feed using + ClientLogin. + params: dict (optional) Parameter value string pairs which become URL + params when translated to a URI. These parameters are added to + the query's items. + categories: list (optional) List of category strings which should be + included as query categories. 
See gdata.service.Query for + additional documentation. + """ + self.service = service + self.profile_id = profile_id + self.projection = projection + gdata.service.Query.__init__(self, feed=feed, text_query=text_query, + params=params, categories=categories) + + def ToUri(self): + """Generates a URI from the query parameters set in the object. + + Returns: + A string containing the URI used to retrieve entries from the Health + profile feed. + """ + old_feed = self.feed + self.feed = '/'.join([self.service, old_feed, self.projection]) + + if self.profile_id: + self.feed += '/' + self.profile_id + self.feed = '/%s' % (self.feed,) + + new_feed = gdata.service.Query.ToUri(self) + self.feed = old_feed + return new_feed + + +class HealthProfileListQuery(gdata.service.Query): + + """Object used to construct a URI to query a Health profile list feed.""" + + def __init__(self, service='health', feed='feeds/profile/list'): + """Constructor for Health profile list feed query. + + Args: + service: string (optional) The service to query. Either 'health' or 'h9'. + feed: string (optional) The path for the feed. The default value is + 'feeds/profile/list'. + """ + gdata.service.Query.__init__(self, feed) + self.service = service + + def ToUri(self): + """Generates a URI from the query parameters set in the object. + + Returns: + A string containing the URI used to retrieve entries from the + profile list feed. + """ + return '/%s' % ('/'.join([self.service, self.feed]),) + + +class HealthRegisterQuery(gdata.service.Query): + + """Object used to construct a URI to query a Health register/notice feed.""" + + def __init__(self, service='health', feed='feeds/register', + projection='default', profile_id=None): + """Constructor for Health profile list feed query. + + Args: + service: string (optional) The service to query. Either 'health' or 'h9'. + feed: string (optional) The path for the feed. The default value is + 'feeds/register'. + projection: string (optional) The visibility of the data. Possible values + are 'default' for AuthSub and 'ui' for ClientLogin. If this value + is set to 'ui', the profile_id parameter should also be set. + profile_id: string (optional) The profile id to query. This should only + be used when using ClientLogin. + """ + gdata.service.Query.__init__(self, feed) + self.service = service + self.projection = projection + self.profile_id = profile_id + + def ToUri(self): + """Generates a URI from the query parameters set in the object. + + Returns: + A string containing the URI needed to interact with the register feed. + """ + old_feed = self.feed + self.feed = '/'.join([self.service, old_feed, self.projection]) + new_feed = gdata.service.Query.ToUri(self) + self.feed = old_feed + + if self.profile_id: + new_feed += '/' + self.profile_id + return '/%s' % (new_feed,) diff --git a/gam/gdata/analytics/marketplace/__init__.py b/gam/gdata/analytics/marketplace/__init__.py new file mode 100755 index 00000000000..8b137891791 --- /dev/null +++ b/gam/gdata/analytics/marketplace/__init__.py @@ -0,0 +1 @@ + diff --git a/gam/gdata/analytics/marketplace/client.py b/gam/gdata/analytics/marketplace/client.py new file mode 100755 index 00000000000..8ffc348659b --- /dev/null +++ b/gam/gdata/analytics/marketplace/client.py @@ -0,0 +1,160 @@ +#!/usr/bin/python +# +# Copyright 2009 Google Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
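# --- Editor's illustrative sketch (not part of the patch): the URIs the query
# --- classes in health/service.py produce, derived from the ToUri() methods
# --- above. The profile id is a made-up value; import path assumed.
from gdata.health.service import HealthProfileQuery, HealthProfileListQuery

print HealthProfileQuery().ToUri()
# -> '/health/feeds/profile/default'            (AuthSub, default projection)

print HealthProfileQuery(projection='ui', profile_id='vndCn5Qw').ToUri()
# -> '/health/feeds/profile/ui/vndCn5Qw'        (ClientLogin, per-profile)

print HealthProfileListQuery(service='h9').ToUri()
# -> '/h9/feeds/profile/list'                   (h9 developer sandbox)
# --- End editor's sketch ---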
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""LicensingClient simplifies Google Apps Marketplace Licensing API calls. + +LicensingClient extends gdata.client.GDClient to ease interaction with +the Google Apps Marketplace Licensing API. These interactions include the ability +to retrieve License informations for an application in the Google Apps Marketplace. +""" + + +__author__ = 'Alexandre Vivien <alex@simplecode.fr>' + +import gdata.marketplace.data +import gdata.client +import urllib + + +# Feed URI template. This must end with a / +# The strings in this template are eventually replaced with the API version +# and Google Apps domain name, respectively. +LICENSE_ROOT_URL = 'http://feedserver-enterprise.googleusercontent.com' +LICENSE_FEED_TEMPLATE = '%s/license?bq=' % LICENSE_ROOT_URL +LICENSE_NOTIFICATIONS_FEED_TEMPLATE = '%s/licensenotification?bq=' % LICENSE_ROOT_URL + + +class LicensingClient(gdata.client.GDClient): + """Client extension for the Google Apps Marketplace Licensing API service. + + Attributes: + host: string The hostname for the Google Apps Marketplace Licensing API service. + api_version: string The version of the Google Apps Marketplace Licensing API. + """ + + api_version = '1.0' + auth_service = 'apps' + auth_scopes = gdata.gauth.AUTH_SCOPES['apps'] + ssl = False + + def __init__(self, domain, auth_token=None, **kwargs): + """Constructs a new client for the Google Apps Marketplace Licensing API. + + Args: + domain: string The Google Apps domain with the application installed. + auth_token: (optional) gdata.gauth.OAuthToken which authorizes this client to retrieve the License information. + kwargs: The other parameters to pass to the gdata.client.GDClient constructor. + """ + gdata.client.GDClient.__init__(self, auth_token=auth_token, **kwargs) + self.domain = domain + + def make_license_feed_uri(self, app_id=None, params=None): + """Creates a license feed URI for the Google Apps Marketplace Licensing API. + + Using this client's Google Apps domain, create a license feed URI for a particular application + in this domain. If params are provided, append them as GET params. + + Args: + app_id: string The ID of the application for which to make a license feed URI. + params: dict (optional) key -> value params to append as GET vars to the + URI. Example: params={'start': 'my-resource-id'} + Returns: + A string giving the URI for the application's license for this client's Google + Apps domain. + """ + parameters = '[appid=%s][domain=%s]' % (app_id, self.domain) + uri = LICENSE_FEED_TEMPLATE + urllib.quote_plus(parameters) + if params: + uri += '&' + urllib.urlencode(params) + return uri + + MakeLicenseFeedUri = make_license_feed_uri + + def make_license_notifications_feed_uri(self, app_id=None, startdatetime=None, max_results=None, params=None): + """Creates a license notifications feed URI for the Google Apps Marketplace Licensing API. + + Using this client's Google Apps domain, create a license notifications feed URI for a particular application. + If params are provided, append them as GET params. + + Args: + app_id: string The ID of the application for which to make a license feed URI. 
+ startdatetime: Start date to retrieve the License notifications. + max_results: Number of results per page. Maximum is 100. + params: dict (optional) key -> value params to append as GET vars to the + URI. Example: params={'start': 'my-resource-id'} + Returns: + A string giving the URI for the application's license notifications for this client's Google + Apps domain. + """ + parameters = '[appid=%s]' % (app_id) + if startdatetime: + parameters += '[startdatetime=%s]' % startdatetime + else: + parameters += '[startdatetime=1970-01-01T00:00:00Z]' + if max_results: + parameters += '[max-results=%s]' % max_results + else: + parameters += '[max-results=100]' + uri = LICENSE_NOTIFICATIONS_FEED_TEMPLATE + urllib.quote_plus(parameters) + if params: + uri += '&' + urllib.urlencode(params) + return uri + + MakeLicenseNotificationsFeedUri = make_license_notifications_feed_uri + + def get_license(self, uri=None, app_id=None, **kwargs): + """Fetches the application's license by application ID. + + Args: + uri: string The base URI of the feed from which to fetch the license. + app_id: string The string ID of the application for which to fetch the license. + kwargs: The other parameters to pass to gdata.client.GDClient.get_entry(). + + Returns: + A License feed object representing the license with the given + base URI and application ID. + """ + + if uri is None: + uri = self.MakeLicenseFeedUri(app_id) + return self.get_feed(uri, + desired_class=gdata.marketplace.data.LicenseFeed, + **kwargs) + + GetLicense = get_license + + def get_license_notifications(self, uri=None, app_id=None, startdatetime=None, max_results=None, **kwargs): + """Fetches the application's license notifications by application ID. + + Args: + uri: string The base URI of the feed from which to fetch the license. + app_id: string The string ID of the application for which to fetch the license. + startdatetime: Start date to retrieve the License notifications. + max_results: Number of results per page. Maximum is 100. + kwargs: The other parameters to pass to gdata.client.GDClient.get_entry(). + + Returns: + A License feed object representing the license notifications with the given + base URI and application ID. + """ + + if uri is None: + uri = self.MakeLicenseNotificationsFeedUri(app_id, startdatetime, max_results) + return self.get_feed(uri, + desired_class=gdata.marketplace.data.LicenseFeed, + **kwargs) + + GetLicenseNotifications = get_license_notifications diff --git a/gam/gdata/analytics/marketplace/data.py b/gam/gdata/analytics/marketplace/data.py new file mode 100755 index 00000000000..e8c76a25bc2 --- /dev/null +++ b/gam/gdata/analytics/marketplace/data.py @@ -0,0 +1,115 @@ +#!/usr/bin/python +# +# Copyright 2009 Google Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
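# --- Editor's illustrative sketch (not part of the patch): the bq feed URIs
# --- that LicensingClient builds. The app id and domain are made-up values;
# --- no OAuth token is set because no request is actually issued here.
import gdata.marketplace.client

client = gdata.marketplace.client.LicensingClient(domain='example.com')

print client.make_license_feed_uri(app_id='1234')
# -> 'http://feedserver-enterprise.googleusercontent.com/license?bq=' followed by
#    quote_plus('[appid=1234][domain=example.com]'), i.e.
#    '%5Bappid%3D1234%5D%5Bdomain%3Dexample.com%5D'

print client.make_license_notifications_feed_uri(app_id='1234')
# defaults fill in [startdatetime=1970-01-01T00:00:00Z][max-results=100]
# --- End editor's sketch ---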
+ +"""Data model for parsing and generating XML for the Google Apps Marketplace Licensing API.""" + + +__author__ = 'Alexandre Vivien <alex@simplecode.fr>' + + +import atom.core +import gdata +import gdata.data + + +LICENSES_NAMESPACE = 'http://www.w3.org/2005/Atom' +LICENSES_TEMPLATE = '{%s}%%s' % LICENSES_NAMESPACE + + +class Enabled(atom.core.XmlElement): + """ """ + + _qname = LICENSES_TEMPLATE % 'enabled' + + +class Id(atom.core.XmlElement): + """ """ + + _qname = LICENSES_TEMPLATE % 'id' + + +class CustomerId(atom.core.XmlElement): + """ """ + + _qname = LICENSES_TEMPLATE % 'customerid' + + +class DomainName(atom.core.XmlElement): + """ """ + + _qname = LICENSES_TEMPLATE % 'domainname' + + +class InstallerEmail(atom.core.XmlElement): + """ """ + + _qname = LICENSES_TEMPLATE % 'installeremail' + + +class TosAcceptanceTime(atom.core.XmlElement): + """ """ + + _qname = LICENSES_TEMPLATE % 'tosacceptancetime' + + +class LastChangeTime(atom.core.XmlElement): + """ """ + + _qname = LICENSES_TEMPLATE % 'lastchangetime' + + +class ProductConfigId(atom.core.XmlElement): + """ """ + + _qname = LICENSES_TEMPLATE % 'productconfigid' + + +class State(atom.core.XmlElement): + """ """ + + _qname = LICENSES_TEMPLATE % 'state' + + +class Entity(atom.core.XmlElement): + """ The entity representing the License. """ + + _qname = LICENSES_TEMPLATE % 'entity' + + enabled = Enabled + id = Id + customer_id = CustomerId + domain_name = DomainName + installer_email = InstallerEmail + tos_acceptance_time = TosAcceptanceTime + last_change_time = LastChangeTime + product_config_id = ProductConfigId + state = State + + +class Content(atom.data.Content): + entity = Entity + +class LicenseEntry(gdata.data.GDEntry): + """ Represents a LicenseEntry object. """ + + content = Content + + +class LicenseFeed(gdata.data.GDFeed): + """ Represents a feed of LicenseEntry objects. """ + + # Override entry so that this feed knows how to type its list of entries. + entry = [LicenseEntry] diff --git a/gam/gdata/analytics/media/__init__.py b/gam/gdata/analytics/media/__init__.py new file mode 100755 index 00000000000..e6af1ae52d1 --- /dev/null +++ b/gam/gdata/analytics/media/__init__.py @@ -0,0 +1,355 @@ +# -*-*- encoding: utf-8 -*-*- +# +# This is gdata.photos.media, implementing parts of the MediaRSS spec in gdata structures +# +# $Id: __init__.py 81 2007-10-03 14:41:42Z havard.gulldahl $ +# +# Copyright 2007 HÃ¥vard Gulldahl +# Portions copyright 2007 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +"""Essential attributes of photos in Google Photos/Picasa Web Albums are +expressed using elements from the `media' namespace, defined in the +MediaRSS specification[1]. + +Due to copyright issues, the elements herein are documented sparingly, please +consult with the Google Photos API Reference Guide[2], alternatively the +official MediaRSS specification[1] for details. +(If there is a version conflict between the two sources, stick to the +Google Photos API). 
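# --- Editor's illustrative sketch (not part of the patch): how the license
# --- data model defined in marketplace/data.py above is typically read.
# --- `feed` is a hypothetical gdata.marketplace.data.LicenseFeed, e.g. the
# --- return value of LicensingClient.get_license(); fields assumed present.
def print_licenses(feed):
    for entry in feed.entry:
        entity = entry.content.entity           # <entity> nested in the entry's <content>
        print entity.id.text, entity.state.text, entity.enabled.text
        print entity.domain_name.text, entity.installer_email.text
# --- End editor's sketch ---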
+ +[1]: http://search.yahoo.com/mrss (version 1.1.1) +[2]: http://code.google.com/apis/picasaweb/reference.html#media_reference + +Keep in mind that Google Photos only uses a subset of the MediaRSS elements +(and some of the attributes are trimmed down, too): + +media:content +media:credit +media:description +media:group +media:keywords +media:thumbnail +media:title +""" + +__author__ = u'havard@gulldahl.no'# (HÃ¥vard Gulldahl)' #BUG: api chokes on non-ascii chars in __author__ +__license__ = 'Apache License v2' + + +import atom +import gdata + +MEDIA_NAMESPACE = 'http://search.yahoo.com/mrss/' +YOUTUBE_NAMESPACE = 'http://gdata.youtube.com/schemas/2007' + + +class MediaBaseElement(atom.AtomBase): + """Base class for elements in the MEDIA_NAMESPACE. + To add new elements, you only need to add the element tag name to self._tag + """ + + _tag = '' + _namespace = MEDIA_NAMESPACE + _children = atom.AtomBase._children.copy() + _attributes = atom.AtomBase._attributes.copy() + + def __init__(self, name=None, extension_elements=None, + extension_attributes=None, text=None): + self.name = name + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + +class Content(MediaBaseElement): + """(attribute container) This element describes the original content, + e.g. an image or a video. There may be multiple Content elements + in a media:Group. + + For example, a video may have a + <media:content medium="image"> element that specifies a JPEG + representation of the video, and a <media:content medium="video"> + element that specifies the URL of the video itself. + + Attributes: + url: non-ambigous reference to online object + width: width of the object frame, in pixels + height: width of the object frame, in pixels + medium: one of `image' or `video', allowing the api user to quickly + determine the object's type + type: Internet media Type[1] (a.k.a. mime type) of the object -- a more + verbose way of determining the media type. To set the type member + in the contructor, use the content_type parameter. + (optional) fileSize: the size of the object, in bytes + + [1]: http://en.wikipedia.org/wiki/Internet_media_type + """ + + _tag = 'content' + _attributes = atom.AtomBase._attributes.copy() + _attributes['url'] = 'url' + _attributes['width'] = 'width' + _attributes['height'] = 'height' + _attributes['medium'] = 'medium' + _attributes['type'] = 'type' + _attributes['fileSize'] = 'fileSize' + + def __init__(self, url=None, width=None, height=None, + medium=None, content_type=None, fileSize=None, format=None, + extension_elements=None, extension_attributes=None, text=None): + MediaBaseElement.__init__(self, extension_elements=extension_elements, + extension_attributes=extension_attributes, + text=text) + self.url = url + self.width = width + self.height = height + self.medium = medium + self.type = content_type + self.fileSize = fileSize + + +def ContentFromString(xml_string): + return atom.CreateClassFromXMLString(Content, xml_string) + + +class Credit(MediaBaseElement): + """(string) Contains the nickname of the user who created the content, + e.g. `Liz Bennet'. + + This is a user-specified value that should be used when referring to + the user by name. + + Note that none of the attributes from the MediaRSS spec are supported. 
+ """ + + _tag = 'credit' + + +def CreditFromString(xml_string): + return atom.CreateClassFromXMLString(Credit, xml_string) + + +class Description(MediaBaseElement): + """(string) A description of the media object. + Either plain unicode text, or entity-encoded html (look at the `type' + attribute). + + E.g `A set of photographs I took while vacationing in Italy.' + + For `api' projections, the description is in plain text; + for `base' projections, the description is in HTML. + + Attributes: + type: either `text' or `html'. To set the type member in the contructor, + use the description_type parameter. + """ + + _tag = 'description' + _attributes = atom.AtomBase._attributes.copy() + _attributes['type'] = 'type' + def __init__(self, description_type=None, + extension_elements=None, extension_attributes=None, text=None): + MediaBaseElement.__init__(self, extension_elements=extension_elements, + extension_attributes=extension_attributes, + text=text) + + self.type = description_type + + +def DescriptionFromString(xml_string): + return atom.CreateClassFromXMLString(Description, xml_string) + + +class Keywords(MediaBaseElement): + """(string) Lists the tags associated with the entry, + e.g `italy, vacation, sunset'. + + Contains a comma-separated list of tags that have been added to the photo, or + all tags that have been added to photos in the album. + """ + + _tag = 'keywords' + + +def KeywordsFromString(xml_string): + return atom.CreateClassFromXMLString(Keywords, xml_string) + + +class Thumbnail(MediaBaseElement): + """(attributes) Contains the URL of a thumbnail of a photo or album cover. + + There can be multiple <media:thumbnail> elements for a given <media:group>; + for example, a given item may have multiple thumbnails at different sizes. + Photos generally have two thumbnails at different sizes; + albums generally have one cropped thumbnail. + + If the thumbsize parameter is set to the initial query, this element points + to thumbnails of the requested sizes; otherwise the thumbnails are the + default thumbnail size. + + This element must not be confused with the <gphoto:thumbnail> element. + + Attributes: + url: The URL of the thumbnail image. + height: The height of the thumbnail image, in pixels. + width: The width of the thumbnail image, in pixels. + """ + + _tag = 'thumbnail' + _attributes = atom.AtomBase._attributes.copy() + _attributes['url'] = 'url' + _attributes['width'] = 'width' + _attributes['height'] = 'height' + def __init__(self, url=None, width=None, height=None, + extension_attributes=None, text=None, extension_elements=None): + MediaBaseElement.__init__(self, extension_elements=extension_elements, + extension_attributes=extension_attributes, + text=text) + self.url = url + self.width = width + self.height = height + + +def ThumbnailFromString(xml_string): + return atom.CreateClassFromXMLString(Thumbnail, xml_string) + + +class Title(MediaBaseElement): + """(string) Contains the title of the entry's media content, in plain text. + + Attributes: + type: Always set to plain. To set the type member in the constructor, use + the title_type parameter. 
+ """ + + _tag = 'title' + _attributes = atom.AtomBase._attributes.copy() + _attributes['type'] = 'type' + def __init__(self, title_type=None, + extension_attributes=None, text=None, extension_elements=None): + MediaBaseElement.__init__(self, extension_elements=extension_elements, + extension_attributes=extension_attributes, + text=text) + self.type = title_type + + +def TitleFromString(xml_string): + return atom.CreateClassFromXMLString(Title, xml_string) + + +class Player(MediaBaseElement): + """(string) Contains the embeddable player URL for the entry's media content + if the media is a video. + + Attributes: + url: Always set to plain + """ + + _tag = 'player' + _attributes = atom.AtomBase._attributes.copy() + _attributes['url'] = 'url' + + def __init__(self, player_url=None, + extension_attributes=None, extension_elements=None): + MediaBaseElement.__init__(self, extension_elements=extension_elements, + extension_attributes=extension_attributes) + self.url= player_url + + +class Private(atom.AtomBase): + """The YouTube Private element""" + _tag = 'private' + _namespace = YOUTUBE_NAMESPACE + + +class Duration(atom.AtomBase): + """The YouTube Duration element""" + _tag = 'duration' + _namespace = YOUTUBE_NAMESPACE + _attributes = atom.AtomBase._attributes.copy() + _attributes['seconds'] = 'seconds' + + +class Category(MediaBaseElement): + """The mediagroup:category element""" + + _tag = 'category' + _attributes = atom.AtomBase._attributes.copy() + _attributes['term'] = 'term' + _attributes['scheme'] = 'scheme' + _attributes['label'] = 'label' + + def __init__(self, term=None, scheme=None, label=None, text=None, + extension_elements=None, extension_attributes=None): + """Constructor for Category + + Args: + term: str + scheme: str + label: str + text: str The text data in the this element + extension_elements: list A list of ExtensionElement instances + extension_attributes: dict A dictionary of attribute value string pairs + """ + + self.term = term + self.scheme = scheme + self.label = label + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + +class Group(MediaBaseElement): + """Container element for all media elements. 
+ The <media:group> element can appear as a child of an album, photo or + video entry.""" + + _tag = 'group' + _children = atom.AtomBase._children.copy() + _children['{%s}content' % MEDIA_NAMESPACE] = ('content', [Content,]) + _children['{%s}credit' % MEDIA_NAMESPACE] = ('credit', Credit) + _children['{%s}description' % MEDIA_NAMESPACE] = ('description', Description) + _children['{%s}keywords' % MEDIA_NAMESPACE] = ('keywords', Keywords) + _children['{%s}thumbnail' % MEDIA_NAMESPACE] = ('thumbnail', [Thumbnail,]) + _children['{%s}title' % MEDIA_NAMESPACE] = ('title', Title) + _children['{%s}category' % MEDIA_NAMESPACE] = ('category', [Category,]) + _children['{%s}duration' % YOUTUBE_NAMESPACE] = ('duration', Duration) + _children['{%s}private' % YOUTUBE_NAMESPACE] = ('private', Private) + _children['{%s}player' % MEDIA_NAMESPACE] = ('player', Player) + + def __init__(self, content=None, credit=None, description=None, keywords=None, + thumbnail=None, title=None, duration=None, private=None, + category=None, player=None, extension_elements=None, + extension_attributes=None, text=None): + + MediaBaseElement.__init__(self, extension_elements=extension_elements, + extension_attributes=extension_attributes, + text=text) + self.content=content + self.credit=credit + self.description=description + self.keywords=keywords + self.thumbnail=thumbnail or [] + self.title=title + self.duration=duration + self.private=private + self.category=category or [] + self.player=player + + +def GroupFromString(xml_string): + return atom.CreateClassFromXMLString(Group, xml_string) diff --git a/gam/gdata/analytics/media/data.py b/gam/gdata/analytics/media/data.py new file mode 100755 index 00000000000..bb5d2c80f85 --- /dev/null +++ b/gam/gdata/analytics/media/data.py @@ -0,0 +1,159 @@ +#!/usr/bin/python +# +# Copyright (C) 2009 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Contains the data classes of the Yahoo! 
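# --- Editor's illustrative sketch (not part of the patch): building a
# --- <media:group> with the classes defined in media/__init__.py above.
# --- Import path assumed to be gdata.media; URLs and sizes are made up.
import gdata.media

group = gdata.media.Group(
    title=gdata.media.Title(text='Sunset over Oslo'),
    keywords=gdata.media.Keywords(text='norway, sunset, vacation'),
    content=[gdata.media.Content(url='http://example.com/sunset.jpg',
                                 content_type='image/jpeg', medium='image',
                                 width='1600', height='1200')],
    thumbnail=[gdata.media.Thumbnail(url='http://example.com/sunset-t.jpg',
                                     width='160', height='120')])

print group.ToString()    # serializes the group and its children to XML
# --- End editor's sketch ---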
Media RSS Extension""" + + +__author__ = 'j.s@google.com (Jeff Scudder)' + + +import atom.core + + +MEDIA_TEMPLATE = '{http://search.yahoo.com/mrss//}%s' + + +class MediaCategory(atom.core.XmlElement): + """Describes a media category.""" + _qname = MEDIA_TEMPLATE % 'category' + scheme = 'scheme' + label = 'label' + + +class MediaCopyright(atom.core.XmlElement): + """Describes a media copyright.""" + _qname = MEDIA_TEMPLATE % 'copyright' + url = 'url' + + +class MediaCredit(atom.core.XmlElement): + """Describes a media credit.""" + _qname = MEDIA_TEMPLATE % 'credit' + role = 'role' + scheme = 'scheme' + + +class MediaDescription(atom.core.XmlElement): + """Describes a media description.""" + _qname = MEDIA_TEMPLATE % 'description' + type = 'type' + + +class MediaHash(atom.core.XmlElement): + """Describes a media hash.""" + _qname = MEDIA_TEMPLATE % 'hash' + algo = 'algo' + + +class MediaKeywords(atom.core.XmlElement): + """Describes a media keywords.""" + _qname = MEDIA_TEMPLATE % 'keywords' + + +class MediaPlayer(atom.core.XmlElement): + """Describes a media player.""" + _qname = MEDIA_TEMPLATE % 'player' + height = 'height' + width = 'width' + url = 'url' + + +class MediaRating(atom.core.XmlElement): + """Describes a media rating.""" + _qname = MEDIA_TEMPLATE % 'rating' + scheme = 'scheme' + + +class MediaRestriction(atom.core.XmlElement): + """Describes a media restriction.""" + _qname = MEDIA_TEMPLATE % 'restriction' + relationship = 'relationship' + type = 'type' + + +class MediaText(atom.core.XmlElement): + """Describes a media text.""" + _qname = MEDIA_TEMPLATE % 'text' + end = 'end' + lang = 'lang' + type = 'type' + start = 'start' + + +class MediaThumbnail(atom.core.XmlElement): + """Describes a media thumbnail.""" + _qname = MEDIA_TEMPLATE % 'thumbnail' + time = 'time' + url = 'url' + width = 'width' + height = 'height' + + +class MediaTitle(atom.core.XmlElement): + """Describes a media title.""" + _qname = MEDIA_TEMPLATE % 'title' + type = 'type' + + +class MediaContent(atom.core.XmlElement): + """Describes a media content.""" + _qname = MEDIA_TEMPLATE % 'content' + bitrate = 'bitrate' + is_default = 'isDefault' + medium = 'medium' + height = 'height' + credit = [MediaCredit] + language = 'language' + hash = MediaHash + width = 'width' + player = MediaPlayer + url = 'url' + file_size = 'fileSize' + channels = 'channels' + expression = 'expression' + text = [MediaText] + samplingrate = 'samplingrate' + title = MediaTitle + category = [MediaCategory] + rating = [MediaRating] + type = 'type' + description = MediaDescription + framerate = 'framerate' + thumbnail = [MediaThumbnail] + duration = 'duration' + copyright = MediaCopyright + keywords = MediaKeywords + restriction = [MediaRestriction] + + +class MediaGroup(atom.core.XmlElement): + """Describes a media group.""" + _qname = MEDIA_TEMPLATE % 'group' + credit = [MediaCredit] + content = [MediaContent] + copyright = MediaCopyright + description = MediaDescription + category = [MediaCategory] + player = MediaPlayer + rating = [MediaRating] + hash = MediaHash + title = MediaTitle + keywords = MediaKeywords + restriction = [MediaRestriction] + thumbnail = [MediaThumbnail] + text = [MediaText] + + diff --git a/gam/gdata/analytics/notebook/__init__.py b/gam/gdata/analytics/notebook/__init__.py new file mode 100755 index 00000000000..22071f7a11e --- /dev/null +++ b/gam/gdata/analytics/notebook/__init__.py @@ -0,0 +1,15 @@ +#!/usr/bin/python +# +# Copyright (C) 2009 Google Inc. 
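# --- Editor's illustrative sketch (not part of the patch): reading the
# --- atom.core media model defined in media/data.py above. `entry` is a
# --- hypothetical v2-style entry carrying a MediaGroup in its `group` member.
def print_media(entry):
    group = entry.group
    for thumb in group.thumbnail or []:         # list of MediaThumbnail
        print thumb.url, thumb.width, thumb.height
    for content in group.content or []:         # list of MediaContent
        print content.url, content.type, content.medium, content.file_size
# --- End editor's sketch ---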
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/gam/gdata/analytics/notebook/data.py b/gam/gdata/analytics/notebook/data.py new file mode 100755 index 00000000000..53405e01818 --- /dev/null +++ b/gam/gdata/analytics/notebook/data.py @@ -0,0 +1,55 @@ +#!/usr/bin/python +# +# Copyright (C) 2009 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Contains the data classes of the Google Notebook Data API""" + + +__author__ = 'j.s@google.com (Jeff Scudder)' + + +import atom.core +import atom.data +import gdata.data +import gdata.opensearch.data + + +NB_TEMPLATE = '{http://schemas.google.com/notes/2008/}%s' + + +class ComesAfter(atom.core.XmlElement): + """Preceding element.""" + _qname = NB_TEMPLATE % 'comesAfter' + id = 'id' + + +class NoteEntry(gdata.data.GDEntry): + """Describes a note entry in the feed of a user's notebook.""" + + +class NotebookFeed(gdata.data.GDFeed): + """Describes a notebook feed.""" + entry = [NoteEntry] + + +class NotebookListEntry(gdata.data.GDEntry): + """Describes a note list entry in the feed of a user's list of public notebooks.""" + + +class NotebookListFeed(gdata.data.GDFeed): + """Describes a notebook list feed.""" + entry = [NotebookListEntry] + + diff --git a/gam/gdata/analytics/oauth/CHANGES.txt b/gam/gdata/analytics/oauth/CHANGES.txt new file mode 100755 index 00000000000..7c2b92cd943 --- /dev/null +++ b/gam/gdata/analytics/oauth/CHANGES.txt @@ -0,0 +1,17 @@ +1. Moved oauth.py to __init__.py + +2. Refactored __init__.py for compatibility with python 2.2 (Issue 59) + +3. Refactored rsa.py for compatibility with python 2.2 (Issue 59) + +4. Refactored OAuthRequest.from_token_and_callback since the callback url was +getting double url-encoding the callback url in place of single. (Issue 43) + +5. Added build_signature_base_string method to rsa.py since it used the +implementation of this method from oauth.OAuthSignatureMethod_HMAC_SHA1 which +was incorrect since it enforced the presence of a consumer secret and a token +secret. Also, changed its super class from oauth.OAuthSignatureMethod_HMAC_SHA1 +to oauth.OAuthSignatureMethod (Issue 64) + +6. Refactored <OAuthRequest>.to_header method since it returned non-oauth params +as well which was incorrect. 
(Issue 31) \ No newline at end of file diff --git a/gam/gdata/analytics/oauth/__init__.py b/gam/gdata/analytics/oauth/__init__.py new file mode 100755 index 00000000000..44d9c7a4e96 --- /dev/null +++ b/gam/gdata/analytics/oauth/__init__.py @@ -0,0 +1,529 @@ +import cgi +import urllib +import time +import random +import urlparse +import hmac +import binascii + +VERSION = '1.0' # Hi Blaine! +HTTP_METHOD = 'GET' +SIGNATURE_METHOD = 'PLAINTEXT' + +# Generic exception class +class OAuthError(RuntimeError): + def __init__(self, message='OAuth error occured.'): + self.message = message + +# optional WWW-Authenticate header (401 error) +def build_authenticate_header(realm=''): + return {'WWW-Authenticate': 'OAuth realm="%s"' % realm} + +# url escape +def escape(s): + # escape '/' too + return urllib.quote(s, safe='~') + +# util function: current timestamp +# seconds since epoch (UTC) +def generate_timestamp(): + return int(time.time()) + +# util function: nonce +# pseudorandom number +def generate_nonce(length=8): + return ''.join([str(random.randint(0, 9)) for i in range(length)]) + +# OAuthConsumer is a data type that represents the identity of the Consumer +# via its shared secret with the Service Provider. +class OAuthConsumer(object): + key = None + secret = None + + def __init__(self, key, secret): + self.key = key + self.secret = secret + +# OAuthToken is a data type that represents an End User via either an access +# or request token. +class OAuthToken(object): + # access tokens and request tokens + key = None + secret = None + + ''' + key = the token + secret = the token secret + ''' + def __init__(self, key, secret): + self.key = key + self.secret = secret + + def to_string(self): + return urllib.urlencode({'oauth_token': self.key, 'oauth_token_secret': self.secret}) + + # return a token from something like: + # oauth_token_secret=digg&oauth_token=digg + def from_string(s): + params = cgi.parse_qs(s, keep_blank_values=False) + key = params['oauth_token'][0] + secret = params['oauth_token_secret'][0] + return OAuthToken(key, secret) + from_string = staticmethod(from_string) + + def __str__(self): + return self.to_string() + +# OAuthRequest represents the request and can be serialized +class OAuthRequest(object): + ''' + OAuth parameters: + - oauth_consumer_key + - oauth_token + - oauth_signature_method + - oauth_signature + - oauth_timestamp + - oauth_nonce + - oauth_version + ... any additional parameters, as defined by the Service Provider. 
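# --- Editor's illustrative sketch (not part of the patch): the token/consumer
# --- plumbing defined above in oauth/__init__.py. Key and secret values are
# --- made up; Python 2, matching the module's urllib/cgi usage.
consumer = OAuthConsumer('my-consumer-key', 'my-consumer-secret')
token = OAuthToken('request-token', 'request-secret')

serialized = str(token)                    # urlencoded oauth_token / oauth_token_secret pair
restored = OAuthToken.from_string(serialized)
assert restored.key == token.key and restored.secret == token.secret

print escape('http://example.com/a?b=1')   # '/', ':' and '=' get percent-escaped, '~' is kept
print generate_nonce()                     # e.g. '83649271' -- eight pseudo-random digits
# --- End editor's sketch ---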
+ ''' + parameters = None # oauth parameters + http_method = HTTP_METHOD + http_url = None + version = VERSION + + def __init__(self, http_method=HTTP_METHOD, http_url=None, parameters=None): + self.http_method = http_method + self.http_url = http_url + self.parameters = parameters or {} + + def set_parameter(self, parameter, value): + self.parameters[parameter] = value + + def get_parameter(self, parameter): + try: + return self.parameters[parameter] + except: + raise OAuthError('Parameter not found: %s' % parameter) + + def _get_timestamp_nonce(self): + return self.get_parameter('oauth_timestamp'), self.get_parameter('oauth_nonce') + + # get any non-oauth parameters + def get_nonoauth_parameters(self): + parameters = {} + for k, v in self.parameters.iteritems(): + # ignore oauth parameters + if k.find('oauth_') < 0: + parameters[k] = v + return parameters + + # serialize as a header for an HTTPAuth request + def to_header(self, realm=''): + auth_header = 'OAuth realm="%s"' % realm + # add the oauth parameters + if self.parameters: + for k, v in self.parameters.iteritems(): + if k[:6] == 'oauth_': + auth_header += ', %s="%s"' % (k, escape(str(v))) + return {'Authorization': auth_header} + + # serialize as post data for a POST request + def to_postdata(self): + return '&'.join(['%s=%s' % (escape(str(k)), escape(str(v))) for k, v in self.parameters.iteritems()]) + + # serialize as a url for a GET request + def to_url(self): + return '%s?%s' % (self.get_normalized_http_url(), self.to_postdata()) + + # return a string that consists of all the parameters that need to be signed + def get_normalized_parameters(self): + params = self.parameters + try: + # exclude the signature if it exists + del params['oauth_signature'] + except: + pass + key_values = params.items() + # sort lexicographically, first after key, then after value + key_values.sort() + # combine key value pairs in string and escape + return '&'.join(['%s=%s' % (escape(str(k)), escape(str(v))) for k, v in key_values]) + + # just uppercases the http method + def get_normalized_http_method(self): + return self.http_method.upper() + + # parses the url and rebuilds it to be scheme://host/path + def get_normalized_http_url(self): + parts = urlparse.urlparse(self.http_url) + host = parts[1].lower() + if host.endswith(':80') or host.endswith(':443'): + host = host.split(':')[0] + url_string = '%s://%s%s' % (parts[0], host, parts[2]) # scheme, netloc, path + return url_string + + # set the signature parameter to the result of build_signature + def sign_request(self, signature_method, consumer, token): + # set the signature method + self.set_parameter('oauth_signature_method', signature_method.get_name()) + # set the signature + self.set_parameter('oauth_signature', self.build_signature(signature_method, consumer, token)) + + def build_signature(self, signature_method, consumer, token): + # call the build signature method within the signature method + return signature_method.build_signature(self, consumer, token) + + def from_request(http_method, http_url, headers=None, parameters=None, query_string=None): + # combine multiple parameter sources + if parameters is None: + parameters = {} + + # headers + if headers and 'Authorization' in headers: + auth_header = headers['Authorization'] + # check that the authorization header is OAuth + if auth_header.index('OAuth') > -1: + try: + # get the parameters from the header + header_params = OAuthRequest._split_header(auth_header) + parameters.update(header_params) + except: + raise OAuthError('Unable 
to parse OAuth parameters from Authorization header.') + + # GET or POST query string + if query_string: + query_params = OAuthRequest._split_url_string(query_string) + parameters.update(query_params) + + # URL parameters + param_str = urlparse.urlparse(http_url)[4] # query + url_params = OAuthRequest._split_url_string(param_str) + parameters.update(url_params) + + if parameters: + return OAuthRequest(http_method, http_url, parameters) + + return None + from_request = staticmethod(from_request) + + def from_consumer_and_token(oauth_consumer, token=None, http_method=HTTP_METHOD, http_url=None, parameters=None): + if not parameters: + parameters = {} + + defaults = { + 'oauth_consumer_key': oauth_consumer.key, + 'oauth_timestamp': generate_timestamp(), + 'oauth_nonce': generate_nonce(), + 'oauth_version': OAuthRequest.version, + } + + defaults.update(parameters) + parameters = defaults + + if token: + parameters['oauth_token'] = token.key + + return OAuthRequest(http_method, http_url, parameters) + from_consumer_and_token = staticmethod(from_consumer_and_token) + + def from_token_and_callback(token, callback=None, http_method=HTTP_METHOD, http_url=None, parameters=None): + if not parameters: + parameters = {} + + parameters['oauth_token'] = token.key + + if callback: + parameters['oauth_callback'] = callback + + return OAuthRequest(http_method, http_url, parameters) + from_token_and_callback = staticmethod(from_token_and_callback) + + # util function: turn Authorization: header into parameters, has to do some unescaping + def _split_header(header): + params = {} + parts = header[6:].split(',') + for param in parts: + # ignore realm parameter + if param.find('realm') > -1: + continue + # remove whitespace + param = param.strip() + # split key-value + param_parts = param.split('=', 1) + # remove quotes and unescape the value + params[param_parts[0]] = urllib.unquote(param_parts[1].strip('\"')) + return params + _split_header = staticmethod(_split_header) + + # util function: turn url string into parameters, has to do some unescaping + # even empty values should be included + def _split_url_string(param_str): + parameters = cgi.parse_qs(param_str, keep_blank_values=True) + for k, v in parameters.iteritems(): + parameters[k] = urllib.unquote(v[0]) + return parameters + _split_url_string = staticmethod(_split_url_string) + +# OAuthServer is a worker to check a requests validity against a data store +class OAuthServer(object): + timestamp_threshold = 300 # in seconds, five minutes + version = VERSION + signature_methods = None + data_store = None + + def __init__(self, data_store=None, signature_methods=None): + self.data_store = data_store + self.signature_methods = signature_methods or {} + + def set_data_store(self, oauth_data_store): + self.data_store = oauth_data_store + + def get_data_store(self): + return self.data_store + + def add_signature_method(self, signature_method): + self.signature_methods[signature_method.get_name()] = signature_method + return self.signature_methods + + # process a request_token request + # returns the request token on success + def fetch_request_token(self, oauth_request): + try: + # get the request token for authorization + token = self._get_token(oauth_request, 'request') + except OAuthError: + # no token required for the initial token request + version = self._get_version(oauth_request) + consumer = self._get_consumer(oauth_request) + self._check_signature(oauth_request, consumer, None) + # fetch a new token + token = 
self.data_store.fetch_request_token(consumer) + return token + + # process an access_token request + # returns the access token on success + def fetch_access_token(self, oauth_request): + version = self._get_version(oauth_request) + consumer = self._get_consumer(oauth_request) + # get the request token + token = self._get_token(oauth_request, 'request') + self._check_signature(oauth_request, consumer, token) + new_token = self.data_store.fetch_access_token(consumer, token) + return new_token + + # verify an api call, checks all the parameters + def verify_request(self, oauth_request): + # -> consumer and token + version = self._get_version(oauth_request) + consumer = self._get_consumer(oauth_request) + # get the access token + token = self._get_token(oauth_request, 'access') + self._check_signature(oauth_request, consumer, token) + parameters = oauth_request.get_nonoauth_parameters() + return consumer, token, parameters + + # authorize a request token + def authorize_token(self, token, user): + return self.data_store.authorize_request_token(token, user) + + # get the callback url + def get_callback(self, oauth_request): + return oauth_request.get_parameter('oauth_callback') + + # optional support for the authenticate header + def build_authenticate_header(self, realm=''): + return {'WWW-Authenticate': 'OAuth realm="%s"' % realm} + + # verify the correct version request for this server + def _get_version(self, oauth_request): + try: + version = oauth_request.get_parameter('oauth_version') + except: + version = VERSION + if version and version != self.version: + raise OAuthError('OAuth version %s not supported.' % str(version)) + return version + + # figure out the signature with some defaults + def _get_signature_method(self, oauth_request): + try: + signature_method = oauth_request.get_parameter('oauth_signature_method') + except: + signature_method = SIGNATURE_METHOD + try: + # get the signature method object + signature_method = self.signature_methods[signature_method] + except: + signature_method_names = ', '.join(self.signature_methods.keys()) + raise OAuthError('Signature method %s not supported try one of the following: %s' % (signature_method, signature_method_names)) + + return signature_method + + def _get_consumer(self, oauth_request): + consumer_key = oauth_request.get_parameter('oauth_consumer_key') + if not consumer_key: + raise OAuthError('Invalid consumer key.') + consumer = self.data_store.lookup_consumer(consumer_key) + if not consumer: + raise OAuthError('Invalid consumer.') + return consumer + + # try to find the token for the provided request token key + def _get_token(self, oauth_request, token_type='access'): + token_field = oauth_request.get_parameter('oauth_token') + consumer = self._get_consumer(oauth_request) + token = self.data_store.lookup_token(consumer, token_type, token_field) + if not token: + raise OAuthError('Invalid %s token: %s' % (token_type, token_field)) + return token + + def _check_signature(self, oauth_request, consumer, token): + timestamp, nonce = oauth_request._get_timestamp_nonce() + self._check_timestamp(timestamp) + self._check_nonce(consumer, token, nonce) + signature_method = self._get_signature_method(oauth_request) + try: + signature = oauth_request.get_parameter('oauth_signature') + except: + raise OAuthError('Missing signature.') + # validate the signature + valid_sig = signature_method.check_signature(oauth_request, consumer, token, signature) + if not valid_sig: + key, base = signature_method.build_signature_base_string(oauth_request, 
consumer, token) + raise OAuthError('Invalid signature. Expected signature base string: %s' % base) + built = signature_method.build_signature(oauth_request, consumer, token) + + def _check_timestamp(self, timestamp): + # verify that timestamp is recentish + timestamp = int(timestamp) + now = int(time.time()) + lapsed = now - timestamp + if lapsed > self.timestamp_threshold: + raise OAuthError('Expired timestamp: given %d and now %s has a greater difference than threshold %d' % (timestamp, now, self.timestamp_threshold)) + + def _check_nonce(self, consumer, token, nonce): + # verify that the nonce is uniqueish + nonce = self.data_store.lookup_nonce(consumer, token, nonce) + if nonce: + raise OAuthError('Nonce already used: %s' % str(nonce)) + +# OAuthClient is a worker to attempt to execute a request +class OAuthClient(object): + consumer = None + token = None + + def __init__(self, oauth_consumer, oauth_token): + self.consumer = oauth_consumer + self.token = oauth_token + + def get_consumer(self): + return self.consumer + + def get_token(self): + return self.token + + def fetch_request_token(self, oauth_request): + # -> OAuthToken + raise NotImplementedError + + def fetch_access_token(self, oauth_request): + # -> OAuthToken + raise NotImplementedError + + def access_resource(self, oauth_request): + # -> some protected resource + raise NotImplementedError + +# OAuthDataStore is a database abstraction used to lookup consumers and tokens +class OAuthDataStore(object): + + def lookup_consumer(self, key): + # -> OAuthConsumer + raise NotImplementedError + + def lookup_token(self, oauth_consumer, token_type, token_token): + # -> OAuthToken + raise NotImplementedError + + def lookup_nonce(self, oauth_consumer, oauth_token, nonce, timestamp): + # -> OAuthToken + raise NotImplementedError + + def fetch_request_token(self, oauth_consumer): + # -> OAuthToken + raise NotImplementedError + + def fetch_access_token(self, oauth_consumer, oauth_token): + # -> OAuthToken + raise NotImplementedError + + def authorize_request_token(self, oauth_token, user): + # -> OAuthToken + raise NotImplementedError + +# OAuthSignatureMethod is a strategy class that implements a signature method +class OAuthSignatureMethod(object): + def get_name(self): + # -> str + raise NotImplementedError + + def build_signature_base_string(self, oauth_request, oauth_consumer, oauth_token): + # -> str key, str raw + raise NotImplementedError + + def build_signature(self, oauth_request, oauth_consumer, oauth_token): + # -> str + raise NotImplementedError + + def check_signature(self, oauth_request, consumer, token, signature): + built = self.build_signature(oauth_request, consumer, token) + return built == signature + +class OAuthSignatureMethod_HMAC_SHA1(OAuthSignatureMethod): + + def get_name(self): + return 'HMAC-SHA1' + + def build_signature_base_string(self, oauth_request, consumer, token): + sig = ( + escape(oauth_request.get_normalized_http_method()), + escape(oauth_request.get_normalized_http_url()), + escape(oauth_request.get_normalized_parameters()), + ) + + key = '%s&' % escape(consumer.secret) + if token: + key += escape(token.secret) + raw = '&'.join(sig) + return key, raw + + def build_signature(self, oauth_request, consumer, token): + # build the base signature string + key, raw = self.build_signature_base_string(oauth_request, consumer, token) + + # hmac object + try: + import hashlib # 2.5 + hashed = hmac.new(key, raw, hashlib.sha1) + except: + import sha # deprecated + hashed = hmac.new(key, raw, sha) + + # calculate 
the digest base 64 + return binascii.b2a_base64(hashed.digest())[:-1] + +class OAuthSignatureMethod_PLAINTEXT(OAuthSignatureMethod): + + def get_name(self): + return 'PLAINTEXT' + + def build_signature_base_string(self, oauth_request, consumer, token): + # concatenate the consumer key and secret + sig = escape(consumer.secret) + '&' + if token: + sig = sig + escape(token.secret) + return sig + + def build_signature(self, oauth_request, consumer, token): + return self.build_signature_base_string(oauth_request, consumer, token) diff --git a/gam/gdata/analytics/oauth/__init__.pyc b/gam/gdata/analytics/oauth/__init__.pyc new file mode 100755 index 0000000000000000000000000000000000000000..447fd2ce16ab748c6d07864270823b905b7d43ca GIT binary patch literal 20135 zcmd5^UvOO4SwDAIlC`pA*|H^Dwv%k^I#q+M#Es(=<1}$?lenoj$(0kQ8mBw!y;ss& zt6h2Tl^q3}LhO_<v=nCOz?1@=p))Ck0xb-M85o9#KJdUWQyzHWfd>YL2OgN|@C?7- z@4J82l3hHz>P%(d<8#kF_uTJ%e}3otzH{W_564EHovKypF8L|q-xqO3tDbWK{(7$I z+<aE|+`N~z7u<Xytry*VF|7}{`GK@P=;jC0`jDF+asl4j<^uE|b^&^pTrlXu5x0P0 zhumG~?l||^`Ry)1r(t&&-y3y7i4W$-Tri@_4i})qsGg3yV9bR(_5P0R{ar2?Pg{3p ztrITTm9|b~t-D>YJ8j*QweE4jWZF8FwNAQVZ`!&qYn^hzblSQ<Yu)RD11{Vv$_{3& z`&{ry+WKhLI!#8;%{&IqH~tqtm572kKKpdce_uL(DK6pP*FJORLgmu=w=TsaxO?@? zTD)|=(`k3=xYJJo<)?5(dr*X6St={?vEcl<8TQaqy<HCZdbwS%uXVy;w$7*Nb(THH zP~7s|E$6CIfw>u|g{!EHx1qWcMp11s)VT2gUkRHFK8c>eWkocL+SyZA7TfK`W_YZ& zx*8o@skQK0=gMMGi)&ZfHB9A7wc2Pk;%aqvbv+)&E9w0>>JiCu-V8@h9=2*LVYMn& zRI4lPV6BN;e-It=Ppj6$_kDhD2QGbg$cs^Rb<Uj^i4c28Spz&4ho+#~C+9?ACEAH6 z#^O#`Ypy(h=ty)(jh8Q9K1RWXt+-LI#i7y4QDc&>-Z}j}kRDHCn5%1zW>6(p)9+N5 z!deh^cH?2f%WOIno2J8unCUf52b02t?s#4@C6jJ#A&g1v&zr=DP+jXZn~kfY{`y)Q zGty*OMEy;KgNISQwhALksV>w})ej{ks>y0l4YEM^9RLU|iJz|+jYU)&tyuKOjg^Gs zXzX1(C-CxO*a|xkV3lJ=acyPw5ws?}Z49F>29r!0ib(wgD&Xriye)!ZSD^OX+mE{2 zo{I|()_x7p2~;h*J7m85T)|!6ZBwF{Dn18p;BpNj!VGmrGrQKt9Hl|E)?z5wh~iF6 z9(HQ2puHk<WflE$;Wfk7V!R}gHu{qmttI10tKF)HhtQJnBPNi@9?rH~(b`IwV-_X# zDfT4$Lc=Ve6;>0JT1SZegDH5`DNbIQ{HJh}VbraHz)HBcNeh<4bulCg>zy!`rTXNm z&rkc0qUehS!k(YPC=myBUdgM>kpI34DuE<bX}7}si(3U)2QmCdaYg*tkcU5oe3iJ= zUJhGzJkSq0Cyt*(CE++$VsJcNxsBrx04rLvS=r{oVHa+rHrf)-#u5cXz3WjhIVXNR z@;C+3GI_`%sc-|hWRZ`fVvyI6=3ImAdF~_(^BVX8K*wZL0NjqgTB)ir`56-hy6&rq zT5)Wj`mQw_=m^%ggD#N^oTPYtshft6+f|r(qqX=+e6SnXNkukGKI{f|Kn;K$m;<}P zB&Pv4)3-5y+zljmMS>BnNl0WOuEe`A&~g~AR<AZ|t>x;CT5~Ood@@m(<R3z9hMyGx z)fXFDt5<8CD6C$Ogik+PYeu18PCNQsLuHldRa=R=^Z^fghiD3Gq0?ST*dt*RJDoS_ z?ZBT*6{rq+N}q~ymkXL=2cP;ML9tarBMekYRJHnPR1=X^X883SD13T?IV%JbBEx?Y z58_e28Q0)Jtc3AWJMf7*xiYENgX<uv9Ou{s^PvJjWT-Sy8Y~Tt7W7RaxF24J(BlkF zX#9ec<6u?-CjbT*(jEeMh!6k}g1YbnfPog=@-F}efxrlL@Gf3~Q(sgCF8_ckaQO#S zfy<9UwxupVA_Mf_nMMY1_boDjhreBq;M5~NKp%W%Ocl89iVx=Dt=rd8j*9@e^!w<O z6WHLj@9Clt!2>`M-aR@@s{>s1?hd=uVZS=;SBL#BP;fC1?;md+P~!mx8Kmc+8V{=R zAhC0j{C*6q8!6+*d`6i<7>Awclv?@ZSlO_qo-CkMtY+m+PlZgITBF8ds}{q@ubRF* zxWj!$Ox>H!kF?5f8tVqgZnRrj`|Rv&xz<`Q*MguC<3X)i&gXZuT#L#<xX@^YLHX)> z84hcCF6`WB)Wh;?o%W3eeAZbFRUZJOcolKUV2_}(dEp0OtsWp8!IrCvPg7gwi!pIw zDUMeYP6(fL51`h9nbRg9GY5eYjQ<pir%~jV){BwRUJy;$N=+}J<YjATHa7yT`7*#G zO!9Q#GOt#cBs$n#1ZpkFC>PEOoBN_vPFobl)ofr8ks?+WD`q4g!^8FQ1VTab^xWmv zUgvopyh~U*TBcxN?|H7}xwkYOIwF;b&i!k|eky#q)sD*x?X^~Ls(d7ppDLCsAhTQQ zfKQa_e{yh?^U1*_chN8k=M}wiT-g$lO_5HPEfJUyCX>|VsV@{VM}bRP;uxgHBfYaW zS&c0xC(v=L7M492tI7#w@kmu)T0#2*9fQx{Axs6Hz&kG_v#}uR+}pG6O2NS?xGgup z{31*lP`!+hVv3Fdjj!NNU!QXLdl`4Br$isrCM&nVJUeQv^t1sv<64R24#V_=E=yA{ zYWT8(k{kUAnYu-ufD=YfrpnQEJc_vLIylP*3zOb1Pr;QGn>Nao`Yf&po>Qv8bduyE zbQ<oRxeXk9K^!Z(+XG|{7*)n{wp?(X!(>ySRtxfO4N!aV7fLhr90t99n13&~APw;p zs~%DbWaIr)2@Qo53@4)!ileX<r5I$gOfR%Mjd$T6v|Gkna@GH77CTumWRlZv=*A(W 
zpiCGfi30y;*zq(w;<BO#v`d{-%Ran|ks>-T&KoI|yo26O?@2Q=niy?Q41XOJ7#NTb zHc5V?4jXfbluwAq9u*S|hL;H)g5nuBlUtc%!d%ABQ+%ANCROW+i1G!?fH182v>k() z4Ll-Q#-lA_2bUlC?5pi4X6EZIZo7?>)bX2Q!+ul<oT*Q0oB$iBHI~Y&Br|y-!OA#S z_|8hL*?2b$s_E+H(@EPZw4wfC9#}L#K&^WUWN0QyketvDh*{rGQDH{UUv6#V!oY<9 zErvKWtwJJa53)u4Md=qZq06vfQII$mIL0NCdve=tQ55<a6FL`*N}L07<_Y;{NO!IW zGAMx$+j4k{+MU?+<9?R&G4z$!0B0<XhPFskn8q{c$|RYC*B41V38Mc{ENsVWHQ<dF zMhbOmP<ly=7>Sw+8J|;ch>NdJzee7|+Ujc9*(&9r;%*?l%jNnio^~q?MMUn4+BH;w zw{&`B_mFWCroy2x5J(h72#d$ji_#n1N+UXN>K2J3X#{|jku87~@dLFF_s}WUwz=tr zqMPFSRs=jyXDdKo5LZNT>Jv{ZZE)(uiD$GDozhY!xIGe`dg`eo(UAx_g9J$BN5UE) zB1E_J6vftN`)1hDv#=FJHycQ`QN5zoW+ToQJ+<M@*jB_PfP<6@<0awi3|{IPgMD)L zcllX^-U-0&qu!Kvz{`B<jQ_+x(-tC6YMaPKdP%D-fZ`ze2%h6F9tZSK$+$|))x}oZ z%-Rxf+dq#-<_$c9niwNRq4P&0wY+uqS{I9`>{)xa^Pa{-Vxq#IzL;gPHR7de$Ku=% z`C;m)D@+wKhOt2k%bYp+B(`N7NsWx^<iCj`pNW5&ZEvyoEFa+NU9ued+qlf03Uw+M zeH#8N^8OnYIwSC>hFxb;&R&t`08%^eX?cM|H^e$xAoJ<lz}7)xGB6nCMHgm(kJro0 zPy?0mlvW9IQnvcm5O2N9*FP^`AF;g*4}Lyc-n#ytK86uK)a!Ulk8f{FUspcS14T$s zUqh=vU1mF+zA>y1meMy$`XIjE`7ic>V&Wy!#5t)UwYlOe=|x5;rq;GvL~MSeRlC{@ z%W<1wXcTS=xywu&mG5z)%I3bzHYj!>g%UJUIk>OkJ3fOSIbT)TcH%@SueG*{l|EDe zqx@OYBR6jUB1fgzgvW*+^givYkvtRWB@!>4^#l_97t&8!Hw$E<u6|lBqC7O@iy5xi z!@jgnpHhx3F_rItqUZ?}t}yDAiW6}1Cb2q=!?iAYQ^g@fRXdBjQH~+Cw9~YevdWC< z25z7q(YW+<fT{zQXLxiHKzs*Cj<hT@n5Y#|vptgmcQVmj=&w}FgQC(E_grToHBl@J zgYGny4SFp2Vqe0)bfzn%o5BjyEdH~g%Rh-iA~tMG(i}>DTAE+G2*d;b01K%E#hCW| z9O~*B??BB)Nk1bj$i;6WA4WIiMrv!#xX-OLx`mQd`n@VPf-H^w13ZfMp<o<TDipmD z{TVBcVGAJ>6YVGy6E$yxm`p}!1caO>%b$`_;vNAa`K_=9Mw;@wh%qW+UN<*t&F0lw zec1wi@snIMC!J`i6{DW?+Q6><bUEXYmJqQg{my^j!^zS~4pDi>%+VrOGN}C(+`vbm zy8^L!?lG(hhM{mk-dU!;oqM~kcp54zWF@{12w%^*>t!J&9!a&$E#VMAR6r~OarnK4 zxZvuLydn577y^F@fM^|d)2A_xh2o6rDhd?kzZ~`1AQ;sB^XBVtg*qhH#<=_%?)_6J z%u&JK!m4d%P?OhMw!_Og3)>8O{EzV^Ms2AlQ(I9yDUcyB^{K!9+Zg+YxFYVkP*Hb7 z1d~wE(ZWPw#zIJ{XQrZrRQi#rFo+l0@p$LBumDaA!^y*Wk^^zXHH8(B*>u8ECN~as zXEyvJ7}vN(!0?}7@lk$@U%u2?3oXhXV|#?c*8PCDPqWyX;J|18a1f$P@>P@GG`zXW zj4VckQCiyZtyY6}9p@q*OG%JmUcPkcwd%$5mtKDLTy90`|K@pr?!v1TpOdt0hi6zj z!Qwd<&$D=u1>;Ho9E%GqUSV;T#XJkTCjJ78qb!;%+AJtbO&~W|)q2P-2xBW4FJdZW zXuI}3wf8wOSSpsdc~Kf2K{-&`!8%(e9vdx8PT(3DEtYnSVtYdzk(0p8bJx~1F^ZFn zzK<eA@*d`jvfx7OTEN_}r;+Y#BwGsD(7^d7oNTfU4IFMNy1RG<TO78bfh`W((7+al zjyv&G6Sg(b2U{Grt$`y=cHjy78+Z{L8rYY>7Kd$UV2i^xG_b{C8yecj2(VLuZrJ6( zjs!--E{E+xU_--p9`>cX4UMDFoXsicgP0_7MA>P8)#?N_owu5H>e4dgkr%=I1zd_Y zw$gWzNgMVUqOSd14C4ITC~}eaB5lIXZfgAiDvW%Ut0O_YsCqy0z>-fg@*Hx!6SuYs z=Tqs6K&@m<nCIi|CAp-|<>_QJ8L>1{&;*cAnaf8;rfQsL{}eI8$|>64MN!H+D!SOm z$lY`RM4@6SgVXmC$+sj;{|(ZoTT-gXray9-h{HZnb)zypLZVITyG`oU1zsZiIx6c( zNn7Fo>WW2C$?MClOKt$m-SUX*G#Q_;3;aGUnp|RWi_&h*)0D@^#T5I%MGJd}6qs1t zf?NW#R!sJD;uSkqW1*rV1KEZp;(1EI{C)=BYD;`U3l%7s)FG2LR_jY)eL0`Kp*Dzg zG5t!My{a#zajAK4c^0?JHBH=c^C<=73^)6djQ>j*;V`8@tHBOQVG1koIDGltsL2kw z?odRM$Q4w`Txk(jQzmV2D^G>D#(GI25teaZ!pPi2i7Q=~={Z8E3QrT}`|sg1=6v5p z&3~W89Ts0;A;!^TFvivDb(~gA_@-5o<oX}5$hah?DQ0SgH<P~T(0hpf%Xs<YWSk{m zz_P*w+?UB8Bhz5F;1yg^`7_`fA|a%@QjO)*jTfdp_hTJbd1LH@cqD(OFP#!x!XO$8 z>#0(o&KO1A5~D`%vpY&-Fzqspf;mDpQV<m&N%nN=xu?P1kTCs5BGe+DS!!Q_1*f?d zAqR?odlt5GSosGPO1#7=ktFxv<%({;FeugQ6%v6L&GG7jSSo9OBvK6pJV_MwN={$C z|3!MqptHCTjl<^^r~)YaIpr$BNS{g=vaf0QBN*Y;oOB@&DbCgeu1)K^wTPwM1qfYk zjU`*s%nBQlcV1U6Kr6#CvP!Qr&VmOApPq`9?^{FZvQ1up3FAlfs_n=|0_@m`(-wAw zggSN(aH!gBO#bt`=xs<NPE6sp%SobW^|U|6(3<DS7$zCGmAxC+2=Hdw^pE`KCDE>; z@(@f~-z9ScTq8CT7oQ~;!6UAoVw@q4K|7*A_`T>0Am?5cT9a*m@sD4x_?J-1E{>KX zrt0QW|FLXmR5o$49Cy~sI6GNxFX))aLc59dI*z{Sq=4|8T-HK5cj2YVnM-f@=hGmE z&Zo9<6sRGm6WNIZ(qbqe_Fv`zl*>j3q{`FgeI{WhF|M3|jf1~Uanbe3#Z@dYXSUDV zUyx>#OzMn;_9`l{#0)gJwiaCU5hht6l7!oakR~BDT1FvyMcokpno!V<D}%{o5hF(= 
z^Fj;9M;bvn^-jv**la{Mf8&!`i*Tji(rN)OA*0!DFR!g;4o+7FoUB&zFdbe=w*Gw- zF8|B9>W?E59QP(v`pnKK=Aoi+wqhlr{~lJFI%jT*IVHwKp#hi$uh5*3=nX;yIKD~K zG3X$eN<6Y0m!zAr4iKT0L3&HPvglgQi?~OKPPnIzrmFXPEOIj~R~4_XhKgbBcA*hA zgRR<NxC2tw$kNPvfl>|;OUOeuW`O&TxG`Shr7Ox;kx%CJ$$3t_Zvg|h5fcAeP9LFT zC<P<1)qL`8K<wS=#zLms#BDg`+R-!S-$EvAz}+5pw}xHx7miQ0mtarvAre$_w?~+w zA~=m?umq~n8*f7DqKpDYoX)7s%;G#|rp(jDMic>X%9#PrM$h6|D%iZaeEyx)unzdo zyOyuku-RaTbtQSOfl8@#yZ+hX9oa_`59l0^{~PR|*_m984dFt}yz_=Fv?@QFZW%Bq zGee<uK_1O6cY5>hQ?B-5fg-8R<|qGH2*FrI=TwEO{FYLHBqHu_9s2@4h+L|R7?V1f z@fcPz9?Rl-wzEJjJb-!zkC`4a`xHwRsZ#j83f!iVEL+T8J-5#Q(UvdnalNT^U$#$0 z(6YuI;tVOJ5(K$Q#AGLA+`z9*l{225Dlax}gst+C00$le_yIS$h%MoSW39Z1-|XS} zwxF@F5O#1R96L};wHAs_7%jD%0p2o*B+B}XS`_nTgIBYj8&@uF?ZRWBxNa&d+pMid zVbCA@zl!m`ME29s;(iWxbHIL?J*|S*SzOvuBd^TPyKM+o25;&)?$N=Tx#OZ$=f3>p znp@#7jg)Ima4J|Y!x^_Tfc%n$a$UG+nwh&%>ZBsmXaD!`Vt-8dZH(|0GJ)9Zq;r*- zX;iyTYJY>bxo4+5tGo>`CCQ+Ep0!_K@rx`NO8X28{V%fkGK*hhL00>BS;*GkXAM}I zM6;BLPm1z6sn$y-?dLGe7$^25DtXp)d;nQf=1s>-Bc<)kn<76tg52rI=x}LgX*kc6 zo^3Yp8xVE8tse|!nbJ?A5+eNwPXxd22{#(0aw(V=e-m`muaqPSQvEV0`{?&USs7uv zk{Pl(Rhw7l08v?p)@i1a6?`yHqe(9^Md<Rep{34t>`*aA_QF15&DY7k0S?1NBpHxi zXHkzxxtT>8mh!Qd<*hnqLv!d(A$@}saq8~j>7fI(RrGL)AU$$JKI|ffH1Q$_#2hL* zU>QL{m(ciBkXLEP7gkoA;R-Y?4D=hWTNwe%{=SuwXAbtmk>~#*Xxvg-2m^oolR=9m zFDyrfHL1PUsfXXqNvq6k2d!PeC#1`a%=U`@fL*@DA_uuY!tEA7);BTC*GLW%kb<lN z0E%!0m{KZ9f%rM>O3ZOnqK<C*!8?Xcj4~5!a@r%&x<sr|xX%ip$~J5ORkmS+4IbOD z*+x9#Iv|guH+^{oujj1j1$n-L5&kL{O12zxl0VwCOm_)b=KKi?e+#!=p@S^9KLURf zBmPYnfrLt44@)5Xb`khEZ@G~B^m6(m?L&<4cU`1CJS=Urd-AS7T4)~M@1muMdtMLA zCidOHrv3;b5dHm@5cD`n`&AVE5%eXD@DDjbcIrwoFqc-JhTv1#e2TRTZunmQWz~8y zv~OdW@A30oWrqkg$)9)+8f>>X%~)O3FX7eEt{=Q*fO8a;hX)+M3$1grEAhb9($tcl zlPcu1?q$MnV3>c(`P~Pyd_aN(FOQ?vA637D5&ronjw;5{|I$SjL5r$M`JKh3ys&wH z_hc2ccZK~J{8Gq0ULnWZxZ9GitY61d|M%GimleGT_-lCiUr98n<gdZn7ju)M2KZ#S zo!wyn{EMpf^4UMZFhAgDhf%@#DVb6HC~od&MAerso;h2cd-=@q&E|9%@BF{!1S=|{ z-)1Y~tPy@BJ1jcK&2Js!w``gqa;psK-!tW~h?$U;DF=fuc)!pL+g?Y0A-P3@OaCK8 za*J?Tw@5<Z@E2ofyoF?>l4(=6yVtqOTq7UoY#7FR4i9kRj>&oGoBHywF6u+f#wTe> z9dbVsJ)z%&FioIi)pd&J0&giN<;@j}*^2W4+cNc3<{`@hk@ub2O=BqPt{0V|4*n-O zNQ#_xnI~~>bO>kGlMF-&?HNDc$IWJAW5N#^!9lk^L)HaIsVc*WYy>wgv?<EO9~cDo zGS4`pJq_!Mqoa2GIO~c-$m+*UBV<{aUc`ZAv`R$yLOD?_9kH7sg)$}`5I_!SiT_Co zX<k~X)h%5U-c&=hR6A~5T&n4S<JCs17S$UK<#?}tyrvx9$rH-$1dT=f^0f`m(QNxV zlZO4y_;>l>&rsMQJ$yV2{O{quKe-U5{wJ5qfnw1c#V>KOoG#iv*Mra`|FCMkXvLr6 z>;J~D^BY)^i>0v#!$|gDCDqYcKX}tt<-@3K6^!^C)#k?97A?a=@J!P{o-*e0Lc=<X zE8=my1P7ENhvx0&F=wROaOM_gL=6h`j1K?>XiYbk&){tfU}fzz@B9eKMqT<2pW?Ej z7n#wLOPtLxhbfp1XRZb3ra9&N`Q6LqJxe4<+OUO`V^DuYQmuF9EdQ}9@?Yb71N>g5 z$lGT2qs>l}j<Lq{ufLbYMHYX~;xAb!(YVgqucOc=fxm|TN@^uko<yl(%Ogs@WUNl3 z3kcd*uVGWdKgh;1i(@Rf9Qog5L7&&>dhOHJeA1b7R<#~<K!|qQ{(r7d(nvJMC@>@Y l!~bZ)8!Ao>BHK8G|7>D#YGmq>slT22^3(zR*){d}{{gg(an}F< literal 0 HcmV?d00001 diff --git a/gam/gdata/analytics/oauth/rsa.py b/gam/gdata/analytics/oauth/rsa.py new file mode 100755 index 00000000000..f8d9b8503f7 --- /dev/null +++ b/gam/gdata/analytics/oauth/rsa.py @@ -0,0 +1,120 @@ +#!/usr/bin/python + +""" +requires tlslite - http://trevp.net/tlslite/ + +""" + +import binascii + +from gdata.tlslite.utils import keyfactory +from gdata.tlslite.utils import cryptomath + +# XXX andy: ugly local import due to module name, oauth.oauth +import gdata.oauth as oauth + +class OAuthSignatureMethod_RSA_SHA1(oauth.OAuthSignatureMethod): + def get_name(self): + return "RSA-SHA1" + + def _fetch_public_cert(self, oauth_request): + # not implemented yet, ideas are: + # (1) do a lookup in a table of trusted certs keyed off of consumer + # (2) fetch via http using a url provided by 
the requester + # (3) some sort of specific discovery code based on request + # + # either way should return a string representation of the certificate + raise NotImplementedError + + def _fetch_private_cert(self, oauth_request): + # not implemented yet, ideas are: + # (1) do a lookup in a table of trusted certs keyed off of consumer + # + # either way should return a string representation of the certificate + raise NotImplementedError + + def build_signature_base_string(self, oauth_request, consumer, token): + sig = ( + oauth.escape(oauth_request.get_normalized_http_method()), + oauth.escape(oauth_request.get_normalized_http_url()), + oauth.escape(oauth_request.get_normalized_parameters()), + ) + key = '' + raw = '&'.join(sig) + return key, raw + + def build_signature(self, oauth_request, consumer, token): + key, base_string = self.build_signature_base_string(oauth_request, + consumer, + token) + + # Fetch the private key cert based on the request + cert = self._fetch_private_cert(oauth_request) + + # Pull the private key from the certificate + privatekey = keyfactory.parsePrivateKey(cert) + + # Convert base_string to bytes + #base_string_bytes = cryptomath.createByteArraySequence(base_string) + + # Sign using the key + signed = privatekey.hashAndSign(base_string) + + return binascii.b2a_base64(signed)[:-1] + + def check_signature(self, oauth_request, consumer, token, signature): + decoded_sig = base64.b64decode(signature); + + key, base_string = self.build_signature_base_string(oauth_request, + consumer, + token) + + # Fetch the public key cert based on the request + cert = self._fetch_public_cert(oauth_request) + + # Pull the public key from the certificate + publickey = keyfactory.parsePEMKey(cert, public=True) + + # Check the signature + ok = publickey.hashAndVerify(decoded_sig, base_string) + + return ok + + +class TestOAuthSignatureMethod_RSA_SHA1(OAuthSignatureMethod_RSA_SHA1): + def _fetch_public_cert(self, oauth_request): + cert = """ +-----BEGIN CERTIFICATE----- +MIIBpjCCAQ+gAwIBAgIBATANBgkqhkiG9w0BAQUFADAZMRcwFQYDVQQDDA5UZXN0 +IFByaW5jaXBhbDAeFw03MDAxMDEwODAwMDBaFw0zODEyMzEwODAwMDBaMBkxFzAV +BgNVBAMMDlRlc3QgUHJpbmNpcGFsMIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKB +gQC0YjCwIfYoprq/FQO6lb3asXrxLlJFuCvtinTF5p0GxvQGu5O3gYytUvtC2JlY +zypSRjVxwxrsuRcP3e641SdASwfrmzyvIgP08N4S0IFzEURkV1wp/IpH7kH41Etb +mUmrXSwfNZsnQRE5SYSOhh+LcK2wyQkdgcMv11l4KoBkcwIDAQABMA0GCSqGSIb3 +DQEBBQUAA4GBAGZLPEuJ5SiJ2ryq+CmEGOXfvlTtEL2nuGtr9PewxkgnOjZpUy+d +4TvuXJbNQc8f4AMWL/tO9w0Fk80rWKp9ea8/df4qMq5qlFWlx6yOLQxumNOmECKb +WpkUQDIDJEoFUzKMVuJf4KO/FJ345+BNLGgbJ6WujreoM1X/gYfdnJ/J +-----END CERTIFICATE----- +""" + return cert + + def _fetch_private_cert(self, oauth_request): + cert = """ +-----BEGIN PRIVATE KEY----- +MIICdgIBADANBgkqhkiG9w0BAQEFAASCAmAwggJcAgEAAoGBALRiMLAh9iimur8V +A7qVvdqxevEuUkW4K+2KdMXmnQbG9Aa7k7eBjK1S+0LYmVjPKlJGNXHDGuy5Fw/d +7rjVJ0BLB+ubPK8iA/Tw3hLQgXMRRGRXXCn8ikfuQfjUS1uZSatdLB81mydBETlJ +hI6GH4twrbDJCR2Bwy/XWXgqgGRzAgMBAAECgYBYWVtleUzavkbrPjy0T5FMou8H +X9u2AC2ry8vD/l7cqedtwMPp9k7TubgNFo+NGvKsl2ynyprOZR1xjQ7WgrgVB+mm +uScOM/5HVceFuGRDhYTCObE+y1kxRloNYXnx3ei1zbeYLPCHdhxRYW7T0qcynNmw +rn05/KO2RLjgQNalsQJBANeA3Q4Nugqy4QBUCEC09SqylT2K9FrrItqL2QKc9v0Z +zO2uwllCbg0dwpVuYPYXYvikNHHg+aCWF+VXsb9rpPsCQQDWR9TT4ORdzoj+Nccn +qkMsDmzt0EfNaAOwHOmVJ2RVBspPcxt5iN4HI7HNeG6U5YsFBb+/GZbgfBT3kpNG +WPTpAkBI+gFhjfJvRw38n3g/+UeAkwMI2TJQS4n8+hid0uus3/zOjDySH3XHCUno +cn1xOJAyZODBo47E+67R4jV1/gzbAkEAklJaspRPXP877NssM5nAZMU0/O/NGCZ+ +3jPgDUno6WbJn5cqm8MqWhW1xGkImgRk+fkDBquiq4gPiT898jusgQJAd5Zrr6Q8 
+AO/0isr/3aa6O6NLQxISLKcPDk2NOccAfS/xOtfOz4sJYM3+Bs4Io9+dZGSDCA54 +Lw03eHTNQghS0A== +-----END PRIVATE KEY----- +""" + return cert diff --git a/gam/gdata/analytics/oauth/rsa.pyc b/gam/gdata/analytics/oauth/rsa.pyc new file mode 100755 index 0000000000000000000000000000000000000000..d64c287f044514fb65ff09ca83b620b5a98d0a07 GIT binary patch literal 4470 zcmb_gd6U~n6_@SFOh)XIkcEZiC_)uQ4Vc9D*xq6(T8HIJk|p^jRW4bQWm%`Cmam%q zhvh5qN%;WK-JZ;Z%~Djdc+yYZPp$6v-tQgV$^G+Fy7sv)8PxqRhn{~yErE!b6g5XQ zLz!91%TROFLH}};m!<q1HP2BQJiZ{~3u=CG9;3NWsLdrczog73lyymMuBa87cS-%7 zqQ0T1+59PGUQyOl!hQO{y~f*UH`jRg0wQ#5ZBr+<l9$j+JO^4YU%gyI7=2kNz}VVF zuLBDf?q3yX+W8myNjDkvC8QB?t>a2JU>F}Cc}9GUVCd^`4S$8E$RLGl&9MU=rm-bj za2=WpRRasH4N4Ct!7DPTC4WZ5LMB1FP+01D7TW`Na#(cSxxnVZohR5XxI>J9J6Dvd z-CUw247}jm#^Qx95J4+xH(At?lBb9wDPCbIZcv{(D}ZMumbW^yFhBV{BJ7uM>@c)F z>y;iw$tzzEblZyG*rpEkH=&L+d=n@7>*xsay0!%uf$m#B!k^>qsEmOhxM7)yJlK2) z|GyDMHM0A?VLwA}q!4!e$g_MafR@R}aTpW9)g1opQ$)zF3k;u?z>gO91zKsbvY@eE zMCsCVjD=yv@W+UI7X0c11XrJ+&mN^zF%BOs7&gLN!D45pLyJg;J?bpk69Z#)g&KYm zll~QTkV!Wg>MlcVvN#UD$-KkKy~nw@=d5zn*T}6h)Yr(Tvef1Yb%$P|;U#s4`j^xS zM-8^UgkzS17{=cbPlH%%0tUniTZy4Z7W_Ht5;q8AU-z7oWiD`7Eqvnt@TdRfbsBr{ zi}&9}dW_r&T5$rgvYXHeZm{OW8;G5W-x*<$q&~tAiv&X#`3@F2acuHL!AhR7zW-=g zeu36orjBPW(5mN<UM%&*S|l)b0{gdkyH60IvM(|(GC#{+W}amTP&nf}05wB{JERAq z2&*i0hmgrpZ?YH!gaCd;Fgzmw5g<7X0v=vbcURQr0>grbPbs7<%7b@L@el#{@;<;x zraBwxbF`a^<5ES9g3k~^22HHN*{^!YEI&ZJwVtd&VB%CothsaoJuw`IB)Vl;Cwksi zN$z|!yo!Td#bKo4s39x8V-SKD4k@xqblwpIfqey=&@w+73#`buuy~DE{UIV$<~ozh zJj<cK%V${wI|SD80Ra;Y?nCA5=x3`VD82>AB{4Yy1R;=lYw`;cA*jNj=)$mMzP_gJ zu5k^*(Q-w7gOH@DJNj+5CEA|z%MAvCjIot&s}MWJ^0sPPMrc|p!HT#xVxO&f#)=m) zPFVh7oLVFU-UBwaVrO-{!Rmb%EM$WRXC?TY_aZJ#KvS_-tG~n>VrkBTVvumc4&Cns zkg>K5_c2%iS}}Q!2$gx3eGAb~axb!HyI|uiNbOiGh{r4zerTqiwOFWFca@k1&pKP` z*SPlwL>^@%y$>r|D%KMVMMC#7bDg`szP`GquRnvoLEIrKFdtaOar(1RJ^rpLjw&?X zj}#W1p8lgE;uMUW5NBIFNlFgrLBGO3h8H^0OO{uMomPhhL!NMHvD0Cq4a)*0Z-ae@ z0XAwwAThSPUAvCZ+!q<3j9P#Lb5S++EoH`y6@}wKZ8V=sMY_{sj{2mw(WlIM$pNdi zFIGej97K-a%N*E?9HS%NN#^*YczWcCjC*LEz?f!iY0Lmo<UG|gDvCX7ccY~*MTXEy z#E!5MK~Z3}t)O+56;9!?Gyqfu32-dqC_RR@6}C9ru=~zx7Dn;5&{E`EZ&}fkX?*B= z-B!x(pc4#RwWuf@c8ZYJ<cd8z!qE=0<*qlQPsd18H{-+p5GSc>3@RwFN}36@{VMiP z$6d!B6dO`iD|T8Zex$nN(mpD5qITVFS4%ux(*DSgr)ah`PXa~dYuZec*Xw-W=#}?J z#Wih1+?7gRwHGq3vF~s|0nEE<(O56$tQP}>d4IsC-J0ff%kgoWXMJ9fr>mVegnYjo zqymha18aY9?LgklqtP*M($(QEopzU!Vl-A&AWr%PC?n`vZlf4adQsET8wGP!-HO}V z)@x0?!|hS-D~HsVWS?hyOL`KyBZcd5T|R7$PCap)c30J&TxfMG)momB`hvae-cHg@ zY=vTJTCis;Gw2q&XXoN2?tRxf2Nm|wcM=Ou6OYmMfq{m@cZ3=_jZ&Xpp3jB<I|}}R zro0;k1GO_o5%99d&mN+HHL>S&-}ZdI1%So^AM9<rYXF-EAQTwTR~@kr)=kIp)3`CF zL47;kncIW4<I|BlsrK^ao+(cKKv@b+px52H#cX;dEnn=<{PAYc^SXjGZF53;thM$9 zldi{`akt3ynS8n&^coH*4EL3FU$Lj6stW3K$_5R`U8TxuGtx@wT+^Z1XBs8{XfpiJ z>(cAat<bK*eZ1tltXgLF$HH_nwYRpQo`5Yf0Pw6mV`h^v^sLcI-?__pusIfowU!vB zjW#`Pre(k)k8A9>f>$@TmI?RbAZohxVY;-XR+yKBT`%#<$KV*n@?0$)HcEYB$M%@X z`#zm&hAb9p?Xh9CQbFa`vmq-l`TViu9#k)sX4BwMv7FLrY0dfrwr#Et>TFUU7PrPR zko-Lz2gO>UCzsX!##SWVOO!4HBnwoOs+8K>V^v{BEYB93+V<!T%e`hRjyrJMFDpHx zxhu};lUz>sp2sfjqPdU8>1;5Y&UTI~wcB=HXD6-vc$zGmaWqI+l*1-!b2zNZs(A`G zdC4#Wdh3b_=bvDaUr9QU_ifo9cgyOSNuq&qfHg;|wmbE<WC^#U+AL`?%X~qYFYOgG zthkXR(38P10xr|Z+pYCx)!nK4N+YP)h5X0@?q2Mahh0Uh295mMF^g%MR0=0~!yUDD zW!h#(K}Z`x=^%H(aV~RASgrH<+qzoaj7tUkv;-~>T(7Gqkvf<T8uhxABw{T<iFs5k z$OTDY=Xtua8Q2_J>2|X01~p^rH^l8^Jt-Xox8vKYn_sycgD$Pxsy%Rqjb>w$CbrTA zW^EqFw@QNsa-rxXaiOB?xALuovP4Jg_lyDOmL=IRz)C9|WVn(~)ucNUD|sfVcEV=f zoC_Mqf?AdCqY7ZPhmvBiwIcZHtH%`aUflivQ^Zr09#HALPZ2LJGdHC0=MhW&BPI_= zfX6IEdLNjIUZJ&yZ}&kp|M?BMk>b@C3o|s(i38t6Aiju^d+zJ|{|0!SLdQ$USz5)l 
zn;famGsrba#V0Xxh5W8V137D`c=PWiJ~^TP>3lZ&GdWA+TgPSglkD@%^W3xS7u>%A DfqzM$ literal 0 HcmV?d00001 diff --git a/gam/gdata/analytics/opensearch/__init__.py b/gam/gdata/analytics/opensearch/__init__.py new file mode 100755 index 00000000000..22071f7a11e --- /dev/null +++ b/gam/gdata/analytics/opensearch/__init__.py @@ -0,0 +1,15 @@ +#!/usr/bin/python +# +# Copyright (C) 2009 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/gam/gdata/analytics/opensearch/data.py b/gam/gdata/analytics/opensearch/data.py new file mode 100755 index 00000000000..89d7a280f30 --- /dev/null +++ b/gam/gdata/analytics/opensearch/data.py @@ -0,0 +1,48 @@ +#!/usr/bin/python +# +# Copyright (C) 2009 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +"""Contains the data classes of the OpenSearch Extension""" + + +__author__ = 'j.s@google.com (Jeff Scudder)' + + +import atom.core + + +OPENSEARCH_TEMPLATE_V1 = '{http://a9.com/-/spec/opensearchrss/1.0//}%s' +OPENSEARCH_TEMPLATE_V2 = '{http://a9.com/-/spec/opensearch/1.1//}%s' + + +class ItemsPerPage(atom.core.XmlElement): + """Describes the number of items that will be returned per page for paged feeds""" + _qname = (OPENSEARCH_TEMPLATE_V1 % 'itemsPerPage', + OPENSEARCH_TEMPLATE_V2 % 'itemsPerPage') + + +class StartIndex(atom.core.XmlElement): + """Describes the starting index of the contained entries for paged feeds""" + _qname = (OPENSEARCH_TEMPLATE_V1 % 'startIndex', + OPENSEARCH_TEMPLATE_V2 % 'startIndex') + + +class TotalResults(atom.core.XmlElement): + """Describes the total number of results associated with this feed""" + _qname = (OPENSEARCH_TEMPLATE_V1 % 'totalResults', + OPENSEARCH_TEMPLATE_V2 % 'totalResults') + + diff --git a/gam/gdata/analytics/photos/__init__.py b/gam/gdata/analytics/photos/__init__.py new file mode 100755 index 00000000000..1952135c463 --- /dev/null +++ b/gam/gdata/analytics/photos/__init__.py @@ -0,0 +1,1112 @@ +# -*-*- encoding: utf-8 -*-*- +# +# This is the base file for the PicasaWeb python client. +# It is used for lower level operations. +# +# $Id: __init__.py 148 2007-10-28 15:09:19Z havard.gulldahl $ +# +# Copyright 2007 HÃ¥vard Gulldahl +# Portions (C) 2006 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""This module provides a pythonic, gdata-centric interface to Google Photos +(a.k.a. Picasa Web Services. + +It is modelled after the gdata/* interfaces from the gdata-python-client +project[1] by Google. + +You'll find the user-friendly api in photos.service. Please see the +documentation or live help() system for available methods. + +[1]: http://gdata-python-client.googlecode.com/ + + """ + +__author__ = u'havard@gulldahl.no'# (HÃ¥vard Gulldahl)' #BUG: pydoc chokes on non-ascii chars in __author__ +__license__ = 'Apache License v2' +__version__ = '$Revision: 164 $'[11:-2] + +import re +try: + from xml.etree import cElementTree as ElementTree +except ImportError: + try: + import cElementTree as ElementTree + except ImportError: + try: + from xml.etree import ElementTree + except ImportError: + from elementtree import ElementTree +import atom +import gdata + +# importing google photo submodules +import gdata.media as Media, gdata.exif as Exif, gdata.geo as Geo + +# XML namespaces which are often used in Google Photo elements +PHOTOS_NAMESPACE = 'http://schemas.google.com/photos/2007' +MEDIA_NAMESPACE = 'http://search.yahoo.com/mrss/' +EXIF_NAMESPACE = 'http://schemas.google.com/photos/exif/2007' +OPENSEARCH_NAMESPACE = 'http://a9.com/-/spec/opensearchrss/1.0/' +GEO_NAMESPACE = 'http://www.w3.org/2003/01/geo/wgs84_pos#' +GML_NAMESPACE = 'http://www.opengis.net/gml' +GEORSS_NAMESPACE = 'http://www.georss.org/georss' +PHEED_NAMESPACE = 'http://www.pheed.com/pheed/' +BATCH_NAMESPACE = 'http://schemas.google.com/gdata/batch' + + +class PhotosBaseElement(atom.AtomBase): + """Base class for elements in the PHOTO_NAMESPACE. 
To add new elements, + you only need to add the element tag name to self._tag + """ + + _tag = '' + _namespace = PHOTOS_NAMESPACE + _children = atom.AtomBase._children.copy() + _attributes = atom.AtomBase._attributes.copy() + + def __init__(self, name=None, extension_elements=None, + extension_attributes=None, text=None): + self.name = name + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + #def __str__(self): + #return str(self.text) + #def __unicode__(self): + #return unicode(self.text) + def __int__(self): + return int(self.text) + def bool(self): + return self.text == 'true' + +class GPhotosBaseFeed(gdata.GDataFeed, gdata.LinkFinder): + "Base class for all Feeds in gdata.photos" + _tag = 'feed' + _namespace = atom.ATOM_NAMESPACE + _attributes = gdata.GDataFeed._attributes.copy() + _children = gdata.GDataFeed._children.copy() + # We deal with Entry elements ourselves + del _children['{%s}entry' % atom.ATOM_NAMESPACE] + + def __init__(self, author=None, category=None, contributor=None, + generator=None, icon=None, atom_id=None, link=None, logo=None, + rights=None, subtitle=None, title=None, updated=None, + entry=None, total_results=None, start_index=None, + items_per_page=None, extension_elements=None, + extension_attributes=None, text=None): + gdata.GDataFeed.__init__(self, author=author, category=category, + contributor=contributor, generator=generator, + icon=icon, atom_id=atom_id, link=link, + logo=logo, rights=rights, subtitle=subtitle, + title=title, updated=updated, entry=entry, + total_results=total_results, + start_index=start_index, + items_per_page=items_per_page, + extension_elements=extension_elements, + extension_attributes=extension_attributes, + text=text) + + def kind(self): + "(string) Returns the kind" + try: + return self.category[0].term.split('#')[1] + except IndexError: + return None + + def _feedUri(self, kind): + "Convenience method to return a uri to a feed of a special kind" + assert(kind in ('album', 'tag', 'photo', 'comment', 'user')) + here_href = self.GetSelfLink().href + if 'kind=%s' % kind in here_href: + return here_href + if not 'kind=' in here_href: + sep = '?' + if '?' in here_href: sep = '&' + return here_href + "%skind=%s" % (sep, kind) + rx = re.match('.*(kind=)(album|tag|photo|comment)', here_href) + return here_href[:rx.end(1)] + kind + here_href[rx.end(2):] + + def _ConvertElementTreeToMember(self, child_tree): + """Re-implementing the method from AtomBase, since we deal with + Entry elements specially""" + category = child_tree.find('{%s}category' % atom.ATOM_NAMESPACE) + if category is None: + return atom.AtomBase._ConvertElementTreeToMember(self, child_tree) + namespace, kind = category.get('term').split('#') + if namespace != PHOTOS_NAMESPACE: + return atom.AtomBase._ConvertElementTreeToMember(self, child_tree) + ## TODO: is it safe to use getattr on gdata.photos? 
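+ # For example, a category term of 'http://schemas.google.com/photos/2007#album'
+ # splits into kind == 'album', so the lookup below resolves to the AlbumEntry class.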
+ entry_class = getattr(gdata.photos, '%sEntry' % kind.title()) + if not hasattr(self, 'entry') or self.entry is None: + self.entry = [] + self.entry.append(atom._CreateClassFromElementTree( + entry_class, child_tree)) + +class GPhotosBaseEntry(gdata.GDataEntry, gdata.LinkFinder): + "Base class for all Entry elements in gdata.photos" + _tag = 'entry' + _kind = '' + _namespace = atom.ATOM_NAMESPACE + _children = gdata.GDataEntry._children.copy() + _attributes = gdata.GDataEntry._attributes.copy() + + def __init__(self, author=None, category=None, content=None, + atom_id=None, link=None, published=None, + title=None, updated=None, + extended_property=None, + extension_elements=None, extension_attributes=None, text=None): + gdata.GDataEntry.__init__(self, author=author, category=category, + content=content, atom_id=atom_id, link=link, + published=published, title=title, + updated=updated, text=text, + extension_elements=extension_elements, + extension_attributes=extension_attributes) + self.category.append( + atom.Category(scheme='http://schemas.google.com/g/2005#kind', + term = 'http://schemas.google.com/photos/2007#%s' % self._kind)) + + def kind(self): + "(string) Returns the kind" + try: + return self.category[0].term.split('#')[1] + except IndexError: + return None + + def _feedUri(self, kind): + "Convenience method to get the uri to this entry's feed of the some kind" + try: + href = self.GetFeedLink().href + except AttributeError: + return None + sep = '?' + if '?' in href: sep = '&' + return '%s%skind=%s' % (href, sep, kind) + + +class PhotosBaseEntry(GPhotosBaseEntry): + pass + +class PhotosBaseFeed(GPhotosBaseFeed): + pass + +class GPhotosBaseData(object): + pass + +class Access(PhotosBaseElement): + """The Google Photo `Access' element. + + The album's access level. Valid values are `public' or `private'. 
+ In documentation, access level is also referred to as `visibility.'""" + + _tag = 'access' +def AccessFromString(xml_string): + return atom.CreateClassFromXMLString(Access, xml_string) + +class Albumid(PhotosBaseElement): + "The Google Photo `Albumid' element" + + _tag = 'albumid' +def AlbumidFromString(xml_string): + return atom.CreateClassFromXMLString(Albumid, xml_string) + +class BytesUsed(PhotosBaseElement): + "The Google Photo `BytesUsed' element" + + _tag = 'bytesUsed' +def BytesUsedFromString(xml_string): + return atom.CreateClassFromXMLString(BytesUsed, xml_string) + +class Client(PhotosBaseElement): + "The Google Photo `Client' element" + + _tag = 'client' +def ClientFromString(xml_string): + return atom.CreateClassFromXMLString(Client, xml_string) + +class Checksum(PhotosBaseElement): + "The Google Photo `Checksum' element" + + _tag = 'checksum' +def ChecksumFromString(xml_string): + return atom.CreateClassFromXMLString(Checksum, xml_string) + +class CommentCount(PhotosBaseElement): + "The Google Photo `CommentCount' element" + + _tag = 'commentCount' +def CommentCountFromString(xml_string): + return atom.CreateClassFromXMLString(CommentCount, xml_string) + +class CommentingEnabled(PhotosBaseElement): + "The Google Photo `CommentingEnabled' element" + + _tag = 'commentingEnabled' +def CommentingEnabledFromString(xml_string): + return atom.CreateClassFromXMLString(CommentingEnabled, xml_string) + +class Height(PhotosBaseElement): + "The Google Photo `Height' element" + + _tag = 'height' +def HeightFromString(xml_string): + return atom.CreateClassFromXMLString(Height, xml_string) + +class Id(PhotosBaseElement): + "The Google Photo `Id' element" + + _tag = 'id' +def IdFromString(xml_string): + return atom.CreateClassFromXMLString(Id, xml_string) + +class Location(PhotosBaseElement): + "The Google Photo `Location' element" + + _tag = 'location' +def LocationFromString(xml_string): + return atom.CreateClassFromXMLString(Location, xml_string) + +class MaxPhotosPerAlbum(PhotosBaseElement): + "The Google Photo `MaxPhotosPerAlbum' element" + + _tag = 'maxPhotosPerAlbum' +def MaxPhotosPerAlbumFromString(xml_string): + return atom.CreateClassFromXMLString(MaxPhotosPerAlbum, xml_string) + +class Name(PhotosBaseElement): + "The Google Photo `Name' element" + + _tag = 'name' +def NameFromString(xml_string): + return atom.CreateClassFromXMLString(Name, xml_string) + +class Nickname(PhotosBaseElement): + "The Google Photo `Nickname' element" + + _tag = 'nickname' +def NicknameFromString(xml_string): + return atom.CreateClassFromXMLString(Nickname, xml_string) + +class Numphotos(PhotosBaseElement): + "The Google Photo `Numphotos' element" + + _tag = 'numphotos' +def NumphotosFromString(xml_string): + return atom.CreateClassFromXMLString(Numphotos, xml_string) + +class Numphotosremaining(PhotosBaseElement): + "The Google Photo `Numphotosremaining' element" + + _tag = 'numphotosremaining' +def NumphotosremainingFromString(xml_string): + return atom.CreateClassFromXMLString(Numphotosremaining, xml_string) + +class Position(PhotosBaseElement): + "The Google Photo `Position' element" + + _tag = 'position' +def PositionFromString(xml_string): + return atom.CreateClassFromXMLString(Position, xml_string) + +class Photoid(PhotosBaseElement): + "The Google Photo `Photoid' element" + + _tag = 'photoid' +def PhotoidFromString(xml_string): + return atom.CreateClassFromXMLString(Photoid, xml_string) + +class Quotacurrent(PhotosBaseElement): + "The Google Photo `Quotacurrent' element" + + _tag = 
'quotacurrent' +def QuotacurrentFromString(xml_string): + return atom.CreateClassFromXMLString(Quotacurrent, xml_string) + +class Quotalimit(PhotosBaseElement): + "The Google Photo `Quotalimit' element" + + _tag = 'quotalimit' +def QuotalimitFromString(xml_string): + return atom.CreateClassFromXMLString(Quotalimit, xml_string) + +class Rotation(PhotosBaseElement): + "The Google Photo `Rotation' element" + + _tag = 'rotation' +def RotationFromString(xml_string): + return atom.CreateClassFromXMLString(Rotation, xml_string) + +class Size(PhotosBaseElement): + "The Google Photo `Size' element" + + _tag = 'size' +def SizeFromString(xml_string): + return atom.CreateClassFromXMLString(Size, xml_string) + +class Snippet(PhotosBaseElement): + """The Google Photo `snippet' element. + + When searching, the snippet element will contain a + string with the word you're looking for, highlighted in html markup + E.g. when your query is `hafjell', this element may contain: + `... here at <b>Hafjell</b>.' + + You'll find this element in searches -- that is, feeds that combine the + `kind=photo' and `q=yoursearch' parameters in the request. + + See also gphoto:truncated and gphoto:snippettype. + + """ + + _tag = 'snippet' +def SnippetFromString(xml_string): + return atom.CreateClassFromXMLString(Snippet, xml_string) + +class Snippettype(PhotosBaseElement): + """The Google Photo `Snippettype' element + + When searching, this element will tell you the type of element that matches. + + You'll find this element in searches -- that is, feeds that combine the + `kind=photo' and `q=yoursearch' parameters in the request. + + See also gphoto:snippet and gphoto:truncated. + + Possible values and their interpretation: + o ALBUM_TITLE - The album title matches + o PHOTO_TAGS - The match is a tag/keyword + o PHOTO_DESCRIPTION - The match is in the photo's description + + If you discover a value not listed here, please submit a patch to update this docstring. + + """ + + _tag = 'snippettype' +def SnippettypeFromString(xml_string): + return atom.CreateClassFromXMLString(Snippettype, xml_string) + +class Thumbnail(PhotosBaseElement): + """The Google Photo `Thumbnail' element + + Used to display user's photo thumbnail (hackergotchi). + + (Not to be confused with the <media:thumbnail> element, which gives you + small versions of the photo object.)""" + + _tag = 'thumbnail' +def ThumbnailFromString(xml_string): + return atom.CreateClassFromXMLString(Thumbnail, xml_string) + +class Timestamp(PhotosBaseElement): + """The Google Photo `Timestamp' element + Represented as the number of milliseconds since January 1st, 1970. + + + Take a look at the convenience methods .isoformat() and .datetime(): + + photo_epoch = Time.text # 1180294337000 + photo_isostring = Time.isoformat() # '2007-05-27T19:32:17.000Z' + + Alternatively: + photo_datetime = Time.datetime() # (requires python >= 2.3) + """ + + _tag = 'timestamp' + def isoformat(self): + """(string) Return the timestamp as a ISO 8601 formatted string, + e.g. 
'2007-05-27T19:32:17.000Z' + """ + import time + epoch = float(self.text)/1000 + return time.strftime('%Y-%m-%dT%H:%M:%S.000Z', time.gmtime(epoch)) + + def datetime(self): + """(datetime.datetime) Return the timestamp as a datetime.datetime object + + Requires python 2.3 + """ + import datetime + epoch = float(self.text)/1000 + return datetime.datetime.fromtimestamp(epoch) +def TimestampFromString(xml_string): + return atom.CreateClassFromXMLString(Timestamp, xml_string) + +class Truncated(PhotosBaseElement): + """The Google Photo `Truncated' element + + You'll find this element in searches -- that is, feeds that combine the + `kind=photo' and `q=yoursearch' parameters in the request. + + See also gphoto:snippet and gphoto:snippettype. + + Possible values and their interpretation: + 0 -- unknown + """ + + _tag = 'Truncated' +def TruncatedFromString(xml_string): + return atom.CreateClassFromXMLString(Truncated, xml_string) + +class User(PhotosBaseElement): + "The Google Photo `User' element" + + _tag = 'user' +def UserFromString(xml_string): + return atom.CreateClassFromXMLString(User, xml_string) + +class Version(PhotosBaseElement): + "The Google Photo `Version' element" + + _tag = 'version' +def VersionFromString(xml_string): + return atom.CreateClassFromXMLString(Version, xml_string) + +class Width(PhotosBaseElement): + "The Google Photo `Width' element" + + _tag = 'width' +def WidthFromString(xml_string): + return atom.CreateClassFromXMLString(Width, xml_string) + +class Weight(PhotosBaseElement): + """The Google Photo `Weight' element. + + The weight of the tag is the number of times the tag + appears in the collection of tags currently being viewed. + The default weight is 1, in which case this tags is omitted.""" + _tag = 'weight' +def WeightFromString(xml_string): + return atom.CreateClassFromXMLString(Weight, xml_string) + +class CommentAuthor(atom.Author): + """The Atom `Author' element in CommentEntry entries is augmented to + contain elements from the PHOTOS_NAMESPACE + + http://groups.google.com/group/Google-Picasa-Data-API/msg/819b0025b5ff5e38 + """ + _children = atom.Author._children.copy() + _children['{%s}user' % PHOTOS_NAMESPACE] = ('user', User) + _children['{%s}nickname' % PHOTOS_NAMESPACE] = ('nickname', Nickname) + _children['{%s}thumbnail' % PHOTOS_NAMESPACE] = ('thumbnail', Thumbnail) +def CommentAuthorFromString(xml_string): + return atom.CreateClassFromXMLString(CommentAuthor, xml_string) + +########################## ################################ + +class AlbumData(object): + _children = {} + _children['{%s}id' % PHOTOS_NAMESPACE] = ('gphoto_id', Id) + _children['{%s}name' % PHOTOS_NAMESPACE] = ('name', Name) + _children['{%s}location' % PHOTOS_NAMESPACE] = ('location', Location) + _children['{%s}access' % PHOTOS_NAMESPACE] = ('access', Access) + _children['{%s}bytesUsed' % PHOTOS_NAMESPACE] = ('bytesUsed', BytesUsed) + _children['{%s}timestamp' % PHOTOS_NAMESPACE] = ('timestamp', Timestamp) + _children['{%s}numphotos' % PHOTOS_NAMESPACE] = ('numphotos', Numphotos) + _children['{%s}numphotosremaining' % PHOTOS_NAMESPACE] = \ + ('numphotosremaining', Numphotosremaining) + _children['{%s}user' % PHOTOS_NAMESPACE] = ('user', User) + _children['{%s}nickname' % PHOTOS_NAMESPACE] = ('nickname', Nickname) + _children['{%s}commentingEnabled' % PHOTOS_NAMESPACE] = \ + ('commentingEnabled', CommentingEnabled) + _children['{%s}commentCount' % PHOTOS_NAMESPACE] = \ + ('commentCount', CommentCount) + ## NOTE: storing media:group as self.media, to create a self-explaining 
api + gphoto_id = None + name = None + location = None + access = None + bytesUsed = None + timestamp = None + numphotos = None + numphotosremaining = None + user = None + nickname = None + commentingEnabled = None + commentCount = None + +class AlbumEntry(GPhotosBaseEntry, AlbumData): + """All metadata for a Google Photos Album + + Take a look at AlbumData for metadata accessible as attributes to this object. + + Notes: + To avoid name clashes, and to create a more sensible api, some + objects have names that differ from the original elements: + + o media:group -> self.media, + o geo:where -> self.geo, + o photo:id -> self.gphoto_id + """ + + _kind = 'album' + _children = GPhotosBaseEntry._children.copy() + _children.update(AlbumData._children.copy()) + # child tags only for Album entries, not feeds + _children['{%s}where' % GEORSS_NAMESPACE] = ('geo', Geo.Where) + _children['{%s}group' % MEDIA_NAMESPACE] = ('media', Media.Group) + media = Media.Group() + geo = Geo.Where() + + def __init__(self, author=None, category=None, content=None, + atom_id=None, link=None, published=None, + title=None, updated=None, + #GPHOTO NAMESPACE: + gphoto_id=None, name=None, location=None, access=None, + timestamp=None, numphotos=None, user=None, nickname=None, + commentingEnabled=None, commentCount=None, thumbnail=None, + # MEDIA NAMESPACE: + media=None, + # GEORSS NAMESPACE: + geo=None, + extended_property=None, + extension_elements=None, extension_attributes=None, text=None): + GPhotosBaseEntry.__init__(self, author=author, category=category, + content=content, atom_id=atom_id, link=link, + published=published, title=title, + updated=updated, text=text, + extension_elements=extension_elements, + extension_attributes=extension_attributes) + + ## NOTE: storing photo:id as self.gphoto_id, to avoid name clash with atom:id + self.gphoto_id = gphoto_id + self.name = name + self.location = location + self.access = access + self.timestamp = timestamp + self.numphotos = numphotos + self.user = user + self.nickname = nickname + self.commentingEnabled = commentingEnabled + self.commentCount = commentCount + self.thumbnail = thumbnail + self.extended_property = extended_property or [] + self.text = text + ## NOTE: storing media:group as self.media, and geo:where as geo, + ## to create a self-explaining api + self.media = media or Media.Group() + self.geo = geo or Geo.Where() + + def GetAlbumId(self): + "Return the id of this album" + + return self.GetFeedLink().href.split('/')[-1] + + def GetPhotosUri(self): + "(string) Return the uri to this albums feed of the PhotoEntry kind" + return self._feedUri('photo') + + def GetCommentsUri(self): + "(string) Return the uri to this albums feed of the CommentEntry kind" + return self._feedUri('comment') + + def GetTagsUri(self): + "(string) Return the uri to this albums feed of the TagEntry kind" + return self._feedUri('tag') + +def AlbumEntryFromString(xml_string): + return atom.CreateClassFromXMLString(AlbumEntry, xml_string) + +class AlbumFeed(GPhotosBaseFeed, AlbumData): + """All metadata for a Google Photos Album, including its sub-elements + + This feed represents an album as the container for other objects. + + A Album feed contains entries of + PhotoEntry, CommentEntry or TagEntry, + depending on the `kind' parameter in the original query. + + Take a look at AlbumData for accessible attributes. 
+ + """ + + _children = GPhotosBaseFeed._children.copy() + _children.update(AlbumData._children.copy()) + + def GetPhotosUri(self): + "(string) Return the uri to the same feed, but of the PhotoEntry kind" + + return self._feedUri('photo') + + def GetTagsUri(self): + "(string) Return the uri to the same feed, but of the TagEntry kind" + + return self._feedUri('tag') + + def GetCommentsUri(self): + "(string) Return the uri to the same feed, but of the CommentEntry kind" + + return self._feedUri('comment') + +def AlbumFeedFromString(xml_string): + return atom.CreateClassFromXMLString(AlbumFeed, xml_string) + + +class PhotoData(object): + _children = {} + ## NOTE: storing photo:id as self.gphoto_id, to avoid name clash with atom:id + _children['{%s}id' % PHOTOS_NAMESPACE] = ('gphoto_id', Id) + _children['{%s}albumid' % PHOTOS_NAMESPACE] = ('albumid', Albumid) + _children['{%s}checksum' % PHOTOS_NAMESPACE] = ('checksum', Checksum) + _children['{%s}client' % PHOTOS_NAMESPACE] = ('client', Client) + _children['{%s}height' % PHOTOS_NAMESPACE] = ('height', Height) + _children['{%s}position' % PHOTOS_NAMESPACE] = ('position', Position) + _children['{%s}rotation' % PHOTOS_NAMESPACE] = ('rotation', Rotation) + _children['{%s}size' % PHOTOS_NAMESPACE] = ('size', Size) + _children['{%s}timestamp' % PHOTOS_NAMESPACE] = ('timestamp', Timestamp) + _children['{%s}version' % PHOTOS_NAMESPACE] = ('version', Version) + _children['{%s}width' % PHOTOS_NAMESPACE] = ('width', Width) + _children['{%s}commentingEnabled' % PHOTOS_NAMESPACE] = \ + ('commentingEnabled', CommentingEnabled) + _children['{%s}commentCount' % PHOTOS_NAMESPACE] = \ + ('commentCount', CommentCount) + ## NOTE: storing media:group as self.media, exif:tags as self.exif, and + ## geo:where as self.geo, to create a self-explaining api + _children['{%s}tags' % EXIF_NAMESPACE] = ('exif', Exif.Tags) + _children['{%s}where' % GEORSS_NAMESPACE] = ('geo', Geo.Where) + _children['{%s}group' % MEDIA_NAMESPACE] = ('media', Media.Group) + # These elements show up in search feeds + _children['{%s}snippet' % PHOTOS_NAMESPACE] = ('snippet', Snippet) + _children['{%s}snippettype' % PHOTOS_NAMESPACE] = ('snippettype', Snippettype) + _children['{%s}truncated' % PHOTOS_NAMESPACE] = ('truncated', Truncated) + gphoto_id = None + albumid = None + checksum = None + client = None + height = None + position = None + rotation = None + size = None + timestamp = None + version = None + width = None + commentingEnabled = None + commentCount = None + snippet=None + snippettype=None + truncated=None + media = Media.Group() + geo = Geo.Where() + tags = Exif.Tags() + +class PhotoEntry(GPhotosBaseEntry, PhotoData): + """All metadata for a Google Photos Photo + + Take a look at PhotoData for metadata accessible as attributes to this object. 
+ + Notes: + To avoid name clashes, and to create a more sensible api, some + objects have names that differ from the original elements: + + o media:group -> self.media, + o exif:tags -> self.exif, + o geo:where -> self.geo, + o photo:id -> self.gphoto_id + """ + + _kind = 'photo' + _children = GPhotosBaseEntry._children.copy() + _children.update(PhotoData._children.copy()) + + def __init__(self, author=None, category=None, content=None, + atom_id=None, link=None, published=None, + title=None, updated=None, text=None, + # GPHOTO NAMESPACE: + gphoto_id=None, albumid=None, checksum=None, client=None, height=None, + position=None, rotation=None, size=None, timestamp=None, version=None, + width=None, commentCount=None, commentingEnabled=None, + # MEDIARSS NAMESPACE: + media=None, + # EXIF_NAMESPACE: + exif=None, + # GEORSS NAMESPACE: + geo=None, + extension_elements=None, extension_attributes=None): + GPhotosBaseEntry.__init__(self, author=author, category=category, + content=content, + atom_id=atom_id, link=link, published=published, + title=title, updated=updated, text=text, + extension_elements=extension_elements, + extension_attributes=extension_attributes) + + + ## NOTE: storing photo:id as self.gphoto_id, to avoid name clash with atom:id + self.gphoto_id = gphoto_id + self.albumid = albumid + self.checksum = checksum + self.client = client + self.height = height + self.position = position + self.rotation = rotation + self.size = size + self.timestamp = timestamp + self.version = version + self.width = width + self.commentingEnabled = commentingEnabled + self.commentCount = commentCount + ## NOTE: storing media:group as self.media, to create a self-explaining api + self.media = media or Media.Group() + self.exif = exif or Exif.Tags() + self.geo = geo or Geo.Where() + + def GetPostLink(self): + "Return the uri to this photo's `POST' link (use it for updates of the object)" + + return self.GetFeedLink() + + def GetCommentsUri(self): + "Return the uri to this photo's feed of CommentEntry comments" + return self._feedUri('comment') + + def GetTagsUri(self): + "Return the uri to this photo's feed of TagEntry tags" + return self._feedUri('tag') + + def GetAlbumUri(self): + """Return the uri to the AlbumEntry containing this photo""" + + href = self.GetSelfLink().href + return href[:href.find('/photoid')] + +def PhotoEntryFromString(xml_string): + return atom.CreateClassFromXMLString(PhotoEntry, xml_string) + +class PhotoFeed(GPhotosBaseFeed, PhotoData): + """All metadata for a Google Photos Photo, including its sub-elements + + This feed represents a photo as the container for other objects. + + A Photo feed contains entries of + CommentEntry or TagEntry, + depending on the `kind' parameter in the original query. + + Take a look at PhotoData for metadata accessible as attributes to this object. 
+ + """ + _children = GPhotosBaseFeed._children.copy() + _children.update(PhotoData._children.copy()) + + def GetTagsUri(self): + "(string) Return the uri to the same feed, but of the TagEntry kind" + + return self._feedUri('tag') + + def GetCommentsUri(self): + "(string) Return the uri to the same feed, but of the CommentEntry kind" + + return self._feedUri('comment') + +def PhotoFeedFromString(xml_string): + return atom.CreateClassFromXMLString(PhotoFeed, xml_string) + +class TagData(GPhotosBaseData): + _children = {} + _children['{%s}weight' % PHOTOS_NAMESPACE] = ('weight', Weight) + weight=None + +class TagEntry(GPhotosBaseEntry, TagData): + """All metadata for a Google Photos Tag + + The actual tag is stored in the .title.text attribute + + """ + + _kind = 'tag' + _children = GPhotosBaseEntry._children.copy() + _children.update(TagData._children.copy()) + + def __init__(self, author=None, category=None, content=None, + atom_id=None, link=None, published=None, + title=None, updated=None, + # GPHOTO NAMESPACE: + weight=None, + extended_property=None, + extension_elements=None, extension_attributes=None, text=None): + GPhotosBaseEntry.__init__(self, author=author, category=category, + content=content, + atom_id=atom_id, link=link, published=published, + title=title, updated=updated, text=text, + extension_elements=extension_elements, + extension_attributes=extension_attributes) + + self.weight = weight + + def GetAlbumUri(self): + """Return the uri to the AlbumEntry containing this tag""" + + href = self.GetSelfLink().href + pos = href.find('/photoid') + if pos == -1: + return None + return href[:pos] + + def GetPhotoUri(self): + """Return the uri to the PhotoEntry containing this tag""" + + href = self.GetSelfLink().href + pos = href.find('/tag') + if pos == -1: + return None + return href[:pos] + +def TagEntryFromString(xml_string): + return atom.CreateClassFromXMLString(TagEntry, xml_string) + + +class TagFeed(GPhotosBaseFeed, TagData): + """All metadata for a Google Photos Tag, including its sub-elements""" + + _children = GPhotosBaseFeed._children.copy() + _children.update(TagData._children.copy()) + +def TagFeedFromString(xml_string): + return atom.CreateClassFromXMLString(TagFeed, xml_string) + +class CommentData(GPhotosBaseData): + _children = {} + ## NOTE: storing photo:id as self.gphoto_id, to avoid name clash with atom:id + _children['{%s}id' % PHOTOS_NAMESPACE] = ('gphoto_id', Id) + _children['{%s}albumid' % PHOTOS_NAMESPACE] = ('albumid', Albumid) + _children['{%s}photoid' % PHOTOS_NAMESPACE] = ('photoid', Photoid) + _children['{%s}author' % atom.ATOM_NAMESPACE] = ('author', [CommentAuthor,]) + gphoto_id=None + albumid=None + photoid=None + author=None + +class CommentEntry(GPhotosBaseEntry, CommentData): + """All metadata for a Google Photos Comment + + The comment is stored in the .content.text attribute, + with a content type in .content.type. 
+
+
+ """
+
+ _kind = 'comment'
+ _children = GPhotosBaseEntry._children.copy()
+ _children.update(CommentData._children.copy())
+ def __init__(self, author=None, category=None, content=None,
+ atom_id=None, link=None, published=None,
+ title=None, updated=None,
+ # GPHOTO NAMESPACE:
+ gphoto_id=None, albumid=None, photoid=None,
+ extended_property=None,
+ extension_elements=None, extension_attributes=None, text=None):
+
+ GPhotosBaseEntry.__init__(self, author=author, category=category,
+ content=content,
+ atom_id=atom_id, link=link, published=published,
+ title=title, updated=updated,
+ extension_elements=extension_elements,
+ extension_attributes=extension_attributes,
+ text=text)
+
+ self.gphoto_id = gphoto_id
+ self.albumid = albumid
+ self.photoid = photoid
+
+ def GetCommentId(self):
+ """Return the globally unique id of this comment"""
+ return self.GetSelfLink().href.split('/')[-1]
+
+ def GetAlbumUri(self):
+ """Return the uri to the AlbumEntry containing this comment"""
+
+ href = self.GetSelfLink().href
+ return href[:href.find('/photoid')]
+
+ def GetPhotoUri(self):
+ """Return the uri to the PhotoEntry containing this comment"""
+
+ href = self.GetSelfLink().href
+ return href[:href.find('/commentid')]
+
+def CommentEntryFromString(xml_string):
+ return atom.CreateClassFromXMLString(CommentEntry, xml_string)
+
+class CommentFeed(GPhotosBaseFeed, CommentData):
+ """All metadata for a Google Photos Comment, including its sub-elements"""
+
+ _children = GPhotosBaseFeed._children.copy()
+ _children.update(CommentData._children.copy())
+
+def CommentFeedFromString(xml_string):
+ return atom.CreateClassFromXMLString(CommentFeed, xml_string)
+
+class UserData(GPhotosBaseData):
+ _children = {}
+ _children['{%s}maxPhotosPerAlbum' % PHOTOS_NAMESPACE] = ('maxPhotosPerAlbum', MaxPhotosPerAlbum)
+ _children['{%s}nickname' % PHOTOS_NAMESPACE] = ('nickname', Nickname)
+ _children['{%s}quotalimit' % PHOTOS_NAMESPACE] = ('quotalimit', Quotalimit)
+ _children['{%s}quotacurrent' % PHOTOS_NAMESPACE] = ('quotacurrent', Quotacurrent)
+ _children['{%s}thumbnail' % PHOTOS_NAMESPACE] = ('thumbnail', Thumbnail)
+ _children['{%s}user' % PHOTOS_NAMESPACE] = ('user', User)
+ _children['{%s}id' % PHOTOS_NAMESPACE] = ('gphoto_id', Id)
+
+ maxPhotosPerAlbum=None
+ nickname=None
+ quotalimit=None
+ quotacurrent=None
+ thumbnail=None
+ user=None
+ gphoto_id=None
+
+
+class UserEntry(GPhotosBaseEntry, UserData):
+ """All metadata for a Google Photos User
+
+ This entry represents an album owner and all appropriate metadata.
+
+ Take a look at the attributes of the UserData for metadata available.
+ """ + _children = GPhotosBaseEntry._children.copy() + _children.update(UserData._children.copy()) + _kind = 'user' + + def __init__(self, author=None, category=None, content=None, + atom_id=None, link=None, published=None, + title=None, updated=None, + # GPHOTO NAMESPACE: + gphoto_id=None, maxPhotosPerAlbum=None, nickname=None, quotalimit=None, + quotacurrent=None, thumbnail=None, user=None, + extended_property=None, + extension_elements=None, extension_attributes=None, text=None): + + GPhotosBaseEntry.__init__(self, author=author, category=category, + content=content, + atom_id=atom_id, link=link, published=published, + title=title, updated=updated, + extension_elements=extension_elements, + extension_attributes=extension_attributes, + text=text) + + + self.gphoto_id=gphoto_id + self.maxPhotosPerAlbum=maxPhotosPerAlbum + self.nickname=nickname + self.quotalimit=quotalimit + self.quotacurrent=quotacurrent + self.thumbnail=thumbnail + self.user=user + + def GetAlbumsUri(self): + "(string) Return the uri to this user's feed of the AlbumEntry kind" + return self._feedUri('album') + + def GetPhotosUri(self): + "(string) Return the uri to this user's feed of the PhotoEntry kind" + return self._feedUri('photo') + + def GetCommentsUri(self): + "(string) Return the uri to this user's feed of the CommentEntry kind" + return self._feedUri('comment') + + def GetTagsUri(self): + "(string) Return the uri to this user's feed of the TagEntry kind" + return self._feedUri('tag') + +def UserEntryFromString(xml_string): + return atom.CreateClassFromXMLString(UserEntry, xml_string) + +class UserFeed(GPhotosBaseFeed, UserData): + """Feed for a User in the google photos api. + + This feed represents a user as the container for other objects. + + A User feed contains entries of + AlbumEntry, PhotoEntry, CommentEntry, UserEntry or TagEntry, + depending on the `kind' parameter in the original query. + + The user feed itself also contains all of the metadata available + as part of a UserData object.""" + _children = GPhotosBaseFeed._children.copy() + _children.update(UserData._children.copy()) + + def GetAlbumsUri(self): + """Get the uri to this feed, but with entries of the AlbumEntry kind.""" + return self._feedUri('album') + + def GetTagsUri(self): + """Get the uri to this feed, but with entries of the TagEntry kind.""" + return self._feedUri('tag') + + def GetPhotosUri(self): + """Get the uri to this feed, but with entries of the PhotosEntry kind.""" + return self._feedUri('photo') + + def GetCommentsUri(self): + """Get the uri to this feed, but with entries of the CommentsEntry kind.""" + return self._feedUri('comment') + +def UserFeedFromString(xml_string): + return atom.CreateClassFromXMLString(UserFeed, xml_string) + + + +def AnyFeedFromString(xml_string): + """Creates an instance of the appropriate feed class from the + xml string contents. + + Args: + xml_string: str A string which contains valid XML. The root element + of the XML string should match the tag and namespace of the desired + class. + + Returns: + An instance of the target class with members assigned according to the + contents of the XML - or a basic gdata.GDataFeed instance if it is + impossible to determine the appropriate class (look for extra elements + in GDataFeed's .FindExtensions() and extension_elements[] ). + """ + tree = ElementTree.fromstring(xml_string) + category = tree.find('{%s}category' % atom.ATOM_NAMESPACE) + if category is None: + # TODO: is this the best way to handle this? 
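+ # No atom:category element was found, so fall back to the generic base feed class.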
+ return atom._CreateClassFromElementTree(GPhotosBaseFeed, tree)
+ namespace, kind = category.get('term').split('#')
+ if namespace != PHOTOS_NAMESPACE:
+ # TODO: is this the best way to handle this?
+ return atom._CreateClassFromElementTree(GPhotosBaseFeed, tree)
+ ## TODO: is getattr safe this way?
+ feed_class = getattr(gdata.photos, '%sFeed' % kind.title())
+ return atom._CreateClassFromElementTree(feed_class, tree)
+
+def AnyEntryFromString(xml_string):
+ """Creates an instance of the appropriate entry class from the
+ xml string contents.
+
+ Args:
+ xml_string: str A string which contains valid XML. The root element
+ of the XML string should match the tag and namespace of the desired
+ class.
+
+ Returns:
+ An instance of the target class with members assigned according to the
+ contents of the XML - or a basic gdata.GDataEntry instance if it is
+ impossible to determine the appropriate class (look for extra elements
+ in GDataEntry's .FindExtensions() and extension_elements[] ).
+ """
+ tree = ElementTree.fromstring(xml_string)
+ category = tree.find('{%s}category' % atom.ATOM_NAMESPACE)
+ if category is None:
+ # TODO: is this the best way to handle this?
+ return atom._CreateClassFromElementTree(GPhotosBaseEntry, tree)
+ namespace, kind = category.get('term').split('#')
+ if namespace != PHOTOS_NAMESPACE:
+ # TODO: is this the best way to handle this?
+ return atom._CreateClassFromElementTree(GPhotosBaseEntry, tree)
+ ## TODO: is getattr safe this way?
+ feed_class = getattr(gdata.photos, '%sEntry' % kind.title())
+ return atom._CreateClassFromElementTree(feed_class, tree)
+
diff --git a/gam/gdata/analytics/photos/service.py b/gam/gdata/analytics/photos/service.py
new file mode 100755
index 00000000000..23c5febc2a4
--- /dev/null
+++ b/gam/gdata/analytics/photos/service.py
@@ -0,0 +1,681 @@
+#!/usr/bin/env python
+# -*-*- encoding: utf-8 -*-*-
+#
+# This is the service file for the Google Photo python client.
+# It is used for higher level operations.
+#
+# $Id: service.py 144 2007-10-25 21:03:34Z havard.gulldahl $
+#
+# Copyright 2007 Håvard Gulldahl
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Google PhotoService provides a human-friendly interface to
+Google Photo (a.k.a Picasa Web) services[1].
+
+It extends gdata.service.GDataService and as such hides all the
+nasty details about authenticating, parsing and communicating with
+Google Photos.
+
+[1]: http://code.google.com/apis/picasaweb/gdata.html
+
+Example:
+ import gdata.photos, gdata.photos.service
+ pws = gdata.photos.service.PhotosService()
+ pws.ClientLogin(username, password)
+ #Get all albums
+ albums = pws.GetUserFeed().entry
+ # Get all photos in second album
+ photos = pws.GetFeed(albums[1].GetPhotosUri()).entry
+ # Get all tags for photos in second album and print them
+ tags = pws.GetFeed(albums[1].GetTagsUri()).entry
+ print [ tag.summary.text for tag in tags ]
+ # Get all comments for the first photo in the list and print them
+ comments = pws.GetCommentFeed(photos[0].GetCommentsUri()).entry
+ print [ c.summary.text for c in comments ]
+
+ # Get a photo to work with
+ photo = photos[0]
+ # Update metadata
+
+ # Attributes from the <gphoto:*> namespace
+ photo.summary.text = u'A nice view from my veranda'
+ photo.title.text = u'Verandaview.jpg'
+
+ # Attributes from the <media:*> namespace
+ photo.media.keywords.text = u'Home, Long-exposure, Sunset' # Comma-separated
+
+ # Adding attributes to media object
+
+ # Rotate 90 degrees clockwise
+ photo.rotation = gdata.photos.Rotation(text='90')
+
+ # Submit modified photo object
+ photo = pws.UpdatePhotoMetadata(photo)
+
+ # Make sure you only modify the newly returned object, else you'll get
+ # versioning errors. See Optimistic-concurrency
+
+ # Add comment to a picture
+ comment = pws.InsertComment(photo, u'I wish the water always was this warm')
+
+ # Remove comment because it was silly
+ print "*blush*"
+ pws.Delete(comment.GetEditLink().href)
+
+"""
+
+__author__ = u'havard@gulldahl.no'# (Håvard Gulldahl)' #BUG: pydoc chokes on non-ascii chars in __author__
+__license__ = 'Apache License v2'
+__version__ = '$Revision: 176 $'[11:-2]
+
+
+import sys, os.path, StringIO
+import time
+import gdata.service
+import gdata
+import atom.service
+import atom
+import gdata.photos
+
+SUPPORTED_UPLOAD_TYPES = ('bmp', 'jpeg', 'jpg', 'gif', 'png')
+
+UNKOWN_ERROR=1000
+GPHOTOS_BAD_REQUEST=400
+GPHOTOS_CONFLICT=409
+GPHOTOS_INTERNAL_SERVER_ERROR=500
+GPHOTOS_INVALID_ARGUMENT=601
+GPHOTOS_INVALID_CONTENT_TYPE=602
+GPHOTOS_NOT_AN_IMAGE=603
+GPHOTOS_INVALID_KIND=604
+
+class GooglePhotosException(Exception):
+ def __init__(self, response):
+
+ self.error_code = response['status']
+ self.reason = response['reason'].strip()
+ if '<html>' in str(response['body']): #general html message, discard it
+ response['body'] = ""
+ self.body = response['body'].strip()
+ self.message = "(%(status)s) %(body)s -- %(reason)s" % response
+
+ #return explicit error codes
+ error_map = { '(12) Not an image':GPHOTOS_NOT_AN_IMAGE,
+ 'kind: That is not one of the acceptable values':
+ GPHOTOS_INVALID_KIND,
+
+ }
+ for msg, code in error_map.iteritems():
+ if self.body == msg:
+ self.error_code = code
+ break
+ self.args = [self.error_code, self.reason, self.body]
+
+class PhotosService(gdata.service.GDataService):
+ ssl = True
+ userUri = '/data/feed/api/user/%s'
+
+ def __init__(self, email=None, password=None, source=None,
+ server='picasaweb.google.com', additional_headers=None,
+ **kwargs):
+ """Creates a client for the Google Photos service.
+
+ Args:
+ email: string (optional) The user's email address, used for
+ authentication.
+ password: string (optional) The user's password.
+ source: string (optional) The name of the user's application.
+ server: string (optional) The name of the server to which a connection
+ will be opened. Default value: 'picasaweb.google.com'.
+ **kwargs: The other parameters to pass to gdata.service.GDataService + constructor. + """ + self.email = email + self.client = source + gdata.service.GDataService.__init__( + self, email=email, password=password, service='lh2', source=source, + server=server, additional_headers=additional_headers, **kwargs) + + def GetFeed(self, uri, limit=None, start_index=None): + """Get a feed. + + The results are ordered by the values of their `updated' elements, + with the most recently updated entry appearing first in the feed. + + Arguments: + uri: the uri to fetch + limit (optional): the maximum number of entries to return. Defaults to what + the server returns. + + Returns: + one of gdata.photos.AlbumFeed, + gdata.photos.UserFeed, + gdata.photos.PhotoFeed, + gdata.photos.CommentFeed, + gdata.photos.TagFeed, + depending on the results of the query. + Raises: + GooglePhotosException + + See: + http://code.google.com/apis/picasaweb/gdata.html#Get_Album_Feed_Manual + """ + if limit is not None: + uri += '&max-results=%s' % limit + if start_index is not None: + uri += '&start-index=%s' % start_index + try: + return self.Get(uri, converter=gdata.photos.AnyFeedFromString) + except gdata.service.RequestError, e: + raise GooglePhotosException(e.args[0]) + + def GetEntry(self, uri, limit=None, start_index=None): + """Get an Entry. + + Arguments: + uri: the uri to the entry + limit (optional): the maximum number of entries to return. Defaults to what + the server returns. + + Returns: + one of gdata.photos.AlbumEntry, + gdata.photos.UserEntry, + gdata.photos.PhotoEntry, + gdata.photos.CommentEntry, + gdata.photos.TagEntry, + depending on the results of the query. + Raises: + GooglePhotosException + """ + if limit is not None: + uri += '&max-results=%s' % limit + if start_index is not None: + uri += '&start-index=%s' % start_index + try: + return self.Get(uri, converter=gdata.photos.AnyEntryFromString) + except gdata.service.RequestError, e: + raise GooglePhotosException(e.args[0]) + + def GetUserFeed(self, kind='album', user='default', limit=None): + """Get user-based feed, containing albums, photos, comments or tags; + defaults to albums. + + The entries are ordered by the values of their `updated' elements, + with the most recently updated entry appearing first in the feed. + + Arguments: + kind: the kind of entries to get, either `album', `photo', + `comment' or `tag', or a python list of these. Defaults to `album'. + user (optional): whose albums we're querying. Defaults to current user. + limit (optional): the maximum number of entries to return. + Defaults to everything the server returns. + + + Returns: + gdata.photos.UserFeed, containing appropriate Entry elements + + See: + http://code.google.com/apis/picasaweb/gdata.html#Get_Album_Feed_Manual + http://googledataapis.blogspot.com/2007/07/picasa-web-albums-adds-new-api-features.html + """ + if isinstance(kind, (list, tuple) ): + kind = ",".join(kind) + + uri = '/data/feed/api/user/%s?kind=%s' % (user, kind) + return self.GetFeed(uri, limit=limit) + + def GetTaggedPhotos(self, tag, user='default', limit=None): + """Get all photos belonging to a specific user, tagged by the given keyword + + Arguments: + tag: The tag you're looking for, e.g. `dog' + user (optional): Whose images/videos you want to search, defaults + to current user + limit (optional): the maximum number of entries to return. + Defaults to everything the server returns. 
+ + Returns: + gdata.photos.UserFeed containing PhotoEntry elements + """ + # Lower-casing because of + # http://code.google.com/p/gdata-issues/issues/detail?id=194 + uri = '/data/feed/api/user/%s?kind=photo&tag=%s' % (user, tag.lower()) + return self.GetFeed(uri, limit) + + def SearchUserPhotos(self, query, user='default', limit=100): + """Search through all photos for a specific user and return a feed. + This will look for matches in file names and image tags (a.k.a. keywords) + + Arguments: + query: The string you're looking for, e.g. `vacation' + user (optional): The username of whose photos you want to search, defaults + to current user. + limit (optional): Don't return more than `limit' hits, defaults to 100 + + Only public photos are searched, unless you are authenticated and + searching through your own photos. + + Returns: + gdata.photos.UserFeed with PhotoEntry elements + """ + uri = '/data/feed/api/user/%s?kind=photo&q=%s' % (user, query) + return self.GetFeed(uri, limit=limit) + + def SearchCommunityPhotos(self, query, limit=100): + """Search through all public photos and return a feed. + This will look for matches in file names and image tags (a.k.a. keywords) + + Arguments: + query: The string you're looking for, e.g. `vacation' + limit (optional): Don't return more than `limit' hits, defaults to 100 + + Returns: + gdata.GDataFeed with PhotoEntry elements + """ + uri='/data/feed/api/all?q=%s' % query + return self.GetFeed(uri, limit=limit) + + def GetContacts(self, user='default', limit=None): + """Retrieve a feed that contains a list of your contacts + + Arguments: + user: Username of the user whose contacts you want + + Returns + gdata.photos.UserFeed, with UserEntry entries + + See: + http://groups.google.com/group/Google-Picasa-Data-API/msg/819b0025b5ff5e38 + """ + uri = '/data/feed/api/user/%s/contacts?kind=user' % user + return self.GetFeed(uri, limit=limit) + + def SearchContactsPhotos(self, user='default', search=None, limit=None): + """Search over your contacts' photos and return a feed + + Arguments: + user: Username of the user whose contacts you want + search (optional): What to search for (photo title, description and keywords) + + Returns + gdata.photos.UserFeed, with PhotoEntry elements + + See: + http://groups.google.com/group/Google-Picasa-Data-API/msg/819b0025b5ff5e38 + """ + + uri = '/data/feed/api/user/%s/contacts?kind=photo&q=%s' % (user, search) + return self.GetFeed(uri, limit=limit) + + def InsertAlbum(self, title, summary, location=None, access='public', + commenting_enabled='true', timestamp=None): + """Add an album. + + Needs authentication, see self.ClientLogin() + + Arguments: + title: Album title + summary: Album summary / description + access (optional): `private' or `public'. Public albums are searchable + by everyone on the internet. Defaults to `public' + commenting_enabled (optional): `true' or `false'. Defaults to `true'. + timestamp (optional): A date and time for the album, in milliseconds since + Unix epoch[1] UTC. Defaults to now. 
+ + Returns: + The newly created gdata.photos.AlbumEntry + + See: + http://code.google.com/apis/picasaweb/gdata.html#Add_Album_Manual_Installed + + [1]: http://en.wikipedia.org/wiki/Unix_epoch + """ + album = gdata.photos.AlbumEntry() + album.title = atom.Title(text=title, title_type='text') + album.summary = atom.Summary(text=summary, summary_type='text') + if location is not None: + album.location = gdata.photos.Location(text=location) + album.access = gdata.photos.Access(text=access) + if commenting_enabled in ('true', 'false'): + album.commentingEnabled = gdata.photos.CommentingEnabled(text=commenting_enabled) + if timestamp is None: + timestamp = '%i' % int(time.time() * 1000) + album.timestamp = gdata.photos.Timestamp(text=timestamp) + try: + return self.Post(album, uri=self.userUri % self.email, + converter=gdata.photos.AlbumEntryFromString) + except gdata.service.RequestError, e: + raise GooglePhotosException(e.args[0]) + + def InsertPhoto(self, album_or_uri, photo, filename_or_handle, + content_type='image/jpeg'): + """Add a PhotoEntry + + Needs authentication, see self.ClientLogin() + + Arguments: + album_or_uri: AlbumFeed or uri of the album where the photo should go + photo: PhotoEntry to add + filename_or_handle: A file-like object or file name where the image/video + will be read from + content_type (optional): Internet media type (a.k.a. mime type) of + media object. Currently Google Photos supports these types: + o image/bmp + o image/gif + o image/jpeg + o image/png + + Images will be converted to jpeg on upload. Defaults to `image/jpeg' + + """ + + try: + assert(isinstance(photo, gdata.photos.PhotoEntry)) + except AssertionError: + raise GooglePhotosException({'status':GPHOTOS_INVALID_ARGUMENT, + 'body':'`photo` must be a gdata.photos.PhotoEntry instance', + 'reason':'Found %s, not PhotoEntry' % type(photo) + }) + try: + majtype, mintype = content_type.split('/') + assert(mintype in SUPPORTED_UPLOAD_TYPES) + except (ValueError, AssertionError): + raise GooglePhotosException({'status':GPHOTOS_INVALID_CONTENT_TYPE, + 'body':'This is not a valid content type: %s' % content_type, + 'reason':'Accepted content types: %s' % \ + ['image/'+t for t in SUPPORTED_UPLOAD_TYPES] + }) + if isinstance(filename_or_handle, (str, unicode)) and \ + os.path.exists(filename_or_handle): # it's a file name + mediasource = gdata.MediaSource() + mediasource.setFile(filename_or_handle, content_type) + elif hasattr(filename_or_handle, 'read'):# it's a file-like resource + if hasattr(filename_or_handle, 'seek'): + filename_or_handle.seek(0) # rewind pointer to the start of the file + # gdata.MediaSource needs the content length, so read the whole image + file_handle = StringIO.StringIO(filename_or_handle.read()) + name = 'image' + if hasattr(filename_or_handle, 'name'): + name = filename_or_handle.name + mediasource = gdata.MediaSource(file_handle, content_type, + content_length=file_handle.len, file_name=name) + else: #filename_or_handle is not valid + raise GooglePhotosException({'status':GPHOTOS_INVALID_ARGUMENT, + 'body':'`filename_or_handle` must be a path name or a file-like object', + 'reason':'Found %s, not path name or object with a .read() method' % \ + filename_or_handle + }) + + if isinstance(album_or_uri, (str, unicode)): # it's a uri + feed_uri = album_or_uri + elif hasattr(album_or_uri, 'GetFeedLink'): # it's a AlbumFeed object + feed_uri = album_or_uri.GetFeedLink().href + + try: + return self.Post(photo, uri=feed_uri, media_source=mediasource, + 
converter=gdata.photos.PhotoEntryFromString) + except gdata.service.RequestError, e: + raise GooglePhotosException(e.args[0]) + + def InsertPhotoSimple(self, album_or_uri, title, summary, filename_or_handle, + content_type='image/jpeg', keywords=None): + """Add a photo without constructing a PhotoEntry. + + Needs authentication, see self.ClientLogin() + + Arguments: + album_or_uri: AlbumFeed or uri of the album where the photo should go + title: Photo title + summary: Photo summary / description + filename_or_handle: A file-like object or file name where the image/video + will be read from + content_type (optional): Internet media type (a.k.a. mime type) of + media object. Currently Google Photos supports these types: + o image/bmp + o image/gif + o image/jpeg + o image/png + + Images will be converted to jpeg on upload. Defaults to `image/jpeg' + keywords (optional): a 1) comma separated string or 2) a python list() of + keywords (a.k.a. tags) to add to the image. + E.g. 1) `dog, vacation, happy' 2) ['dog', 'happy', 'vacation'] + + Returns: + The newly created gdata.photos.PhotoEntry or GooglePhotosException on errors + + See: + http://code.google.com/apis/picasaweb/gdata.html#Add_Album_Manual_Installed + [1]: http://en.wikipedia.org/wiki/Unix_epoch + """ + + metadata = gdata.photos.PhotoEntry() + metadata.title=atom.Title(text=title) + metadata.summary = atom.Summary(text=summary, summary_type='text') + if keywords is not None: + if isinstance(keywords, list): + keywords = ','.join(keywords) + metadata.media.keywords = gdata.media.Keywords(text=keywords) + return self.InsertPhoto(album_or_uri, metadata, filename_or_handle, + content_type) + + def UpdatePhotoMetadata(self, photo): + """Update a photo's metadata. + + Needs authentication, see self.ClientLogin() + + You can update any or all of the following metadata properties: + * <title> + * <media:description> + * <gphoto:checksum> + * <gphoto:client> + * <gphoto:rotation> + * <gphoto:timestamp> + * <gphoto:commentingEnabled> + + Arguments: + photo: a gdata.photos.PhotoEntry object with updated elements + + Returns: + The modified gdata.photos.PhotoEntry + + Example: + p = GetFeed(uri).entry[0] + p.title.text = u'My new text' + p.commentingEnabled.text = 'false' + p = UpdatePhotoMetadata(p) + + It is important that you don't keep the old object around, once + it has been updated. See + http://code.google.com/apis/gdata/reference.html#Optimistic-concurrency + """ + try: + return self.Put(data=photo, uri=photo.GetEditLink().href, + converter=gdata.photos.PhotoEntryFromString) + except gdata.service.RequestError, e: + raise GooglePhotosException(e.args[0]) + + + def UpdatePhotoBlob(self, photo_or_uri, filename_or_handle, + content_type = 'image/jpeg'): + """Update a photo's binary data. + + Needs authentication, see self.ClientLogin() + + Arguments: + photo_or_uri: a gdata.photos.PhotoEntry that will be updated, or a + `edit-media' uri pointing to it + filename_or_handle: A file-like object or file name where the image/video + will be read from + content_type (optional): Internet media type (a.k.a. mime type) of + media object. Currently Google Photos supports these types: + o image/bmp + o image/gif + o image/jpeg + o image/png + Images will be converted to jpeg on upload. Defaults to `image/jpeg' + + Returns: + The modified gdata.photos.PhotoEntry + + Example: + p = GetFeed(PhotoUri) + p = UpdatePhotoBlob(p, '/tmp/newPic.jpg') + + It is important that you don't keep the old object around, once + it has been updated. 
See + http://code.google.com/apis/gdata/reference.html#Optimistic-concurrency + """ + + try: + majtype, mintype = content_type.split('/') + assert(mintype in SUPPORTED_UPLOAD_TYPES) + except (ValueError, AssertionError): + raise GooglePhotosException({'status':GPHOTOS_INVALID_CONTENT_TYPE, + 'body':'This is not a valid content type: %s' % content_type, + 'reason':'Accepted content types: %s' % \ + ['image/'+t for t in SUPPORTED_UPLOAD_TYPES] + }) + + if isinstance(filename_or_handle, (str, unicode)) and \ + os.path.exists(filename_or_handle): # it's a file name + photoblob = gdata.MediaSource() + photoblob.setFile(filename_or_handle, content_type) + elif hasattr(filename_or_handle, 'read'):# it's a file-like resource + if hasattr(filename_or_handle, 'seek'): + filename_or_handle.seek(0) # rewind pointer to the start of the file + # gdata.MediaSource needs the content length, so read the whole image + file_handle = StringIO.StringIO(filename_or_handle.read()) + name = 'image' + if hasattr(filename_or_handle, 'name'): + name = filename_or_handle.name + mediasource = gdata.MediaSource(file_handle, content_type, + content_length=file_handle.len, file_name=name) + else: #filename_or_handle is not valid + raise GooglePhotosException({'status':GPHOTOS_INVALID_ARGUMENT, + 'body':'`filename_or_handle` must be a path name or a file-like object', + 'reason':'Found %s, not path name or an object with .read() method' % \ + type(filename_or_handle) + }) + + if isinstance(photo_or_uri, (str, unicode)): + entry_uri = photo_or_uri # it's a uri + elif hasattr(photo_or_uri, 'GetEditMediaLink'): + entry_uri = photo_or_uri.GetEditMediaLink().href + try: + return self.Put(photoblob, entry_uri, + converter=gdata.photos.PhotoEntryFromString) + except gdata.service.RequestError, e: + raise GooglePhotosException(e.args[0]) + + def InsertTag(self, photo_or_uri, tag): + """Add a tag (a.k.a. keyword) to a photo. + + Needs authentication, see self.ClientLogin() + + Arguments: + photo_or_uri: a gdata.photos.PhotoEntry that will be tagged, or a + `post' uri pointing to it + (string) tag: The tag/keyword + + Returns: + The new gdata.photos.TagEntry + + Example: + p = GetFeed(PhotoUri) + tag = InsertTag(p, 'Beautiful sunsets') + + """ + tag = gdata.photos.TagEntry(title=atom.Title(text=tag)) + if isinstance(photo_or_uri, (str, unicode)): + post_uri = photo_or_uri # it's a uri + elif hasattr(photo_or_uri, 'GetEditMediaLink'): + post_uri = photo_or_uri.GetPostLink().href + try: + return self.Post(data=tag, uri=post_uri, + converter=gdata.photos.TagEntryFromString) + except gdata.service.RequestError, e: + raise GooglePhotosException(e.args[0]) + + + def InsertComment(self, photo_or_uri, comment): + """Add a comment to a photo. + + Needs authentication, see self.ClientLogin() + + Arguments: + photo_or_uri: a gdata.photos.PhotoEntry that is about to be commented + , or a `post' uri pointing to it + (string) comment: The actual comment + + Returns: + The new gdata.photos.CommentEntry + + Example: + p = GetFeed(PhotoUri) + tag = InsertComment(p, 'OOOH! I would have loved to be there. 
+ Who's that in the back?') + + """ + comment = gdata.photos.CommentEntry(content=atom.Content(text=comment)) + if isinstance(photo_or_uri, (str, unicode)): + post_uri = photo_or_uri # it's a uri + elif hasattr(photo_or_uri, 'GetEditMediaLink'): + post_uri = photo_or_uri.GetPostLink().href + try: + return self.Post(data=comment, uri=post_uri, + converter=gdata.photos.CommentEntryFromString) + except gdata.service.RequestError, e: + raise GooglePhotosException(e.args[0]) + + def Delete(self, object_or_uri, *args, **kwargs): + """Delete an object. + + Re-implementing the GDataService.Delete method, to add some + convenience. + + Arguments: + object_or_uri: Any object that has a GetEditLink() method that + returns a link, or a uri to that object. + + Returns: + ? or GooglePhotosException on errors + """ + try: + uri = object_or_uri.GetEditLink().href + except AttributeError: + uri = object_or_uri + try: + return gdata.service.GDataService.Delete(self, uri, *args, **kwargs) + except gdata.service.RequestError, e: + raise GooglePhotosException(e.args[0]) + +def GetSmallestThumbnail(media_thumbnail_list): + """Helper function to get the smallest thumbnail of a list of + gdata.media.Thumbnail. + Returns gdata.media.Thumbnail """ + r = {} + for thumb in media_thumbnail_list: + r[int(thumb.width)*int(thumb.height)] = thumb + keys = r.keys() + keys.sort() + return r[keys[0]] + +def ConvertAtomTimestampToEpoch(timestamp): + """Helper function to convert a timestamp string, for instance + from atom:updated or atom:published, to milliseconds since Unix epoch + (a.k.a. POSIX time). + + `2007-07-22T00:45:10.000Z' -> """ + return time.mktime(time.strptime(timestamp, '%Y-%m-%dT%H:%M:%S.000Z')) + ## TODO: Timezone aware diff --git a/gam/gdata/analytics/projecthosting/__init__.py b/gam/gdata/analytics/projecthosting/__init__.py new file mode 100755 index 00000000000..8b137891791 --- /dev/null +++ b/gam/gdata/analytics/projecthosting/__init__.py @@ -0,0 +1 @@ + diff --git a/gam/gdata/analytics/projecthosting/client.py b/gam/gdata/analytics/projecthosting/client.py new file mode 100755 index 00000000000..512eb32bbb1 --- /dev/null +++ b/gam/gdata/analytics/projecthosting/client.py @@ -0,0 +1,201 @@ +#!/usr/bin/env python +# +# Copyright 2009 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import atom.data +import gdata.client +import gdata.gauth +import gdata.projecthosting.data + + +class ProjectHostingClient(gdata.client.GDClient): + """Client to interact with the Project Hosting GData API.""" + api_version = '1.0' + auth_service = 'code' + auth_scopes = gdata.gauth.AUTH_SCOPES['code'] + host = 'code.google.com' + ssl = True + + def get_issues(self, project_name, + desired_class=gdata.projecthosting.data.IssuesFeed, **kwargs): + """Get a feed of issues for a particular project. + + Args: + project_name str The name of the project. + query Query Set returned issues parameters. 
+
+ Returns:
+ data.IssuesFeed
+ """
+ return self.get_feed(gdata.projecthosting.data.ISSUES_FULL_FEED %
+ project_name, desired_class=desired_class, **kwargs)
+
+ def add_issue(self, project_name, title, content, author,
+ status=None, owner=None, labels=None, ccs=None, **kwargs):
+ """Create a new issue for the project.
+
+ Args:
+ project_name str The name of the project.
+ title str The title of the new issue.
+ content str The summary of the new issue.
+ author str The authenticated user's username.
+ status str The status of the new issue, Accepted, etc.
+ owner str The username of new issue's owner.
+ labels [str] Labels to associate with the new issue.
+ ccs [str] usernames to Cc on the new issue.
+ Returns:
+ data.IssueEntry
+ """
+ new_entry = gdata.projecthosting.data.IssueEntry(
+ title=atom.data.Title(text=title),
+ content=atom.data.Content(text=content),
+ author=[atom.data.Author(name=atom.data.Name(text=author))])
+
+ if status:
+ new_entry.status = gdata.projecthosting.data.Status(text=status)
+
+ if owner:
+ new_entry.owner = gdata.projecthosting.data.Owner(
+ username=gdata.projecthosting.data.Username(text=owner))
+
+ if labels:
+ new_entry.label = [gdata.projecthosting.data.Label(text=label)
+ for label in labels]
+ if ccs:
+ new_entry.cc = [
+ gdata.projecthosting.data.Cc(
+ username=gdata.projecthosting.data.Username(text=cc))
+ for cc in ccs]
+
+ return self.post(
+ new_entry,
+ gdata.projecthosting.data.ISSUES_FULL_FEED % project_name,
+ **kwargs)
+
+ def update_issue(self, project_name, issue_id, author, comment=None,
+ summary=None, status=None, owner=None, labels=None, ccs=None,
+ **kwargs):
+ """Update or comment on one issue for the project.
+
+ Args:
+ project_name str The name of the issue's project.
+ issue_id str The issue number to be updated.
+ author str The authenticated user's username.
+ comment str A comment to append to the issue
+ summary str Rewrite the summary of the issue.
+ status str A new status for the issue.
+ owner str The username of the new owner.
+ labels [str] Labels to set on the issue (prepend issue with - to remove a
+ label).
+ ccs [str] Ccs to set on the new issue (prepend cc with - to remove a cc).
+
+ Returns:
+ data.CommentEntry
+ """
+ updates = gdata.projecthosting.data.Updates()
+
+ if summary:
+ updates.summary = gdata.projecthosting.data.Summary(text=summary)
+
+ if status:
+ updates.status = gdata.projecthosting.data.Status(text=status)
+
+ if owner:
+ updates.ownerUpdate = gdata.projecthosting.data.OwnerUpdate(text=owner)
+
+ if labels:
+ updates.label = [gdata.projecthosting.data.Label(text=label)
+ for label in labels]
+ if ccs:
+ updates.ccUpdate = [gdata.projecthosting.data.CcUpdate(text=cc)
+ for cc in ccs]
+
+ update_entry = gdata.projecthosting.data.CommentEntry(
+ content=atom.data.Content(text=comment),
+ author=[atom.data.Author(name=atom.data.Name(text=author))],
+ updates=updates)
+
+ return self.post(
+ update_entry,
+ gdata.projecthosting.data.COMMENTS_FULL_FEED % (project_name, issue_id),
+ **kwargs)
+
+ def get_comments(self, project_name, issue_id,
+ desired_class=gdata.projecthosting.data.CommentsFeed,
+ **kwargs):
+ """Get a feed of all updates to an issue.
+
+ Args:
+ project_name str The name of the issue's project.
+ issue_id str The issue number whose comments to retrieve.
+ + Returns: + data.CommentsFeed + """ + return self.get_feed( + gdata.projecthosting.data.COMMENTS_FULL_FEED % (project_name, issue_id), + desired_class=desired_class, **kwargs) + + def update(self, entry, auth_token=None, force=False, **kwargs): + """Unsupported GData update method. + + Use update_*() instead. + """ + raise NotImplementedError( + 'GData Update operation unsupported, try update_*') + + def delete(self, entry_or_uri, auth_token=None, force=False, **kwargs): + """Unsupported GData delete method. + + Use update_issue(status='Closed') instead. + """ + raise NotImplementedError( + 'GData Delete API unsupported, try closing the issue instead.') + + +class Query(gdata.client.Query): + + def __init__(self, issue_id=None, label=None, canned_query=None, owner=None, + status=None, **kwargs): + """Constructs a Google Data Query to filter feed contents serverside. + Args: + issue_id: int or str The issue to return based on the issue id. + label: str A label returned issues must have. + canned_query: str Return issues based on a canned query identifier + owner: str Return issues based on the owner of the issue. For Gmail users, + this will be the part of the email preceding the '@' sign. + status: str Return issues based on the status of the issue. + """ + super(Query, self).__init__(**kwargs) + self.label = label + self.issue_id = issue_id + self.canned_query = canned_query + self.owner = owner + self.status = status + + def modify_request(self, http_request): + if self.issue_id: + gdata.client._add_query_param('id', self.issue_id, http_request) + if self.label: + gdata.client._add_query_param('label', self.label, http_request) + if self.canned_query: + gdata.client._add_query_param('can', self.canned_query, http_request) + if self.owner: + gdata.client._add_query_param('owner', self.owner, http_request) + if self.status: + gdata.client._add_query_param('status', self.status, http_request) + super(Query, self).modify_request(http_request) + + ModifyRequest = modify_request diff --git a/gam/gdata/analytics/projecthosting/data.py b/gam/gdata/analytics/projecthosting/data.py new file mode 100755 index 00000000000..b0af2f5f71e --- /dev/null +++ b/gam/gdata/analytics/projecthosting/data.py @@ -0,0 +1,134 @@ +#!/usr/bin/env python +# +# Copyright 2009 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +# This module is used for version 2 of the Google Data APIs. + + +"""Provides classes and constants for XML in the Google Project Hosting API. 
+ +Canonical documentation for the raw XML which these classes represent can be +found here: http://code.google.com/p/support/wiki/IssueTrackerAPI +""" + + +__author__ = 'jlapenna@google.com (Joe LaPenna)' + +import atom.core +import gdata.data + + +ISSUES_TEMPLATE = '{http://schemas.google.com/projecthosting/issues/2009}%s' + + +ISSUES_FULL_FEED = '/feeds/issues/p/%s/issues/full' +COMMENTS_FULL_FEED = '/feeds/issues/p/%s/issues/%s/comments/full' + + +class Uri(atom.core.XmlElement): + """The issues:uri element.""" + _qname = ISSUES_TEMPLATE % 'uri' + + +class Username(atom.core.XmlElement): + """The issues:username element.""" + _qname = ISSUES_TEMPLATE % 'username' + + +class Cc(atom.core.XmlElement): + """The issues:cc element.""" + _qname = ISSUES_TEMPLATE % 'cc' + uri = Uri + username = Username + + +class Label(atom.core.XmlElement): + """The issues:label element.""" + _qname = ISSUES_TEMPLATE % 'label' + + +class Owner(atom.core.XmlElement): + """The issues:owner element.""" + _qname = ISSUES_TEMPLATE % 'owner' + uri = Uri + username = Username + + +class Stars(atom.core.XmlElement): + """The issues:stars element.""" + _qname = ISSUES_TEMPLATE % 'stars' + + +class State(atom.core.XmlElement): + """The issues:state element.""" + _qname = ISSUES_TEMPLATE % 'state' + + +class Status(atom.core.XmlElement): + """The issues:status element.""" + _qname = ISSUES_TEMPLATE % 'status' + + +class Summary(atom.core.XmlElement): + """The issues:summary element.""" + _qname = ISSUES_TEMPLATE % 'summary' + + +class OwnerUpdate(atom.core.XmlElement): + """The issues:ownerUpdate element.""" + _qname = ISSUES_TEMPLATE % 'ownerUpdate' + + +class CcUpdate(atom.core.XmlElement): + """The issues:ccUpdate element.""" + _qname = ISSUES_TEMPLATE % 'ccUpdate' + + +class Updates(atom.core.XmlElement): + """The issues:updates element.""" + _qname = ISSUES_TEMPLATE % 'updates' + summary = Summary + status = Status + ownerUpdate = OwnerUpdate + label = [Label] + ccUpdate = [CcUpdate] + + +class IssueEntry(gdata.data.GDEntry): + """Represents the information of one issue.""" + _qname = atom.data.ATOM_TEMPLATE % 'entry' + owner = Owner + cc = [Cc] + label = [Label] + stars = Stars + state = State + status = Status + + +class IssuesFeed(gdata.data.GDFeed): + """An Atom feed listing a project's issues.""" + entry = [IssueEntry] + + +class CommentEntry(gdata.data.GDEntry): + """An entry detailing one comment on an issue.""" + _qname = atom.data.ATOM_TEMPLATE % 'entry' + updates = Updates + + +class CommentsFeed(gdata.data.GDFeed): + """An Atom feed listing a project's issue's comments.""" + entry = [CommentEntry] diff --git a/gam/gdata/analytics/sample_util.py b/gam/gdata/analytics/sample_util.py new file mode 100755 index 00000000000..aae866e2afc --- /dev/null +++ b/gam/gdata/analytics/sample_util.py @@ -0,0 +1,269 @@ +#!/usr/bin/env python +# +# Copyright (C) 2009 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+
+
+"""Provides utility functions used with command line samples."""
+
+# This module is used for version 2 of the Google Data APIs.
+
+import sys
+import getpass
+import urllib
+import gdata.gauth
+
+__author__ = 'j.s@google.com (Jeff Scudder)'
+
+
+CLIENT_LOGIN = 1
+AUTHSUB = 2
+OAUTH = 3
+
+HMAC = 1
+RSA = 2
+
+
+class SettingsUtil(object):
+ """Gathers user preferences from flags or command prompts.
+
+ An instance of this object stores the choices made by the user. At some
+ point it might be useful to save the user's preferences so that they do
+ not need to always set flags or answer preference prompts.
+ """
+
+ def __init__(self, prefs=None):
+ self.prefs = prefs or {}
+
+ def get_param(self, name, prompt='', secret=False, ask=True, reuse=False):
+ # First, check this object's stored preferences.
+ if name in self.prefs:
+ return self.prefs[name]
+ # Second, check for a command line parameter.
+ value = None
+ for i in xrange(len(sys.argv)):
+ if sys.argv[i].startswith('--%s=' % name):
+ value = sys.argv[i].split('=')[1]
+ elif sys.argv[i] == '--%s' % name:
+ value = sys.argv[i + 1]
+ # Third, if it was not on the command line, ask the user to input the
+ # value.
+ if value is None and ask:
+ prompt = '%s: ' % prompt
+ if secret:
+ value = getpass.getpass(prompt)
+ else:
+ value = raw_input(prompt)
+ # If we want to save the preference for reuse in future requests, add it
+ # to this object's prefs.
+ if value is not None and reuse:
+ self.prefs[name] = value
+ return value
+
+ def authorize_client(self, client, auth_type=None, service=None,
+ source=None, scopes=None, oauth_type=None,
+ consumer_key=None, consumer_secret=None):
+ """Uses command line arguments, or prompts user for token values."""
+ if 'client_auth_token' in self.prefs:
+ return
+ if auth_type is None:
+ auth_type = int(self.get_param(
+ 'auth_type', 'Please choose the authorization mechanism you want'
+ ' to use.\n'
+ '1. to use your email address and password (ClientLogin)\n'
+ '2. to use a web browser to visit an auth web page (AuthSub)\n'
+ '3. if you have registered to use OAuth\n', reuse=True))
+
+ # Get the scopes for the services we want to access.
+ if auth_type == AUTHSUB or auth_type == OAUTH:
+ if scopes is None:
+ scopes = self.get_param(
+ 'scopes', 'Enter the URL prefixes (scopes) for the resources you '
+ 'would like to access.\nFor multiple scope URLs, place a comma '
+ 'between each URL.\n'
+ 'Example: http://www.google.com/calendar/feeds/,'
+ 'http://www.google.com/m8/feeds/\n', reuse=True).split(',')
+ elif isinstance(scopes, (str, unicode)):
+ scopes = scopes.split(',')
+
+ if auth_type == CLIENT_LOGIN:
+ email = self.get_param('email', 'Please enter your username',
+ reuse=False)
+ password = self.get_param('password', 'Password', True, reuse=False)
+ if service is None:
+ service = self.get_param(
+ 'service', 'What is the name of the service you wish to access?'
+ '\n(See list:'
+ ' http://code.google.com/apis/gdata/faq.html#clientlogin)',
+ reuse=True)
+ if source is None:
+ source = self.get_param('source', ask=False, reuse=True)
+ client.client_login(email, password, source=source, service=service)
+ elif auth_type == AUTHSUB:
+ auth_sub_token = self.get_param('auth_sub_token', ask=False, reuse=True)
+ session_token = self.get_param('session_token', ask=False, reuse=True)
+ private_key = None
+ auth_url = None
+ single_use_token = None
+ rsa_private_key = self.get_param(
+ 'rsa_private_key',
+ 'If you want to use secure mode AuthSub, please provide the\n'
+ ' location of your RSA private key which corresponds to the\n'
+ ' certificate you have uploaded for your domain. If you do not\n'
+ ' have an RSA key, simply press enter', reuse=True)
+
+ if rsa_private_key:
+ try:
+ private_key_file = open(rsa_private_key, 'rb')
+ private_key = private_key_file.read()
+ private_key_file.close()
+ except IOError:
+ print 'Unable to read private key from file'
+
+ if private_key is not None:
+ if client.auth_token is None:
+ if session_token:
+ client.auth_token = gdata.gauth.SecureAuthSubToken(
+ session_token, private_key, scopes)
+ self.prefs['client_auth_token'] = gdata.gauth.token_to_blob(
+ client.auth_token)
+ return
+ elif auth_sub_token:
+ client.auth_token = gdata.gauth.SecureAuthSubToken(
+ auth_sub_token, private_key, scopes)
+ client.upgrade_token()
+ self.prefs['client_auth_token'] = gdata.gauth.token_to_blob(
+ client.auth_token)
+ return
+
+ auth_url = gdata.gauth.generate_auth_sub_url(
+ 'http://gauthmachine.appspot.com/authsub', scopes, True)
+ print 'with a private key, get ready for this URL', auth_url
+
+ else:
+ if client.auth_token is None:
+ if session_token:
+ client.auth_token = gdata.gauth.AuthSubToken(session_token,
+ scopes)
+ self.prefs['client_auth_token'] = gdata.gauth.token_to_blob(
+ client.auth_token)
+ return
+ elif auth_sub_token:
+ client.auth_token = gdata.gauth.AuthSubToken(auth_sub_token,
+ scopes)
+ client.upgrade_token()
+ self.prefs['client_auth_token'] = gdata.gauth.token_to_blob(
+ client.auth_token)
+ return
+
+ auth_url = gdata.gauth.generate_auth_sub_url(
+ 'http://gauthmachine.appspot.com/authsub', scopes)
+
+ print 'Visit the following URL in your browser to authorize this app:'
+ print str(auth_url)
+ print 'After agreeing to authorize the app, copy the token value from'
+ print ' the URL. Example: "www.google.com/?token=ab12" token value is'
+ print ' ab12'
+ token_value = raw_input('Please enter the token value: ')
+ if private_key is not None:
+ single_use_token = gdata.gauth.SecureAuthSubToken(
+ token_value, private_key, scopes)
+ else:
+ single_use_token = gdata.gauth.AuthSubToken(token_value, scopes)
+ client.auth_token = single_use_token
+ client.upgrade_token()
+
+ elif auth_type == OAUTH:
+ if oauth_type is None:
+ oauth_type = int(self.get_param(
+ 'oauth_type', 'Please choose the authorization mechanism you want'
+ ' to use.\n'
+ '1. use an HMAC signature using your consumer key and secret\n'
+ '2. use RSA with your private key to sign requests\n',
+ reuse=True))
+
+ consumer_key = self.get_param(
+ 'consumer_key', 'Please enter your OAuth consumer key '
+ 'which identifies your app', reuse=True)
+
+ if oauth_type == HMAC:
+ consumer_secret = self.get_param(
+ 'consumer_secret', 'Please enter your OAuth consumer secret '
+ 'which you share with the OAuth provider', True, reuse=False)
+ # Swap out this code once the client supports requesting an oauth
+ # token.
+ # Get a request token.
+ request_token = client.get_oauth_token( + scopes, 'http://gauthmachine.appspot.com/oauth', consumer_key, + consumer_secret=consumer_secret) + elif oauth_type == RSA: + rsa_private_key = self.get_param( + 'rsa_private_key', + 'Please provide the location of your RSA private key which\n' + ' corresponds to the certificate you have uploaded for your' + ' domain.', + reuse=True) + try: + private_key_file = open(rsa_private_key, 'rb') + private_key = private_key_file.read() + private_key_file.close() + except IOError: + print 'Unable to read private key from file' + + request_token = client.get_oauth_token( + scopes, 'http://gauthmachine.appspot.com/oauth', consumer_key, + rsa_private_key=private_key) + else: + print 'Invalid OAuth signature type' + return None + + # Authorize the request token in the browser. + print 'Visit the following URL in your browser to authorize this app:' + print str(request_token.generate_authorization_url()) + print 'After agreeing to authorize the app, copy URL from the browser\'s' + print ' address bar.' + url = raw_input('Please enter the url: ') + gdata.gauth.authorize_request_token(request_token, url) + # Exchange for an access token. + client.auth_token = client.get_access_token(request_token) + else: + print 'Invalid authorization type.' + return None + if client.auth_token: + self.prefs['client_auth_token'] = gdata.gauth.token_to_blob( + client.auth_token) + + +def get_param(name, prompt='', secret=False, ask=True): + settings = SettingsUtil() + return settings.get_param(name=name, prompt=prompt, secret=secret, ask=ask) + + +def authorize_client(client, auth_type=None, service=None, source=None, + scopes=None, oauth_type=None, consumer_key=None, + consumer_secret=None): + """Uses command line arguments, or prompts user for token values.""" + settings = SettingsUtil() + return settings.authorize_client(client=client, auth_type=auth_type, + service=service, source=source, + scopes=scopes, oauth_type=oauth_type, + consumer_key=consumer_key, + consumer_secret=consumer_secret) + + +def print_options(): + """Displays usage information, available command line params.""" + # TODO: fill in the usage description for authorizing the client. + print '' + diff --git a/gam/gdata/analytics/service.py b/gam/gdata/analytics/service.py new file mode 100755 index 00000000000..df05d1f50e8 --- /dev/null +++ b/gam/gdata/analytics/service.py @@ -0,0 +1,1718 @@ +#!/usr/bin/python +# +# Copyright (C) 2006,2008 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +"""GDataService provides CRUD ops. and programmatic login for GData services. + + Error: A base exception class for all exceptions in the gdata_client + module. + + CaptchaRequired: This exception is thrown when a login attempt results in a + captcha challenge from the ClientLogin service. When this + exception is thrown, the captcha_token and captcha_url are + set to the values provided in the server's response. + + BadAuthentication: Raised when a login attempt is made with an incorrect + username or password. 
+ + NotAuthenticated: Raised if an operation requiring authentication is called + before a user has authenticated. + + NonAuthSubToken: Raised if a method to modify an AuthSub token is used when + the user is either not authenticated or is authenticated + through another authentication mechanism. + + NonOAuthToken: Raised if a method to modify an OAuth token is used when the + user is either not authenticated or is authenticated through + another authentication mechanism. + + RequestError: Raised if a CRUD request returned a non-success code. + + UnexpectedReturnType: Raised if the response from the server was not of the + desired type. For example, this would be raised if the + server sent a feed when the client requested an entry. + + GDataService: Encapsulates user credentials needed to perform insert, update + and delete operations with the GData API. An instance can + perform user authentication, query, insertion, deletion, and + update. + + Query: Eases query URI creation by allowing URI parameters to be set as + dictionary attributes. For example a query with a feed of + '/base/feeds/snippets' and ['bq'] set to 'digital camera' will + produce '/base/feeds/snippets?bq=digital+camera' when .ToUri() is + called on it. +""" + + +__author__ = 'api.jscudder (Jeffrey Scudder)' + +import re +import urllib +import urlparse +try: + from xml.etree import cElementTree as ElementTree +except ImportError: + try: + import cElementTree as ElementTree + except ImportError: + try: + from xml.etree import ElementTree + except ImportError: + from elementtree import ElementTree +import atom.service +import gdata +import atom +import atom.http_interface +import atom.token_store +import gdata.auth +import gdata.gauth + + +AUTH_SERVER_HOST = 'https://www.google.com' + + +# When requesting an AuthSub token, it is often helpful to track the scope +# which is being requested. One way to accomplish this is to add a URL +# parameter to the 'next' URL which contains the requested scope. This +# constant is the default name (AKA key) for the URL parameter. +SCOPE_URL_PARAM_NAME = 'authsub_token_scope' +# When requesting an OAuth access token or authorization of an existing OAuth +# request token, it is often helpful to track the scope(s) which is/are being +# requested. One way to accomplish this is to add a URL parameter to the +# 'callback' URL which contains the requested scope. This constant is the +# default name (AKA key) for the URL parameter. +OAUTH_SCOPE_URL_PARAM_NAME = 'oauth_token_scope' +# Maps the service names used in ClientLogin to scope URLs. +CLIENT_LOGIN_SCOPES = gdata.gauth.AUTH_SCOPES +# Default parameters for GDataService.GetWithRetries method +DEFAULT_NUM_RETRIES = 3 +DEFAULT_DELAY = 1 +DEFAULT_BACKOFF = 2 + + +def lookup_scopes(service_name): + """Finds the scope URLs for the desired service. + + In some cases, an unknown service may be used, and in those cases this + function will return None. + """ + if service_name in CLIENT_LOGIN_SCOPES: + return CLIENT_LOGIN_SCOPES[service_name] + return None + + +# Module level variable specifies which module should be used by GDataService +# objects to make HttpRequests. This setting can be overridden on each +# instance of GDataService. +# This module level variable is deprecated. Reassign the http_client member +# of a GDataService object instead. 
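+# Illustrative (hypothetical) sketch of the preferred per-instance override:
+#   my_service = GDataService()
+#   my_service.http_client = atom.http.ProxiedHttpClient()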
+http_request_handler = atom.service
+
+
+class Error(Exception):
+ pass
+
+
+class CaptchaRequired(Error):
+ pass
+
+
+class BadAuthentication(Error):
+ pass
+
+
+class NotAuthenticated(Error):
+ pass
+
+
+class NonAuthSubToken(Error):
+ pass
+
+
+class NonOAuthToken(Error):
+ pass
+
+
+class RequestError(Error):
+ pass
+
+
+class UnexpectedReturnType(Error):
+ pass
+
+
+class BadAuthenticationServiceURL(Error):
+ pass
+
+
+class FetchingOAuthRequestTokenFailed(RequestError):
+ pass
+
+
+class TokenUpgradeFailed(RequestError):
+ pass
+
+
+class RevokingOAuthTokenFailed(RequestError):
+ pass
+
+
+class AuthorizationRequired(Error):
+ pass
+
+
+class TokenHadNoScope(Error):
+ pass
+
+
+class RanOutOfTries(Error):
+ pass
+
+
+class GDataService(atom.service.AtomService):
+ """Contains elements needed for GData login and CRUD request headers.
+
+ Maintains additional headers (tokens for example) needed for the GData
+ services to allow a user to perform inserts, updates, and deletes.
+ """
+ # The handler member is deprecated, use http_client instead.
+ handler = None
+ # The auth_token member is deprecated, use the token_store instead.
+ auth_token = None
+ # The tokens dict is deprecated in favor of the token_store.
+ tokens = None
+
+ def __init__(self, email=None, password=None, account_type='HOSTED_OR_GOOGLE',
+ service=None, auth_service_url=None, source=None, server=None,
+ additional_headers=None, handler=None, tokens=None,
+ http_client=None, token_store=None):
+ """Creates an object of type GDataService.
+
+ Args:
+ email: string (optional) The user's email address, used for
+ authentication.
+ password: string (optional) The user's password.
+ account_type: string (optional) The type of account to use. Use
+ 'GOOGLE' for regular Google accounts or 'HOSTED' for Google
+ Apps accounts, or 'HOSTED_OR_GOOGLE' to try finding a HOSTED
+ account first and, if it doesn't exist, try finding a regular
+ GOOGLE account. Default value: 'HOSTED_OR_GOOGLE'.
+ service: string (optional) The desired service for which credentials
+ will be obtained.
+ auth_service_url: string (optional) User-defined auth token request URL
+ allows users to explicitly specify where to send auth token requests.
+ source: string (optional) The name of the user's application.
+ server: string (optional) The name of the server to which a connection
+ will be opened. Default value: 'base.google.com'.
+ additional_headers: dictionary (optional) Any additional headers which
+ should be included with CRUD operations.
+ handler: module (optional) This parameter is deprecated and has been
+ replaced by http_client.
+ tokens: This parameter is deprecated, calls should be made to
+ token_store instead.
+ http_client: An object responsible for making HTTP requests using a
+ request method. If none is provided, a new instance of
+ atom.http.ProxiedHttpClient will be used.
+ token_store: Keeps a collection of authorization tokens which can be
+ applied to requests for specific URLs. Critical methods are
+ find_token based on a URL (atom.url.Url or a string), add_token,
+ and remove_token.
+ """ + atom.service.AtomService.__init__(self, http_client=http_client, + token_store=token_store) + self.email = email + self.password = password + self.account_type = account_type + self.service = service + self.auth_service_url = auth_service_url + self.server = server + self.additional_headers = additional_headers or {} + self._oauth_input_params = None + self.__SetSource(source) + self.__captcha_token = None + self.__captcha_url = None + self.__gsessionid = None + + if http_request_handler.__name__ == 'gdata.urlfetch': + import gdata.alt.appengine + self.http_client = gdata.alt.appengine.AppEngineHttpClient() + + def _SetSessionId(self, session_id): + """Used in unit tests to simulate a 302 which sets a gsessionid.""" + self.__gsessionid = session_id + + # Define properties for GDataService + def _SetAuthSubToken(self, auth_token, scopes=None): + """Deprecated, use SetAuthSubToken instead.""" + self.SetAuthSubToken(auth_token, scopes=scopes) + + def __SetAuthSubToken(self, auth_token, scopes=None): + """Deprecated, use SetAuthSubToken instead.""" + self._SetAuthSubToken(auth_token, scopes=scopes) + + def _GetAuthToken(self): + """Returns the auth token used for authenticating requests. + + Returns: + string + """ + current_scopes = lookup_scopes(self.service) + if current_scopes: + token = self.token_store.find_token(current_scopes[0]) + if hasattr(token, 'auth_header'): + return token.auth_header + return None + + def _GetCaptchaToken(self): + """Returns a captcha token if the most recent login attempt generated one. + + The captcha token is only set if the Programmatic Login attempt failed + because the Google service issued a captcha challenge. + + Returns: + string + """ + return self.__captcha_token + + def __GetCaptchaToken(self): + return self._GetCaptchaToken() + + captcha_token = property(__GetCaptchaToken, + doc="""Get the captcha token for a login request.""") + + def _GetCaptchaURL(self): + """Returns the URL of the captcha image if a login attempt generated one. + + The captcha URL is only set if the Programmatic Login attempt failed + because the Google service issued a captcha challenge. + + Returns: + string + """ + return self.__captcha_url + + def __GetCaptchaURL(self): + return self._GetCaptchaURL() + + captcha_url = property(__GetCaptchaURL, + doc="""Get the captcha URL for a login request.""") + + def GetGeneratorFromLinkFinder(self, link_finder, func, + num_retries=DEFAULT_NUM_RETRIES, + delay=DEFAULT_DELAY, + backoff=DEFAULT_BACKOFF): + """returns a generator for pagination""" + yield link_finder + next = link_finder.GetNextLink() + while next is not None: + next_feed = func(str(self.GetWithRetries( + next.href, num_retries=num_retries, delay=delay, backoff=backoff))) + yield next_feed + next = next_feed.GetNextLink() + + def _GetElementGeneratorFromLinkFinder(self, link_finder, func, + num_retries=DEFAULT_NUM_RETRIES, + delay=DEFAULT_DELAY, + backoff=DEFAULT_BACKOFF): + for element in self.GetGeneratorFromLinkFinder(link_finder, func, + num_retries=num_retries, + delay=delay, + backoff=backoff).entry: + yield element + + def GetOAuthInputParameters(self): + return self._oauth_input_params + + def SetOAuthInputParameters(self, signature_method, consumer_key, + consumer_secret=None, rsa_key=None, + two_legged_oauth=False, requestor_id=None): + """Sets parameters required for using OAuth authentication mechanism. + + NOTE: Though consumer_secret and rsa_key are optional, either of the two + is required depending on the value of the signature_method. 
+ + Args: + signature_method: class which provides implementation for strategy class + oauth.oauth.OAuthSignatureMethod. Signature method to be used for + signing each request. Valid implementations are provided as the + constants defined by gdata.auth.OAuthSignatureMethod. Currently + they are gdata.auth.OAuthSignatureMethod.RSA_SHA1 and + gdata.auth.OAuthSignatureMethod.HMAC_SHA1 + consumer_key: string Domain identifying third_party web application. + consumer_secret: string (optional) Secret generated during registration. + Required only for HMAC_SHA1 signature method. + rsa_key: string (optional) Private key required for RSA_SHA1 signature + method. + two_legged_oauth: boolean (optional) Enables two-legged OAuth process. + requestor_id: string (optional) User email adress to make requests on + their behalf. This parameter should only be set when two_legged_oauth + is True. + """ + self._oauth_input_params = gdata.auth.OAuthInputParams( + signature_method, consumer_key, consumer_secret=consumer_secret, + rsa_key=rsa_key, requestor_id=requestor_id) + if two_legged_oauth: + oauth_token = gdata.auth.OAuthToken( + oauth_input_params=self._oauth_input_params) + self.SetOAuthToken(oauth_token) + + def FetchOAuthRequestToken(self, scopes=None, extra_parameters=None, + request_url='%s/accounts/OAuthGetRequestToken' % \ + AUTH_SERVER_HOST, oauth_callback=None): + """Fetches and sets the OAuth request token and returns it. + + Args: + scopes: string or list of string base URL(s) of the service(s) to be + accessed. If None, then this method tries to determine the + scope(s) from the current service. + extra_parameters: dict (optional) key-value pairs as any additional + parameters to be included in the URL and signature while making a + request for fetching an OAuth request token. All the OAuth parameters + are added by default. But if provided through this argument, any + default parameters will be overwritten. For e.g. a default parameter + oauth_version 1.0 can be overwritten if + extra_parameters = {'oauth_version': '2.0'} + request_url: Request token URL. The default is + 'https://www.google.com/accounts/OAuthGetRequestToken'. + oauth_callback: str (optional) If set, it is assume the client is using + the OAuth v1.0a protocol where the callback url is sent in the + request token step. If the oauth_callback is also set in + extra_params, this value will override that one. + + Returns: + The fetched request token as a gdata.auth.OAuthToken object. + + Raises: + FetchingOAuthRequestTokenFailed if the server responded to the request + with an error. 
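+
+    Example (an illustrative sketch; the consumer key, consumer secret and
+    scope below are placeholder values, and SetOAuthInputParameters must be
+    called before this method):
+
+      client.SetOAuthInputParameters(
+          gdata.auth.OAuthSignatureMethod.HMAC_SHA1,
+          'example.com', consumer_secret='my_consumer_secret')
+      request_token = client.FetchOAuthRequestToken(
+          scopes=['http://www.google.com/calendar/feeds'])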
+ """ + if scopes is None: + scopes = lookup_scopes(self.service) + if not isinstance(scopes, (list, tuple)): + scopes = [scopes,] + if oauth_callback: + if extra_parameters is not None: + extra_parameters['oauth_callback'] = oauth_callback + else: + extra_parameters = {'oauth_callback': oauth_callback} + request_token_url = gdata.auth.GenerateOAuthRequestTokenUrl( + self._oauth_input_params, scopes, + request_token_url=request_url, + extra_parameters=extra_parameters) + response = self.http_client.request('GET', str(request_token_url)) + if response.status == 200: + token = gdata.auth.OAuthToken() + token.set_token_string(response.read()) + token.scopes = scopes + token.oauth_input_params = self._oauth_input_params + self.SetOAuthToken(token) + return token + error = { + 'status': response.status, + 'reason': 'Non 200 response on fetch request token', + 'body': response.read() + } + raise FetchingOAuthRequestTokenFailed(error) + + def SetOAuthToken(self, oauth_token): + """Attempts to set the current token and add it to the token store. + + The oauth_token can be any OAuth token i.e. unauthorized request token, + authorized request token or access token. + This method also attempts to add the token to the token store. + Use this method any time you want the current token to point to the + oauth_token passed. For e.g. call this method with the request token + you receive from FetchOAuthRequestToken. + + Args: + request_token: gdata.auth.OAuthToken OAuth request token. + """ + if self.auto_set_current_token: + self.current_token = oauth_token + if self.auto_store_tokens: + self.token_store.add_token(oauth_token) + + def GenerateOAuthAuthorizationURL( + self, request_token=None, callback_url=None, extra_params=None, + include_scopes_in_callback=False, + scopes_param_prefix=OAUTH_SCOPE_URL_PARAM_NAME, + request_url='%s/accounts/OAuthAuthorizeToken' % AUTH_SERVER_HOST): + """Generates URL at which user will login to authorize the request token. + + Args: + request_token: gdata.auth.OAuthToken (optional) OAuth request token. + If not specified, then the current token will be used if it is of + type <gdata.auth.OAuthToken>, else it is found by looking in the + token_store by looking for a token for the current scope. + callback_url: string (optional) The URL user will be sent to after + logging in and granting access. + extra_params: dict (optional) Additional parameters to be sent. + include_scopes_in_callback: Boolean (default=False) if set to True, and + if 'callback_url' is present, the 'callback_url' will be modified to + include the scope(s) from the request token as a URL parameter. The + key for the 'callback' URL's scope parameter will be + OAUTH_SCOPE_URL_PARAM_NAME. The benefit of including the scope URL as + a parameter to the 'callback' URL, is that the page which receives + the OAuth token will be able to tell which URLs the token grants + access to. + scopes_param_prefix: string (default='oauth_token_scope') The URL + parameter key which maps to the list of valid scopes for the token. + This URL parameter will be included in the callback URL along with + the scopes of the token as value if include_scopes_in_callback=True. + request_url: Authorization URL. The default is + 'https://www.google.com/accounts/OAuthAuthorizeToken'. + Returns: + A string URL at which the user is required to login. + + Raises: + NonOAuthToken if the user's request token is not an OAuth token or if a + request token was not available. 
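+
+    Example (an illustrative sketch, continuing from a request token fetched
+    with FetchOAuthRequestToken; the callback URL is a placeholder):
+
+      auth_url = client.GenerateOAuthAuthorizationURL(
+          callback_url='http://www.example.com/oauth_callback')
+      # Send the user's browser to auth_url; once the token is authorized it
+      # can be exchanged for an access token with UpgradeToOAuthAccessToken.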
+ """ + if request_token and not isinstance(request_token, gdata.auth.OAuthToken): + raise NonOAuthToken + if not request_token: + if isinstance(self.current_token, gdata.auth.OAuthToken): + request_token = self.current_token + else: + current_scopes = lookup_scopes(self.service) + if current_scopes: + token = self.token_store.find_token(current_scopes[0]) + if isinstance(token, gdata.auth.OAuthToken): + request_token = token + if not request_token: + raise NonOAuthToken + return str(gdata.auth.GenerateOAuthAuthorizationUrl( + request_token, + authorization_url=request_url, + callback_url=callback_url, extra_params=extra_params, + include_scopes_in_callback=include_scopes_in_callback, + scopes_param_prefix=scopes_param_prefix)) + + def UpgradeToOAuthAccessToken(self, authorized_request_token=None, + request_url='%s/accounts/OAuthGetAccessToken' \ + % AUTH_SERVER_HOST, oauth_version='1.0', + oauth_verifier=None): + """Upgrades the authorized request token to an access token and returns it + + Args: + authorized_request_token: gdata.auth.OAuthToken (optional) OAuth request + token. If not specified, then the current token will be used if it is + of type <gdata.auth.OAuthToken>, else it is found by looking in the + token_store by looking for a token for the current scope. + request_url: Access token URL. The default is + 'https://www.google.com/accounts/OAuthGetAccessToken'. + oauth_version: str (default='1.0') oauth_version parameter. All other + 'oauth_' parameters are added by default. This parameter too, is + added by default but here you can override it's value. + oauth_verifier: str (optional) If present, it is assumed that the client + will use the OAuth v1.0a protocol which includes passing the + oauth_verifier (as returned by the SP) in the access token step. + + Returns: + Access token + + Raises: + NonOAuthToken if the user's authorized request token is not an OAuth + token or if an authorized request token was not available. + TokenUpgradeFailed if the server responded to the request with an + error. + """ + if (authorized_request_token and + not isinstance(authorized_request_token, gdata.auth.OAuthToken)): + raise NonOAuthToken + if not authorized_request_token: + if isinstance(self.current_token, gdata.auth.OAuthToken): + authorized_request_token = self.current_token + else: + current_scopes = lookup_scopes(self.service) + if current_scopes: + token = self.token_store.find_token(current_scopes[0]) + if isinstance(token, gdata.auth.OAuthToken): + authorized_request_token = token + if not authorized_request_token: + raise NonOAuthToken + access_token_url = gdata.auth.GenerateOAuthAccessTokenUrl( + authorized_request_token, + self._oauth_input_params, + access_token_url=request_url, + oauth_version=oauth_version, + oauth_verifier=oauth_verifier) + response = self.http_client.request('GET', str(access_token_url)) + if response.status == 200: + token = gdata.auth.OAuthTokenFromHttpBody(response.read()) + token.scopes = authorized_request_token.scopes + token.oauth_input_params = authorized_request_token.oauth_input_params + self.SetOAuthToken(token) + return token + else: + raise TokenUpgradeFailed({'status': response.status, + 'reason': 'Non 200 response on upgrade', + 'body': response.read()}) + + def RevokeOAuthToken(self, request_url='%s/accounts/AuthSubRevokeToken' % \ + AUTH_SERVER_HOST): + """Revokes an existing OAuth token. + + request_url: Token revoke URL. The default is + 'https://www.google.com/accounts/AuthSubRevokeToken'. 
+ Raises: + NonOAuthToken if the user's auth token is not an OAuth token. + RevokingOAuthTokenFailed if request for revoking an OAuth token failed. + """ + scopes = lookup_scopes(self.service) + token = self.token_store.find_token(scopes[0]) + if not isinstance(token, gdata.auth.OAuthToken): + raise NonOAuthToken + + response = token.perform_request(self.http_client, 'GET', request_url, + headers={'Content-Type':'application/x-www-form-urlencoded'}) + if response.status == 200: + self.token_store.remove_token(token) + else: + raise RevokingOAuthTokenFailed + + def GetAuthSubToken(self): + """Returns the AuthSub token as a string. + + If the token is an gdta.auth.AuthSubToken, the Authorization Label + ("AuthSub token") is removed. + + This method examines the current_token to see if it is an AuthSubToken + or SecureAuthSubToken. If not, it searches the token_store for a token + which matches the current scope. + + The current scope is determined by the service name string member. + + Returns: + If the current_token is set to an AuthSubToken/SecureAuthSubToken, + return the token string. If there is no current_token, a token string + for a token which matches the service object's default scope is returned. + If there are no tokens valid for the scope, returns None. + """ + if isinstance(self.current_token, gdata.auth.AuthSubToken): + return self.current_token.get_token_string() + current_scopes = lookup_scopes(self.service) + if current_scopes: + token = self.token_store.find_token(current_scopes[0]) + if isinstance(token, gdata.auth.AuthSubToken): + return token.get_token_string() + else: + token = self.token_store.find_token(atom.token_store.SCOPE_ALL) + if isinstance(token, gdata.auth.ClientLoginToken): + return token.get_token_string() + return None + + def SetAuthSubToken(self, token, scopes=None, rsa_key=None): + """Sets the token sent in requests to an AuthSub token. + + Sets the current_token and attempts to add the token to the token_store. + + Only use this method if you have received a token from the AuthSub + service. The auth token is set automatically when UpgradeToSessionToken() + is used. See documentation for Google AuthSub here: + http://code.google.com/apis/accounts/AuthForWebApps.html + + Args: + token: gdata.auth.AuthSubToken or gdata.auth.SecureAuthSubToken or string + The token returned by the AuthSub service. If the token is an + AuthSubToken or SecureAuthSubToken, the scope information stored in + the token is used. If the token is a string, the scopes parameter is + used to determine the valid scopes. + scopes: list of URLs for which the token is valid. This is only used + if the token parameter is a string. + rsa_key: string (optional) Private key required for RSA_SHA1 signature + method. This parameter is necessary if the token is a string + representing a secure token. + """ + if not isinstance(token, gdata.auth.AuthSubToken): + token_string = token + if rsa_key: + token = gdata.auth.SecureAuthSubToken(rsa_key) + else: + token = gdata.auth.AuthSubToken() + + token.set_token_string(token_string) + + # If no scopes were set for the token, use the scopes passed in, or + # try to determine the scopes based on the current service name. If + # all else fails, set the token to match all requests. 
+ if not token.scopes: + if scopes is None: + scopes = lookup_scopes(self.service) + if scopes is None: + scopes = [atom.token_store.SCOPE_ALL] + token.scopes = scopes + if self.auto_set_current_token: + self.current_token = token + if self.auto_store_tokens: + self.token_store.add_token(token) + + def GetClientLoginToken(self): + """Returns the token string for the current token or a token matching the + service scope. + + If the current_token is a ClientLoginToken, the token string for + the current token is returned. If the current_token is not set, this method + searches for a token in the token_store which is valid for the service + object's current scope. + + The current scope is determined by the service name string member. + The token string is the end of the Authorization header, it doesn not + include the ClientLogin label. + """ + if isinstance(self.current_token, gdata.auth.ClientLoginToken): + return self.current_token.get_token_string() + current_scopes = lookup_scopes(self.service) + if current_scopes: + token = self.token_store.find_token(current_scopes[0]) + if isinstance(token, gdata.auth.ClientLoginToken): + return token.get_token_string() + else: + token = self.token_store.find_token(atom.token_store.SCOPE_ALL) + if isinstance(token, gdata.auth.ClientLoginToken): + return token.get_token_string() + return None + + def SetClientLoginToken(self, token, scopes=None): + """Sets the token sent in requests to a ClientLogin token. + + This method sets the current_token to a new ClientLoginToken and it + also attempts to add the ClientLoginToken to the token_store. + + Only use this method if you have received a token from the ClientLogin + service. The auth_token is set automatically when ProgrammaticLogin() + is used. See documentation for Google ClientLogin here: + http://code.google.com/apis/accounts/docs/AuthForInstalledApps.html + + Args: + token: string or instance of a ClientLoginToken. + """ + if not isinstance(token, gdata.auth.ClientLoginToken): + token_string = token + token = gdata.auth.ClientLoginToken() + token.set_token_string(token_string) + + if not token.scopes: + if scopes is None: + scopes = lookup_scopes(self.service) + if scopes is None: + scopes = [atom.token_store.SCOPE_ALL] + token.scopes = scopes + if self.auto_set_current_token: + self.current_token = token + if self.auto_store_tokens: + self.token_store.add_token(token) + + # Private methods to create the source property. + def __GetSource(self): + return self.__source + + def __SetSource(self, new_source): + self.__source = new_source + # Update the UserAgent header to include the new application name. + self.additional_headers['User-Agent'] = atom.http_interface.USER_AGENT % ( + self.__source,) + + source = property(__GetSource, __SetSource, + doc="""The source is the name of the application making the request. + It should be in the form company_id-app_name-app_version""") + + # Authentication operations + + def ProgrammaticLogin(self, captcha_token=None, captcha_response=None): + """Authenticates the user and sets the GData Auth token. + + Login retreives a temporary auth token which must be used with all + requests to GData services. The auth token is stored in the GData client + object. + + Login is also used to respond to a captcha challenge. If the user's login + attempt failed with a CaptchaRequired error, the user can respond by + calling Login with the captcha token and the answer to the challenge. 
+ + Args: + captcha_token: string (optional) The identifier for the captcha challenge + which was presented to the user. + captcha_response: string (optional) The user's answer to the captch + challenge. + + Raises: + CaptchaRequired if the login service will require a captcha response + BadAuthentication if the login service rejected the username or password + Error if the login service responded with a 403 different from the above + """ + request_body = gdata.auth.generate_client_login_request_body(self.email, + self.password, self.service, self.source, self.account_type, + captcha_token, captcha_response) + + # If the user has defined their own authentication service URL, + # send the ClientLogin requests to this URL: + if not self.auth_service_url: + auth_request_url = AUTH_SERVER_HOST + '/accounts/ClientLogin' + else: + auth_request_url = self.auth_service_url + + auth_response = self.http_client.request('POST', auth_request_url, + data=request_body, + headers={'Content-Type':'application/x-www-form-urlencoded'}) + response_body = auth_response.read() + + if auth_response.status == 200: + # TODO: insert the token into the token_store directly. + self.SetClientLoginToken( + gdata.auth.get_client_login_token(response_body)) + self.__captcha_token = None + self.__captcha_url = None + + elif auth_response.status == 403: + # Examine each line to find the error type and the captcha token and + # captch URL if they are present. + captcha_parameters = gdata.auth.get_captcha_challenge(response_body, + captcha_base_url='%s/accounts/' % AUTH_SERVER_HOST) + if captcha_parameters: + self.__captcha_token = captcha_parameters['token'] + self.__captcha_url = captcha_parameters['url'] + raise CaptchaRequired, 'Captcha Required' + elif response_body.splitlines()[0] == 'Error=BadAuthentication': + self.__captcha_token = None + self.__captcha_url = None + raise BadAuthentication, 'Incorrect username or password' + else: + self.__captcha_token = None + self.__captcha_url = None + raise Error, 'Server responded with a 403 code' + elif auth_response.status == 302: + self.__captcha_token = None + self.__captcha_url = None + # Google tries to redirect all bad URLs back to + # http://www.google.<locale>. If a redirect + # attempt is made, assume the user has supplied an incorrect authentication URL + raise BadAuthenticationServiceURL, 'Server responded with a 302 code.' + + def ClientLogin(self, username, password, account_type=None, service=None, + auth_service_url=None, source=None, captcha_token=None, + captcha_response=None): + """Convenience method for authenticating using ProgrammaticLogin. + + Sets values for email, password, and other optional members. + + Args: + username: + password: + account_type: string (optional) + service: string (optional) + auth_service_url: string (optional) + captcha_token: string (optional) + captcha_response: string (optional) + """ + self.email = username + self.password = password + + if account_type: + self.account_type = account_type + if service: + self.service = service + if source: + self.source = source + if auth_service_url: + self.auth_service_url = auth_service_url + + self.ProgrammaticLogin(captcha_token, captcha_response) + + def GenerateAuthSubURL(self, next, scope, secure=False, session=True, + domain='default'): + """Generate a URL at which the user will login and be redirected back. + + Users enter their credentials on a Google login page and a token is sent + to the URL specified in next. 
See documentation for AuthSub login at: + http://code.google.com/apis/accounts/docs/AuthSub.html + + Args: + next: string The URL user will be sent to after logging in. + scope: string or list of strings. The URLs of the services to be + accessed. + secure: boolean (optional) Determines whether or not the issued token + is a secure token. + session: boolean (optional) Determines whether or not the issued token + can be upgraded to a session token. + """ + if not isinstance(scope, (list, tuple)): + scope = (scope,) + return gdata.auth.generate_auth_sub_url(next, scope, secure=secure, + session=session, + request_url='%s/accounts/AuthSubRequest' % AUTH_SERVER_HOST, + domain=domain) + + def UpgradeToSessionToken(self, token=None): + """Upgrades a single use AuthSub token to a session token. + + Args: + token: A gdata.auth.AuthSubToken or gdata.auth.SecureAuthSubToken + (optional) which is good for a single use but can be upgraded + to a session token. If no token is passed in, the token + is found by looking in the token_store by looking for a token + for the current scope. + + Raises: + NonAuthSubToken if the user's auth token is not an AuthSub token + TokenUpgradeFailed if the server responded to the request with an + error. + """ + if token is None: + scopes = lookup_scopes(self.service) + if scopes: + token = self.token_store.find_token(scopes[0]) + else: + token = self.token_store.find_token(atom.token_store.SCOPE_ALL) + if not isinstance(token, gdata.auth.AuthSubToken): + raise NonAuthSubToken + + self.SetAuthSubToken(self.upgrade_to_session_token(token)) + + def upgrade_to_session_token(self, token): + """Upgrades a single use AuthSub token to a session token. + + Args: + token: A gdata.auth.AuthSubToken or gdata.auth.SecureAuthSubToken + which is good for a single use but can be upgraded to a + session token. + + Returns: + The upgraded token as a gdata.auth.AuthSubToken object. + + Raises: + TokenUpgradeFailed if the server responded to the request with an + error. + """ + response = token.perform_request(self.http_client, 'GET', + AUTH_SERVER_HOST + '/accounts/AuthSubSessionToken', + headers={'Content-Type':'application/x-www-form-urlencoded'}) + response_body = response.read() + if response.status == 200: + token.set_token_string( + gdata.auth.token_from_http_body(response_body)) + return token + else: + raise TokenUpgradeFailed({'status': response.status, + 'reason': 'Non 200 response on upgrade', + 'body': response_body}) + + def RevokeAuthSubToken(self): + """Revokes an existing AuthSub token. + + Raises: + NonAuthSubToken if the user's auth token is not an AuthSub token + """ + scopes = lookup_scopes(self.service) + token = self.token_store.find_token(scopes[0]) + if not isinstance(token, gdata.auth.AuthSubToken): + raise NonAuthSubToken + + response = token.perform_request(self.http_client, 'GET', + AUTH_SERVER_HOST + '/accounts/AuthSubRevokeToken', + headers={'Content-Type':'application/x-www-form-urlencoded'}) + if response.status == 200: + self.token_store.remove_token(token) + + def AuthSubTokenInfo(self): + """Fetches the AuthSub token's metadata from the server. 
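+
+    On success the server's raw response body is returned as a string; any
+    other status raises RequestError.
+
+    Example (an illustrative sketch; assumes an AuthSub token for the current
+    service scope has already been stored with SetAuthSubToken):
+
+      token_info = client.AuthSubTokenInfo()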
+ + Raises: + NonAuthSubToken if the user's auth token is not an AuthSub token + """ + scopes = lookup_scopes(self.service) + token = self.token_store.find_token(scopes[0]) + if not isinstance(token, gdata.auth.AuthSubToken): + raise NonAuthSubToken + + response = token.perform_request(self.http_client, 'GET', + AUTH_SERVER_HOST + '/accounts/AuthSubTokenInfo', + headers={'Content-Type':'application/x-www-form-urlencoded'}) + result_body = response.read() + if response.status == 200: + return result_body + else: + raise RequestError, {'status': response.status, + 'body': result_body} + + def GetWithRetries(self, uri, extra_headers=None, redirects_remaining=4, + encoding='UTF-8', converter=None, num_retries=DEFAULT_NUM_RETRIES, + delay=DEFAULT_DELAY, backoff=DEFAULT_BACKOFF, logger=None): + """This is a wrapper method for Get with retrying capability. + + To avoid various errors while retrieving bulk entities by retrying + specified times. + + Note this method relies on the time module and so may not be usable + by default in Python2.2. + + Args: + num_retries: Integer; the retry count. + delay: Integer; the initial delay for retrying. + backoff: Integer; how much the delay should lengthen after each failure. + logger: An object which has a debug(str) method to receive logging + messages. Recommended that you pass in the logging module. + Raises: + ValueError if any of the parameters has an invalid value. + RanOutOfTries on failure after number of retries. + """ + # Moved import for time module inside this method since time is not a + # default module in Python2.2. This method will not be usable in + # Python2.2. + import time + if backoff <= 1: + raise ValueError("backoff must be greater than 1") + num_retries = int(num_retries) + + if num_retries < 0: + raise ValueError("num_retries must be 0 or greater") + + if delay <= 0: + raise ValueError("delay must be greater than 0") + + # Let's start + mtries, mdelay = num_retries, delay + while mtries > 0: + if mtries != num_retries: + if logger: + logger.debug("Retrying: %s" % uri) + try: + rv = self.Get(uri, extra_headers=extra_headers, + redirects_remaining=redirects_remaining, + encoding=encoding, converter=converter) + except SystemExit: + # Allow this error + raise + except RequestError, e: + # Error 500 is 'internal server error' and warrants a retry + # Error 503 is 'service unavailable' and warrants a retry + if e[0]['status'] not in [500, 503]: + raise e + # Else, fall through to the retry code... + except Exception, e: + if logger: + logger.debug(e) + # Fall through to the retry code... + else: + # This is the right path. + return rv + mtries -= 1 + time.sleep(mdelay) + mdelay *= backoff + raise RanOutOfTries('Ran out of tries.') + + # CRUD operations + def Get(self, uri, extra_headers=None, redirects_remaining=4, + encoding='UTF-8', converter=None): + """Query the GData API with the given URI + + The uri is the portion of the URI after the server value + (ex: www.google.com). + + To perform a query against Google Base, set the server to + 'base.google.com' and set the uri to '/base/feeds/...', where ... is + your query. For example, to find snippets for all digital cameras uri + should be set to: '/base/feeds/snippets?bq=digital+camera' + + Args: + uri: string The query in the form of a URI. Example: + '/base/feeds/snippets?bq=digital+camera'. + extra_headers: dictionary (optional) Extra HTTP headers to be included + in the GET request. These headers are in addition to + those stored in the client's additional_headers property. 
+ The client automatically sets the Content-Type and + Authorization headers. + redirects_remaining: int (optional) Tracks the number of additional + redirects this method will allow. If the service object receives + a redirect and remaining is 0, it will not follow the redirect. + This was added to avoid infinite redirect loops. + encoding: string (optional) The character encoding for the server's + response. Default is UTF-8 + converter: func (optional) A function which will transform + the server's results before it is returned. Example: use + GDataFeedFromString to parse the server response as if it + were a GDataFeed. + + Returns: + If there is no ResultsTransformer specified in the call, a GDataFeed + or GDataEntry depending on which is sent from the server. If the + response is niether a feed or entry and there is no ResultsTransformer, + return a string. If there is a ResultsTransformer, the returned value + will be that of the ResultsTransformer function. + """ + + if extra_headers is None: + extra_headers = {} + + if self.__gsessionid is not None: + if uri.find('gsessionid=') < 0: + if uri.find('?') > -1: + uri += '&gsessionid=%s' % (self.__gsessionid,) + else: + uri += '?gsessionid=%s' % (self.__gsessionid,) + + server_response = self.request('GET', uri, + headers=extra_headers) + result_body = server_response.read() + + if server_response.status == 200: + if converter: + return converter(result_body) + # There was no ResultsTransformer specified, so try to convert the + # server's response into a GDataFeed. + feed = gdata.GDataFeedFromString(result_body) + if not feed: + # If conversion to a GDataFeed failed, try to convert the server's + # response to a GDataEntry. + entry = gdata.GDataEntryFromString(result_body) + if not entry: + # The server's response wasn't a feed, or an entry, so return the + # response body as a string. + return result_body + return entry + return feed + elif server_response.status == 302: + if redirects_remaining > 0: + location = (server_response.getheader('Location') + or server_response.getheader('location')) + if location is not None: + m = re.compile('[\?\&]gsessionid=(\w*\-)').search(location) + if m is not None: + self.__gsessionid = m.group(1) + return GDataService.Get(self, location, extra_headers, redirects_remaining - 1, + encoding=encoding, converter=converter) + else: + raise RequestError, {'status': server_response.status, + 'reason': '302 received without Location header', + 'body': result_body} + else: + raise RequestError, {'status': server_response.status, + 'reason': 'Redirect received, but redirects_remaining <= 0', + 'body': result_body} + else: + raise RequestError, {'status': server_response.status, + 'reason': server_response.reason, 'body': result_body} + + def GetMedia(self, uri, extra_headers=None): + """Returns a MediaSource containing media and its metadata from the given + URI string. + """ + response_handle = self.request('GET', uri, + headers=extra_headers) + return gdata.MediaSource(response_handle, response_handle.getheader( + 'Content-Type'), + response_handle.getheader('Content-Length')) + + def GetEntry(self, uri, extra_headers=None): + """Query the GData API with the given URI and receive an Entry. + + See also documentation for gdata.service.Get + + Args: + uri: string The query in the form of a URI. Example: + '/base/feeds/snippets?bq=digital+camera'. + extra_headers: dictionary (optional) Extra HTTP headers to be included + in the GET request. 
These headers are in addition to + those stored in the client's additional_headers property. + The client automatically sets the Content-Type and + Authorization headers. + + Returns: + A GDataEntry built from the XML in the server's response. + """ + + result = GDataService.Get(self, uri, extra_headers, + converter=atom.EntryFromString) + if isinstance(result, atom.Entry): + return result + else: + raise UnexpectedReturnType, 'Server did not send an entry' + + def GetFeed(self, uri, extra_headers=None, + converter=gdata.GDataFeedFromString): + """Query the GData API with the given URI and receive a Feed. + + See also documentation for gdata.service.Get + + Args: + uri: string The query in the form of a URI. Example: + '/base/feeds/snippets?bq=digital+camera'. + extra_headers: dictionary (optional) Extra HTTP headers to be included + in the GET request. These headers are in addition to + those stored in the client's additional_headers property. + The client automatically sets the Content-Type and + Authorization headers. + + Returns: + A GDataFeed built from the XML in the server's response. + """ + + result = GDataService.Get(self, uri, extra_headers, converter=converter) + if isinstance(result, atom.Feed): + return result + else: + raise UnexpectedReturnType, 'Server did not send a feed' + + def GetNext(self, feed): + """Requests the next 'page' of results in the feed. + + This method uses the feed's next link to request an additional feed + and uses the class of the feed to convert the results of the GET request. + + Args: + feed: atom.Feed or a subclass. The feed should contain a next link and + the type of the feed will be applied to the results from the + server. The new feed which is returned will be of the same class + as this feed which was passed in. + + Returns: + A new feed representing the next set of results in the server's feed. + The type of this feed will match that of the feed argument. + """ + next_link = feed.GetNextLink() + # Create a closure which will convert an XML string to the class of + # the feed object passed in. + def ConvertToFeedClass(xml_string): + return atom.CreateClassFromXMLString(feed.__class__, xml_string) + # Make a GET request on the next link and use the above closure for the + # converted which processes the XML string from the server. + if next_link and next_link.href: + return GDataService.Get(self, next_link.href, + converter=ConvertToFeedClass) + else: + return None + + def Post(self, data, uri, extra_headers=None, url_params=None, + escape_params=True, redirects_remaining=4, media_source=None, + converter=None): + """Insert or update data into a GData service at the given URI. + + Args: + data: string, ElementTree._Element, atom.Entry, or gdata.GDataEntry The + XML to be sent to the uri. + uri: string The location (feed) to which the data should be inserted. + Example: '/base/feeds/items'. + extra_headers: dict (optional) HTTP headers which are to be included. + The client automatically sets the Content-Type, + Authorization, and Content-Length headers. + url_params: dict (optional) Additional URL parameters to be included + in the URI. These are translated into query arguments + in the form '&dict_key=value&...'. + Example: {'max-results': '250'} becomes &max-results=250 + escape_params: boolean (optional) If false, the calling code has already + ensured that the query will form a valid URL (all + reserved characters have been escaped). If true, this + method will escape the query and any URL parameters + provided. 
+ media_source: MediaSource (optional) Container for the media to be sent + along with the entry, if provided. + converter: func (optional) A function which will be executed on the + server's response. Often this is a function like + GDataEntryFromString which will parse the body of the server's + response and return a GDataEntry. + + Returns: + If the post succeeded, this method will return a GDataFeed, GDataEntry, + or the results of running converter on the server's result body (if + converter was specified). + """ + return GDataService.PostOrPut(self, 'POST', data, uri, + extra_headers=extra_headers, url_params=url_params, + escape_params=escape_params, redirects_remaining=redirects_remaining, + media_source=media_source, converter=converter) + + def PostOrPut(self, verb, data, uri, extra_headers=None, url_params=None, + escape_params=True, redirects_remaining=4, media_source=None, + converter=None): + """Insert data into a GData service at the given URI. + + Args: + verb: string, either 'POST' or 'PUT' + data: string, ElementTree._Element, atom.Entry, or gdata.GDataEntry The + XML to be sent to the uri. + uri: string The location (feed) to which the data should be inserted. + Example: '/base/feeds/items'. + extra_headers: dict (optional) HTTP headers which are to be included. + The client automatically sets the Content-Type, + Authorization, and Content-Length headers. + url_params: dict (optional) Additional URL parameters to be included + in the URI. These are translated into query arguments + in the form '&dict_key=value&...'. + Example: {'max-results': '250'} becomes &max-results=250 + escape_params: boolean (optional) If false, the calling code has already + ensured that the query will form a valid URL (all + reserved characters have been escaped). If true, this + method will escape the query and any URL parameters + provided. + media_source: MediaSource (optional) Container for the media to be sent + along with the entry, if provided. + converter: func (optional) A function which will be executed on the + server's response. Often this is a function like + GDataEntryFromString which will parse the body of the server's + response and return a GDataEntry. + + Returns: + If the post succeeded, this method will return a GDataFeed, GDataEntry, + or the results of running converter on the server's result body (if + converter was specified). 
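+
+    Example (an illustrative sketch; Post and Put simply delegate to this
+    method, and the entry object and feed URI below are placeholders):
+
+      new_entry = client.Post(entry, '/base/feeds/items')
+      # ...is equivalent to:
+      new_entry = client.PostOrPut('POST', entry, '/base/feeds/items')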
+ """ + if extra_headers is None: + extra_headers = {} + + if self.__gsessionid is not None: + if uri.find('gsessionid=') < 0: + if url_params is None: + url_params = {} + url_params['gsessionid'] = self.__gsessionid + + if data and media_source: + if ElementTree.iselement(data): + data_str = ElementTree.tostring(data) + else: + data_str = str(data) + + multipart = [] + multipart.append('Media multipart posting\r\n--END_OF_PART\r\n' + \ + 'Content-Type: application/atom+xml\r\n\r\n') + multipart.append('\r\n--END_OF_PART\r\nContent-Type: ' + \ + media_source.content_type+'\r\n\r\n') + multipart.append('\r\n--END_OF_PART--\r\n') + + extra_headers['MIME-version'] = '1.0' + extra_headers['Content-Length'] = str(len(multipart[0]) + + len(multipart[1]) + len(multipart[2]) + + len(data_str) + media_source.content_length) + + extra_headers['Content-Type'] = 'multipart/related; boundary=END_OF_PART' + server_response = self.request(verb, uri, + data=[multipart[0], data_str, multipart[1], media_source.file_handle, + multipart[2]], headers=extra_headers, url_params=url_params) + result_body = server_response.read() + + elif media_source or isinstance(data, gdata.MediaSource): + if isinstance(data, gdata.MediaSource): + media_source = data + extra_headers['Content-Length'] = str(media_source.content_length) + extra_headers['Content-Type'] = media_source.content_type + server_response = self.request(verb, uri, + data=media_source.file_handle, headers=extra_headers, + url_params=url_params) + result_body = server_response.read() + + else: + http_data = data + if 'Content-Type' not in extra_headers: + content_type = 'application/atom+xml' + extra_headers['Content-Type'] = content_type + server_response = self.request(verb, uri, data=http_data, + headers=extra_headers, url_params=url_params) + result_body = server_response.read() + + # Server returns 201 for most post requests, but when performing a batch + # request the server responds with a 200 on success. + if server_response.status == 201 or server_response.status == 200: + if converter: + return converter(result_body) + feed = gdata.GDataFeedFromString(result_body) + if not feed: + entry = gdata.GDataEntryFromString(result_body) + if not entry: + return result_body + return entry + return feed + elif server_response.status == 302: + if redirects_remaining > 0: + location = (server_response.getheader('Location') + or server_response.getheader('location')) + if location is not None: + m = re.compile('[\?\&]gsessionid=(\w*\-)').search(location) + if m is not None: + self.__gsessionid = m.group(1) + return GDataService.PostOrPut(self, verb, data, location, + extra_headers, url_params, escape_params, + redirects_remaining - 1, media_source, converter=converter) + else: + raise RequestError, {'status': server_response.status, + 'reason': '302 received without Location header', + 'body': result_body} + else: + raise RequestError, {'status': server_response.status, + 'reason': 'Redirect received, but redirects_remaining <= 0', + 'body': result_body} + else: + raise RequestError, {'status': server_response.status, + 'reason': server_response.reason, 'body': result_body} + + def Put(self, data, uri, extra_headers=None, url_params=None, + escape_params=True, redirects_remaining=3, media_source=None, + converter=None): + """Updates an entry at the given URI. + + Args: + data: string, ElementTree._Element, or xml_wrapper.ElementWrapper The + XML containing the updated data. + uri: string A URI indicating entry to which the update will be applied. 
+ Example: '/base/feeds/items/ITEM-ID' + extra_headers: dict (optional) HTTP headers which are to be included. + The client automatically sets the Content-Type, + Authorization, and Content-Length headers. + url_params: dict (optional) Additional URL parameters to be included + in the URI. These are translated into query arguments + in the form '&dict_key=value&...'. + Example: {'max-results': '250'} becomes &max-results=250 + escape_params: boolean (optional) If false, the calling code has already + ensured that the query will form a valid URL (all + reserved characters have been escaped). If true, this + method will escape the query and any URL parameters + provided. + converter: func (optional) A function which will be executed on the + server's response. Often this is a function like + GDataEntryFromString which will parse the body of the server's + response and return a GDataEntry. + + Returns: + If the put succeeded, this method will return a GDataFeed, GDataEntry, + or the results of running converter on the server's result body (if + converter was specified). + """ + return GDataService.PostOrPut(self, 'PUT', data, uri, + extra_headers=extra_headers, url_params=url_params, + escape_params=escape_params, redirects_remaining=redirects_remaining, + media_source=media_source, converter=converter) + + def Delete(self, uri, extra_headers=None, url_params=None, + escape_params=True, redirects_remaining=4): + """Deletes the entry at the given URI. + + Args: + uri: string The URI of the entry to be deleted. Example: + '/base/feeds/items/ITEM-ID' + extra_headers: dict (optional) HTTP headers which are to be included. + The client automatically sets the Content-Type and + Authorization headers. + url_params: dict (optional) Additional URL parameters to be included + in the URI. These are translated into query arguments + in the form '&dict_key=value&...'. + Example: {'max-results': '250'} becomes &max-results=250 + escape_params: boolean (optional) If false, the calling code has already + ensured that the query will form a valid URL (all + reserved characters have been escaped). If true, this + method will escape the query and any URL parameters + provided. + + Returns: + True if the entry was deleted. 
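+
+    Example (an illustrative sketch; the item ID in the URI is a placeholder):
+
+      client.Delete('/base/feeds/items/ITEM-ID')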
+ """ + if extra_headers is None: + extra_headers = {} + + if self.__gsessionid is not None: + if uri.find('gsessionid=') < 0: + if url_params is None: + url_params = {} + url_params['gsessionid'] = self.__gsessionid + + server_response = self.request('DELETE', uri, + headers=extra_headers, url_params=url_params) + result_body = server_response.read() + + if server_response.status == 200: + return True + elif server_response.status == 302: + if redirects_remaining > 0: + location = (server_response.getheader('Location') + or server_response.getheader('location')) + if location is not None: + m = re.compile('[\?\&]gsessionid=(\w*\-)').search(location) + if m is not None: + self.__gsessionid = m.group(1) + return GDataService.Delete(self, location, extra_headers, + url_params, escape_params, redirects_remaining - 1) + else: + raise RequestError, {'status': server_response.status, + 'reason': '302 received without Location header', + 'body': result_body} + else: + raise RequestError, {'status': server_response.status, + 'reason': 'Redirect received, but redirects_remaining <= 0', + 'body': result_body} + else: + raise RequestError, {'status': server_response.status, + 'reason': server_response.reason, 'body': result_body} + + +def ExtractToken(url, scopes_included_in_next=True): + """Gets the AuthSub token from the current page's URL. + + Designed to be used on the URL that the browser is sent to after the user + authorizes this application at the page given by GenerateAuthSubRequestUrl. + + Args: + url: The current page's URL. It should contain the token as a URL + parameter. Example: 'http://example.com/?...&token=abcd435' + scopes_included_in_next: If True, this function looks for a scope value + associated with the token. The scope is a URL parameter with the + key set to SCOPE_URL_PARAM_NAME. This parameter should be present + if the AuthSub request URL was generated using + GenerateAuthSubRequestUrl with include_scope_in_next set to True. + + Returns: + A tuple containing the token string and a list of scope strings for which + this token should be valid. If the scope was not included in the URL, the + tuple will contain (token, None). + """ + parsed = urlparse.urlparse(url) + token = gdata.auth.AuthSubTokenFromUrl(parsed[4]) + scopes = '' + if scopes_included_in_next: + for pair in parsed[4].split('&'): + if pair.startswith('%s=' % SCOPE_URL_PARAM_NAME): + scopes = urllib.unquote_plus(pair.split('=')[1]) + return (token, scopes.split(' ')) + + +def GenerateAuthSubRequestUrl(next, scopes, hd='default', secure=False, + session=True, request_url='https://www.google.com/accounts/AuthSubRequest', + include_scopes_in_next=True): + """Creates a URL to request an AuthSub token to access Google services. + + For more details on AuthSub, see the documentation here: + http://code.google.com/apis/accounts/docs/AuthSub.html + + Args: + next: The URL where the browser should be sent after the user authorizes + the application. This page is responsible for receiving the token + which is embeded in the URL as a parameter. + scopes: The base URL to which access will be granted. Example: + 'http://www.google.com/calendar/feeds' will grant access to all + URLs in the Google Calendar data API. If you would like a token for + multiple scopes, pass in a list of URL strings. + hd: The domain to which the user's account belongs. This is set to the + domain name if you are using Google Apps. Example: 'example.org' + Defaults to 'default' + secure: If set to True, all requests should be signed. 
The default is + False. + session: If set to True, the token received by the 'next' URL can be + upgraded to a multiuse session token. If session is set to False, the + token may only be used once and cannot be upgraded. Default is True. + request_url: The base of the URL to which the user will be sent to + authorize this application to access their data. The default is + 'https://www.google.com/accounts/AuthSubRequest'. + include_scopes_in_next: Boolean if set to true, the 'next' parameter will + be modified to include the requested scope as a URL parameter. The + key for the next's scope parameter will be SCOPE_URL_PARAM_NAME. The + benefit of including the scope URL as a parameter to the next URL, is + that the page which receives the AuthSub token will be able to tell + which URLs the token grants access to. + + Returns: + A URL string to which the browser should be sent. + """ + if isinstance(scopes, list): + scope = ' '.join(scopes) + else: + scope = scopes + if include_scopes_in_next: + if next.find('?') > -1: + next += '&%s' % urllib.urlencode({SCOPE_URL_PARAM_NAME:scope}) + else: + next += '?%s' % urllib.urlencode({SCOPE_URL_PARAM_NAME:scope}) + return gdata.auth.GenerateAuthSubUrl(next=next, scope=scope, secure=secure, + session=session, request_url=request_url, domain=hd) + + +class Query(dict): + """Constructs a query URL to be used in GET requests + + Url parameters are created by adding key-value pairs to this object as a + dict. For example, to add &max-results=25 to the URL do + my_query['max-results'] = 25 + + Category queries are created by adding category strings to the categories + member. All items in the categories list will be concatenated with the / + symbol (symbolizing a category x AND y restriction). If you would like to OR + 2 categories, append them as one string with a | between the categories. + For example, do query.categories.append('Fritz|Laurie') to create a query + like this feed/-/Fritz%7CLaurie . This query will look for results in both + categories. + """ + + def __init__(self, feed=None, text_query=None, params=None, + categories=None): + """Constructor for Query + + Args: + feed: str (optional) The path for the feed (Examples: + '/base/feeds/snippets' or 'calendar/feeds/jo@gmail.com/private/full' + text_query: str (optional) The contents of the q query parameter. The + contents of the text_query are URL escaped upon conversion to a URI. + params: dict (optional) Parameter value string pairs which become URL + params when translated to a URI. These parameters are added to the + query's items (key-value pairs). + categories: list (optional) List of category strings which should be + included as query categories. See + http://code.google.com/apis/gdata/reference.html#Queries for + details. If you want to get results from category A or B (both + categories), specify a single list item 'A|B'. 
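+
+    Example (an illustrative sketch using the placeholder feed, query term and
+    categories from the class docstring above):
+
+      query = Query(feed='/base/feeds/snippets')
+      query['bq'] = 'digital camera'
+      query.max_results = 25
+      query.categories.append('Fritz|Laurie')
+      uri = query.ToUri()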
+ """ + + self.feed = feed + self.categories = [] + if text_query: + self.text_query = text_query + if isinstance(params, dict): + for param in params: + self[param] = params[param] + if isinstance(categories, list): + for category in categories: + self.categories.append(category) + + def _GetTextQuery(self): + if 'q' in self.keys(): + return self['q'] + else: + return None + + def _SetTextQuery(self, query): + self['q'] = query + + text_query = property(_GetTextQuery, _SetTextQuery, + doc="""The feed query's q parameter""") + + def _GetAuthor(self): + if 'author' in self.keys(): + return self['author'] + else: + return None + + def _SetAuthor(self, query): + self['author'] = query + + author = property(_GetAuthor, _SetAuthor, + doc="""The feed query's author parameter""") + + def _GetAlt(self): + if 'alt' in self.keys(): + return self['alt'] + else: + return None + + def _SetAlt(self, query): + self['alt'] = query + + alt = property(_GetAlt, _SetAlt, + doc="""The feed query's alt parameter""") + + def _GetUpdatedMin(self): + if 'updated-min' in self.keys(): + return self['updated-min'] + else: + return None + + def _SetUpdatedMin(self, query): + self['updated-min'] = query + + updated_min = property(_GetUpdatedMin, _SetUpdatedMin, + doc="""The feed query's updated-min parameter""") + + def _GetUpdatedMax(self): + if 'updated-max' in self.keys(): + return self['updated-max'] + else: + return None + + def _SetUpdatedMax(self, query): + self['updated-max'] = query + + updated_max = property(_GetUpdatedMax, _SetUpdatedMax, + doc="""The feed query's updated-max parameter""") + + def _GetPublishedMin(self): + if 'published-min' in self.keys(): + return self['published-min'] + else: + return None + + def _SetPublishedMin(self, query): + self['published-min'] = query + + published_min = property(_GetPublishedMin, _SetPublishedMin, + doc="""The feed query's published-min parameter""") + + def _GetPublishedMax(self): + if 'published-max' in self.keys(): + return self['published-max'] + else: + return None + + def _SetPublishedMax(self, query): + self['published-max'] = query + + published_max = property(_GetPublishedMax, _SetPublishedMax, + doc="""The feed query's published-max parameter""") + + def _GetStartIndex(self): + if 'start-index' in self.keys(): + return self['start-index'] + else: + return None + + def _SetStartIndex(self, query): + if not isinstance(query, str): + query = str(query) + self['start-index'] = query + + start_index = property(_GetStartIndex, _SetStartIndex, + doc="""The feed query's start-index parameter""") + + def _GetMaxResults(self): + if 'max-results' in self.keys(): + return self['max-results'] + else: + return None + + def _SetMaxResults(self, query): + if not isinstance(query, str): + query = str(query) + self['max-results'] = query + + max_results = property(_GetMaxResults, _SetMaxResults, + doc="""The feed query's max-results parameter""") + + def _GetOrderBy(self): + if 'orderby' in self.keys(): + return self['orderby'] + else: + return None + + def _SetOrderBy(self, query): + self['orderby'] = query + + orderby = property(_GetOrderBy, _SetOrderBy, + doc="""The feed query's orderby parameter""") + + def ToUri(self): + q_feed = self.feed or '' + category_string = '/'.join( + [urllib.quote_plus(c) for c in self.categories]) + # Add categories to the feed if there are any. 
+ if len(self.categories) > 0: + q_feed = q_feed + '/-/' + category_string + return atom.service.BuildUri(q_feed, self) + + def __str__(self): + return self.ToUri() diff --git a/gam/gdata/analytics/service.pyc b/gam/gdata/analytics/service.pyc new file mode 100755 index 0000000000000000000000000000000000000000..d17ed8a2eb6334465773dd1bb6326453942153b4 GIT binary patch literal 65673 zcmeIbdvsk_dLMQ!0D=SsKE<a*Nk<YzfFly5o}<B#qX9us;tU0H4#3fvqw)0x?g4no zi+jO)F8Ht|yKAWCF-fP%B(al8Gx2hhNz%l2J+7TJcG^kXG|5_7D=*t=lBR9?_`@r0 zoUFP{Un}YF_xtufXWs`T5v1ri(Ts3-?m7GH{q5KHef#%)``d&6`?jsG?5oT!M(%&Z z`1kYp%l_#=6jh_>3a+K-N-3%iMAgBlJ`i2OjolAMR|ft4P;_M|y0b?=hobtXXk|FM zvN<Zf-B^i+(k;>Ttx@aeqf)7KXQ%%}3Epm;&)@&7-di4<l*eY5$J2S5Kg%>M9|=aH z6-&@4MPKoMj<&7Ql}CI*qtTU7fB#7Ed|Pn8J-FWyq~ED~kh&|XqOjdjbxU;R(I_2_ zuI-7g?TxPNi>f2hmHkl_B_4>X$p2te9gW^c(>)fYk3`oVkFFews@tOUiD(JUxSg-= zM$r#kc`~Z*h|(wZbZ7qbsi?ZkKi!=_jiV|GwX}Qkr%y-Kz5eOG{OL1Mb-#alAb<L7 zR6XdQK9)Z{9919pPY>l!pNpzb_@__iPme^^r=s+*=#BHIV^Q^K|MZ#s>3CFq);~R* zKRp^%pY!=1$)A2Is*d@m<N4F)qv}!r^i%oMV^Q^a*Q(c!i;`pcyNM`05mk>z?_<24 zjH>wgf}Whv^<?z^rs&FxQ56qg(w8shU%t$tJvaUm$NjHwj-oftR@#-hv~{z#n8vHE z=FM6)&Eo0g^4Yk#noY!&MwO43Ta}fSO1rig*PF|=M!eK)#ribP?AvT&>()4)X|<ZI z$#^PWsAOrJ-d;>s+qGsRUaVKLOdnP1_2PLJBdhk+G+wTPit=K;mNweR#{F-lSzW8A z%6Pi6+Fra`Nzxl@wN_f4jOVY`vLd4xKiXGY&0CH5*44BTR~(s@b~{~JZO5%NTdTKK zT&2k0mlMZ}mVJyO>h-j-oW@J7=88z07Eu>eYggBa_-z)_1_}K#E2^DSbxc{gQp)Y- zb<&8I^-tDX^|;bX`{kCU?YQ04_ctr`HFS>arK)c{QkAyGGS+gn*~qNL&Qz*XYiQdB zIt?AyY)r;UrIw}D{w)MjS1Q#szEx{q1!>58vDs>+i$Ql3%@xOMD6~;oNn?zIRrKwx zW~-`>nr*g&V%bx!)Y=lWZLX#*Q3GPxm9@rlT<IW_UAf3!sP=1(cp=5mOVPokAii43 z!p~DrLxVKTtu4&6r8`K7SJL*?W|d8f;Z<9@L#kYAJY>g!0Az0M)Q<)ZSe1^1DVhs( zG@9*BI@m6F9aOD<eK=^>mf7M>WfXkCd9#vYL^f*Kifh}(MUwgvYo)Y(>cp|sTd$4O z2sw%T-rPP?eK?!btnHXaFiwpBaD?DBMoN3F)j)?-(9VtH+1lb_3U0F4tlCJw+(>V) zVx*?kL@DR*tfpa~v%mb*DCV`zn)nt*5PPq=)W2EWpa)YV--+AEdm=uE-c4^;R#xjN zXEr+eR&%Xh#o&xvHpej9`?j*n&J~_vcA{2Gsh^c3)wrPM2Wro1H)5n~-O(5b`SoNx z(*OqnYpb-u7u17`pptXGQb)azFKKSZn01&oE11<NxqU2NTLr`I9Ry@a)wG_rF|YkZ zMcv2}NSQJGsY~Z4;wf^7tX*k9YAjY7Jp;fOD4IG4^|2V;+PZVh5rbiV%nH>VN$APV z$W{Hn#BWh?$d}AAiZ3VUV^rJN$^s^Iz23Y<#=;k?6-X{rs09JhWCxL-qR)#ms<lP3 z$4U!dw%e`R!WtU0qmR)=BH1xV>n{wLB9E~X6nH1tk=co?QCnS2+u0a;t{Q)JY~jY( z*HkPB7^~KnYwb!MjkW?#IEE}C5Q~h!+pBA6)4mzLzHsBT`~La-drG2-`R3(TZTu)% zPLY>!(HLx`)}Gj!JqeLtS*=Z6%NEzFRWPmbFQiLLt@KVjXAh3n{uTZ=J6=M<_9!lk zGxc-@gK55%ruOs^JPjUZ`|$f}yS<uCo;Y#q)~$)<W^)-5d$GCFeiX0R@!8sf3A}Q) zh(Xofh38Ga2%pvl@wCQ*YtY4`Mc(m`Kc?}Q9l+&XkQGJOhojaW-r(oKIVkLN<4l$P z1TN=ljVcuZ^fJrB0JvaU2Os$<%((|;N2BR{BhH$TVv8J3$2im08rK_CBU~y7#XB4| z7{^t<cS5%b?MB~d_M^#{)*2c`)}O|);@M^c#l-QfC{&9~UpPNAJ72zV@s0Dd<+<sL zmuBYLL!g{Vi-vcFmnmV#@ix*W8^hi7<U7{L$1AI=+3}T1qq3Z~-jM?Jj<4G4o%S}Q ztv8$3*H*3Fv+H<}amf=6kCwKS+M94E&b!EG{2v?q4F0k$xTH|(OHnds#j}Omd|Vby z<#Kzgewc<<E?X)*x_3$wX8WV~%UH;8sl5aLOn0h#AC<&p1m7kk?S2U2Apbi4JV6|> zJ=)a6C5f}~cLXHu#C^A>^>8WT`1(db5qk}TnCgkwyB;n<+d;rL1A<5(2r3k&hf5CI z<68ka?7>jqdAQVYy!?1T%^uwMQ=1+xRh(Gg4ybw@_dOGB&e%0r`NJh{H;DO8K-^Qf zKbK+=3e7@;%Jrs{@pF|L7Ay~!KnmEO3J4^@BIfccR_oRDw}K{)tM3LhQEVsa&E|FA zXuoBxwHK89M*(I0O=i_>)qYr-BfFC-{=@A=Do{TYkVUqkHhQyCoo&uhi+Q*N?FRwh z3kV_`O)8CxYwe3m^Q~H%JzR1)fPOI`hio)l0_D_@&NEE-&!I+`@<K=#=;>>OYkA$I z>oBkU4*;dz^t}#5?*mKSOtg&n<`yL$65uwX`%Svv8dZmt00``6-2i^wq8q@V+oN<F zF;f7hcT1%!JBXPwH{iksn*#ns8sM!+vrTDsM=8+HJyE(>_$hGbeZi9*(ffD;1Xds_ zL0A!=0*A%TuIPPW(~r4_Rb=&ew2v<y)eAtg!c(t2;ZwX8;Y&cRgjSgpz$)Nhz_R!T zxGEnV(2s-40LUzGUOs(Xl?Ns(AoU7hSbTy1o`~KDY7YEWQ0f&xu6X#A5(8)zl6nQ$ z>eFfkAfc%IkthYOiuykj-5n@h0h(%AJ*%va`k(gDu7^eDr$jukQ{)VU73~7V6bZ&u z-0`R?fc44*!BsXo08@dh1Wb_(A1Zy(+;|8?6`1!69SN!vsyyISBsihWU(!8bR=}?S 
zRMCvDMAu#=-pcBKI;sk9y@K@mFGX4S&wjon;y=^x)ysM{8N7PMfAy+feKvT73_mTx zPEkw$42S-&;xgTAv@2Me$5_U5?Uthe1!8LmF4r=Gc|FW>HHFY<8IJoFvaoC_)oM-4 z<9h58#N%zkWVWz!D9BNdHL#QdD!|&uElITc*1|k?&|MEIf%TWI^R*E3Km_QC<2Ln| zH!sf3&zvn^Ov-Ouy!gh2nMF={YJFVPOye*68@OO$c&!v&8;GtAa)pRl{_Z0*h`c+B z=`~P#7r(KJ8H(P;Biw9?-c`EtaP%%7;t3MqA#OHD@8Un)Y>D3860QDVRMVQXjG<}? z9^u<-qtUy=$T}*yr<m~edb_{b5rO_|nB;@e^?|7M*P<%;5o|iUqxZH&?{15(J?acB zcddYZQ3Hf;!orp{%Oark(}Xtwcwm9rT(}0TOL!HqD8po&j-(}R98a~Dvq`iX|Cg>n zGntGt;2Ho##+!oUEA^wm&ke)`Y^*dfIvOAuJAv3I`qAaUT)12(sANDgdxZb<xbKH# z@00n@DvOKFwMM&4C~dt@`Hod|X$jqctcmz?7J^n|HW0>SSZJloz`<aZ5Cq}B%m~De z+0e0c_CBcG6xPvs@?$~zVyKOg03dMj5&#A^WF@vNgKRvhOSKj-OUy0+TuVS+;%YO^ z8e?q$hP4dPTW2<o>L4GhlP72*KASF8V7rI~{!jY41hn~fbu(?fChdYh)MmG?))uc4 zJM)lXK!<=SARo;IG6Ev&*6V_9Tv=t<E&7#*ZfYH`rf4m~ml!-~KO*a%)j*jKN)lTz z_)=^FXl}i>SZmks#2M5dV)QWT0Q5&E05R>Cqqqd=%bIJg#dU|3Su6z23N6Lp2a1Bi zqyhRGvT`Y`Sp6qWn1`h#UDlEnfYOZyrYG>kpn`c*uBL3ro-QSJ7(i!Zo{)m~mL2b% zj03bXn9WlSfS0|T+NcUB$*y{w8up#~8qr~)yUyO{Y%_Ul!uC+DvndSc>Es{48gp=! zz+*LCg}n%DiimYprkRB_4cnoWuGTAycn^@4sG&3X`RbZjai+a>vOWgbtqy3rsEDjq z=z*Y?%BY;RX()myY(ud5GJ;}Zrp&piGY`}j>ahHQW2{uTVu|0JpTFb>EeEKkeo&Af z&t_Aah|e$4LV-dHBOO3P$i`a*THag=GN`nhD-)z?;!>-5yOvhp#JyP@4M}v+qbEP8 z*<}31G$rT4n1i*<&_}Y&a8YCP4iLUgbcm0Ikb!FOqd(Amt}4z*>n0w|y@XIF;^`KY z0JOZbo}ttZV<p+5vubiISBW`S_-Q<@8iGwsT!x7gEn9KZ>L?nf>OMLawg`q!D_v>c zOf5sJd7GGA(0BSBG)6`}Kp5x*%39Jg-N0<FXN0hlkTyhV<an2|n8L$30~j2<S!>kV zfH#SbN-pZL7hyuhL-{4eq?;qQCAP&x+FX<u6Vp%QgxL!ZE*lhHYpkxd%Np}pn`@qO zxX3A&=hF6^I9vNr?@Mn)R(rG`TR#DsrhTyY^)j&U3>>RgZFBvuW_H7&>^OjcRS|5B zT&cGwFnC}o2a9hH;wwC96MZe75}%k_U7b;yVq7OgdB)ja>Xt06FC|aoSMm%m&+<Y` zMRFJyYe`i+d5#Z{@F6?V(WL~cccy$3Hjg?_bQUP^y<q1_j7<J2{xXsj9X?RnTp9`f zzXSK%?DxQq(r9TBD^6J|`0q6SGHRwy)07J8OcbZ_7@|=&gf)z_7%CkUbc(gwii~)e z9xt7I(M?xasyMxi?wK&#O!7SPkx3zW0e6DpT^E*XSoQENFZWVs*$XtxY%H9w{!8Y- z8bt$(gShf<2!GS~%Q&m3{HmJNu?M(#rL)r9I1^?h;A}1sq*TTj#8Y5S9v;qFv-L8e zj?5XKh=O#bR<=cbHRsgv$!kcw!Af&k!Agf@4Z;laaOy3}U6+2Am0-)MipeDJ?xSKv zR>d{Knr#khMq3Yy<KHy?vNv(T+KHW7;&SFabZRMToz)XtO#v_|MF;tAfO8jpj{d%j z6%7(01r{t^x6r$Y<0ShEE@qH7lN`Iu<WauF=vY`l-g+VA7BcQ@zm_q5OtJz?F_254 zpn+i8(d;-mL!ygVh?oJbL4|yB%p;?vH{pB~D=u2TO~$mi2G19)P-H~R#7;ESHaN=2 z4a#rWD4VAq{0KXONLDnwqcmLFSK3k9T3Te6_&;?CD`JyuPNu=^H}Jr>D8#<=SrTbn zYjEAY(v-u*BFOJ@fm(*{3D+A5XET1rS_F1wg9THAiZ`s}E=y=8h4+{FL3y{TCBfdN zdM!Zr!y3(2|F#Bmk{0SvwuW*Gl5;n!5AA4GJUNPr+Z`F+2D?F%9o^t;i|UC3><KZs zk>2*_$g%dn3wEUP-E3v|+&4WoN4p4mHYcbgv07~|W}iZ7NT!zU>@BLQ-qp>`or&Ih z^o*l_f(-pQ;-I0TX8tevpsKCFtjrPMv-e&8VZ(@dLH)hwL8NK@agHIj;(hlSxdW%N z8$+n<(Y(X%X#|~n5Y$jimOIILIVYzi8QeW?*gbtm4RzKn_~W2o#9u}h3kmRLLT1<G z#{1rIbo)z0yMSL^ABkF3vi-JjG=goz(KX;&5F2uo2AT#@04VKlY4EN|6ai|;Z*!5~ zjXlxZ$Zc+%3bE9!mRz``_ZY*%5GTQ_6{wn8ri<-2)>g_$N`r*>GrX26cf{NmDvQ^f zOG~rLQfbi!v+3>jg<9jfDc*xDtlbi?$IG|j--PL)yePS@w$i0>wuPx1lrnXETc#9& z_Y{-R-6MIENzU`~1zhwTo@k~9^QKI9pbauBYVjcL4c`Xm;Eu`)oD&U`>}8I{XwxPr zpj1K+mo}9a*@>L(BluG%QmwfCvg2aZNFlR(duU(~GXLz?9*NMy=*BOXZj7T_$n#j& z*{+J3<~fHqwo0OFnn_+V=j0+Umw4%F9}?z3=?yl@GsuC%-FYCb(>QOiaX5^lq1}+K z!=;@cLAE-^K8COEn@LbP29+`rpXb8vQtpHHrDA$6vOTuokDWD*zw8}ca`?J`X8~y* z7=%nJYihN=;?rZYK_P?gHW=NFO797`hC+s^hPhXwT9(7fKZibr${T<;bR0I@r)HYL z9OgVq?WzENyWCOX*mQAzhIox!SOI-yYjA2SXDJXxL7;IftCX+9!whf%JPOT@aLhZ+ zI+4@9)ij+hpaWPEfIh-4KoTgTgnoJ<mDQHP3f5X_*#R$fJ@3T5yOT^h7jy$coCh`h z!Rf+DGl@A5lF@)MKucP_V;?AEW*)~{sZZEdtuyDzcuOwD6LJ0^a58oP4qG<Ji{!C( zX$APJm)!A}EA<){haEHtYAIa4;kk(B2y$Yp!wnPt>G4*8Yi^N_&YS2@>9lFM^*cdM z$k#gR!@f@Drpj|~PQ5^wFZlk$K6~@6scC)ZzRAZ621pLJo^8Tl0f$8a(o1*PM)0p~ zRf&zXfdi)t{SezcD;s)wZUI6%XM?g(U#e>|i2#5HEN?#P9%W;1Cw@vuli|R~>Bnl_ zjX&4V{yR6?`xSqwRlA8{6>~Cl%(lES^Gt&#^~0M4!3uV(S+1wc%W2h6qse%o*{r7k 
z^@4iNG%5fwF-mSVk6Yr{rZUDn-KKL|&AGbS0%XvSU_$u0Wr&6$!W4i-=}IlPZ0dLc zsTe)A7SKg(pjevV%Ax}vc5toQ5;KCU3+>bBu2PU8zMpTc$$t`6r>fJ{%aZo4$py?g z!c-*zZ@t*stW0XjE9A74ZRBsl@YF^Hv(6Gq0wF7Uqs4+S60pwVNuVWgb&+?W<4fj5 z>)9f@KGB-^2beX}ch^3-!iUs1jB>5)HYh78ggmW9z2pczpKA{XN1!`UNRHzFS&UDf z%T73aGdrPrLXdGPK+RGcEK*UU#G>?))OroML0G}X;rf=S^<>nBegQ#5K!_j$WDgcH zNPzZ+zOW?3AbXupmVE-~imirp6J!F#@&{#$m8x>;MuUFA)Fw;Os{kIDA2dzqkTA=l zoDmfcB@|jG%3<~c@5umrZEJK5re72b6D{>nVil(0T$7QO#@L~f%WXRZXGFn?oOei> z-N`2upA^NLqw+3a9`1MR{!uSr@p=v}%v6Dq^KkSkd_mv^@+u!F8~8W8`reiZ_$o}v z$n)!Ju(@&ebvB7Mm1mg@yj9K)Lrl~Ge!e;Agr*g*Bh#&0GbXh78fcMEX)vxj7#u~X z;U~5=3+sUK>}W1f4FcyA%<@hXfi_#vYIS}orkAp|GTG**T+C5*=9g8DuN6STT|!DU zG535|m4jGNSLcjvEDl<8t715~hN{`SLjHzXbljAI)e0=Fbmgyf7{3A*lG~6(9QaOd z^m5zIxEf-E<`b95D@;Jn{1a{!bIe5=lGzs^n!tMd^bvMPz^9*Di@hw!2xy~51j7Je z5y)$ZZrXS!;xlW4U7UUb!;IS=q&+5;)-v>~25iWvagde!y0FG^%LXVwg<(c%-Gc3= zoi^N#hKXhD>GK5^p9I+%@4<((3B)f<oCGL9yHS`Kkb&Tf?p}*e$3HUGk<Azk*Dp?- z9DC1w<yzD1$Bw_a0ggVI5HRJd)v?KLjMj<X-3A}bv~q*0(L@U(SBaV<;n>0m$J~S6 z9-Y9EMRivF=(gnGp_JpG>P0W#L`zkusI{9g$WgqZhxytU7sLF*e6{Jv24IkpA7A46 zn5yY2q$mht4xMC+wt7ZeIOq&zaL^C%<ItF~siNV)vCx8Ig%=g@18z-F`B?g5dsXf+ zGLHf^z%evCiQ-i2Q7t7EoKuR=kXxYg{KVOy$7wFOuGUB9pXyOYG6HBEW{7pWmRNU3 zf@>+iI}tT$woH}LD!#LYw8zAK=ysXV^hJ>yG?5sLK(;q#=4*cqFU=5vwZK{?+X>iR z{Nl-zjwoQ8_^qb#RN0W#XK>KT;Ce-+Vxd{RGdoTPQ^5@dAT52O1}m~{oKj*kXG56R zuoRJIlDtD&4&mZfxM@$@U2fH-p~&cEijWEE<N}MW@$!SrMQTY3XB*<qFo{#=Rk@(I zDhosMRVFU;LW|uvg}WIcXn#nSagj>m+Y>FBq<pi)H(bCv<9V7)@d*_Kb<YMB7kb2M z%=A=xje5(AcpW`C3aw=?bQWqZ+Xi;R7{~jO(&Nx)c9b5~?`@@_(jb8K&83|Ky(T)U zfYcvA{$8pOxQFW(v<p6jV(@})28n6mE!6<7qVwHGdjJ|w+2&#i7tTdZc3W6EUPjph zCQpU}>B#qfdc!rrhOY)T=bQ~;d+!)71)GTwdt94<3)EVJN-N!#yC<lb(bsz`<Gbyg zRBl$h^mJQhVBZ{=io<zTDUx0gy<QElQa~y&HlGcsZr8v$?=;u2y%L6q-iAS5t4+8V zx@J)$gbhH0J9Un8P+#C0Ga)A0QA)lk(Tz)MS!Kj>;fa#gZn|yU{fAS~{-OTl$GIA5 zavgqWz0y&%W#?c=4q|jf6Ie*Q?9EBaQHzMqCo}|DVtTCDRL#ti&B=&fuxEwIH(A8< zjm<C3^q=KS;?#|{apsJaGVX5Z(fz0VK$|D`5MV`uw&QOaf7w6A1(T!ooGgb>@x9|E z6zx$y9ipPkIaP|X?|Ad#rjB&2i<)SgxK_A<G(aFR0fk$cHs1P<o@|k-54GBvAfZen zUtEJSTYr>Kn9(0)UasJuF6aOQcIn}}2^Ad62B6tmHV{9<5&|m+EHyv`7Yv&)?dJq- zX8!=ts^`X;3?`7F1}4Gi)iQ<49BVPQ^Es%Y-)ER;oZj=O3zqro4nWhRp*V0;v$@Z~ ztteO8p^kKLD6c4@5@1vESHw*2@KKU4=BaLUM?>Os!1C&t{-!W3!L3v3EcYD}x#>}I zGy|Mt7Svv!j_69ou}0p9{6b3ys6w%K@bc=^dcKF`rPS+-ZV}*Myr!~*1&SKq+6g@d zvw|okuZEoyUanN^#VBn`!-=+7m1MmulBvS)uF&+!*j%|j7q|FuR^@W7QTA1tjL&!= z$0@z1&w&}I*v~7Cl$j9=q2eDRZg@N9^I&s~Q8k1U9yLt$c=v1H+lqXEVS$W;#&86x z`FmGv9iv#wE;VyrWhw1JZbZ6#XXaFn@f&zuD*Iyf;nyp{=NG3g&%bFqWXt%bd}%6~ zdaFD;_12874i;b;S;Dx(y4L7sw{hTD)*JzqFggcRbC1HaIhnGPDyaPsdQ^@14(2Ar z!jRXT6D470Oc4rZ->!t-lzWQhmwgRFn5~!m8ew5pHq!8sA10zdvo&xTow-!IoiFCc z{qS(BD>t9$Ix}W2-6$?-#G=aV4I^j;0Rwyscw_*p1OU%ga^7jm+*8HrB3*$ETV(4i z-o3E)qf$epQg5PfD6j+4*d(q%ZwHWzFculMW+5{&AJslH=5zrK#$FjWhTvtE0s5f! 
zz88}|$5brOttGn_O;a9RVe{mYfpruc!pP$Eu?P}PbIh=oRf{O{MZRF_*tF`GN6ZHE zrgIV^TXOjvg;0-C&qLdJ?4t7vAMK)Y6P_C!twsgc3z1e$UUI4V*rJAEx)pQ&AmoVh zI9$K0<Tco5P><6ACaLp6U0jPEtyvOcy~%A}2r`;C;33|9ju$R)+E3u(#jz%$?<CQU z)lspGk;3I#!StvE8b*}7$IFlM@?*SYynLP)u3|Q(TYVT=$%Vg6#p^H%hz=iu+BIA{ zP#T2~A$@@MV&TFi3>Pw7)(o$slmU>Pl62B=%m$GckcS+XrQuV`*(!?qIF&PKN}Hk% z<qZ4loN_iS<qX#@<!tx^l`~7cl~hv)v~4>;fuWy4GlPCMMC+$BZXzg|KwK>U&B{q| zS37k8-$H>SxO=AL2-r!z5Ib=03eY76jIw~?4hzY}J&;`7<BD>D(b|R{{4>|y8xh#W z*T{7<fL&|^1eTj;2iW^nhvz;5U6Ert`xSryZV#@pem|+Y7i?t1n7p|N;1&t0AqBoG z_Ml6sEEf5}2SV7q4ea$+E9>cvAv-4Xbq?$s)*FNT$kVxp=eJmIbQJkk0zZS-f3OIq ze7bJ|(K#c#z($9oI;=65-<YzH*tX>LP-`w$4M0{vsGv!m%pWVnFYvg&D6#}~o*PD4 zlgdqy6_%73iv{o;BjydZ_aQT`oryl=U}3Edu_KA>Re_x7+EUGJlj#Cca}_iMQlV^m z<;vjHoE|9=5O7kHOs(nxQ@Kyc$rySrz+c_ey*!gns<7{X!qUZ!LPaTx=Pp5^Hg|%K zDJQU18R^n3+JoVem*!R@A6;-4Q}%;pxGSMUl7lJug?Kk4yN*1EuEzP?5eUF*5+4Gz zN>3wKa+Cx}t7!>D6s`G;=}0G_i)-Ekv>ODdEQYs6N^%v6gtH2t%(xGrAHuq3;R_N# zb5U}giN*=1+`FpKW(l2ILLoSzBNPZ~vdRLeS%|bY%hNzqGt&2p`xnjt*G*bXBxJO} zO}T3#P&f|YXVGJwEZX#p_nFDR%gg6^p~jIwHbn_Ajg9Ia+}Pnde7<QqQBm9fn|cSG z%A+T!YxLJVMgW)cjR#Vpo8bR|Q30@SE-8Lg2oX(TP{NmA;T=pj7RpNem(Y@wkUxkU z;xrJe0#RYW<-Pfn%fKst@<Bp05b^+zugSOzVV+G#K$jhU1RNHIUVMrD_Q(Rz%^=Of zPzr$9yn1)A^q$lM>I^3Ozv0V)?RU`NTc{)3WO#+KR)+iRRkVtc4^E08YuunozG6H` zbI_SIIs}LY1*S2=S_o>m9|6+ah<<_5VMO_$H35ejcs!Mdz6%1+x7UlqLB`<fr?sCd zCJ@A1H!X8>_6`;dn$YG@Z(GN+r_tToQ)U|~PuxC^pkT*=0<0W|=mms`kbPBozE6Wi z0HruS4<$i7VOCp$`fB{^tr6rD;sVY2S!TO{$f=x5NU@VFp+!bsp=REY#;=*I`F9^v zk1Wx~QNc7`g@GXB?yqR;4r%uz1AF0#2;;=SB8FY(=8K=hgM6C6qZV#_usdK907SL9 z17|)kh`A1U6lyZS*OpQkJc=1n?9jlplY4HF*-!I{%c=3%;yb{i@TS1zfhNnn9pRVQ zHV-oXOz<T)`S8y+`uu&=(@9JTSvR2FfRxG5tWItiO01kWyJkLUG5MCO?aBm)@`TGl zv&>K|KFQ;>#1|?HX<gZjKhsg#Gm3v_n+GirsKYFrVS=#=;G1ANc?SefMLbL8@9tEV zJSM2fP-$3K!-))DharJpWtP!6ODnC#tF8c5&S9Maz^SY}5`$<$o#YrKVojx#^Uzy7 zVyi_o=XmaBAstRgin&q5+MMkCJXX?`h13~Il~<Qy<2s?UA&|RFSz4Hc6+O{gcfW1Z z^epwh^{DG`XTFA3DZb?EVQK@gOgBa>wJ1a!y#NaCZ6)e1j8kke5TKZPAA!7SjFw`z zx>b@|GX5IC{m3V!&;s!C@T9&z=Jdr(UM>NfbDT+BNT@^(SBS-gjU;pHa+kTaFHa+Y zq5YDI0a%{8a6#YN)?aP1(ZK84om8j`8w4l_t#G+wqFY5=Afuwf6YV|#Vei3+Z371Y zNnl$NE!vdngZPt7|A)9Ci|DlXWS9AbVt6oWz3Nx~`bn+=GZ4aH<Zu97t;D^+jJT>d z>ngs7^v}LaUIRWxcd~C&2OtL=alEPHl^nbffP;=?1JR9d%i<&T0O34n{lSOZqSh~w zGTfK9`-eNC*5A`x#4HCkAl0PPzWW7Kd(Qh|xls*y#r1=el>$-0xLjM>$a3mJ{$=On zk~ugq8n16(HNFw>fs5RfWosl%CdbeqxVc`tT7jX{q14=EW#s0rR*pbTFgJlrW2tl~ z4xILQ0{>g>^hR;=(9BiJIolYjF*ANtxq%Xp1T*a%cqeu$$veq_S57S#iNiM#*O#`H zk3KvI7^(oi7R-_nLr)+$Y2bax&(1YlZ>I~`M4Vxh{0icvs@MPuQB^x&4_BJVBorFh zt$H~a6Lhd2vkH;vS|fUP!nHE_tglPnj=hr1F*3`ko0?t{Ou{+-)j+ec{o!Y#=mp#< z+>sw?Fv{|_@1eleD9AYQ_3y+16vvxZphBdmnxQ?w2BE|1Cv#^)04EkJz_@e=Vfvh7 z3`F8TdHr&rbX3K!N3A*qT7eHLt_rT`2kutQzOgCP>#2<55x@guo$-J%!8$7D3G1u~ z=AJ`5q%}i;QcJLYvMS7u)5hPug8e2LDOJSYQIXtWdMT&my@3d!;E-=X^80vqix)|y zJG}d0TmltWimh^y&oR+SHMesF+L#bYK7(ukuSQvnn`qx&EgAO?>=@X&iSRXdBhfd< zJPUqXDZ8d~udnPr+NJE;*me4r*CxEWW-kMM1~XgHBQ>TpguFhZ>F;ERo6nt+LzALg zIj|YtLxXbWu<axSS$J+Hr8P)l8kuQn0DtKM894MfN}X!UT6z)Vz%)Ok7;eS50m?EK z;@iYmRC(pvn<p@={(L~V<BA(mc$rN{%YY=-nBjw*y^*}RSXI~-a;fnO*hI^ksCB5L zz7BdIhWRY5+w>MNHakub%;tEfnwb24zWDcf>1WW=jMil4R4=xo-?y?ycRnb40nzi+ ze*bW;@NMg>@D1WKDt-KS8h_dMap|M*O-kVdXw$3Jd9{x^6}7reQTBTRt?tJd!-LA1 zeBp@+<q98OjS%I*V$^AMKNe_po4f)y8nu4XYjxZF!|hS)mz6C(hYNont?nqQp6h0A zc=Xfi;-JkuAJ<@zNORg3=E*usNsIUs<e_$#X$Q+oWwfrZUC&p}SwYl3NVPC1NEGEN z4W~{8bEDu2>!^m8g1`V;vfPte7?dy5!#${m+!TL;)AwAnb)L?&JS`;G5_^<H>%rWK z0u0Q1;F1+o!p>GlO;{Jrj_<_4?-tsc%l9LlIkgOcDx~ut#IubFV_DZRq=VK}9lpm& zehAq{+bN%Szz236*-S8sf12v#Pn?*TCPBOd7(u!5HaID$T(&@K_q|t^8(_4B-NGCF zZIX>YkR&jfw>cmB6n?ms92k=4Iz}#zo7%@a?GrjxCH?$%RvUpYV{9LvTE<K_-RLLq 
zCeaFfJMOp|fj1T$!<G=7Og`F|=Vp@f)EkJ@uZTm1<E7proR_aYKDUm`#&GvhWAj_3 z(Q8)eUty)Wbvr7JpqH}uamPt$O-!*&$rZ+(3our{SA|L3%G8ZtTY=zu9x^?M`=;C= zm{$-uY-JTb#@GSK!vqxBUpLMmsjxqU@<;He;qk{XP#_5`z$Tj;rg1eouUN|C>isv_ zQP_;NIeO31Y>Be-<n374?*~fIFgPFHnvfOvOdOxkK046b`w8&FrvTfj+@3C22LML` zJCJ`S?f;u)=l?I2#dc4cwr>GbAgq9hx5yrQx7^L{sPSubT;RtzsSVI(z2s^n9{>iT zg_Gz=v=8x&oCE4(3|UZpBsyq0@yDqB*v%bX2;BR1pbvkf1b=~Rdr2P-K08?5q1<Qt zPyq81;`iD&z4Wya-FFw?HjaBjcPpzu(l4t64DQRsD0L48J!fk6Jxm$0M+|s|Hgpgp zR$x%wp#S88%bn+flpR7&yqwBni!Rh$j`0w!W=kie<cXcCx3Y%KW$<+5`YyKZ8FfQv zL!87)av}!t<T$)3W1NnLA)c>yH9Rf-VsRdr>5NWAHUjaHq^L@UlyqB1kR>SgaZ-v% zGk!A-n*qpSod<fED0XLrxnMR{huB5<NW9yY?4YJ`t08G5_a!daZY(;zaRq4P<qqne z6iIM(0Ox9@k=;UnIO$%H*(<%=_a=^WDHiy2urb|+X&B^lRMVH6=dIf2T*2R%#!f@B zTg1vORK388x;40OP}o`QjIs3yan0LRJ%uXrCS_l8@o!F|>)=kidUsd2Rk?Z)gV`Wq zIT3Kk!O1S@e^0Kfd(Y_teu>|iSu16XT{Z-omNhcwTO4)fvYX*Hr1i33JJHR;4XBq- zzJ%bNOG`k(fFtJUTx9{S7RoN;+$}b;1rl8wb}k|Ov2J)&Uh)mj^QUnMM8Un7+iHJ+ ztegstA6^XHNL#+lbD^Erw)KFeM10FnT!+Mda%ladm~d*4(><*$+wpmSqFmn=6__yB zy$LL6JEDcP2~e$Sua}b1^MotTj?*AsxE8jb!B40Fih<<<kCxTz-gH>zzO5h>p+bng z$V{H)<uER4Fmp_vNxlpn;Z1aknH*P{ro{_eBl&TrCaO+rfblU`n%IDC3_Hq{)M}() zDW%2qD(4d-p{^jUBSW0pHuy`LCB$(O3g%=tF2N>ru0VZ%s+^7<_H)5mM$K`Il~yXL z{ElbmJ{|6%O?l@@X+P9jLa%&*IfR#OP?g;U>U-7{{{xkyGk6p2+d*&1A;exB7#ZNE z^_}#c92mCkUIRM@cffoxjK6IFpXlQNg%|3SslE7jXpwph|2USX@s~kr78)hg6$*Ih zEwEr5=9NDsTA`J|rhzM;3~IfJQ>fmgqadF^ok2eEhP=WnpX3WwB*0P^>)Zk8BKhvI zTNuW$fZt3TIPenoEVrT*wo=G#$`T94($gwV#S`Cv9_c)^Tu63?{5~cpDW1xxO#`u= zF6cwBpg`IN5ez~(1b})zH+|4OANth)hU?D<MelZu(Q)NILc%4`z;dZL+SX6$hnL3l zhbQxce7%$|NFZ5fLY5Fj9MxRsG*5mSm%yQiQ_V|(?vIpSUw)Tg{waQmzYVSm0;Yci zqtxtjJ27}M%x$-$`*%Qo7tZ{txKCU_kXdM8U&0L`8yNFprW^2K7oO%5ER_JcV8mkY zE7nWY+oh{xcw)@wIyii5;BByvJEGuwFfinz(f=oykKU&$u9$zn-wEZVB}5+fKbT0R z!U^>dA;uw?B~!We+6}zR7y*bs&$SmtEdq!HvB<gGw&IOQmYH^C$ch#yj`&=I%BfQH zz|Z$R(O$<;T<e*rrZC&!!{ljgZv=$E_p(m)&Lx(dk3(u{q>Tf~8mN0;5~*;zk-XL= z1T@bAx`yAeVVJYiq-Be7sHU4!STkw^lACT$)iS@m!a8o2ioS7V&iYUe3?7a6Mj#r+ zbGre1*+q2Joa`)u%hgIB69`X&P?X^ZTN?CI#U^l0gR*6@cok)X#M)Auw5`6HIxdO) z@iInR3soLBEU4cg@QFUHDln17Ig=~E3cg7wKmr|u8eFbp7>mQ{hhT5XPvMQlo_T?v zd<hrrlvW2PKgVa^<K^dZ*&zR*F5qXTs{jVZ*+H-oDh$ys8f6YZLc*wzNh~Rdm&P#u zJ=}l|VD{7Oewl0oa?C|>5kT7l-is%;Bf4<i!)8lZ{{0I&@F9i#=V|jtEg}BEg5;ls zRX*fkmT^P5bL_*1@X*5*Ct+yCu0?z9UGy3Vbrxt1D~Lqwu)-9|EwLJc9PB;4u5lTP zHYY)*LYuH_sCl{1VSs<2xwvzr2P?4L>H%xExs`yLf>wH0ca77|too48<*N5r;heG^ z(cou+)u%HPBwA+&rp*KAi`0MLWoK62Y}3y`)44W-V`4`Wy0saZ92VJ}0#-Py;PxuH z^!D79tM+#teNS;A-0EswcV*|i(`7_Ggwbxhi3;P3CiN?P@eVKF<YhZAG<PZ{yW=rf zh+|u~7>upS1Cz}ZKcK;NgL9Y+#5KcwbIuA;+q}bR%(YjvgBI4|(!qhpVPze$$Oc1r zP9<d;f7$QG1#<})F6Wh80-*t5X72~G*6B2>iH!++7nmB<T{s5<elrK!it^YL1Y43D zEvF|kp94b1eKGb$urCG*M4ufoe;oSmzpm5x-si5kP3MH^5;%I9^u|?jBZyLp?z-}h z3^-25OzJ?qIw#%7F<a!!n{hf65QL*A9EbM}JlwPI|5lKc{2pHD&M!e4tg~<MSw3rZ zeu3JyOn{{<XkW|vJxHf$rh(}N3UZYES-!yEy({d=pT}1>IWOfsw}U~vu_V>(eAGG3 zFJj=!n3~VdZ{Yzob4^XmCi=nQUOvB=SWv<{BCN0L5swc7k9Y`FOz!>A*#poVUh$Zs z5XU%@&Vcf7`m!Kx6`)N{Z(D#Hb6K4I2rlNw4Avd^!*_7(z7X955d$?8AK5rOgu9+` zXryWI-}f;5=<eo)Am|!|BsclJxCG-$zBUp999$b4R{~Y6t|3GC7DpO&JO+*DcF_@R z9Oa1HVc~5l;_^}UXYk|Uk1~o8nf?pY49xxr?~L6*IM<s)QD&D{Y30E#O4uzREnE~l zwzC4YICDkEpo1;}jHE*7vw%*y)FD7^+_rnaMA&v7qP;h@Bp8oeAoPq!jl}ct*}L!2 zXqe%36f!%m-Ov5}MAlVMy2fP-*;t0u7Xojv&cO+mq0mi;J0~n}gM*c2giX!%_zyW$ z58y@jP$hHZZlW3$mW(6z^8C5uuPu_tv)=r78h_cp1HK7X2&DzJeh*uMvF#S8=V80U z7?&pCb6}megn({i`*kg}Bk&5eV{2B)v<`7A4Os2)3OUsDN)cw<D}i(a?;-y6XmtBu z=)gcE1?JZ*;qXA@ejSM5&qVJcG^MW4m|#2vx86r}=*G`*opa}T@m36fkO$7Jp-nsm z8DO-#5R^kW@$c=%^=Jru{rc9Z^&1gRP5F8hHQvD^l!Hf4!tS^oN|uffJP70BL6`%t zZ;o1D<TFrw<BKJj28Xb)V)An?IcP%y_|AakBbFuULb^@SS8%}K9FIU8j9x<x--V$S 
zr1LODlmV)?M4v&eZ`_IArg0X*D%GxN__=X1o9tJy0`rcXmH1W*_69%!0bmIaBU8d@ zFjT-^Aa<378qn%HZf?$Fb#}8^tHLR)Rco#R@uA&Br&3`x$M0(CO`@F(YxV1dgK*ds zxBylvK2J^Y0@Y!7DGy%GHW5}ga96`OFdNeAR7iy)QMu#GWc*|l+dEo|$ZE+T2+AF$ z`P~EnjV|4Rb6DfWi5FcJ!gXX2dww!L5AbR^ZGBdSiCowpaJ->Ma1@xnD;dw2s^Ks# zy_3Dp*VBLHg4hS&U2WcqSJto#mhg*x=#2W<SGA0XP$wDO#zIQdmI)tJpB;fEZq=R2 z4?Hui_o_6|YPzttJPt(ZsP9*-M|}V-he@f43WFV*KdUUG4`2g=7XhMDDr6rw_|hYt zi&a8zq}793)(1vWr#k!PN_{QOO=%2z=CG{7q5vOvsU)?A`4!^oLjxmJb9g+lV)R+< zU4x*zfORuZ8)+^Ca!u!hL0!Tir>Vy^IU3mv+RHj}lei>Qp%)|zvKZ-t-p|vXl&86U zAg>$ipuSn16dIGb$ub!~m&vYT5jET~fAZ?_kZA@36@y}9f+*acw$bs&2&h3!WXM4D zyEuOBH&LD1zrZEig_mfUxC!im&B+k06LMrJ5{;AJ%BIL!A}dqT7mADsw-~`x8X^`e zF>Nd1+#PJ2UYWUFYbO)|B4+0HVyYnT`X;O6e7b~Uz+~DBOa{pSGh!|IOZYeW&v^OI zdHFssZ}M`UmoMNV)XfHN@|XSZ6+5MYSoezB7~CJ+{w8Dsq7H6Mge7m`+rL1ObqY7p z0d9JQ5Ay)7{2Sz+7-H}Hu|1Y%r$=Fcf=AWh9v8Y}&(IL`ux+L7cK*UB^2p`bD9EEI z*e6&Gu_PeP%%WsVb36y*{xPk!kxU2}F*wvIf2eggBpwA)D)Ay46IS+gdipdL5I5e{ zV;Q5u$L2bTk|2fz!%=E6LUPH4L53IwL3jnv6?oc=$j{{ek}2!IkSR-2lDiJCA&~`T zLl*Gm`Aq=>xVMK8cq38v&-il=cXLQG=Q2e|XL#BC&n3!2)C`sQzC2|hZM5NVb;AFY zP@=HMCSPlq|Dc4ib`8RdXJZQ~<U!gI<rp4Xnwyl-)GH<Ru$Hngj!-%21d8TIhSEKe z-~haGD2=h_2R4nNfjL+{UEF3=e;2}g?501>tEdLNX{evx*fP+Q1}AfOq-h+cVYnk7 z)*U`a9mzoN-1vTsj*M==Us_AC!GH|XdEs0-Z(@{>mf?5PfVJSf>9+HDgh@jTjc?(K zqb(wiOCp)$bbM(_v5TQ3u05V2fE4!O1}BUjEd;aMKO0wKVBMgovJ6%O4(K-ToPk%s zF=T46zsnrrK{=1nXCTOS%)1wfOj3-LV<(vE1kXarPE1Tpj2)vv0ZtmYXN{B-ghtB< za4{dTRTJS*fps=@=5CxdYEV32=#-5V)}?A~xdz8i;Ho%s0>Xi%DXYTOO;k4#c{rrN zXY~5QjngiV=Pi%1UIPv?&5bYY1ZyHE*yKWoKB%yxCgK?*+1p7aD?e(<UQTxif%VRM zonbUOJDSvq3$_xJ8Rx0qwavy0ZAUd?-9iq^t|2!pK<PdgZjnSP>`K##-^^7l>T}5d zg=&P)Iz6_`$AOq{9uCi+b_vHK1SrGt4ZfOrMs~xNwa>dYpyiHg;cjt_vHo;%9n$U5 z9~^jK7RW%HPNKT)Fm}m&3${t;ZkcNhMTLS=bK#7kjzR47Vc9+51=hTkZ?WK%DYmS7 zwW=c+3mzJ2s#-4WIXO;hLxQp;dcK5{UZDqap8Ib)oIRze)0GP}n;Vzu-L(eJE~-IK z_Fq9OYhqKKtB}_y^06M6Hq<#+5jqdHTPT;@!cR@zTL_FJjD_{_7Wm?vV^l=zJXc(< zvrNW3M=9)pDLoN)bAD;GBxAfcGETRk1K0+>a8xV9G8QX_6sa(Xu}?GH?c%4V3`ap8 z;=Jd;m>A(>PO*_W=%IGHU^I^3`5F<hg@L1H_GTNraN<kB0`jap9MK8>!k7bsNgB-{ zP?|+AhMUz3En6&Z+(e}}Ava0qIs3^7E0vzj@ZcpZ1%i`uax7ah#&aJtt42N~T?^#3 zB26_j@d8~zb5NVbDvZ(q*82}8ylWG9#c$o~*!*4TQ)9DZK_y4<aTF<y-d9lr9vsag zmD?t~K5fR)!?9xHI3u7@9F}@oD!UZJ*QM+0z^o%dg6A?ldcEV3@J9hU3QHl6GpXGd znikYTKkDv>P>!#@^ZGkSz82&%{?4sWy>tAi))s+&OM(>qjZSL$eqFk_QAzjtJ9b=z zzsXMEyTr2y|J^agZ1VFtU~2KH)A7k!E#A$`?0I}HC7<V)S)`i+A|iCI)kZ$din$;( zy%=i2ixGSUcQq3nrh=!ks?(z!2PjB)w`d4j3M{vpYpaPI^8N}d!xe)nXh8tEGD!Y9 zFMXB;it=G2ICnlxehG=KP8hDqJU{*0e8gqI$_AGKr0oA4L>FDaO*DoLi~v~o;{W>x z9>X2I$!K`n3O}=9{S6_&_>dL{3_4ERIyXKZR6z30!Bu|rM7#)iP2(>+jSE!MYXZ3l z7(qcbz1(l7g0@V!6gsoie|Im~>+T@*NM@CN6&EZd8NDBj;w=o3itQ)>1c4*1v}A;6 z!edS38aQMxiYi@GEP;xo`IXp&DyBkD$QDf;FO@F{fG`w%l_Y6UaW)1dCBKT_0aBiK zxBA!fB6r^O#(GdQ6L6GD{W2QdodU<cW6$ONz~!U*VG1{Z$+2eQLIGE;!7!lF@C+)1 z-VmP6Cv<{}S~yD&&5!YoE+c%+z7UxCZLIV@nojSeyu~NS3SE5MT3p@0&Gd3?@7+;v zwn9z@s1y^Z>L)5|x5^rb_dw13E!SB4leLb*s>E($`}M?BDD)TBYW2W}{3~x=@MxPc zN~qy}M!Bi{eT;M=Q*-8GT?HCTjS`9iLPo&WM{)0bBMUxFbFk*8#%Lh`v?)%UT`!Vd zNY{66bEbh3C};OKrB%!i<x_h1Zm`<jFeT*CN%ATEIg;Enr7>wONEl`^008rR<b1|4 z542BV_1`t0|G)<4b4-2GDS@BNWIqKrviR&jW&Vws$s8xp;WjjpX^!kWk)^)7L+*-c zEX$$hGDQ^&wi!z$!u%m}X>3UB|A-eGoUpW9LNKwAJDjgu@H=|+aht7}Q}Tx3Y=x#3 zWnaYQcS#YtD`fO{;%Hr{L#60<;HUhHTGN!ukkGg>!Aw!DC)AoSuLnwV6vKrm#=M4F zB#6VU<$<fjV{{%KGc$m#7Tus+(uCzm)riv?+O6kdFSZ<*227O>y>P*8T<0>;p(x~@ z1w^0@P(EBb<=-#XX%TUN9W!GFZXN8F^G1_P5~4J{GM*VsBFuPmW#Sz3!g8$=XKM>8 z#WvTe3<rfdWe{l!`axYVzqpuIxZaRLzX;O~Bcqt<jGY6B!<t#72l<%vF$%Ct))tfb zqBLLY`J7#886do#(^j$67P6v>I4l6qayQ8S>+C61(G}Yfl!b(=^27vDtgnO{0=d*= zX(lllIxcJ!ERu~B0uqYk(Xl$!yNWxY&2m`eVuXJj>5RpY@nW)xV;-Xy;ize7WujU0 
zBGfFU&172Eq-l;D%3X+f11Mx@DPv0!UX;uFRQ8zLD|H{$h1n(l5Eo71d)k>ylP}bI znv~|7=g=4;;7@WQbM8hZ@drEPCgJ2wW_c-C4OvQ{36Kg=%*HQs<|Mz)6!>%JEHlMD zk?0uAEHeBS>7u<Q+BrJ7V~9stZXU>Y`A{jDX6}=?<SGGpi&ON)J-{_+5P+5s6##Y> zDE9IOpuGgUc7}#ssL($SmW5CQXcx}5wN-ecq;V{N6kv$-KGs56kQCuQcZ@_1<h$V- znvwajN8FCZ?o8|XR+>(f-4DP&oa0h{j>*KxZzhT*Imc)){o<IKAgBp;7I5SP#`c4{ z8`1VaMm)|Dtm7YTvhh1rufWq`7oe#zpCA(MIkwh;bpvZ$$?{0p=qjA*=wRmF161Ce zpTCrY&{lc{P?^=kgWA3VUZZgTa*}>@?AU`<U72*K;kI9@b8YLX^F+X--wx>b;NxJp z-6ODJcUGvQ-;5x~jbu(h#Q;L89f%UvG2R#r40)Il!t<ICdG*!uf{lCp$BtBMi)|jl zeOeIP5t?9pJ^Tm!H}A+F8C$8`KJGZA4sLt}2RC9&0o;K&JQBP;{o*SpT|LqaZXaph zDt%5S1_rvsQ*8|R^P8o)S<|fhbsk9<o@*IUhnS6UXCM~oCYCjV1c~f06&G-bO+F6f zv`_0IJ5-9xpK6T#m4;|%C>zo97E;*TtP0hm29)Z&9g3j-S%rwdeJEk*I!BADJKFD4 z41oR_!8mdgySMXMCA`MPmzeATbplkL!xQ)paAOa|8_ZM8{h+Yo{1uUya;XQq!V>K= z9YsS?FlT~~3e1Dgq_+|6jAPUMZGvKRp^8?=R%loX<BRZ6p;rimDgFc4>;PQoRM$gB zR&cmt&FSI|sLKn4p3c2qukr;3H29(&=0d2<vubhG{#t~g^{}(T4w|hYigt?r;jjhK zVZWO~<1?hVJQl3dJ+pzlATxCnO4`R-L#Myz?d_eFd<|S;obQd=jpt{r7U^l@$t?!3 z=5neee++U?2&xo<f=8KuNjN{{d7*8<;b@a=w^%0>=#tx)&;}P<m)6?FcF5#AET>p8 z{IC4^zwsh}j(^I#@8e=Z9yj+U-?>C#sXv>q9-iUGrO}eS1oz-A(c@`m`;P~)6oB-O z1Mdpkiz@(t&*5EB5t7>FU4e%X`1awf+%oblyHF7DYA`x=qoRGiKncEHf--=s;Rb|i zu^#!RU<Bw4`U$^<#weX4@}J9C!pKqK8oUWW<)&yqK9jcs9v~t1L~Vi3_U4Y~3W-&s z2O&P$8l6Ip=m@$y49GLw{XjGlWVYY^LO2rtOAZqInfF%Z%WSqqr{Mg+yB*Of;?5uo zwFI~fM+nQAUJ|%tkQ00gzd-yzuFGf%oO_Kgaasxy&_Os0!fg=#7I#rSgx>n^<PP%f z68tStbAUxW@ET3_$Uyg#Unn5~j%b0iB%4VTf@(*gwZ=L$ilh%A1iR3XJ-jC6;z5VW zlLK(Tcn?3#=>v5~(cRU@2On5LR#wqt;Kh>t=z#mVS57Ve)c{>WzsW1SEK!#^)*mgU zAMc@YcOrV)03RZ_j6FnQyQ1uu2jrHy3tloi@n>bgp9Kx~pAORd#5`sG&>&wS#LRx? zzWV>Dc>FxTpYNd{xU1M8zcGjy0@n~&<{)2P+s{`_gK{7BY0eDd{u(02$nORX;XGvy zM%NxI0C0SWUO;C<7bYc-L<i6}Rir=+-jUKAPF0|*i;?^|TxJeM)hFOG0}q@h&1L2v z4fdpYsw)j$X1elWj@?<Ia&zNPbE^N>h5mhCN;l-&LZNi0u)RTsjxn|dpaI@po*#4a z<KgP(-t_rE6m*}NG^kv$>!+dnln+Ej_nE2K;_``(9%v-KSw7LxKhe>HVev3^^l+Mn z8s|q*(!+1>PfHJT`nO3<@9(BUkKw|DL=kjk#)gY|y<Nk4r>*4(T(7omJ$`&<_H6m$ zIi86+zin%Fg!*uBo(iDhRRjw%ca;-_ke&xRxNR$v@|fqI3_8EfsDLXm;mWSW$B!cu zXx%(=P{`X`=iiz+4x!33pXBd?eO?gP+i~&PntOmSzPvIgut|f6#%Hm_!J)y3EO$Dn zQ;)lWEjhk{OcSm)GOhyp)v?A^N#VDX{4IV#$B+;7)6r^0{__vbJBKiD;hmHG9sVS2 z3w<6H$kKX>EfsA!G_<i6YN6ll;7h1MtU0W<TE1aL$YXTM1n=crkIf*^yR|PsFYp_i zL*(_N4PQ&}&T&o_wu@G_A#I@jNBGS4js0ET{R%I1+DLwlm%qo$f5Xe)=S7I`Z}6`0 z<zcVLoFva2YFZz4si=xl-y(#qHpoXx@(-BnFX5u++R@9D$^XIUM2`#ipX9&A&y6iR z6V~O|xa9o3xQU+i-aiau{wTKrly(n{!tH;gv<03dyW!olwKO<z7S|ov7;p$%1rFdT z(qbb3y@B9yGH9WCw!tR|-a$i8(su}V48}Sv&!Jx5A?C10r)_P^uTZwpog5eZd!V59 zlNe2x1!V(_aXtWjkZY+dl2!K#Uqhp&UBc}en{cr9+invX^};{|8hXtU?L3!=ni$ev z6%_4J%TPt0qVqLyQGlE{Hvx{R82|LEZ#uZB$RqYvCuh&{xv6#^pd+6+KR@%<@$+Z> zvfzPe${p4ADxoY+pMcqUbJ)!%it=w`MH#dCei7x6Mf{tft-{s!w-2T)odTGMIp(g5 z^it<5RDy>F6;m<-n!dCNVtWhjqu3$E)RL%CQ({izFZ**)t^f}L#bkJ^-hFs@Zb-4B zig)b815ym2)IKz%*aAQbV&<y?DaIrGwk_bog^BXgLuH`<eme`w@g2gUez%;(q<+6d z2-+~fAJX<X0C743YxP&OSr0#LC!BWUkvfonssud>Q9%sf1O5x#ju@ae=eab8v`1Xv zE?yNFAiy`o1VJNdhaiYH3=kObfxA2g8vz{f%(e+`1L!B5Rr}<28tC_@i2!$|0c^U% z_R?7zf_`0Tm?I9>?nnko=f>qP@$2Z|v(SZo1Z$J+AHlR9Y6#Ras3SOMlUzT5?Fu_c zkvGn-%Ef;umwVx|e{EF(nzusI3lZ93+Wy3h{fQa77#g|@&6y9#jIDv#<-I%)Yq}4@ zVl$8yOh5nhMPFPb`RCx?V2|JfGZ$v&XS6X#7{EQR@@+Rxjnd#SE)Qc5j+Vv{PEo== zIJU}P<z2r2Mk{4D&Ihlwtp&ubg7r2fT(|pXmx#_6&q6`&XMJ3P1YM(`J%v)E{hjM$ z?SH}UnBEobj?%7tC2M<PX2(yHZi?oFHPF7Hav7T)%4LH@Ug9e{Z6z=B67%wDUOtOU z`$=57&LWxXJkc^a!^D*P{Sw1Db=snAD}Ill2fslz`7Xaa$1e}z(p60HCA>_8roiO| zM|xEhE}IhT?992T%NOR$vzOm0Co}WO`I$Kx-~Dq8;;A2yL%n};W@`G27tftb=2-BT zd3hTbxz?YfvrJOuv!CMSCNE{aeGHe27KVKK;-#4~$RQRu^;UUy>aCgN>rD8&d0|w~ z<XgOahL=CY%a7xtaE86Lv&};Z5tQUbUVfUFpXG&)jL9$Z?l182=Xv>yy!<jRjG&PG 
z6<+>(Uj8Rugem=9-u*AU{1aaO881SO{w41?rV6-5clY87D2<auWPbve;Z4H>_J2nF z2LAH$$fnUD#0%dx^2o>_Lnym{2L}<zVe}DQH;)Vfk9lesPj<dG@+eP0$KA`kZZ3^Z z4wkm<9eHZU@sY<ypF$qPh$lX@;~Y_-(L+FY_K!X`a%g1N$X-6#J%yaEjBXu0I9S?o zU<Bu^Bh_=d-q!V5>_DQTOl}6C$rRI{z)c%^qYTqj))cY1&jIPs1{qrmeh2SIw=YSV zMW{H&YlMPXiq3xRwdg(QoQS^)&2fl4a~~eJW;nkX@dq!E7AAbSR}Wi^xuNLB4F7wZ zW^rkJOrn4eOLR-6N}Vz67zVz%K~tMxxMYRcD?EUGm{56GW3O*=L4Gx7)2z14y`AQj z1ZQaPB_zkF+?Q*i)x3q39bJO7ZxzWcl#Rd}yn%eExUJpt$ENR4fpB}oO2MJc;S#ZY zx+38^fAGsK9NC8K3Lpz^GLr_pthID9Lr{s}<QxY}$&<%w5BbHN{}&{@C`)rkuRxz1 zb4Q(~792pwoW2eZyd%ovbY)?&`tnP!7__8Nz!^>=TCA_Z-v~Eldd(sR&{9kh;T(_( zpQ&aOd)4XxRk71KZ4Yfu4;+M>i#7QT=4X+pSo6=$57$#I3hIHAF=%VxSPI`kbPOUu zG-ErhxRc*7JM>6i{ot@6ccMs65`50sr+yfD4|?PmQy=i5lj%4QEHD3T-M%zhU5k}f zBj0SEVz!*Ryh9_IirZ_5FA?sOBw^OGhPTUQtWpHrUPKH7w>{EYU7o6<QI28m+x(y< zcw5%6v~a#;*4nHjcU}2D_Pe&w0P>2o+(ensN<ZqxmH9E9Of7FO1yBRu0P_G3jgH7B z^IUdXzfMcl(QT}?xMUm#y4>c1XqgmQt}o^pkd)4Lm?4rrI&MOaWH?`~)y_C7v>~|P z5RiU?0_wE|nI+a5H`WlAtGrrY%Pi{kPm<LAxU4hEm4oQ3>bwyJ(Uo?zTB)@PA#pGA zL8Q#k_jnQB?5N-OsJ~Dti}o;t;xPXI7!dm-rQ|;*WfQn?zGgU^xHDc#Pv}3#P8R8m zPBys>f7AHOegPNs3-~nJpMFqJb2kfp{T15>v34531i<GNbO*c~Yi@!D@*x8&zdjVT z7Wt4oT*3L#nQ+5F`@9O)jc`9vbVFZ(m#>!q{M@)c!2lG8+!Vft;eSw;_bnsF08cO! z9l_0gl;ppO3fayB$K5#T{WOJ%z|UM8>}1sNj>OIu7cGYP9K7ExG}}3FrWJ-`z;X2m z$fbyE?t4a*!ED2f>)hkY5DDDy=(wg1a#{C)==>DGFaUqS*o}A2qYSYZMPFcvUfL9X z?Giej5+{&1ITzx`f!N8}-m;=PI1_da;pD`VZJ$0Z68ui>mFHwW?%<g`g@b`Q9L7yT zC024?YDNm1GbW}OOvl8hH*vTw@aBzWxQlmy8hNo^7<G+pcw)nxSZP^^o-wnNC>P(m z?B~Fx8f50t-CV~{yF6^mz|<wg!vW)CD7ahAwR%<ly%1A2Y#_37n#|6JHnTd~u_bJe zg7dOfj3v8(C0DWeiT!FJ`8h(1EelXG6nNZDDS=;iz1djSF$rjH{O9Bt`Xnd8WuqgW zAfQMz-R|w=6<5D09zkBfL}Ol_H(SfzE)>US)1`{Skbt5w7faBIcAW?<N$v<RfsR)7 zvkRo<qQlfC6VrK2!om+-3j;SYEJS#3&iSB2{@Q7dy@nQUYK#+OOhd?G(&r5kM5jjc zt7p)hXtMAy1@^0dQnaT?DnuC7A?s_#TGaX9fZLn`$lWpUk4NDYg?zq>(MAt=--7HY zliSS5u^&4nhW)N7;NUzCj-COKoBEtOnb)x5wMa}#lo8BkX5uCinhpn%(8j!O4`?QR zbTQ%fTXg6tRP>yFcc$5_r(muae13R3buW!wZ{-X@Rm<yx#zR=e+EUGwY*&Cdj%zi% zFxA7WtA(DX+T~eM$@6;&G(s>SrSfzZ&Hn1A0d!`zfOEE%5QPVwWHfm`Xj%61IW|9s zEKBCl#P)<@yypuAY4|qR-Kg%jEb)wxkv)}f8t^Xr8YS45Hb}+nFdR&lwVVC)MbhjP z!;bORUz~7uCcC(1{+kf17Jg<BcSjI&hX20)T&6QX66y=V@gEs~{yZ)U&8Thm$qIf- z6S~%{H4H@BkDTQW0i8=BnbKq*3)7nW9j4!nOPAUV&CZSr`x4%ne#PbJ)oNGyI<~C; zSQWV&RL1Br(nXQeqCnW+qGm>#qvz;pLCtJ$>FI%|OHY=hjed!-=@$8(|3l*F4F0lT zLZ#Cvx`H(=uCVVs8~j&_-XDstU`0C+y^pt0b_R7b9HpD1bW@aKuOHshtG#pu9?rvh z3yo>BZni4fNR%QPA(HV3uhJD5PDb<=A=S6)W}A|YMk)NCkqoZYNCxZ3sNTZ1u}wES zm25|p?o=|wDnv3FNJtypxOXbqqe`|rO5wDP+;%A$9KLt!tpX1+{XQkz8>MjgMzTFh zhI2#rDj7Vy_vz-Kk{yWBgGvS`auoU)-N%{9<8&71%^@XwJWAnyjocnnvL~YI<9hpK z1P^Z}drHZkj8ZsrBiR#57SrpSZ=vTtrJHAz?CB_dM#*9&L%%$&w}+$ZGrD;$O6JB7 zvlD+2qi`DggW9dNMc@oR5FsZVW}259Pzi!f(PR$z4<o1GXZRFi8wW^f$qQ-6UD?pT zz}b#lbSW++Fz?A~!2Ruj1yi_?7q`jv1|LKo-8<equOflfCca#`Q`WYxuXb#l`&xWD ze(@E84bvcKxe2cnrebK~{&d2D#gaK?$Cu-t<3pCEgWD$JDQFJbWa)LsB9$#!Jr~3h zlCK(S6u-2OPav!8&dNfw9*^6<wI60%RUnof6}RK5*|RZ2Fq0m|*|ME+U0NNQ^J0SR zaPFdg3PQ1mQa)<r=__pEX2Y+YA;17HRN{Bhv+Y~lc1ONne5!bMTsk-v>loVDwmeP* zi7fl^v2(3j`-k7XP=UcO9n<br>wiCrKo_@`S%-6n>xtth^v!dhp0=-IN-m5B+qkDz zLu|b3PKVC}1uWnIJ!BS;z+YrUTmlZ^Z<=G7&SO~JVZ{ZY78`(ggP&Lj9_E!l{n2Y! 
ziGizgiFi4>{S+5wXTP>b7#^UwO*&r~|D3upDSQrwcvxh?3;DtCif%v6pJ%_ePiG;^ z6wQ;D74N)s<GMb<y3-E9&b~4j%rG=9<-9m5>foXt6zir~``oohB8<#8k6v1>pzFMn zz}Ae%o#hn!V3im1i~9%MwEY#Gi;WZ4nx9|hafJfLR$H~37(yqO*6MYyRkxuK*zD>{ z5djj|lk(6CH(WpFN=v6+f-dX&pvYgdl5^T^1BF_OEsaR;yjwXP36-7qj$<mL&j#R2 z*jfs+8URh>I!<da=41MY?WLun6SSJ;$Ka@#6UCn2yjZSzppNHNGd_|>dc-seGShG# z!;*&^vE%VB9_V%qhU0nC*ha9&^g<2WAECjyF%Jw-ey9rDjyUA`awRC$X$^=d<R?@x zVPb~E2@-$MB42^7;3TZmauMq4YI~*rEQc)FCr4nAkAoMA6(`oe?5t%#mVVQvf>q=d znc`NTGx0blb(m8@>`@>TtLb8G2~xzL<fWtc*q-s&)VpWKT;NR?Gl`C<Ld%Lg^s^XW z7Bh*Qf|5pBUGkgwWk<@rfIIVMeU7>vZ3H$RlEeH>Mx)<?@_a=$rimpnGTV(3<VGFA z=rICyi!QF_s=Ce26G5>N^+WNsXK(|40{#U{#oZ{nj^j_YXABPy&Q%eOhJV^SIxFDu zhVG@dV(euy>?v4<43--sT&A=#j1lu-3gTZUSqrvR1n1i=b`}<C9>kyMrYp4ZV-R|( zpfULrs%KO%y?HIOku`7bhrGFfyjMvc^GDo_8C(^Bi;Psf(@)VG#gtxTqi?+7*%G$I zwOzc=BdZ6~@-)=4<%!=lTc<YM>?eAgJvp8~Lm~IJS6F1B#f_{pMRV(n=$@9gws}y^ zoe(d6&<#y(LJeJSzq#R77GZ2mqfWf#K3Z8cv6buX*`SGOi0^G;#HGt6)cp<T>=6{; z{J)Q{)EW=EsmUAI)aC(SeXG`(-)M{XkczAyP~(eAG=4$lm7oufpq!pQ2-@u9>5Izk zkGn4_w}0rt^aV)$`1+!9d+8(hMdkJb>kAs2R@WAwk6cZwAIHp~iHtMjk}m{vqrTz( z=q2~T^@u3N><B4s1%1NK@0lMRZTE5Y%7dRHy}eSoefvT6$^)Gzy}eSo{iFBLD-Uq4 zP=?5WeH?p)(vOXA&Cy1Fo+Z5hVLfsT6onRI96kh%*_nrRWO6@+RT8GBB6Z}YeSr@~ z(0f@^)aDySZRq2tt)@NmDIn{C%_jC+5NPV-k~eQvZYLIt|64^(`!6N#qu+We@-g*W zQJX(p)MovD3&?s<{U&G5W~++5pLZT~pK(Y~DqUpuXYTxDQN{ip#?jnwDkY!K7aaN8 z?|v2`q6;#}(e+)@WsAP-8IaI3U0m~YH93I+d}+&8x_a7LdliNTc#FWmp^&ErDZ-uP z?AP|nR}eAc;T0(FksriqMNiSck73K<3W&9OmsbgL8H)A;Y25G5X4kn+^bXY09?+r% zmY+?wqlvVRKS77-NH!}#r?#_4n{J;#3Skq8j8R;P6+S=7|B0W8HaT;ZAkg9rLfuv` zw`xUnNTD1z%3KVqtVMaoE;<0(N3+EDwo<<**B`Fs=bI?*&#^m3nTht@jt}fD?J58c zRKi-E^ylb?kI2cl$$4Dw8@bRDiZv>if1wy3v*SltLhcHk46sp2x-gpL7=KRa881YJ zl9Rl=zzf&X$&0+a#LEOfdzp8y;3D^Aw-6{}@szyEH=pi#1OE3K-+YE|Iy5zXHOY6M z?eh-bo#MOGe3uJZ*t8)6lh5(Z>;4Vc3=eLgXGAi^H)nX^BvjibU%*{~xajU)x;l9l z-w&`+ub>5nw+|1%@Aiq2;gLc5sBhrkP3}AYujMl`F5@bDNWfmXY-j?U)_;vH&smV{ z=390rc0EtvgugUR@WXBGXDgjw;o)66>rkx*mws*ZM@o6CDc+~^qO$qJQguLwUJ)}z zBLiGssw}2Bbqs(HLxjqXjqwF{D^XV*Dz0Me=Q1faUvgQCM@cP5n_ehk;Z#1CkRv5q zSgjFWw(`PpoN!7f6o<)QRT5rUxrEa!;UG%Ldy^f!kVHkBPH5gu9_3{ZFMD~}#|zy8 zbxvTy@D#R>Q2yqJSmYzTJj2ToUfA3BF<QimPT5(MpPNK+Sp%2h!C~z2=l=&wdjNyN x2V7U)AJTJsi|gR<=HX$amW_INaAfDm;J%>``v0MS71{s)ztJaY*B(9j{{uOAWNiQd literal 0 HcmV?d00001 diff --git a/gam/gdata/analytics/sites/__init__.py b/gam/gdata/analytics/sites/__init__.py new file mode 100755 index 00000000000..e69de29bb2d diff --git a/gam/gdata/analytics/sites/client.py b/gam/gdata/analytics/sites/client.py new file mode 100755 index 00000000000..2915fc58c63 --- /dev/null +++ b/gam/gdata/analytics/sites/client.py @@ -0,0 +1,462 @@ +#!/usr/bin/python +# +# Copyright 2009 Google Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""SitesClient extends gdata.client.GDClient to streamline Sites API calls.""" + + +__author__ = 'e.bidelman (Eric Bidelman)' + +import atom.data +import gdata.client +import gdata.sites.data +import gdata.gauth + + +# Feed URI templates +CONTENT_FEED_TEMPLATE = '/feeds/content/%s/%s/' +REVISION_FEED_TEMPLATE = '/feeds/revision/%s/%s/' +ACTIVITY_FEED_TEMPLATE = '/feeds/activity/%s/%s/' +SITE_FEED_TEMPLATE = '/feeds/site/%s/' +ACL_FEED_TEMPLATE = '/feeds/acl/site/%s/%s/' + + +class SitesClient(gdata.client.GDClient): + + """Client extension for the Google Sites API service.""" + + host = 'sites.google.com' # default server for the API + domain = 'site' # default site domain name + api_version = '1.1' # default major version for the service. + auth_service = 'jotspot' + auth_scopes = gdata.gauth.AUTH_SCOPES['jotspot'] + ssl = True + + def __init__(self, site=None, domain=None, auth_token=None, **kwargs): + """Constructs a new client for the Sites API. + + Args: + site: string (optional) Name (webspace) of the Google Site + domain: string (optional) Domain of the (Google Apps hosted) Site. + If no domain is given, the Site is assumed to be a consumer Google + Site, in which case the value 'site' is used. + auth_token: (optional) gdata.gauth.ClientLoginToken, AuthSubToken, or + OAuthToken which authorizes this client to edit the user's data. + kwargs: The other parameters to pass to gdata.client.GDClient + constructor. + """ + gdata.client.GDClient.__init__(self, auth_token=auth_token, **kwargs) + self.site = site + if domain is not None: + self.domain = domain + + def __make_kind_category(self, label): + if label is None: + return None + return atom.data.Category( + scheme=gdata.sites.data.SITES_KIND_SCHEME, + term='%s#%s' % (gdata.sites.data.SITES_NAMESPACE, label), label=label) + + __MakeKindCategory = __make_kind_category + + def __upload(self, entry, media_source, auth_token=None, **kwargs): + """Uploads an attachment file to the Sites API. + + Args: + entry: gdata.sites.data.ContentEntry The Atom XML to include. + media_source: gdata.data.MediaSource The file payload to be uploaded. + auth_token: (optional) gdata.gauth.ClientLoginToken, AuthSubToken, or + OAuthToken which authorizes this client to edit the user's data. + kwargs: Other parameters to pass to gdata.client.post(). + + Returns: + The created entry. + """ + uri = self.make_content_feed_uri() + return self.post(entry, uri, media_source=media_source, + auth_token=auth_token, **kwargs) + + def _get_file_content(self, uri): + """Fetches the file content from the specified URI. + + Args: + uri: string The full URL to fetch the file contents from. + + Returns: + The binary file content. + + Raises: + gdata.client.RequestError: on error response from server. 
+ """ + server_response = self.request('GET', uri) + if server_response.status != 200: + raise gdata.client.RequestError, {'status': server_response.status, + 'reason': server_response.reason, + 'body': server_response.read()} + return server_response.read() + + _GetFileContent = _get_file_content + + def make_content_feed_uri(self): + return CONTENT_FEED_TEMPLATE % (self.domain, self.site) + + MakeContentFeedUri = make_content_feed_uri + + def make_revision_feed_uri(self): + return REVISION_FEED_TEMPLATE % (self.domain, self.site) + + MakeRevisionFeedUri = make_revision_feed_uri + + def make_activity_feed_uri(self): + return ACTIVITY_FEED_TEMPLATE % (self.domain, self.site) + + MakeActivityFeedUri = make_activity_feed_uri + + def make_site_feed_uri(self, site_name=None): + if site_name is not None: + return (SITE_FEED_TEMPLATE % self.domain) + site_name + else: + return SITE_FEED_TEMPLATE % self.domain + + MakeSiteFeedUri = make_site_feed_uri + + def make_acl_feed_uri(self): + return ACL_FEED_TEMPLATE % (self.domain, self.site) + + MakeAclFeedUri = make_acl_feed_uri + + def get_content_feed(self, uri=None, auth_token=None, **kwargs): + """Retrieves the content feed containing the current state of site. + + Args: + uri: string (optional) A full URI to query the Content feed with. + auth_token: (optional) gdata.gauth.ClientLoginToken, AuthSubToken, or + OAuthToken which authorizes this client to edit the user's data. + kwargs: Other parameters to pass to self.get_feed(). + + Returns: + gdata.sites.data.ContentFeed + """ + if uri is None: + uri = self.make_content_feed_uri() + return self.get_feed(uri, desired_class=gdata.sites.data.ContentFeed, + auth_token=auth_token, **kwargs) + + GetContentFeed = get_content_feed + + def get_revision_feed(self, entry_or_uri_or_id, auth_token=None, **kwargs): + """Retrieves the revision feed containing the revision history for a node. + + Args: + entry_or_uri_or_id: string or gdata.sites.data.ContentEntry A full URI, + content entry node ID, or a content entry object of the entry to + retrieve revision information for. + auth_token: (optional) gdata.gauth.ClientLoginToken, AuthSubToken, or + OAuthToken which authorizes this client to edit the user's data. + kwargs: Other parameters to pass to self.get_feed(). + + Returns: + gdata.sites.data.RevisionFeed + """ + uri = self.make_revision_feed_uri() + if isinstance(entry_or_uri_or_id, gdata.sites.data.ContentEntry): + uri = entry_or_uri_or_id.FindRevisionLink() + elif entry_or_uri_or_id.find('/') == -1: + uri += entry_or_uri_or_id + else: + uri = entry_or_uri_or_id + return self.get_feed(uri, desired_class=gdata.sites.data.RevisionFeed, + auth_token=auth_token, **kwargs) + + GetRevisionFeed = get_revision_feed + + def get_activity_feed(self, uri=None, auth_token=None, **kwargs): + """Retrieves the activity feed containing recent Site activity. + + Args: + uri: string (optional) A full URI to query the Activity feed. + auth_token: (optional) gdata.gauth.ClientLoginToken, AuthSubToken, or + OAuthToken which authorizes this client to edit the user's data. + kwargs: Other parameters to pass to self.get_feed(). + + Returns: + gdata.sites.data.ActivityFeed + """ + if uri is None: + uri = self.make_activity_feed_uri() + return self.get_feed(uri, desired_class=gdata.sites.data.ActivityFeed, + auth_token=auth_token, **kwargs) + + GetActivityFeed = get_activity_feed + + def get_site_feed(self, uri=None, auth_token=None, **kwargs): + """Retrieves the site feed containing a list of sites a user has access to. 
+ + Args: + uri: string (optional) A full URI to query the site feed. + auth_token: (optional) gdata.gauth.ClientLoginToken, AuthSubToken, or + OAuthToken which authorizes this client to edit the user's data. + kwargs: Other parameters to pass to self.get_feed(). + + Returns: + gdata.sites.data.SiteFeed + """ + if uri is None: + uri = self.make_site_feed_uri() + return self.get_feed(uri, desired_class=gdata.sites.data.SiteFeed, + auth_token=auth_token, **kwargs) + + GetSiteFeed = get_site_feed + + def get_acl_feed(self, uri=None, auth_token=None, **kwargs): + """Retrieves the acl feed containing a site's sharing permissions. + + Args: + uri: string (optional) A full URI to query the acl feed. + auth_token: (optional) gdata.gauth.ClientLoginToken, AuthSubToken, or + OAuthToken which authorizes this client to edit the user's data. + kwargs: Other parameters to pass to self.get_feed(). + + Returns: + gdata.sites.data.AclFeed + """ + if uri is None: + uri = self.make_acl_feed_uri() + return self.get_feed(uri, desired_class=gdata.sites.data.AclFeed, + auth_token=auth_token, **kwargs) + + GetAclFeed = get_acl_feed + + def create_site(self, title, description=None, source_site=None, + theme=None, uri=None, auth_token=None, **kwargs): + """Creates a new Google Site. + + Note: This feature is only available to Google Apps domains. + + Args: + title: string Title for the site. + description: string (optional) A description/summary for the site. + source_site: string (optional) The site feed URI of the site to copy. + This parameter should only be specified when copying a site. + theme: string (optional) The name of the theme to create the site with. + uri: string (optional) A full site feed URI to override where the site + is created/copied. By default, the site will be created under + the currently set domain (e.g. self.domain). + auth_token: (optional) gdata.gauth.ClientLoginToken, AuthSubToken, or + OAuthToken which authorizes this client to edit the user's data. + kwargs: Other parameters to pass to gdata.client.post(). + + Returns: + gdata.sites.data.SiteEntry of the created site. + """ + new_entry = gdata.sites.data.SiteEntry(title=atom.data.Title(text=title)) + + if description is not None: + new_entry.summary = gdata.sites.data.Summary(text=description) + + # Add the source link if we're making a copy of a site. + if source_site is not None: + source_link = atom.data.Link(rel=gdata.sites.data.SITES_SOURCE_LINK_REL, + type='application/atom+xml', + href=source_site) + new_entry.link.append(source_link) + + if theme is not None: + new_entry.theme = gdata.sites.data.Theme(text=theme) + + if uri is None: + uri = self.make_site_feed_uri() + + return self.post(new_entry, uri, auth_token=auth_token, **kwargs) + + CreateSite = create_site + + def create_page(self, kind, title, html='', page_name=None, parent=None, + auth_token=None, **kwargs): + """Creates a new page (specified by kind) on a Google Site. + + Args: + kind: string The type of page/item to create. For example, webpage, + listpage, comment, announcementspage, filecabinet, etc. The full list + of supported kinds can be found in gdata.sites.gdata.SUPPORT_KINDS. + title: string Title for the page. + html: string (optional) XHTML for the page's content body. + page_name: string (optional) The URL page name to set. If not set, the + title will be normalized and used as the page's URL path. + parent: string or gdata.sites.data.ContentEntry (optional) The parent + entry or parent link url to create the page under. 
+ auth_token: (optional) gdata.gauth.ClientLoginToken, AuthSubToken, or + OAuthToken which authorizes this client to edit the user's data. + kwargs: Other parameters to pass to gdata.client.post(). + + Returns: + gdata.sites.data.ContentEntry of the created page. + """ + new_entry = gdata.sites.data.ContentEntry( + title=atom.data.Title(text=title), kind=kind, + content=gdata.sites.data.Content(text=html)) + + if page_name is not None: + new_entry.page_name = gdata.sites.data.PageName(text=page_name) + + # Add parent link to entry if it should be uploaded as a subpage. + if isinstance(parent, gdata.sites.data.ContentEntry): + parent_link = atom.data.Link(rel=gdata.sites.data.SITES_PARENT_LINK_REL, + type='application/atom+xml', + href=parent.GetSelfLink().href) + new_entry.link.append(parent_link) + elif parent is not None: + parent_link = atom.data.Link(rel=gdata.sites.data.SITES_PARENT_LINK_REL, + type='application/atom+xml', + href=parent) + new_entry.link.append(parent_link) + + return self.post(new_entry, self.make_content_feed_uri(), + auth_token=auth_token, **kwargs) + + CreatePage = create_page + + def create_webattachment(self, src, content_type, title, parent, + description=None, auth_token=None, **kwargs): + """Creates a new webattachment within a filecabinet. + + Args: + src: string The url of the web attachment. + content_type: string The MIME type of the web attachment. + title: string The title to name the web attachment. + parent: string or gdata.sites.data.ContentEntry (optional) The + parent entry or url of the filecabinet to create the attachment under. + description: string (optional) A summary/description for the attachment. + auth_token: (optional) gdata.gauth.ClientLoginToken, AuthSubToken, or + OAuthToken which authorizes this client to edit the user's data. + kwargs: Other parameters to pass to gdata.client.post(). + + Returns: + gdata.sites.data.ContentEntry of the created page. + """ + new_entry = gdata.sites.data.ContentEntry( + title=atom.data.Title(text=title), kind='webattachment', + content=gdata.sites.data.Content(src=src, type=content_type)) + + if isinstance(parent, gdata.sites.data.ContentEntry): + link = atom.data.Link(rel=gdata.sites.data.SITES_PARENT_LINK_REL, + type='application/atom+xml', + href=parent.GetSelfLink().href) + elif parent is not None: + link = atom.data.Link(rel=gdata.sites.data.SITES_PARENT_LINK_REL, + type='application/atom+xml', href=parent) + + new_entry.link.append(link) + + # Add file decription if it was specified + if description is not None: + new_entry.summary = gdata.sites.data.Summary(type='text', + text=description) + + return self.post(new_entry, self.make_content_feed_uri(), + auth_token=auth_token, **kwargs) + + CreateWebAttachment = create_webattachment + + def upload_attachment(self, file_handle, parent, content_type=None, + title=None, description=None, folder_name=None, + auth_token=None, **kwargs): + """Uploads an attachment to a parent page. + + Args: + file_handle: MediaSource or string A gdata.data.MediaSource object + containing the file to be uploaded or the full path name to the + file on disk. + parent: gdata.sites.data.ContentEntry or string The parent page to + upload the file to or the full URI of the entry's self link. + content_type: string (optional) The MIME type of the file + (e.g 'application/pdf'). This should be provided if file is not a + MediaSource object. + title: string (optional) The title to name the attachment. If not + included, the filepath or media source's filename is used. 
+ description: string (optional) A summary/description for the attachment. + folder_name: string (optional) The name of an existing folder to upload + the attachment to. This only applies when the parent parameter points + to a filecabinet entry. + auth_token: (optional) gdata.gauth.ClientLoginToken, AuthSubToken, or + OAuthToken which authorizes this client to edit the user's data. + kwargs: Other parameters to pass to self.__upload(). + + Returns: + A gdata.sites.data.ContentEntry containing information about the created + attachment. + """ + if isinstance(parent, gdata.sites.data.ContentEntry): + link = atom.data.Link(rel=gdata.sites.data.SITES_PARENT_LINK_REL, + type='application/atom+xml', + href=parent.GetSelfLink().href) + else: + link = atom.data.Link(rel=gdata.sites.data.SITES_PARENT_LINK_REL, + type='application/atom+xml', + href=parent) + + if not isinstance(file_handle, gdata.data.MediaSource): + ms = gdata.data.MediaSource(file_path=file_handle, + content_type=content_type) + else: + ms = file_handle + + # If no title specified, use the file name + if title is None: + title = ms.file_name + + new_entry = gdata.sites.data.ContentEntry(kind='attachment') + new_entry.title = atom.data.Title(text=title) + new_entry.link.append(link) + + # Add file decription if it was specified + if description is not None: + new_entry.summary = gdata.sites.data.Summary(type='text', + text=description) + + # Upload the attachment to a filecabinet folder? + if parent.Kind() == 'filecabinet' and folder_name is not None: + folder_category = atom.data.Category( + scheme=gdata.sites.data.FOLDER_KIND_TERM, term=folder_name) + new_entry.category.append(folder_category) + + return self.__upload(new_entry, ms, auth_token=auth_token, **kwargs) + + UploadAttachment = upload_attachment + + def download_attachment(self, uri_or_entry, file_path): + """Downloads an attachment file to disk. + + Args: + uri_or_entry: string The full URL to download the file from. + file_path: string The full path to save the file to. + + Raises: + gdata.client.RequestError: on error response from server. + """ + uri = uri_or_entry + if isinstance(uri_or_entry, gdata.sites.data.ContentEntry): + uri = uri_or_entry.content.src + + f = open(file_path, 'wb') + try: + f.write(self._get_file_content(uri)) + except gdata.client.RequestError, e: + f.close() + raise e + f.flush() + f.close() + + DownloadAttachment = download_attachment diff --git a/gam/gdata/analytics/sites/data.py b/gam/gdata/analytics/sites/data.py new file mode 100755 index 00000000000..ff09ac7b956 --- /dev/null +++ b/gam/gdata/analytics/sites/data.py @@ -0,0 +1,377 @@ +#!/usr/bin/python +# +# Copyright 2009 Google Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Data model classes for parsing and generating XML for the Sites Data API.""" + +__author__ = 'e.bidelman (Eric Bidelman)' + + +import atom.core +import atom.data +import gdata.acl.data +import gdata.data + +# XML Namespaces used in Google Sites entities. 
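+# The *_TEMPLATE constants below are Clark-notation qname templates: e.g.
+# SITES_TEMPLATE % 'pageName' expands to
+# '{http://schemas.google.com/sites/2008}pageName', the _qname used by the
+# atom.core.XmlElement subclasses defined in this module.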
+SITES_NAMESPACE = 'http://schemas.google.com/sites/2008' +SITES_TEMPLATE = '{http://schemas.google.com/sites/2008}%s' +SPREADSHEETS_NAMESPACE = 'http://schemas.google.com/spreadsheets/2006' +SPREADSHEETS_TEMPLATE = '{http://schemas.google.com/spreadsheets/2006}%s' +DC_TERMS_TEMPLATE = '{http://purl.org/dc/terms}%s' +THR_TERMS_TEMPLATE = '{http://purl.org/syndication/thread/1.0}%s' +XHTML_NAMESPACE = 'http://www.w3.org/1999/xhtml' +XHTML_TEMPLATE = '{http://www.w3.org/1999/xhtml}%s' + +SITES_PARENT_LINK_REL = SITES_NAMESPACE + '#parent' +SITES_REVISION_LINK_REL = SITES_NAMESPACE + '#revision' +SITES_SOURCE_LINK_REL = SITES_NAMESPACE + '#source' + +SITES_KIND_SCHEME = 'http://schemas.google.com/g/2005#kind' +ANNOUNCEMENT_KIND_TERM = SITES_NAMESPACE + '#announcement' +ANNOUNCEMENT_PAGE_KIND_TERM = SITES_NAMESPACE + '#announcementspage' +ATTACHMENT_KIND_TERM = SITES_NAMESPACE + '#attachment' +COMMENT_KIND_TERM = SITES_NAMESPACE + '#comment' +FILECABINET_KIND_TERM = SITES_NAMESPACE + '#filecabinet' +LISTITEM_KIND_TERM = SITES_NAMESPACE + '#listitem' +LISTPAGE_KIND_TERM = SITES_NAMESPACE + '#listpage' +WEBPAGE_KIND_TERM = SITES_NAMESPACE + '#webpage' +WEBATTACHMENT_KIND_TERM = SITES_NAMESPACE + '#webattachment' +FOLDER_KIND_TERM = SITES_NAMESPACE + '#folder' +TAG_KIND_TERM = SITES_NAMESPACE + '#tag' + +SUPPORT_KINDS = [ + 'announcement', 'announcementspage', 'attachment', 'comment', 'filecabinet', + 'listitem', 'listpage', 'webpage', 'webattachment', 'tag' + ] + + +class Revision(atom.core.XmlElement): + """Google Sites <sites:revision>.""" + _qname = SITES_TEMPLATE % 'revision' + + +class PageName(atom.core.XmlElement): + """Google Sites <sites:pageName>.""" + _qname = SITES_TEMPLATE % 'pageName' + + +class SiteName(atom.core.XmlElement): + """Google Sites <sites:siteName>.""" + _qname = SITES_TEMPLATE % 'siteName' + + +class Theme(atom.core.XmlElement): + """Google Sites <sites:theme>.""" + _qname = SITES_TEMPLATE % 'theme' + + +class Deleted(atom.core.XmlElement): + """Google Sites <gd:deleted>.""" + _qname = gdata.data.GDATA_TEMPLATE % 'deleted' + + +class Publisher(atom.core.XmlElement): + """Google Sites <dc:pulisher>.""" + _qname = DC_TERMS_TEMPLATE % 'publisher' + + +class Worksheet(atom.core.XmlElement): + """Google Sites List Page <gs:worksheet>.""" + + _qname = SPREADSHEETS_TEMPLATE % 'worksheet' + name = 'name' + + +class Header(atom.core.XmlElement): + """Google Sites List Page <gs:header>.""" + + _qname = SPREADSHEETS_TEMPLATE % 'header' + row = 'row' + + +class Column(atom.core.XmlElement): + """Google Sites List Page <gs:column>.""" + + _qname = SPREADSHEETS_TEMPLATE % 'column' + index = 'index' + name = 'name' + + +class Data(atom.core.XmlElement): + """Google Sites List Page <gs:data>.""" + + _qname = SPREADSHEETS_TEMPLATE % 'data' + startRow = 'startRow' + column = [Column] + + +class Field(atom.core.XmlElement): + """Google Sites List Item <gs:field>.""" + + _qname = SPREADSHEETS_TEMPLATE % 'field' + index = 'index' + name = 'name' + + +class InReplyTo(atom.core.XmlElement): + """Google Sites List Item <thr:in-reply-to>.""" + + _qname = THR_TERMS_TEMPLATE % 'in-reply-to' + href = 'href' + ref = 'ref' + source = 'source' + type = 'type' + + +class Content(atom.data.Content): + """Google Sites version of <atom:content> that encapsulates XHTML.""" + + def __init__(self, html=None, type=None, **kwargs): + if type is None and html: + type = 'xhtml' + super(Content, self).__init__(type=type, **kwargs) + if html is not None: + self.html = html + + def _get_html(self): + if 
self.children: + return self.children[0] + else: + return '' + + def _set_html(self, html): + if not html: + self.children = [] + return + + if type(html) == str: + html = atom.core.parse(html) + if not html.namespace: + html.namespace = XHTML_NAMESPACE + + self.children = [html] + + html = property(_get_html, _set_html) + + +class Summary(atom.data.Summary): + """Google Sites version of <atom:summary>.""" + + def __init__(self, html=None, type=None, text=None, **kwargs): + if type is None and html: + type = 'xhtml' + + super(Summary, self).__init__(type=type, text=text, **kwargs) + if html is not None: + self.html = html + + def _get_html(self): + if self.children: + return self.children[0] + else: + return '' + + def _set_html(self, html): + if not html: + self.children = [] + return + + if type(html) == str: + html = atom.core.parse(html) + if not html.namespace: + html.namespace = XHTML_NAMESPACE + + self.children = [html] + + html = property(_get_html, _set_html) + + +class BaseSiteEntry(gdata.data.GDEntry): + """Google Sites Entry.""" + + def __init__(self, kind=None, **kwargs): + super(BaseSiteEntry, self).__init__(**kwargs) + if kind is not None: + self.category.append( + atom.data.Category(scheme=SITES_KIND_SCHEME, + term='%s#%s' % (SITES_NAMESPACE, kind), + label=kind)) + + def __find_category_scheme(self, scheme): + for category in self.category: + if category.scheme == scheme: + return category + return None + + def kind(self): + kind = self.__find_category_scheme(SITES_KIND_SCHEME) + if kind is not None: + return kind.term[len(SITES_NAMESPACE) + 1:] + else: + return None + + Kind = kind + + def get_node_id(self): + return self.id.text[self.id.text.rfind('/') + 1:] + + GetNodeId = get_node_id + + def find_parent_link(self): + return self.find_url(SITES_PARENT_LINK_REL) + + FindParentLink = find_parent_link + + def is_deleted(self): + return self.deleted is not None + + IsDeleted = is_deleted + + +class ContentEntry(BaseSiteEntry): + """Google Sites Content Entry.""" + content = Content + deleted = Deleted + publisher = Publisher + in_reply_to = InReplyTo + worksheet = Worksheet + header = Header + data = Data + field = [Field] + revision = Revision + page_name = PageName + feed_link = gdata.data.FeedLink + + def find_revison_link(self): + return self.find_url(SITES_REVISION_LINK_REL) + + FindRevisionLink = find_revison_link + + +class ContentFeed(gdata.data.GDFeed): + """Google Sites Content Feed. + + The Content feed is a feed containing the current, editable site content. 
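+
+  Example (illustrative; assumes a feed already fetched, e.g. with
+  SitesClient.GetContentFeed()):
+
+    for page in feed.GetWebpages():
+      print page.title.text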
+ """ + entry = [ContentEntry] + + def __get_entry_type(self, kind): + matches = [] + for entry in self.entry: + if entry.Kind() == kind: + matches.append(entry) + return matches + + def get_announcements(self): + return self.__get_entry_type('announcement') + + GetAnnouncements = get_announcements + + def get_announcement_pages(self): + return self.__get_entry_type('announcementspage') + + GetAnnouncementPages = get_announcement_pages + + def get_attachments(self): + return self.__get_entry_type('attachment') + + GetAttachments = get_attachments + + def get_comments(self): + return self.__get_entry_type('comment') + + GetComments = get_comments + + def get_file_cabinets(self): + return self.__get_entry_type('filecabinet') + + GetFileCabinets = get_file_cabinets + + def get_list_items(self): + return self.__get_entry_type('listitem') + + GetListItems = get_list_items + + def get_list_pages(self): + return self.__get_entry_type('listpage') + + GetListPages = get_list_pages + + def get_webpages(self): + return self.__get_entry_type('webpage') + + GetWebpages = get_webpages + + def get_webattachments(self): + return self.__get_entry_type('webattachment') + + GetWebattachments = get_webattachments + + +class ActivityEntry(BaseSiteEntry): + """Google Sites Activity Entry.""" + summary = Summary + + +class ActivityFeed(gdata.data.GDFeed): + """Google Sites Activity Feed. + + The Activity feed is a feed containing recent Site activity. + """ + entry = [ActivityEntry] + + +class RevisionEntry(BaseSiteEntry): + """Google Sites Revision Entry.""" + content = Content + + +class RevisionFeed(gdata.data.GDFeed): + """Google Sites Revision Feed. + + The Activity feed is a feed containing recent Site activity. + """ + entry = [RevisionEntry] + + +class SiteEntry(gdata.data.GDEntry): + """Google Sites Site Feed Entry.""" + site_name = SiteName + theme = Theme + + def find_source_link(self): + return self.find_url(SITES_SOURCE_LINK_REL) + + FindSourceLink = find_source_link + + +class SiteFeed(gdata.data.GDFeed): + """Google Sites Site Feed. + + The Site feed can be used to list a user's sites and create new sites. + """ + entry = [SiteEntry] + + +class AclEntry(gdata.acl.data.AclEntry): + """Google Sites ACL Entry.""" + + +class AclFeed(gdata.acl.data.AclFeed): + """Google Sites ACL Feed. + + The ACL feed can be used to modify the sharing permissions of a Site. + """ + entry = [AclEntry] diff --git a/gam/gdata/analytics/spreadsheet/__init__.py b/gam/gdata/analytics/spreadsheet/__init__.py new file mode 100755 index 00000000000..e9a0fb3dc7c --- /dev/null +++ b/gam/gdata/analytics/spreadsheet/__init__.py @@ -0,0 +1,474 @@ +#!/usr/bin/python +# +# Copyright (C) 2007 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Contains extensions to Atom objects used with Google Spreadsheets. 
+""" + +__author__ = 'api.laurabeth@gmail.com (Laura Beth Lincoln)' + + +try: + from xml.etree import cElementTree as ElementTree +except ImportError: + try: + import cElementTree as ElementTree + except ImportError: + try: + from xml.etree import ElementTree + except ImportError: + from elementtree import ElementTree +import atom +import gdata +import re +import string + + +# XML namespaces which are often used in Google Spreadsheets entities. +GSPREADSHEETS_NAMESPACE = 'http://schemas.google.com/spreadsheets/2006' +GSPREADSHEETS_TEMPLATE = '{http://schemas.google.com/spreadsheets/2006}%s' + +GSPREADSHEETS_EXTENDED_NAMESPACE = ('http://schemas.google.com/spreadsheets' + '/2006/extended') +GSPREADSHEETS_EXTENDED_TEMPLATE = ('{http://schemas.google.com/spreadsheets' + '/2006/extended}%s') + + +class ColCount(atom.AtomBase): + """The Google Spreadsheets colCount element """ + + _tag = 'colCount' + _namespace = GSPREADSHEETS_NAMESPACE + _children = atom.AtomBase._children.copy() + _attributes = atom.AtomBase._attributes.copy() + + def __init__(self, text=None, extension_elements=None, + extension_attributes=None): + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + +def ColCountFromString(xml_string): + return atom.CreateClassFromXMLString(ColCount, xml_string) + + +class RowCount(atom.AtomBase): + """The Google Spreadsheets rowCount element """ + + _tag = 'rowCount' + _namespace = GSPREADSHEETS_NAMESPACE + _children = atom.AtomBase._children.copy() + _attributes = atom.AtomBase._attributes.copy() + + def __init__(self, text=None, extension_elements=None, + extension_attributes=None): + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + +def RowCountFromString(xml_string): + return atom.CreateClassFromXMLString(RowCount, xml_string) + + +class Cell(atom.AtomBase): + """The Google Spreadsheets cell element """ + + _tag = 'cell' + _namespace = GSPREADSHEETS_NAMESPACE + _children = atom.AtomBase._children.copy() + _attributes = atom.AtomBase._attributes.copy() + _attributes['row'] = 'row' + _attributes['col'] = 'col' + _attributes['inputValue'] = 'inputValue' + _attributes['numericValue'] = 'numericValue' + + def __init__(self, text=None, row=None, col=None, inputValue=None, + numericValue=None, extension_elements=None, extension_attributes=None): + self.text = text + self.row = row + self.col = col + self.inputValue = inputValue + self.numericValue = numericValue + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + +def CellFromString(xml_string): + return atom.CreateClassFromXMLString(Cell, xml_string) + + +class Custom(atom.AtomBase): + """The Google Spreadsheets custom element""" + + _namespace = GSPREADSHEETS_EXTENDED_NAMESPACE + _children = atom.AtomBase._children.copy() + _attributes = atom.AtomBase._attributes.copy() + + def __init__(self, column=None, text=None, extension_elements=None, + extension_attributes=None): + self.column = column # The name of the column + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + def _BecomeChildElement(self, tree): + new_child = ElementTree.Element('') + tree.append(new_child) + new_child.tag = '{%s}%s' % (self.__class__._namespace, + self.column) + self._AddMembersToElementTree(new_child) + + def _ToElementTree(self): + new_tree = 
ElementTree.Element('{%s}%s' % (self.__class__._namespace, + self.column)) + self._AddMembersToElementTree(new_tree) + return new_tree + + def _HarvestElementTree(self, tree): + namespace_uri, local_tag = string.split(tree.tag[1:], "}", 1) + self.column = local_tag + # Fill in the instance members from the contents of the XML tree. + for child in tree: + self._ConvertElementTreeToMember(child) + for attribute, value in tree.attrib.iteritems(): + self._ConvertElementAttributeToMember(attribute, value) + self.text = tree.text + + +def CustomFromString(xml_string): + element_tree = ElementTree.fromstring(xml_string) + return _CustomFromElementTree(element_tree) + + +def _CustomFromElementTree(element_tree): + namespace_uri, local_tag = string.split(element_tree.tag[1:], "}", 1) + if namespace_uri == GSPREADSHEETS_EXTENDED_NAMESPACE: + new_custom = Custom() + new_custom._HarvestElementTree(element_tree) + new_custom.column = local_tag + return new_custom + return None + + + + + +class SpreadsheetsSpreadsheet(gdata.GDataEntry): + """A Google Spreadsheets flavor of a Spreadsheet Atom Entry """ + + _tag = 'entry' + _namespace = atom.ATOM_NAMESPACE + _children = gdata.GDataEntry._children.copy() + _attributes = gdata.GDataEntry._attributes.copy() + + def __init__(self, author=None, category=None, content=None, + contributor=None, atom_id=None, link=None, published=None, rights=None, + source=None, summary=None, title=None, control=None, updated=None, + text=None, extension_elements=None, extension_attributes=None): + self.author = author or [] + self.category = category or [] + self.content = content + self.contributor = contributor or [] + self.id = atom_id + self.link = link or [] + self.published = published + self.rights = rights + self.source = source + self.summary = summary + self.control = control + self.title = title + self.updated = updated + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + +def SpreadsheetsSpreadsheetFromString(xml_string): + return atom.CreateClassFromXMLString(SpreadsheetsSpreadsheet, + xml_string) + + +class SpreadsheetsWorksheet(gdata.GDataEntry): + """A Google Spreadsheets flavor of a Worksheet Atom Entry """ + + _tag = 'entry' + _namespace = atom.ATOM_NAMESPACE + _children = gdata.GDataEntry._children.copy() + _attributes = gdata.GDataEntry._attributes.copy() + _children['{%s}rowCount' % GSPREADSHEETS_NAMESPACE] = ('row_count', + RowCount) + _children['{%s}colCount' % GSPREADSHEETS_NAMESPACE] = ('col_count', + ColCount) + + def __init__(self, author=None, category=None, content=None, + contributor=None, atom_id=None, link=None, published=None, rights=None, + source=None, summary=None, title=None, control=None, updated=None, + row_count=None, col_count=None, text=None, extension_elements=None, + extension_attributes=None): + self.author = author or [] + self.category = category or [] + self.content = content + self.contributor = contributor or [] + self.id = atom_id + self.link = link or [] + self.published = published + self.rights = rights + self.source = source + self.summary = summary + self.control = control + self.title = title + self.updated = updated + self.row_count = row_count + self.col_count = col_count + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + +def SpreadsheetsWorksheetFromString(xml_string): + return atom.CreateClassFromXMLString(SpreadsheetsWorksheet, + xml_string) + + +class 
SpreadsheetsCell(gdata.BatchEntry): + """A Google Spreadsheets flavor of a Cell Atom Entry """ + + _tag = 'entry' + _namespace = atom.ATOM_NAMESPACE + _children = gdata.BatchEntry._children.copy() + _attributes = gdata.BatchEntry._attributes.copy() + _children['{%s}cell' % GSPREADSHEETS_NAMESPACE] = ('cell', Cell) + + def __init__(self, author=None, category=None, content=None, + contributor=None, atom_id=None, link=None, published=None, rights=None, + source=None, summary=None, title=None, control=None, updated=None, + cell=None, batch_operation=None, batch_id=None, batch_status=None, + text=None, extension_elements=None, extension_attributes=None): + self.author = author or [] + self.category = category or [] + self.content = content + self.contributor = contributor or [] + self.id = atom_id + self.link = link or [] + self.published = published + self.rights = rights + self.source = source + self.summary = summary + self.control = control + self.title = title + self.batch_operation = batch_operation + self.batch_id = batch_id + self.batch_status = batch_status + self.updated = updated + self.cell = cell + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + +def SpreadsheetsCellFromString(xml_string): + return atom.CreateClassFromXMLString(SpreadsheetsCell, + xml_string) + + +class SpreadsheetsList(gdata.GDataEntry): + """A Google Spreadsheets flavor of a List Atom Entry """ + + _tag = 'entry' + _namespace = atom.ATOM_NAMESPACE + _children = gdata.GDataEntry._children.copy() + _attributes = gdata.GDataEntry._attributes.copy() + + def __init__(self, author=None, category=None, content=None, + contributor=None, atom_id=None, link=None, published=None, rights=None, + source=None, summary=None, title=None, control=None, updated=None, + custom=None, + text=None, extension_elements=None, extension_attributes=None): + self.author = author or [] + self.category = category or [] + self.content = content + self.contributor = contributor or [] + self.id = atom_id + self.link = link or [] + self.published = published + self.rights = rights + self.source = source + self.summary = summary + self.control = control + self.title = title + self.updated = updated + self.custom = custom or {} + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + # We need to overwrite _ConvertElementTreeToMember to add special logic to + # convert custom attributes to members + def _ConvertElementTreeToMember(self, child_tree): + # Find the element's tag in this class's list of child members + if self.__class__._children.has_key(child_tree.tag): + member_name = self.__class__._children[child_tree.tag][0] + member_class = self.__class__._children[child_tree.tag][1] + # If the class member is supposed to contain a list, make sure the + # matching member is set to a list, then append the new member + # instance to the list. + if isinstance(member_class, list): + if getattr(self, member_name) is None: + setattr(self, member_name, []) + getattr(self, member_name).append(atom._CreateClassFromElementTree( + member_class[0], child_tree)) + else: + setattr(self, member_name, + atom._CreateClassFromElementTree(member_class, child_tree)) + elif child_tree.tag.find('{%s}' % GSPREADSHEETS_EXTENDED_NAMESPACE) == 0: + # If this is in the custom namespace, make add it to the custom dict. 
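+      # For example (illustrative), a child element such as
+      #   <gsx:name>Bob</gsx:name>
+      # in the extended ('gsx') namespace is parsed into a Custom instance
+      # and stored as self.custom['name'] rather than as a regular member.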
+      name = child_tree.tag[child_tree.tag.index('}')+1:]
+      custom = _CustomFromElementTree(child_tree)
+      if custom:
+        self.custom[name] = custom
+    else:
+      atom.ExtensionContainer._ConvertElementTreeToMember(self, child_tree)
+
+  # We need to overwrite _AddMembersToElementTree to add special logic to
+  # convert custom members to XML nodes.
+  def _AddMembersToElementTree(self, tree):
+    # Convert the members of this class which are XML child nodes.
+    # This uses the class's _children dictionary to find the members which
+    # should become XML child nodes.
+    member_node_names = [values[0] for tag, values in
+                         self.__class__._children.iteritems()]
+    for member_name in member_node_names:
+      member = getattr(self, member_name)
+      if member is None:
+        pass
+      elif isinstance(member, list):
+        for instance in member:
+          instance._BecomeChildElement(tree)
+      else:
+        member._BecomeChildElement(tree)
+    # Convert the members of this class which are XML attributes.
+    for xml_attribute, member_name in self.__class__._attributes.iteritems():
+      member = getattr(self, member_name)
+      if member is not None:
+        tree.attrib[xml_attribute] = member
+    # Convert all special custom item attributes to nodes.
+    for name, custom in self.custom.iteritems():
+      custom._BecomeChildElement(tree)
+    # Lastly, call the ExtensionContainer's _AddMembersToElementTree to
+    # convert any extension attributes.
+    atom.ExtensionContainer._AddMembersToElementTree(self, tree)
+
+
+def SpreadsheetsListFromString(xml_string):
+  return atom.CreateClassFromXMLString(SpreadsheetsList,
+      xml_string)
+
+
+class SpreadsheetsSpreadsheetsFeed(gdata.GDataFeed):
+  """A feed containing Google Spreadsheets Spreadsheets"""
+
+  _tag = 'feed'
+  _namespace = atom.ATOM_NAMESPACE
+  _children = gdata.GDataFeed._children.copy()
+  _attributes = gdata.GDataFeed._attributes.copy()
+  _children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry',
+      [SpreadsheetsSpreadsheet])
+
+
+def SpreadsheetsSpreadsheetsFeedFromString(xml_string):
+  return atom.CreateClassFromXMLString(SpreadsheetsSpreadsheetsFeed,
+      xml_string)
+
+
+class SpreadsheetsWorksheetsFeed(gdata.GDataFeed):
+  """A feed containing Google Spreadsheets Worksheets"""
+
+  _tag = 'feed'
+  _namespace = atom.ATOM_NAMESPACE
+  _children = gdata.GDataFeed._children.copy()
+  _attributes = gdata.GDataFeed._attributes.copy()
+  _children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry',
+      [SpreadsheetsWorksheet])
+
+
+def SpreadsheetsWorksheetsFeedFromString(xml_string):
+  return atom.CreateClassFromXMLString(SpreadsheetsWorksheetsFeed,
+      xml_string)
+
+
+class SpreadsheetsCellsFeed(gdata.BatchFeed):
+  """A feed containing Google Spreadsheets Cells"""
+
+  _tag = 'feed'
+  _namespace = atom.ATOM_NAMESPACE
+  _children = gdata.BatchFeed._children.copy()
+  _attributes = gdata.BatchFeed._attributes.copy()
+  _children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry',
+      [SpreadsheetsCell])
+  _children['{%s}rowCount' % GSPREADSHEETS_NAMESPACE] = ('row_count',
+      RowCount)
+  _children['{%s}colCount' % GSPREADSHEETS_NAMESPACE] = ('col_count',
+      ColCount)
+
+  def __init__(self, author=None, category=None, contributor=None,
+               generator=None, icon=None, atom_id=None, link=None, logo=None,
+               rights=None, subtitle=None, title=None, updated=None,
+               entry=None, total_results=None, start_index=None,
+               items_per_page=None, extension_elements=None,
+               extension_attributes=None, text=None, row_count=None,
+               col_count=None, interrupted=None):
+
gdata.BatchFeed.__init__(self, author=author, category=category, + contributor=contributor, generator=generator, + icon=icon, atom_id=atom_id, link=link, + logo=logo, rights=rights, subtitle=subtitle, + title=title, updated=updated, entry=entry, + total_results=total_results, + start_index=start_index, + items_per_page=items_per_page, + extension_elements=extension_elements, + extension_attributes=extension_attributes, + text=text, interrupted=interrupted) + self.row_count = row_count + self.col_count = col_count + + def GetBatchLink(self): + for link in self.link: + if link.rel == 'http://schemas.google.com/g/2005#batch': + return link + return None + + +def SpreadsheetsCellsFeedFromString(xml_string): + return atom.CreateClassFromXMLString(SpreadsheetsCellsFeed, + xml_string) + + +class SpreadsheetsListFeed(gdata.GDataFeed): + """A feed containing Google Spreadsheets Spreadsheets""" + + _tag = 'feed' + _namespace = atom.ATOM_NAMESPACE + _children = gdata.GDataFeed._children.copy() + _attributes = gdata.GDataFeed._attributes.copy() + _children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry', + [SpreadsheetsList]) + + +def SpreadsheetsListFeedFromString(xml_string): + return atom.CreateClassFromXMLString(SpreadsheetsListFeed, + xml_string) diff --git a/gam/gdata/analytics/spreadsheet/service.py b/gam/gdata/analytics/spreadsheet/service.py new file mode 100755 index 00000000000..1fe3eb7d727 --- /dev/null +++ b/gam/gdata/analytics/spreadsheet/service.py @@ -0,0 +1,487 @@ +#!/usr/bin/python +# +# Copyright (C) 2007 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""SpreadsheetsService extends the GDataService to streamline Google +Spreadsheets operations. + + SpreadsheetService: Provides methods to query feeds and manipulate items. + Extends GDataService. + + DictionaryToParamList: Function which converts a dictionary into a list of + URL arguments (represented as strings). This is a + utility function used in CRUD operations. +""" + +__author__ = 'api.laurabeth@gmail.com (Laura Beth Lincoln)' + + +import gdata +import atom.service +import gdata.service +import gdata.spreadsheet +import atom + + +class Error(Exception): + """Base class for exceptions in this module.""" + pass + + +class RequestError(Error): + pass + + +class SpreadsheetsService(gdata.service.GDataService): + """Client for the Google Spreadsheets service.""" + + def __init__(self, email=None, password=None, source=None, + server='spreadsheets.google.com', additional_headers=None, + **kwargs): + """Creates a client for the Google Spreadsheets service. + + Args: + email: string (optional) The user's email address, used for + authentication. + password: string (optional) The user's password. + source: string (optional) The name of the user's application. + server: string (optional) The name of the server to which a connection + will be opened. Default value: 'spreadsheets.google.com'. + **kwargs: The other parameters to pass to gdata.service.GDataService + constructor. 
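+
+    Example (illustrative usage sketch; the email, password and source
+    values below are placeholders, not defaults of this library):
+
+      client = SpreadsheetsService(email='jo@example.com',
+                                   password='secret',
+                                   source='exampleCo-exampleApp-1')
+      client.ProgrammaticLogin()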
+ """ + gdata.service.GDataService.__init__( + self, email=email, password=password, service='wise', source=source, + server=server, additional_headers=additional_headers, **kwargs) + + def GetSpreadsheetsFeed(self, key=None, query=None, visibility='private', + projection='full'): + """Gets a spreadsheets feed or a specific entry if a key is defined + Args: + key: string (optional) The spreadsheet key defined in /ccc?key= + query: DocumentQuery (optional) Query parameters + + Returns: + If there is no key, then a SpreadsheetsSpreadsheetsFeed. + If there is a key, then a SpreadsheetsSpreadsheet. + """ + + base_uri = 'https://%s/feeds/spreadsheets' % self.server + uri = ('%s/%s/%s' + % (base_uri, visibility, projection)) + + if key is not None: + uri = '%s/%s' % (uri, key) + + if query != None: + query.feed = base_uri + query.visibility = visibility + query.projection = projection + uri = query.ToUri() + + if key: + return self.Get(uri, + converter=gdata.spreadsheet.SpreadsheetsSpreadsheetFromString) + else: + return self.Get(uri, + converter=gdata.spreadsheet.SpreadsheetsSpreadsheetsFeedFromString) + + def GetWorksheetsFeed(self, key, wksht_id=None, query=None, + visibility='private', projection='full'): + """Gets a worksheets feed or a specific entry if a wksht is defined + Args: + key: string The spreadsheet key defined in /ccc?key= + wksht_id: string (optional) The id for a specific worksheet entry + query: DocumentQuery (optional) Query parameters + + Returns: + If there is no wksht_id, then a SpreadsheetsWorksheetsFeed. + If there is a wksht_id, then a SpreadsheetsWorksheet. + """ + + uri = ('https://%s/feeds/worksheets/%s/%s/%s' + % (self.server, key, visibility, projection)) + + if wksht_id != None: + uri = '%s/%s' % (uri, wksht_id) + + if query != None: + query.feed = uri + uri = query.ToUri() + + if wksht_id: + return self.Get(uri, + converter=gdata.spreadsheet.SpreadsheetsWorksheetFromString) + else: + return self.Get(uri, + converter=gdata.spreadsheet.SpreadsheetsWorksheetsFeedFromString) + + def AddWorksheet(self, title, row_count, col_count, key): + """Creates a new worksheet in the desired spreadsheet. + + The new worksheet is appended to the end of the list of worksheets. The + new worksheet will only have the available number of columns and cells + specified. + + Args: + title: str The title which will be displayed in the list of worksheets. + row_count: int or str The number of rows in the new worksheet. + col_count: int or str The number of columns in the new worksheet. + key: str The spreadsheet key to the spreadsheet to which the new + worksheet should be added. + + Returns: + A SpreadsheetsWorksheet if the new worksheet was created succesfully. + """ + new_worksheet = gdata.spreadsheet.SpreadsheetsWorksheet( + title=atom.Title(text=title), + row_count=gdata.spreadsheet.RowCount(text=str(row_count)), + col_count=gdata.spreadsheet.ColCount(text=str(col_count))) + return self.Post(new_worksheet, + 'https://%s/feeds/worksheets/%s/private/full' % (self.server, key), + converter=gdata.spreadsheet.SpreadsheetsWorksheetFromString) + + def UpdateWorksheet(self, worksheet_entry, url=None): + """Changes the size and/or title of the desired worksheet. + + Args: + worksheet_entry: SpreadsheetWorksheet The new contents of the + worksheet. + url: str (optional) The URL to which the edited worksheet entry should + be sent. If the url is None, the edit URL from the worksheet will + be used. + + Returns: + A SpreadsheetsWorksheet with the new information about the worksheet. 
+ """ + target_url = url or worksheet_entry.GetEditLink().href + return self.Put(worksheet_entry, target_url, + converter=gdata.spreadsheet.SpreadsheetsWorksheetFromString) + + def DeleteWorksheet(self, worksheet_entry=None, url=None): + """Removes the desired worksheet from the spreadsheet + + Args: + worksheet_entry: SpreadsheetWorksheet (optional) The worksheet to + be deleted. If this is none, then the DELETE reqest is sent to + the url specified in the url parameter. + url: str (optaional) The URL to which the DELETE request should be + sent. If left as None, the worksheet's edit URL is used. + + Returns: + True if the worksheet was deleted successfully. + """ + if url: + target_url = url + else: + target_url = worksheet_entry.GetEditLink().href + return self.Delete(target_url) + + def GetCellsFeed(self, key, wksht_id='default', cell=None, query=None, + visibility='private', projection='full'): + """Gets a cells feed or a specific entry if a cell is defined + Args: + key: string The spreadsheet key defined in /ccc?key= + wksht_id: string The id for a specific worksheet entry + cell: string (optional) The R1C1 address of the cell + query: DocumentQuery (optional) Query parameters + + Returns: + If there is no cell, then a SpreadsheetsCellsFeed. + If there is a cell, then a SpreadsheetsCell. + """ + + uri = ('https://%s/feeds/cells/%s/%s/%s/%s' + % (self.server, key, wksht_id, visibility, projection)) + + if cell != None: + uri = '%s/%s' % (uri, cell) + + if query != None: + query.feed = uri + uri = query.ToUri() + + if cell: + return self.Get(uri, + converter=gdata.spreadsheet.SpreadsheetsCellFromString) + else: + return self.Get(uri, + converter=gdata.spreadsheet.SpreadsheetsCellsFeedFromString) + + def GetListFeed(self, key, wksht_id='default', row_id=None, query=None, + visibility='private', projection='full'): + """Gets a list feed or a specific entry if a row_id is defined + Args: + key: string The spreadsheet key defined in /ccc?key= + wksht_id: string The id for a specific worksheet entry + row_id: string (optional) The row_id of a row in the list + query: DocumentQuery (optional) Query parameters + + Returns: + If there is no row_id, then a SpreadsheetsListFeed. + If there is a row_id, then a SpreadsheetsList. + """ + + uri = ('https://%s/feeds/list/%s/%s/%s/%s' + % (self.server, key, wksht_id, visibility, projection)) + + if row_id is not None: + uri = '%s/%s' % (uri, row_id) + + if query is not None: + query.feed = uri + uri = query.ToUri() + + if row_id: + return self.Get(uri, + converter=gdata.spreadsheet.SpreadsheetsListFromString) + else: + return self.Get(uri, + converter=gdata.spreadsheet.SpreadsheetsListFeedFromString) + + def UpdateCell(self, row, col, inputValue, key, wksht_id='default'): + """Updates an existing cell. + + Args: + row: int The row the cell to be editted is in + col: int The column the cell to be editted is in + inputValue: str the new value of the cell + key: str The key of the spreadsheet in which this cell resides. + wksht_id: str The ID of the worksheet which holds this cell. 
+
+    Returns:
+      The updated cell entry
+    """
+    row = str(row)
+    col = str(col)
+    # Make the new cell.
+    new_cell = gdata.spreadsheet.Cell(row=row, col=col, inputValue=inputValue)
+    # Get the edit URI and PUT the updated entry.
+    cell = 'R%sC%s' % (row, col)
+    entry = self.GetCellsFeed(key, wksht_id, cell)
+    for a_link in entry.link:
+      if a_link.rel == 'edit':
+        entry.cell = new_cell
+        return self.Put(entry, a_link.href,
+            converter=gdata.spreadsheet.SpreadsheetsCellFromString)
+
+  def _GenerateCellsBatchUrl(self, spreadsheet_key, worksheet_id):
+    return ('https://spreadsheets.google.com/feeds/cells/%s/%s/'
+            'private/full/batch' % (spreadsheet_key, worksheet_id))
+
+  def ExecuteBatch(self, batch_feed, url=None, spreadsheet_key=None,
+                   worksheet_id=None,
+                   converter=gdata.spreadsheet.SpreadsheetsCellsFeedFromString):
+    """Sends a batch request feed to the server.
+
+    The batch request needs to be sent to the batch URL for a particular
+    worksheet. You can specify the worksheet by providing the spreadsheet_key
+    and worksheet_id, or by sending the URL from the cells feed's batch link.
+
+    Args:
+      batch_feed: gdata.spreadsheet.SpreadsheetsCellsFeed A feed containing
+          BatchEntry elements which contain the desired CRUD operation and
+          any necessary data to modify a cell.
+      url: str (optional) The batch URL for the cells feed to which these
+          changes should be applied. This can be found by calling
+          cells_feed.GetBatchLink().href.
+      spreadsheet_key: str (optional) Used to generate the batch request URL
+          if the url argument is None. If using the spreadsheet key to
+          generate the URL, the worksheet id is also required.
+      worksheet_id: str (optional) Used if the url is not provided; it is
+          part of the batch feed target URL. This is used together with the
+          spreadsheet key.
+      converter: Function (optional) Function to be executed on the server's
+          response. This function should take one string as a parameter. The
+          default value is SpreadsheetsCellsFeedFromString, which will turn
+          the result into a gdata.spreadsheet.SpreadsheetsCellsFeed object.
+
+    Returns:
+      A gdata.BatchFeed containing the results.
+    """
+
+    if url is None:
+      url = self._GenerateCellsBatchUrl(spreadsheet_key, worksheet_id)
+    return self.Post(batch_feed, url, converter=converter)
+
+  def InsertRow(self, row_data, key, wksht_id='default'):
+    """Inserts a new row with the provided data.
+
+    Args:
+      row_data: dict A dictionary of column header to row data
+      key: str The spreadsheet key for the spreadsheet which should receive
+          the new row.
+      wksht_id: str The ID of the worksheet which should receive the new row.
+
+    Returns:
+      The inserted row
+    """
+    new_entry = gdata.spreadsheet.SpreadsheetsList()
+    for k, v in row_data.iteritems():
+      new_custom = gdata.spreadsheet.Custom()
+      new_custom.column = k
+      new_custom.text = v
+      new_entry.custom[new_custom.column] = new_custom
+    # Generate the post URL for the worksheet which will receive the new entry.
+    post_url = 'https://spreadsheets.google.com/feeds/list/%s/%s/private/full'%(
+        key, wksht_id)
+    return self.Post(new_entry, post_url,
+        converter=gdata.spreadsheet.SpreadsheetsListFromString)
+
+  def UpdateRow(self, entry, new_row_data):
+    """Updates a row with the provided data.
+
+    If you want to add additional information to a row, it is often
+    easier to change the values in entry.custom, then use the Put
+    method instead of UpdateRow. This UpdateRow method will replace
+    the contents of the row with new_row_data - it will change all columns,
+    not just the columns specified in the new_row_data dict.
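+
+    Example (illustrative; 'row_entry' is a SpreadsheetsList entry previously
+    fetched with GetListFeed, and the column names are placeholders):
+
+      row_entry = client.UpdateRow(row_entry,
+          {'name': 'Bob', 'email': 'bob2@example.com'})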
+ + Args: + entry: gdata.spreadsheet.SpreadsheetsList The entry to be updated + new_row_data: dict A dictionary of column header to row data + + Returns: + The updated row + """ + entry.custom = {} + for k, v in new_row_data.iteritems(): + new_custom = gdata.spreadsheet.Custom() + new_custom.column = k + new_custom.text = v + entry.custom[k] = new_custom + for a_link in entry.link: + if a_link.rel == 'edit': + return self.Put(entry, a_link.href, + converter=gdata.spreadsheet.SpreadsheetsListFromString) + + def DeleteRow(self, entry): + """Deletes a row, the provided entry + + Args: + entry: gdata.spreadsheet.SpreadsheetsList The row to be deleted + + Returns: + The delete response + """ + for a_link in entry.link: + if a_link.rel == 'edit': + return self.Delete(a_link.href) + + +class DocumentQuery(gdata.service.Query): + + def _GetTitleQuery(self): + return self['title'] + + def _SetTitleQuery(self, document_query): + self['title'] = document_query + + title = property(_GetTitleQuery, _SetTitleQuery, + doc="""The title query parameter""") + + def _GetTitleExactQuery(self): + return self['title-exact'] + + def _SetTitleExactQuery(self, document_query): + self['title-exact'] = document_query + + title_exact = property(_GetTitleExactQuery, _SetTitleExactQuery, + doc="""The title-exact query parameter""") + + +class CellQuery(gdata.service.Query): + + def _GetMinRowQuery(self): + return self['min-row'] + + def _SetMinRowQuery(self, cell_query): + self['min-row'] = cell_query + + min_row = property(_GetMinRowQuery, _SetMinRowQuery, + doc="""The min-row query parameter""") + + def _GetMaxRowQuery(self): + return self['max-row'] + + def _SetMaxRowQuery(self, cell_query): + self['max-row'] = cell_query + + max_row = property(_GetMaxRowQuery, _SetMaxRowQuery, + doc="""The max-row query parameter""") + + def _GetMinColQuery(self): + return self['min-col'] + + def _SetMinColQuery(self, cell_query): + self['min-col'] = cell_query + + min_col = property(_GetMinColQuery, _SetMinColQuery, + doc="""The min-col query parameter""") + + def _GetMaxColQuery(self): + return self['max-col'] + + def _SetMaxColQuery(self, cell_query): + self['max-col'] = cell_query + + max_col = property(_GetMaxColQuery, _SetMaxColQuery, + doc="""The max-col query parameter""") + + def _GetRangeQuery(self): + return self['range'] + + def _SetRangeQuery(self, cell_query): + self['range'] = cell_query + + range = property(_GetRangeQuery, _SetRangeQuery, + doc="""The range query parameter""") + + def _GetReturnEmptyQuery(self): + return self['return-empty'] + + def _SetReturnEmptyQuery(self, cell_query): + self['return-empty'] = cell_query + + return_empty = property(_GetReturnEmptyQuery, _SetReturnEmptyQuery, + doc="""The return-empty query parameter""") + + +class ListQuery(gdata.service.Query): + + def _GetSpreadsheetQuery(self): + return self['sq'] + + def _SetSpreadsheetQuery(self, list_query): + self['sq'] = list_query + + sq = property(_GetSpreadsheetQuery, _SetSpreadsheetQuery, + doc="""The sq query parameter""") + + def _GetOrderByQuery(self): + return self['orderby'] + + def _SetOrderByQuery(self, list_query): + self['orderby'] = list_query + + orderby = property(_GetOrderByQuery, _SetOrderByQuery, + doc="""The orderby query parameter""") + + def _GetReverseQuery(self): + return self['reverse'] + + def _SetReverseQuery(self, list_query): + self['reverse'] = list_query + + reverse = property(_GetReverseQuery, _SetReverseQuery, + doc="""The reverse query parameter""") diff --git a/gam/gdata/analytics/spreadsheet/text_db.py 
b/gam/gdata/analytics/spreadsheet/text_db.py new file mode 100755 index 00000000000..a8de5463c2b --- /dev/null +++ b/gam/gdata/analytics/spreadsheet/text_db.py @@ -0,0 +1,559 @@ +#!/usr/bin/python +# +# Copyright Google 2007-2008, all rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +import StringIO +import gdata +import gdata.service +import gdata.spreadsheet +import gdata.spreadsheet.service +import gdata.docs +import gdata.docs.service + + +"""Make the Google Documents API feel more like using a database. + +This module contains a client and other classes which make working with the +Google Documents List Data API and the Google Spreadsheets Data API look a +bit more like working with a heirarchical database. Using the DatabaseClient, +you can create or find spreadsheets and use them like a database, with +worksheets representing tables and rows representing records. + +Example Usage: +# Create a new database, a new table, and add records. +client = gdata.spreadsheet.text_db.DatabaseClient(username='jo@example.com', + password='12345') +database = client.CreateDatabase('My Text Database') +table = database.CreateTable('addresses', ['name','email', + 'phonenumber', 'mailingaddress']) +record = table.AddRecord({'name':'Bob', 'email':'bob@example.com', + 'phonenumber':'555-555-1234', 'mailingaddress':'900 Imaginary St.'}) + +# Edit a record +record.content['email'] = 'bob2@example.com' +record.Push() + +# Delete a table +table.Delete + +Warnings: +Care should be exercised when using this module on spreadsheets +which contain formulas. This module treats all rows as containing text and +updating a row will overwrite any formula with the output of the formula. +The intended use case is to allow easy storage of text data in a spreadsheet. + + Error: Domain specific extension of Exception. + BadCredentials: Error raised is username or password was incorrect. + CaptchaRequired: Raised if a login attempt failed and a CAPTCHA challenge + was issued. + DatabaseClient: Communicates with Google Docs APIs servers. + Database: Represents a spreadsheet and interacts with tables. + Table: Represents a worksheet and interacts with records. + RecordResultSet: A list of records in a table. + Record: Represents a row in a worksheet allows manipulation of text data. +""" + + +__author__ = 'api.jscudder (Jeffrey Scudder)' + + +class Error(Exception): + pass + + +class BadCredentials(Error): + pass + + +class CaptchaRequired(Error): + pass + + +class DatabaseClient(object): + """Allows creation and finding of Google Spreadsheets databases. + + The DatabaseClient simplifies the process of creating and finding Google + Spreadsheets and will talk to both the Google Spreadsheets API and the + Google Documents List API. + """ + + def __init__(self, username=None, password=None): + """Constructor for a Database Client. + + If the username and password are present, the constructor will contact + the Google servers to authenticate. 
+ + Args: + username: str (optional) Example: jo@example.com + password: str (optional) + """ + self.__docs_client = gdata.docs.service.DocsService() + self.__spreadsheets_client = ( + gdata.spreadsheet.service.SpreadsheetsService()) + self.SetCredentials(username, password) + + def SetCredentials(self, username, password): + """Attempts to log in to Google APIs using the provided credentials. + + If the username or password are None, the client will not request auth + tokens. + + Args: + username: str (optional) Example: jo@example.com + password: str (optional) + """ + self.__docs_client.email = username + self.__docs_client.password = password + self.__spreadsheets_client.email = username + self.__spreadsheets_client.password = password + if username and password: + try: + self.__docs_client.ProgrammaticLogin() + self.__spreadsheets_client.ProgrammaticLogin() + except gdata.service.CaptchaRequired: + raise CaptchaRequired('Please visit https://www.google.com/accounts/' + 'DisplayUnlockCaptcha to unlock your account.') + except gdata.service.BadAuthentication: + raise BadCredentials('Username or password incorrect.') + + def CreateDatabase(self, name): + """Creates a new Google Spreadsheet with the desired name. + + Args: + name: str The title for the spreadsheet. + + Returns: + A Database instance representing the new spreadsheet. + """ + # Create a Google Spreadsheet to form the foundation of this database. + # Spreadsheet is created by uploading a file to the Google Documents + # List API. + virtual_csv_file = StringIO.StringIO(',,,') + virtual_media_source = gdata.MediaSource(file_handle=virtual_csv_file, content_type='text/csv', content_length=3) + db_entry = self.__docs_client.UploadSpreadsheet(virtual_media_source, name) + return Database(spreadsheet_entry=db_entry, database_client=self) + + def GetDatabases(self, spreadsheet_key=None, name=None): + """Finds spreadsheets which have the unique key or title. + + If querying on the spreadsheet_key there will be at most one result, but + searching by name could yield multiple results. + + Args: + spreadsheet_key: str The unique key for the spreadsheet, this + usually in the the form 'pk23...We' or 'o23...423.12,,,3'. + name: str The title of the spreadsheets. + + Returns: + A list of Database objects representing the desired spreadsheets. + """ + if spreadsheet_key: + db_entry = self.__docs_client.GetDocumentListEntry( + r'/feeds/documents/private/full/spreadsheet%3A' + spreadsheet_key) + return [Database(spreadsheet_entry=db_entry, database_client=self)] + else: + title_query = gdata.docs.service.DocumentQuery() + title_query['title'] = name + db_feed = self.__docs_client.QueryDocumentListFeed(title_query.ToUri()) + matching_databases = [] + for entry in db_feed.entry: + matching_databases.append(Database(spreadsheet_entry=entry, + database_client=self)) + return matching_databases + + def _GetDocsClient(self): + return self.__docs_client + + def _GetSpreadsheetsClient(self): + return self.__spreadsheets_client + + +class Database(object): + """Provides interface to find and create tables. + + The database represents a Google Spreadsheet. + """ + + def __init__(self, spreadsheet_entry=None, database_client=None): + """Constructor for a database object. + + Args: + spreadsheet_entry: gdata.docs.DocumentListEntry The + Atom entry which represents the Google Spreadsheet. The + spreadsheet's key is extracted from the entry and stored as a + member. 
+ database_client: DatabaseClient A client which can talk to the + Google Spreadsheets servers to perform operations on worksheets + within this spreadsheet. + """ + self.entry = spreadsheet_entry + if self.entry: + id_parts = spreadsheet_entry.id.text.split('/') + self.spreadsheet_key = id_parts[-1].replace('spreadsheet%3A', '') + self.client = database_client + + def CreateTable(self, name, fields=None): + """Add a new worksheet to this spreadsheet and fill in column names. + + Args: + name: str The title of the new worksheet. + fields: list of strings The column names which are placed in the + first row of this worksheet. These names are converted into XML + tags by the server. To avoid changes during the translation + process I recommend using all lowercase alphabetic names. For + example ['somelongname', 'theothername'] + + Returns: + Table representing the newly created worksheet. + """ + worksheet = self.client._GetSpreadsheetsClient().AddWorksheet(title=name, + row_count=1, col_count=len(fields), key=self.spreadsheet_key) + return Table(name=name, worksheet_entry=worksheet, + database_client=self.client, + spreadsheet_key=self.spreadsheet_key, fields=fields) + + def GetTables(self, worksheet_id=None, name=None): + """Searches for a worksheet with the specified ID or name. + + The list of results should have one table at most, or no results + if the id or name were not found. + + Args: + worksheet_id: str The ID of the worksheet, example: 'od6' + name: str The title of the worksheet. + + Returns: + A list of length 0 or 1 containing the desired Table. A list is returned + to make this method feel like GetDatabases and GetRecords. + """ + if worksheet_id: + worksheet_entry = self.client._GetSpreadsheetsClient().GetWorksheetsFeed( + self.spreadsheet_key, wksht_id=worksheet_id) + return [Table(name=worksheet_entry.title.text, + worksheet_entry=worksheet_entry, database_client=self.client, + spreadsheet_key=self.spreadsheet_key)] + else: + matching_tables = [] + query = None + if name: + query = gdata.spreadsheet.service.DocumentQuery() + query.title = name + + worksheet_feed = self.client._GetSpreadsheetsClient().GetWorksheetsFeed( + self.spreadsheet_key, query=query) + for entry in worksheet_feed.entry: + matching_tables.append(Table(name=entry.title.text, + worksheet_entry=entry, database_client=self.client, + spreadsheet_key=self.spreadsheet_key)) + return matching_tables + + def Delete(self): + """Deletes the entire database spreadsheet from Google Spreadsheets.""" + entry = self.client._GetDocsClient().Get( + r'http://docs.google.com/feeds/documents/private/full/spreadsheet%3A' + + self.spreadsheet_key) + self.client._GetDocsClient().Delete(entry.GetEditLink().href) + + +class Table(object): + + def __init__(self, name=None, worksheet_entry=None, database_client=None, + spreadsheet_key=None, fields=None): + self.name = name + self.entry = worksheet_entry + id_parts = worksheet_entry.id.text.split('/') + self.worksheet_id = id_parts[-1] + self.spreadsheet_key = spreadsheet_key + self.client = database_client + self.fields = fields or [] + if fields: + self.SetFields(fields) + + def LookupFields(self): + """Queries to find the column names in the first row of the worksheet. + + Useful when you have retrieved the table from the server and you don't + know the column names. 
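+
+    Example (illustrative; 'table' is a Table returned by Database.GetTables):
+
+      table.LookupFields()
+      print table.fields   # e.g. ['name', 'email', 'phonenumber']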
+ """ + if self.entry: + first_row_contents = [] + query = gdata.spreadsheet.service.CellQuery() + query.max_row = '1' + query.min_row = '1' + feed = self.client._GetSpreadsheetsClient().GetCellsFeed( + self.spreadsheet_key, wksht_id=self.worksheet_id, query=query) + for entry in feed.entry: + first_row_contents.append(entry.content.text) + # Get the next set of cells if needed. + next_link = feed.GetNextLink() + while next_link: + feed = self.client._GetSpreadsheetsClient().Get(next_link.href, + converter=gdata.spreadsheet.SpreadsheetsCellsFeedFromString) + for entry in feed.entry: + first_row_contents.append(entry.content.text) + next_link = feed.GetNextLink() + # Convert the contents of the cells to valid headers. + self.fields = ConvertStringsToColumnHeaders(first_row_contents) + + def SetFields(self, fields): + """Changes the contents of the cells in the first row of this worksheet. + + Args: + fields: list of strings The names in the list comprise the + first row of the worksheet. These names are converted into XML + tags by the server. To avoid changes during the translation + process I recommend using all lowercase alphabetic names. For + example ['somelongname', 'theothername'] + """ + # TODO: If the table already had fields, we might want to clear out the, + # current column headers. + self.fields = fields + i = 0 + for column_name in fields: + i = i + 1 + # TODO: speed this up by using a batch request to update cells. + self.client._GetSpreadsheetsClient().UpdateCell(1, i, column_name, + self.spreadsheet_key, self.worksheet_id) + + def Delete(self): + """Deletes this worksheet from the spreadsheet.""" + worksheet = self.client._GetSpreadsheetsClient().GetWorksheetsFeed( + self.spreadsheet_key, wksht_id=self.worksheet_id) + self.client._GetSpreadsheetsClient().DeleteWorksheet( + worksheet_entry=worksheet) + + def AddRecord(self, data): + """Adds a new row to this worksheet. + + Args: + data: dict of strings Mapping of string values to column names. + + Returns: + Record which represents this row of the spreadsheet. + """ + new_row = self.client._GetSpreadsheetsClient().InsertRow(data, + self.spreadsheet_key, wksht_id=self.worksheet_id) + return Record(content=data, row_entry=new_row, + spreadsheet_key=self.spreadsheet_key, worksheet_id=self.worksheet_id, + database_client=self.client) + + def GetRecord(self, row_id=None, row_number=None): + """Gets a single record from the worksheet based on row ID or number. + + Args: + row_id: The ID for the individual row. + row_number: str or int The position of the desired row. Numbering + begins at 1, which refers to the second row in the worksheet since + the first row is used for column names. + + Returns: + Record for the desired row. 
+ """ + if row_id: + row_entry = self.client._GetSpreadsheetsClient().GetListFeed( + self.spreadsheet_key, wksht_id=self.worksheet_id, row_id=row_id) + return Record(content=None, row_entry=row_entry, + spreadsheet_key=self.spreadsheet_key, + worksheet_id=self.worksheet_id, database_client=self.client) + else: + row_query = gdata.spreadsheet.service.ListQuery() + row_query.start_index = str(row_number) + row_query.max_results = '1' + row_feed = self.client._GetSpreadsheetsClient().GetListFeed( + self.spreadsheet_key, wksht_id=self.worksheet_id, query=row_query) + if len(row_feed.entry) >= 1: + return Record(content=None, row_entry=row_feed.entry[0], + spreadsheet_key=self.spreadsheet_key, + worksheet_id=self.worksheet_id, database_client=self.client) + else: + return None + + def GetRecords(self, start_row, end_row): + """Gets all rows between the start and end row numbers inclusive. + + Args: + start_row: str or int + end_row: str or int + + Returns: + RecordResultSet for the desired rows. + """ + start_row = int(start_row) + end_row = int(end_row) + max_rows = end_row - start_row + 1 + row_query = gdata.spreadsheet.service.ListQuery() + row_query.start_index = str(start_row) + row_query.max_results = str(max_rows) + rows_feed = self.client._GetSpreadsheetsClient().GetListFeed( + self.spreadsheet_key, wksht_id=self.worksheet_id, query=row_query) + return RecordResultSet(rows_feed, self.client, self.spreadsheet_key, + self.worksheet_id) + + def FindRecords(self, query_string): + """Performs a query against the worksheet to find rows which match. + + For details on query string syntax see the section on sq under + http://code.google.com/apis/spreadsheets/reference.html#list_Parameters + + Args: + query_string: str Examples: 'name == john' to find all rows with john + in the name column, '(cost < 19.50 and name != toy) or cost > 500' + + Returns: + RecordResultSet with the first group of matches. + """ + row_query = gdata.spreadsheet.service.ListQuery() + row_query.sq = query_string + matching_feed = self.client._GetSpreadsheetsClient().GetListFeed( + self.spreadsheet_key, wksht_id=self.worksheet_id, query=row_query) + return RecordResultSet(matching_feed, self.client, + self.spreadsheet_key, self.worksheet_id) + + +class RecordResultSet(list): + """A collection of rows which allows fetching of the next set of results. + + The server may not send all rows in the requested range because there are + too many. Using this result set you can access the first set of results + as if it is a list, then get the next batch (if there are more results) by + calling GetNext(). + """ + + def __init__(self, feed, client, spreadsheet_key, worksheet_id): + self.client = client + self.spreadsheet_key = spreadsheet_key + self.worksheet_id = worksheet_id + self.feed = feed + list(self) + for entry in self.feed.entry: + self.append(Record(content=None, row_entry=entry, + spreadsheet_key=spreadsheet_key, worksheet_id=worksheet_id, + database_client=client)) + + def GetNext(self): + """Fetches the next batch of rows in the result set. + + Returns: + A new RecordResultSet. + """ + next_link = self.feed.GetNextLink() + if next_link and next_link.href: + new_feed = self.client._GetSpreadsheetsClient().Get(next_link.href, + converter=gdata.spreadsheet.SpreadsheetsListFeedFromString) + return RecordResultSet(new_feed, self.client, self.spreadsheet_key, + self.worksheet_id) + + +class Record(object): + """Represents one row in a worksheet and provides a dictionary of values. 
+ + Attributes: + custom: dict Represents the contents of the row with cell values mapped + to column headers. + """ + + def __init__(self, content=None, row_entry=None, spreadsheet_key=None, + worksheet_id=None, database_client=None): + """Constructor for a record. + + Args: + content: dict of strings Mapping of string values to column names. + row_entry: gdata.spreadsheet.SpreadsheetsList The Atom entry + representing this row in the worksheet. + spreadsheet_key: str The ID of the spreadsheet in which this row + belongs. + worksheet_id: str The ID of the worksheet in which this row belongs. + database_client: DatabaseClient The client which can be used to talk + the Google Spreadsheets server to edit this row. + """ + self.entry = row_entry + self.spreadsheet_key = spreadsheet_key + self.worksheet_id = worksheet_id + if row_entry: + self.row_id = row_entry.id.text.split('/')[-1] + else: + self.row_id = None + self.client = database_client + self.content = content or {} + if not content: + self.ExtractContentFromEntry(row_entry) + + def ExtractContentFromEntry(self, entry): + """Populates the content and row_id based on content of the entry. + + This method is used in the Record's contructor. + + Args: + entry: gdata.spreadsheet.SpreadsheetsList The Atom entry + representing this row in the worksheet. + """ + self.content = {} + if entry: + self.row_id = entry.id.text.split('/')[-1] + for label, custom in entry.custom.iteritems(): + self.content[label] = custom.text + + def Push(self): + """Send the content of the record to spreadsheets to edit the row. + + All items in the content dictionary will be sent. Items which have been + removed from the content may remain in the row. The content member + of the record will not be modified so additional fields in the row + might be absent from this local copy. + """ + self.entry = self.client._GetSpreadsheetsClient().UpdateRow(self.entry, self.content) + + def Pull(self): + """Query Google Spreadsheets to get the latest data from the server. + + Fetches the entry for this row and repopulates the content dictionary + with the data found in the row. + """ + if self.row_id: + self.entry = self.client._GetSpreadsheetsClient().GetListFeed( + self.spreadsheet_key, wksht_id=self.worksheet_id, row_id=self.row_id) + self.ExtractContentFromEntry(self.entry) + + def Delete(self): + self.client._GetSpreadsheetsClient().DeleteRow(self.entry) + + +def ConvertStringsToColumnHeaders(proposed_headers): + """Converts a list of strings to column names which spreadsheets accepts. + + When setting values in a record, the keys which represent column names must + fit certain rules. They are all lower case, contain no spaces or special + characters. If two columns have the same name after being sanitized, the + columns further to the right have _2, _3 _4, etc. appended to them. + + If there are column names which consist of all special characters, or if + the column header is blank, an obfuscated value will be used for a column + name. This method does not handle blank column names or column names with + only special characters. + """ + headers = [] + for input_string in proposed_headers: + # TODO: probably a more efficient way to do this. Perhaps regex. + sanitized = input_string.lower().replace('_', '').replace( + ':', '').replace(' ', '') + # When the same sanitized header appears multiple times in the first row + # of a spreadsheet, _n is appended to the name to make it unique. 
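+    # For example (illustrative), ['Name', 'Name', 'Phone Number'] becomes
+    # ['name', 'name_2', 'phonenumber'].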
+ header_count = headers.count(sanitized) + if header_count > 0: + headers.append('%s_%i' % (sanitized, header_count+1)) + else: + headers.append(sanitized) + return headers diff --git a/gam/gdata/analytics/spreadsheets/__init__.py b/gam/gdata/analytics/spreadsheets/__init__.py new file mode 100755 index 00000000000..e69de29bb2d diff --git a/gam/gdata/analytics/spreadsheets/client.py b/gam/gdata/analytics/spreadsheets/client.py new file mode 100755 index 00000000000..872ec130250 --- /dev/null +++ b/gam/gdata/analytics/spreadsheets/client.py @@ -0,0 +1,592 @@ +#!/usr/bin/env python +# +# Copyright (C) 2009 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +"""Contains a client to communicate with the Google Spreadsheets servers. + +For documentation on the Spreadsheets API, see: +http://code.google.com/apis/spreadsheets/ +""" + + +__author__ = 'j.s@google.com (Jeff Scudder)' + + +import gdata.client +import gdata.gauth +import gdata.spreadsheets.data +import atom.data +import atom.http_core + + +SPREADSHEETS_URL = ('https://spreadsheets.google.com/feeds/spreadsheets' + '/private/full') +WORKSHEETS_URL = ('https://spreadsheets.google.com/feeds/worksheets/' + '%s/private/full') +WORKSHEET_URL = ('https://spreadsheets.google.com/feeds/worksheets/' + '%s/private/full/%s') +TABLES_URL = 'https://spreadsheets.google.com/feeds/%s/tables' +RECORDS_URL = 'https://spreadsheets.google.com/feeds/%s/records/%s' +RECORD_URL = 'https://spreadsheets.google.com/feeds/%s/records/%s/%s' +CELLS_URL = 'https://spreadsheets.google.com/feeds/cells/%s/%s/private/full' +CELL_URL = ('https://spreadsheets.google.com/feeds/cells/%s/%s/private/full/' + 'R%sC%s') +LISTS_URL = 'https://spreadsheets.google.com/feeds/list/%s/%s/private/full' + + +class SpreadsheetsClient(gdata.client.GDClient): + api_version = '3' + auth_service = 'wise' + auth_scopes = gdata.gauth.AUTH_SCOPES['wise'] + ssl = True + + def get_spreadsheets(self, auth_token=None, + desired_class=gdata.spreadsheets.data.SpreadsheetsFeed, + **kwargs): + """Obtains a feed with the spreadsheets belonging to the current user. + + Args: + auth_token: An object which sets the Authorization HTTP header in its + modify_request method. Recommended classes include + gdata.gauth.ClientLoginToken and gdata.gauth.AuthSubToken + among others. Represents the current user. Defaults to None + and if None, this method will look for a value in the + auth_token member of SpreadsheetsClient. + desired_class: class descended from atom.core.XmlElement to which a + successful response should be converted. If there is no + converter function specified (converter=None) then the + desired_class will be used in calling the + atom.core.parse function. If neither + the desired_class nor the converter is specified, an + HTTP reponse object will be returned. Defaults to + gdata.spreadsheets.data.SpreadsheetsFeed. 
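+
+    Example (illustrative; assumes 'client' is a SpreadsheetsClient that
+    already holds a valid auth_token):
+
+      feed = client.get_spreadsheets()
+      for spreadsheet in feed.entry:
+        print spreadsheet.title.text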
+ """ + return self.get_feed(SPREADSHEETS_URL, auth_token=auth_token, + desired_class=desired_class, **kwargs) + + GetSpreadsheets = get_spreadsheets + + def get_worksheets(self, spreadsheet_key, auth_token=None, + desired_class=gdata.spreadsheets.data.WorksheetsFeed, + **kwargs): + """Finds the worksheets within a given spreadsheet. + + Args: + spreadsheet_key: str, The unique ID of this containing spreadsheet. This + can be the ID from the URL or as provided in a + Spreadsheet entry. + auth_token: An object which sets the Authorization HTTP header in its + modify_request method. Recommended classes include + gdata.gauth.ClientLoginToken and gdata.gauth.AuthSubToken + among others. Represents the current user. Defaults to None + and if None, this method will look for a value in the + auth_token member of SpreadsheetsClient. + desired_class: class descended from atom.core.XmlElement to which a + successful response should be converted. If there is no + converter function specified (converter=None) then the + desired_class will be used in calling the + atom.core.parse function. If neither + the desired_class nor the converter is specified, an + HTTP reponse object will be returned. Defaults to + gdata.spreadsheets.data.WorksheetsFeed. + """ + return self.get_feed(WORKSHEETS_URL % spreadsheet_key, + auth_token=auth_token, desired_class=desired_class, + **kwargs) + + GetWorksheets = get_worksheets + + def add_worksheet(self, spreadsheet_key, title, rows, cols, + auth_token=None, **kwargs): + """Creates a new worksheet entry in the spreadsheet. + + Args: + spreadsheet_key: str, The unique ID of this containing spreadsheet. This + can be the ID from the URL or as provided in a + Spreadsheet entry. + title: str, The title to be used in for the worksheet. + rows: str or int, The number of rows this worksheet should start with. + cols: str or int, The number of columns this worksheet should start with. + auth_token: An object which sets the Authorization HTTP header in its + modify_request method. Recommended classes include + gdata.gauth.ClientLoginToken and gdata.gauth.AuthSubToken + among others. Represents the current user. Defaults to None + and if None, this method will look for a value in the + auth_token member of SpreadsheetsClient. + """ + new_worksheet = gdata.spreadsheets.data.WorksheetEntry( + title=atom.data.Title(text=title), + row_count=gdata.spreadsheets.data.RowCount(text=str(rows)), + col_count=gdata.spreadsheets.data.ColCount(text=str(cols))) + return self.post(new_worksheet, WORKSHEETS_URL % spreadsheet_key, + auth_token=auth_token, **kwargs) + + AddWorksheet = add_worksheet + + def get_worksheet(self, spreadsheet_key, worksheet_id, + desired_class=gdata.spreadsheets.data.WorksheetEntry, + auth_token=None, **kwargs): + """Retrieves a single worksheet. + + Args: + spreadsheet_key: str, The unique ID of this containing spreadsheet. This + can be the ID from the URL or as provided in a + Spreadsheet entry. + worksheet_id: str, The unique ID for the worksheet withing the desired + spreadsheet. + auth_token: An object which sets the Authorization HTTP header in its + modify_request method. Recommended classes include + gdata.gauth.ClientLoginToken and gdata.gauth.AuthSubToken + among others. Represents the current user. Defaults to None + and if None, this method will look for a value in the + auth_token member of SpreadsheetsClient. + desired_class: class descended from atom.core.XmlElement to which a + successful response should be converted. 
If there is no + converter function specified (converter=None) then the + desired_class will be used in calling the + atom.core.parse function. If neither + the desired_class nor the converter is specified, an + HTTP reponse object will be returned. Defaults to + gdata.spreadsheets.data.WorksheetEntry. + + """ + return self.get_entry(WORKSHEET_URL % (spreadsheet_key, worksheet_id,), + auth_token=auth_token, desired_class=desired_class, + **kwargs) + + GetWorksheet = get_worksheet + + def add_table(self, spreadsheet_key, title, summary, worksheet_name, + header_row, num_rows, start_row, insertion_mode, + column_headers, auth_token=None, **kwargs): + """Creates a new table within the worksheet. + + Args: + spreadsheet_key: str, The unique ID of this containing spreadsheet. This + can be the ID from the URL or as provided in a + Spreadsheet entry. + title: str, The title for the new table within a worksheet. + summary: str, A description of the table. + worksheet_name: str The name of the worksheet in which this table + should live. + header_row: int or str, The number of the row in the worksheet which + will contain the column names for the data in this table. + num_rows: int or str, The number of adjacent rows in this table. + start_row: int or str, The number of the row at which the data begins. + insertion_mode: str + column_headers: dict of strings, maps the column letters (A, B, C) to + the desired name which will be viewable in the + worksheet. + auth_token: An object which sets the Authorization HTTP header in its + modify_request method. Recommended classes include + gdata.gauth.ClientLoginToken and gdata.gauth.AuthSubToken + among others. Represents the current user. Defaults to None + and if None, this method will look for a value in the + auth_token member of SpreadsheetsClient. + """ + data = gdata.spreadsheets.data.Data( + insertion_mode=insertion_mode, num_rows=str(num_rows), + start_row=str(start_row)) + for index, name in column_headers.iteritems(): + data.column.append(gdata.spreadsheets.data.Column( + index=index, name=name)) + new_table = gdata.spreadsheets.data.Table( + title=atom.data.Title(text=title), summary=atom.data.Summary(summary), + worksheet=gdata.spreadsheets.data.Worksheet(name=worksheet_name), + header=gdata.spreadsheets.data.Header(row=str(header_row)), data=data) + return self.post(new_table, TABLES_URL % spreadsheet_key, + auth_token=auth_token, **kwargs) + + AddTable = add_table + + def get_tables(self, spreadsheet_key, + desired_class=gdata.spreadsheets.data.TablesFeed, + auth_token=None, **kwargs): + """Retrieves a feed listing the tables in this spreadsheet. + + Args: + spreadsheet_key: str, The unique ID of this containing spreadsheet. This + can be the ID from the URL or as provided in a + Spreadsheet entry. + desired_class: class descended from atom.core.XmlElement to which a + successful response should be converted. If there is no + converter function specified (converter=None) then the + desired_class will be used in calling the + atom.core.parse function. If neither + the desired_class nor the converter is specified, an + HTTP reponse object will be returned. Defaults to + gdata.spreadsheets.data.TablesFeed. + auth_token: An object which sets the Authorization HTTP header in its + modify_request method. Recommended classes include + gdata.gauth.ClientLoginToken and gdata.gauth.AuthSubToken + among others. Represents the current user. 
Defaults to None + and if None, this method will look for a value in the + auth_token member of SpreadsheetsClient. + """ + return self.get_feed(TABLES_URL % spreadsheet_key, + desired_class=desired_class, auth_token=auth_token, + **kwargs) + + GetTables = get_tables + + def add_record(self, spreadsheet_key, table_id, fields, + title=None, auth_token=None, **kwargs): + """Adds a new row to the table. + + Args: + spreadsheet_key: str, The unique ID of this containing spreadsheet. This + can be the ID from the URL or as provided in a + Spreadsheet entry. + table_id: str, The ID of the table within the worksheet which should + receive this new record. The table ID can be found using the + get_table_id method of a gdata.spreadsheets.data.Table. + fields: dict of strings mapping column names to values. + title: str, optional The title for this row. + auth_token: An object which sets the Authorization HTTP header in its + modify_request method. Recommended classes include + gdata.gauth.ClientLoginToken and gdata.gauth.AuthSubToken + among others. Represents the current user. Defaults to None + and if None, this method will look for a value in the + auth_token member of SpreadsheetsClient. + """ + new_record = gdata.spreadsheets.data.Record() + if title is not None: + new_record.title = atom.data.Title(text=title) + for name, value in fields.iteritems(): + new_record.field.append(gdata.spreadsheets.data.Field( + name=name, text=value)) + return self.post(new_record, RECORDS_URL % (spreadsheet_key, table_id), + auth_token=auth_token, **kwargs) + + AddRecord = add_record + + def get_records(self, spreadsheet_key, table_id, + desired_class=gdata.spreadsheets.data.RecordsFeed, + auth_token=None, **kwargs): + """Retrieves the records in a table. + + Args: + spreadsheet_key: str, The unique ID of this containing spreadsheet. This + can be the ID from the URL or as provided in a + Spreadsheet entry. + table_id: str, The ID of the table within the worksheet whose records + we would like to fetch. The table ID can be found using the + get_table_id method of a gdata.spreadsheets.data.Table. + desired_class: class descended from atom.core.XmlElement to which a + successful response should be converted. If there is no + converter function specified (converter=None) then the + desired_class will be used in calling the + atom.core.parse function. If neither + the desired_class nor the converter is specified, an + HTTP reponse object will be returned. Defaults to + gdata.spreadsheets.data.RecordsFeed. + auth_token: An object which sets the Authorization HTTP header in its + modify_request method. Recommended classes include + gdata.gauth.ClientLoginToken and gdata.gauth.AuthSubToken + among others. Represents the current user. Defaults to None + and if None, this method will look for a value in the + auth_token member of SpreadsheetsClient. + """ + return self.get_feed(RECORDS_URL % (spreadsheet_key, table_id), + desired_class=desired_class, auth_token=auth_token, + **kwargs) + + GetRecords = get_records + + def get_record(self, spreadsheet_key, table_id, record_id, + desired_class=gdata.spreadsheets.data.Record, + auth_token=None, **kwargs): + """Retrieves a single record from the table. + + Args: + spreadsheet_key: str, The unique ID of this containing spreadsheet. This + can be the ID from the URL or as provided in a + Spreadsheet entry. + table_id: str, The ID of the table within the worksheet whose records + we would like to fetch. 
The table ID can be found using the + get_table_id method of a gdata.spreadsheets.data.Table. + record_id: str, The ID of the record within this table which we want to + fetch. You can find the record ID using get_record_id() on + an instance of the gdata.spreadsheets.data.Record class. + desired_class: class descended from atom.core.XmlElement to which a + successful response should be converted. If there is no + converter function specified (converter=None) then the + desired_class will be used in calling the + atom.core.parse function. If neither + the desired_class nor the converter is specified, an + HTTP reponse object will be returned. Defaults to + gdata.spreadsheets.data.RecordsFeed. + auth_token: An object which sets the Authorization HTTP header in its + modify_request method. Recommended classes include + gdata.gauth.ClientLoginToken and gdata.gauth.AuthSubToken + among others. Represents the current user. Defaults to None + and if None, this method will look for a value in the + auth_token member of SpreadsheetsClient. + """ + return self.get_entry(RECORD_URL % (spreadsheet_key, table_id, record_id), + desired_class=desired_class, auth_token=auth_token, + **kwargs) + + GetRecord = get_record + + def get_cells(self, spreadsheet_key, worksheet_id, + desired_class=gdata.spreadsheets.data.CellsFeed, + auth_token=None, **kwargs): + """Retrieves the cells which have values in this spreadsheet. + + Blank cells are not included. + + Args: + spreadsheet_key: str, The unique ID of this containing spreadsheet. This + can be the ID from the URL or as provided in a + Spreadsheet entry. + worksheet_id: str, The unique ID of the worksheet in this spreadsheet + whose cells we want. This can be obtained using + WorksheetEntry's get_worksheet_id method. + desired_class: class descended from atom.core.XmlElement to which a + successful response should be converted. If there is no + converter function specified (converter=None) then the + desired_class will be used in calling the + atom.core.parse function. If neither + the desired_class nor the converter is specified, an + HTTP reponse object will be returned. Defaults to + gdata.spreadsheets.data.CellsFeed. + auth_token: An object which sets the Authorization HTTP header in its + modify_request method. Recommended classes include + gdata.gauth.ClientLoginToken and gdata.gauth.AuthSubToken + among others. Represents the current user. Defaults to None + and if None, this method will look for a value in the + auth_token member of SpreadsheetsClient. + """ + return self.get_feed(CELLS_URL % (spreadsheet_key, worksheet_id), + auth_token=auth_token, desired_class=desired_class, + **kwargs) + + GetCells = get_cells + + def get_cell(self, spreadsheet_key, worksheet_id, row_num, col_num, + desired_class=gdata.spreadsheets.data.CellEntry, + auth_token=None, **kwargs): + """Retrieves a single cell from the worksheet. + + Indexes are 1 based so the first cell in the worksheet is 1, 1. + + Args: + spreadsheet_key: str, The unique ID of this containing spreadsheet. This + can be the ID from the URL or as provided in a + Spreadsheet entry. + worksheet_id: str, The unique ID of the worksheet in this spreadsheet + whose cells we want. This can be obtained using + WorksheetEntry's get_worksheet_id method. + row_num: int, The row of the cell that we want. Numbering starts with 1. + col_num: int, The column of the cell we want. Numbering starts with 1. + desired_class: class descended from atom.core.XmlElement to which a + successful response should be converted. 
If there is no + converter function specified (converter=None) then the + desired_class will be used in calling the + atom.core.parse function. If neither + the desired_class nor the converter is specified, an + HTTP reponse object will be returned. Defaults to + gdata.spreadsheets.data.CellEntry. + auth_token: An object which sets the Authorization HTTP header in its + modify_request method. Recommended classes include + gdata.gauth.ClientLoginToken and gdata.gauth.AuthSubToken + among others. Represents the current user. Defaults to None + and if None, this method will look for a value in the + auth_token member of SpreadsheetsClient. + """ + return self.get_entry( + CELL_URL % (spreadsheet_key, worksheet_id, row_num, col_num), + auth_token=auth_token, desired_class=desired_class, **kwargs) + + GetCell = get_cell + + def get_list_feed(self, spreadsheet_key, worksheet_id, + desired_class=gdata.spreadsheets.data.ListsFeed, + auth_token=None, **kwargs): + """Retrieves the value rows from the worksheet's list feed. + + The list feed is a view of the spreadsheet in which the first row is used + for column names and subsequent rows up to the first blank line are + records. + + Args: + spreadsheet_key: str, The unique ID of this containing spreadsheet. This + can be the ID from the URL or as provided in a + Spreadsheet entry. + worksheet_id: str, The unique ID of the worksheet in this spreadsheet + whose cells we want. This can be obtained using + WorksheetEntry's get_worksheet_id method. + desired_class: class descended from atom.core.XmlElement to which a + successful response should be converted. If there is no + converter function specified (converter=None) then the + desired_class will be used in calling the + atom.core.parse function. If neither + the desired_class nor the converter is specified, an + HTTP reponse object will be returned. Defaults to + gdata.spreadsheets.data.ListsFeed. + auth_token: An object which sets the Authorization HTTP header in its + modify_request method. Recommended classes include + gdata.gauth.ClientLoginToken and gdata.gauth.AuthSubToken + among others. Represents the current user. Defaults to None + and if None, this method will look for a value in the + auth_token member of SpreadsheetsClient. + """ + return self.get_feed(LISTS_URL % (spreadsheet_key, worksheet_id), + auth_token=auth_token, desired_class=desired_class, + **kwargs) + + GetListFeed = get_list_feed + + def add_list_entry(self, list_entry, spreadsheet_key, worksheet_id, + auth_token=None, **kwargs): + """Adds a new row to the worksheet's list feed. + + Args: + list_entry: gdata.spreadsheets.data.ListsEntry An entry which contains + the values which should be set for the columns in this + record. + spreadsheet_key: str, The unique ID of this containing spreadsheet. This + can be the ID from the URL or as provided in a + Spreadsheet entry. + worksheet_id: str, The unique ID of the worksheet in this spreadsheet + whose cells we want. This can be obtained using + WorksheetEntry's get_worksheet_id method. + auth_token: An object which sets the Authorization HTTP header in its + modify_request method. Recommended classes include + gdata.gauth.ClientLoginToken and gdata.gauth.AuthSubToken + among others. Represents the current user. Defaults to None + and if None, this method will look for a value in the + auth_token member of SpreadsheetsClient. 
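        Example (a minimal illustrative sketch, assuming `client` is an
        authenticated SpreadsheetsClient; the spreadsheet key and worksheet
        ID below are placeholders):

          row = gdata.spreadsheets.data.ListEntry()
          row.set_value('name', 'Ada')    # 'name' must match a column header in row 1
          row.set_value('score', '42')
          client.add_list_entry(row, 'SPREADSHEET_KEY', 'WORKSHEET_ID')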
+ """ + return self.post(list_entry, LISTS_URL % (spreadsheet_key, worksheet_id), + auth_token=auth_token, **kwargs) + + AddListEntry = add_list_entry + + +class SpreadsheetQuery(gdata.client.Query): + + def __init__(self, title=None, title_exact=None, **kwargs): + """Adds Spreadsheets feed query parameters to a request. + + Args: + title: str Specifies the search terms for the title of a document. + This parameter used without title-exact will only submit partial + queries, not exact queries. + title_exact: str Specifies whether the title query should be taken as an + exact string. Meaningless without title. Possible values are + 'true' and 'false'. + """ + gdata.client.Query.__init__(self, **kwargs) + self.title = title + self.title_exact = title_exact + + def modify_request(self, http_request): + gdata.client._add_query_param('title', self.title, http_request) + gdata.client._add_query_param('title-exact', self.title_exact, + http_request) + gdata.client.Query.modify_request(self, http_request) + + ModifyRequest = modify_request + + +class WorksheetQuery(SpreadsheetQuery): + pass + + +class ListQuery(gdata.client.Query): + + def __init__(self, order_by=None, reverse=None, sq=None, **kwargs): + """Adds List-feed specific query parameters to a request. + + Args: + order_by: str Specifies what column to use in ordering the entries in + the feed. By position (the default): 'position' returns + rows in the order in which they appear in the GUI. Row 1, then + row 2, then row 3, and so on. By column: + 'column:columnName' sorts rows in ascending order based on the + values in the column with the given columnName, where + columnName is the value in the header row for that column. + reverse: str Specifies whether to sort in descending or ascending order. + Reverses default sort order: 'true' results in a descending + sort; 'false' (the default) results in an ascending sort. + sq: str Structured query on the full text in the worksheet. + [columnName][binaryOperator][value] + Supported binaryOperators are: + - (), for overriding order of operations + - = or ==, for strict equality + - <> or !=, for strict inequality + - and or &&, for boolean and + - or or ||, for boolean or + """ + gdata.client.Query.__init__(self, **kwargs) + self.order_by = order_by + self.reverse = reverse + self.sq = sq + + def modify_request(self, http_request): + gdata.client._add_query_param('orderby', self.order_by, http_request) + gdata.client._add_query_param('reverse', self.reverse, http_request) + gdata.client._add_query_param('sq', self.sq, http_request) + gdata.client.Query.modify_request(self, http_request) + + ModifyRequest = modify_request + + +class TableQuery(ListQuery): + pass + + +class CellQuery(gdata.client.Query): + + def __init__(self, min_row=None, max_row=None, min_col=None, max_col=None, + range=None, return_empty=None, **kwargs): + """Adds Cells-feed specific query parameters to a request. + + Args: + min_row: str or int Positional number of minimum row returned in query. + max_row: str or int Positional number of maximum row returned in query. + min_col: str or int Positional number of minimum column returned in query. + max_col: str or int Positional number of maximum column returned in query. + range: str A single cell or a range of cells. Use standard spreadsheet + cell-range notations, using a colon to separate start and end of + range. Examples: + - 'A1' and 'R1C1' both specify only cell A1. + - 'D1:F3' and 'R1C4:R3C6' both specify the rectangle of cells with + corners at D1 and F3. 
+ return_empty: str If 'true' then empty cells will be returned in the feed. + If omitted, the default is 'false'. + """ + gdata.client.Query.__init__(self, **kwargs) + self.min_row = min_row + self.max_row = max_row + self.min_col = min_col + self.max_col = max_col + self.range = range + self.return_empty = return_empty + + def modify_request(self, http_request): + gdata.client._add_query_param('min-row', self.min_row, http_request) + gdata.client._add_query_param('max-row', self.max_row, http_request) + gdata.client._add_query_param('min-col', self.min_col, http_request) + gdata.client._add_query_param('max-col', self.max_col, http_request) + gdata.client._add_query_param('range', self.range, http_request) + gdata.client._add_query_param('return-empty', self.return_empty, + http_request) + gdata.client.Query.modify_request(self, http_request) + + ModifyRequest = modify_request diff --git a/gam/gdata/analytics/spreadsheets/data.py b/gam/gdata/analytics/spreadsheets/data.py new file mode 100755 index 00000000000..93e67a8b4d5 --- /dev/null +++ b/gam/gdata/analytics/spreadsheets/data.py @@ -0,0 +1,346 @@ +#!/usr/bin/env python +# +# Copyright (C) 2009 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +# This module is used for version 2 of the Google Data APIs. + + +"""Provides classes and constants for the XML in the Google Spreadsheets API. + +Documentation for the raw XML which these classes represent can be found here: +http://code.google.com/apis/spreadsheets/docs/3.0/reference.html#Elements +""" + + +__author__ = 'j.s@google.com (Jeff Scudder)' + + +import atom.core +import gdata.data + + +GS_TEMPLATE = '{http://schemas.google.com/spreadsheets/2006}%s' +GSX_NAMESPACE = 'http://schemas.google.com/spreadsheets/2006/extended' + + +INSERT_MODE = 'insert' +OVERWRITE_MODE = 'overwrite' + + +WORKSHEETS_REL = 'http://schemas.google.com/spreadsheets/2006#worksheetsfeed' + + +class Error(Exception): + pass + + +class FieldMissing(Exception): + pass + + +class HeaderNotSet(Error): + """The desired column header had no value for the row in the list feed.""" + + +class Cell(atom.core.XmlElement): + """The gs:cell element. + + A cell in the worksheet. The <gs:cell> element can appear only as a child + of <atom:entry>. + """ + _qname = GS_TEMPLATE % 'cell' + col = 'col' + input_value = 'inputValue' + numeric_value = 'numericValue' + row = 'row' + + +class ColCount(atom.core.XmlElement): + """The gs:colCount element. + + Indicates the number of columns in the worksheet, including columns that + contain only empty cells. The <gs:colCount> element can appear as a child + of <atom:entry> or <atom:feed> + """ + _qname = GS_TEMPLATE % 'colCount' + + +class Field(atom.core.XmlElement): + """The gs:field element. + + A field single cell within a record. Contained in an <atom:entry>. 
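  Example (a minimal illustrative sketch of constructing a field, as done by
  SpreadsheetsClient.add_record):

    field = Field(name='cost', text='19.99')
    # Serializes to something along the lines of <gs:field name='cost'>19.99</gs:field>.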
+ """ + _qname = GS_TEMPLATE % 'field' + index = 'index' + name = 'name' + + +class Column(Field): + """The gs:column element.""" + _qname = GS_TEMPLATE % 'column' + + +class Data(atom.core.XmlElement): + """The gs:data element. + + A data region of a table. Contained in an <atom:entry> element. + """ + _qname = GS_TEMPLATE % 'data' + column = [Column] + insertion_mode = 'insertionMode' + num_rows = 'numRows' + start_row = 'startRow' + + +class Header(atom.core.XmlElement): + """The gs:header element. + + Indicates which row is the header row. Contained in an <atom:entry>. + """ + _qname = GS_TEMPLATE % 'header' + row = 'row' + + +class RowCount(atom.core.XmlElement): + """The gs:rowCount element. + + Indicates the number of total rows in the worksheet, including rows that + contain only empty cells. The <gs:rowCount> element can appear as a + child of <atom:entry> or <atom:feed>. + """ + _qname = GS_TEMPLATE % 'rowCount' + + +class Worksheet(atom.core.XmlElement): + """The gs:worksheet element. + + The worksheet where the table lives.Contained in an <atom:entry>. + """ + _qname = GS_TEMPLATE % 'worksheet' + name = 'name' + + +class Spreadsheet(gdata.data.GDEntry): + """An Atom entry which represents a Google Spreadsheet.""" + + def find_worksheets_feed(self): + return self.find_url(WORKSHEETS_REL) + + FindWorksheetsFeed = find_worksheets_feed + + def get_spreadsheet_key(self): + """Extracts the spreadsheet key unique to this spreadsheet.""" + return self.get_id().split('/')[-1] + + GetSpreadsheetKey = get_spreadsheet_key + + +class SpreadsheetsFeed(gdata.data.GDFeed): + """An Atom feed listing a user's Google Spreadsheets.""" + entry = [Spreadsheet] + + +class WorksheetEntry(gdata.data.GDEntry): + """An Atom entry representing a single worksheet in a spreadsheet.""" + row_count = RowCount + col_count = ColCount + + def get_worksheet_id(self): + """The worksheet ID identifies this worksheet in its spreadsheet.""" + return self.get_id().split('/')[-1] + + GetWorksheetId = get_worksheet_id + + +class WorksheetsFeed(gdata.data.GDFeed): + """A feed containing the worksheets in a single spreadsheet.""" + entry = [WorksheetEntry] + + +class Table(gdata.data.GDEntry): + """An Atom entry that represents a subsection of a worksheet. + + A table allows you to treat part or all of a worksheet somewhat like a + table in a database that is, as a set of structured data items. Tables + don't exist until you explicitly create them before you can use a table + feed, you have to explicitly define where the table data comes from. + """ + data = Data + header = Header + worksheet = Worksheet + + def get_table_id(self): + if self.id.text: + return self.id.text.split('/')[-1] + return None + + GetTableId = get_table_id + + +class TablesFeed(gdata.data.GDFeed): + """An Atom feed containing the tables defined within a worksheet.""" + entry = [Table] + + +class Record(gdata.data.GDEntry): + """An Atom entry representing a single record in a table. + + Note that the order of items in each record is the same as the order of + columns in the table definition, which may not match the order of + columns in the GUI. 
+ """ + field = [Field] + + def value_for_index(self, column_index): + for field in self.field: + if field.index == column_index: + return field.text + raise FieldMissing('There is no field for %s' % column_index) + + ValueForIndex = value_for_index + + def value_for_name(self, name): + for field in self.field: + if field.name == name: + return field.text + raise FieldMissing('There is no field for %s' % name) + + ValueForName = value_for_name + + def get_record_id(self): + if self.id.text: + return self.id.text.split('/')[-1] + return None + + +class RecordsFeed(gdata.data.GDFeed): + """An Atom feed containing the individuals records in a table.""" + entry = [Record] + + +class ListRow(atom.core.XmlElement): + """A gsx column value within a row. + + The local tag in the _qname is blank and must be set to the column + name. For example, when adding to a ListEntry, do: + col_value = ListRow(text='something') + col_value._qname = col_value._qname % 'mycolumnname' + """ + _qname = '{http://schemas.google.com/spreadsheets/2006/extended}%s' + + +class ListEntry(gdata.data.GDEntry): + """An Atom entry representing a worksheet row in the list feed. + + The values for a particular column can be get and set using + x.get_value('columnheader') and x.set_value('columnheader', 'value'). + See also the explanation of column names in the ListFeed class. + """ + + def get_value(self, column_name): + """Returns the displayed text for the desired column in this row. + + The formula or input which generated the displayed value is not accessible + through the list feed, to see the user's input, use the cells feed. + + If a column is not present in this spreadsheet, or there is no value + for a column in this row, this method will return None. + """ + values = self.get_elements(column_name, GSX_NAMESPACE) + if len(values) == 0: + return None + return values[0].text + + def set_value(self, column_name, value): + """Changes the value of cell in this row under the desired column name. + + Warning: if the cell contained a formula, it will be wiped out by setting + the value using the list feed since the list feed only works with + displayed values. + + No client side checking is performed on the column_name, you need to + ensure that the column_name is the local tag name in the gsx tag for the + column. For example, the column_name will not contain special characters, + spaces, uppercase letters, etc. + """ + # Try to find the column in this row to change an existing value. + values = self.get_elements(column_name, GSX_NAMESPACE) + if len(values) > 0: + values[0].text = value + else: + # There is no value in this row for the desired column, so add a new + # gsx:column_name element. + new_value = ListRow(text=value) + new_value._qname = new_value._qname % (column_name,) + self._other_elements.append(new_value) + + def to_dict(self): + """Converts this row to a mapping of column names to their values.""" + result = {} + values = self.get_elements(namespace=GSX_NAMESPACE) + for item in values: + result[item._get_tag()] = item.text + return result + + def from_dict(self, values): + """Sets values for this row from the dictionary. + + Old values which are already in the entry will not be removed unless + they are overwritten with new values from the dict. + """ + for column, value in values.iteritems(): + self.set_value(column, value) + + +class ListsFeed(gdata.data.GDFeed): + """An Atom feed in which each entry represents a row in a worksheet. 
+ + The first row in the worksheet is used as the column names for the values + in each row. If a header cell is empty, then a unique column ID is used + for the gsx element name. + + Spaces in a column name are removed from the name of the corresponding + gsx element. + + Caution: The columnNames are case-insensitive. For example, if you see + a <gsx:e-mail> element in a feed, you can't know whether the column + heading in the original worksheet was "e-mail" or "E-Mail". + + Note: If two or more columns have the same name, then subsequent columns + of the same name have _n appended to the columnName. For example, if the + first column name is "e-mail", followed by columns named "E-Mail" and + "E-mail", then the columnNames will be gsx:e-mail, gsx:e-mail_2, and + gsx:e-mail_3 respectively. + """ + entry = [ListEntry] + + +class CellEntry(gdata.data.BatchEntry): + """An Atom entry representing a single cell in a worksheet.""" + cell = Cell + + +class CellsFeed(gdata.data.BatchFeed): + """An Atom feed contains one entry per cell in a worksheet. + + The cell feed supports batch operations, you can send multiple cell + operations in one HTTP request. + """ + entry = [CellEntry] + + def batch_set_cell(row, col, input): + pass + diff --git a/gam/gdata/analytics/test_config.py b/gam/gdata/analytics/test_config.py new file mode 100755 index 00000000000..e07ce63e6ba --- /dev/null +++ b/gam/gdata/analytics/test_config.py @@ -0,0 +1,434 @@ +#!/usr/bin/env python + +# Copyright (C) 2009 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +import sys +import unittest +import getpass +import inspect +import atom.mock_http_core +import gdata.gauth + + +"""Loads configuration for tests which connect to Google servers. + +Settings used in tests are stored in a ConfigCollection instance in this +module called options. If your test needs to get a test related setting, +use + +import gdata.test_config +option_value = gdata.test_config.options.get_value('x') + +The above will check the command line for an '--x' argument, and if not +found will either use the default value for 'x' or prompt the user to enter +one. + +Your test can override the value specified by the user by performing: + +gdata.test_config.options.set_value('x', 'y') + +If your test uses a new option which you would like to allow the user to +specify on the command line or via a prompt, you can use the register_option +method as follows: + +gdata.test_config.options.register( + 'option_name', 'Prompt shown to the user', secret=False #As for password. + 'This is the description of the option, shown when help is requested.', + 'default value, provide only if you do not want the user to be prompted') +""" + + +class Option(object): + + def __init__(self, name, prompt, secret=False, description=None, default=None): + self.name = name + self.prompt = prompt + self.secret = secret + self.description = description + self.default = default + + def get(self): + value = self.default + # Check for a command line parameter. 
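    # Both the '--name=value' and '--name value' forms are accepted by the loop below.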
+ for i in xrange(len(sys.argv)): + if sys.argv[i].startswith('--%s=' % self.name): + value = sys.argv[i].split('=')[1] + elif sys.argv[i] == '--%s' % self.name: + value = sys.argv[i + 1] + # If the param was not on the command line, ask the user to input the + # value. + # In order for this to prompt the user, the default value for the option + # must be None. + if value is None: + prompt = '%s: ' % self.prompt + if self.secret: + value = getpass.getpass(prompt) + else: + print 'You can specify this on the command line using --%s' % self.name + value = raw_input(prompt) + return value + + +class ConfigCollection(object): + + def __init__(self, options=None): + self.options = options or {} + self.values = {} + + def register_option(self, option): + self.options[option.name] = option + + def register(self, *args, **kwargs): + self.register_option(Option(*args, **kwargs)) + + def get_value(self, option_name): + if option_name in self.values: + return self.values[option_name] + value = self.options[option_name].get() + if value is not None: + self.values[option_name] = value + return value + + def set_value(self, option_name, value): + self.values[option_name] = value + + def render_usage(self): + message_parts = [] + for opt_name, option in self.options.iteritems(): + message_parts.append('--%s: %s' % (opt_name, option.description)) + return '\n'.join(message_parts) + + +options = ConfigCollection() + + +# Register the default options. +options.register( + 'username', + 'Please enter the email address of your test account', + description=('The email address you want to sign in with. ' + 'Make sure this is a test account as these tests may edit' + ' or delete data.')) +options.register( + 'password', + 'Please enter the password for your test account', + secret=True, description='The test account password.') +options.register( + 'clearcache', + 'Delete cached data? (enter true or false)', + description=('If set to true, any temporary files which cache test' + ' requests and responses will be deleted.'), + default='true') +options.register( + 'savecache', + 'Save requests and responses in a temporary file? (enter true or false)', + description=('If set to true, requests to the server and responses will' + ' be saved in temporary files.'), + default='false') +options.register( + 'runlive', + 'Run the live tests which contact the server? (enter true or false)', + description=('If set to true, the tests will make real HTTP requests to' + ' the servers. This slows down test execution and may' + ' modify the users data, be sure to use a test account.'), + default='true') +options.register( + 'ssl', + 'Run the live tests over SSL (enter true or false)', + description='If set to true, all tests will be performed over HTTPS (SSL)', + default='false') +options.register( + 'clean', + 'Clean ALL data first before and after each test (enter true or false)', + description='If set to true, all tests will remove all data (DANGEROUS)', + default='false') +options.register( + 'appsusername', + 'Please enter the email address of your test Apps domain account', + description=('The email address you want to sign in with. ' + 'Make sure this is a test account on your Apps domain as ' + 'these tests may edit or delete data.')) +options.register( + 'appspassword', + 'Please enter the password for your test Apps domain account', + secret=True, description='The test Apps account password.') + +# Other options which may be used if needed. 
+BLOG_ID_OPTION = Option( + 'blogid', + 'Please enter the ID of your test blog', + description=('The blog ID for the blog which should have test posts added' + ' to it. Example 7682659670455539811')) +TEST_IMAGE_LOCATION_OPTION = Option( + 'imgpath', + 'Please enter the full path to a test image to upload', + description=('This test image will be uploaded to a service which' + ' accepts a media file, it must be a jpeg.')) +SPREADSHEET_ID_OPTION = Option( + 'spreadsheetid', + 'Please enter the ID of a spreadsheet to use in these tests', + description=('The spreadsheet ID for the spreadsheet which should be' + ' modified by theses tests.')) +APPS_DOMAIN_OPTION = Option( + 'appsdomain', + 'Please enter your Google Apps domain', + description=('The domain the Google Apps is hosted on or leave blank' + ' if n/a')) +SITES_NAME_OPTION = Option( + 'sitename', + 'Please enter name of your Google Site', + description='The webspace name of the Site found in its URL.') +PROJECT_NAME_OPTION = Option( + 'project_name', + 'Please enter the name of your project hosting project', + description=('The name of the project which should have test issues added' + ' to it. Example gdata-python-client')) +ISSUE_ASSIGNEE_OPTION = Option( + 'issue_assignee', + 'Enter the email address of the target owner of the updated issue.', + description=('The email address of the user a created issue\'s owner will ' + ' become. Example testuser2@gmail.com')) +GA_TABLE_ID = Option( + 'table_id', + 'Enter the Table ID of the Google Analytics profile to test', + description=('The Table ID of the Google Analytics profile to test.' + ' Example ga:1174')) +TARGET_USERNAME_OPTION = Option( + 'targetusername', + 'Please enter the username (without domain) of the user which will be' + ' affected by the tests', + description=('The username of the user to be tested')) +YT_DEVELOPER_KEY_OPTION = Option( + 'developerkey', + 'Please enter your YouTube developer key', + description=('The YouTube developer key for your account')) +YT_CLIENT_ID_OPTION = Option( + 'clientid', + 'Please enter your YouTube client ID', + description=('The YouTube client ID for your account')) +YT_VIDEO_ID_OPTION= Option( + 'videoid', + 'Please enter the ID of a YouTube video you uploaded', + description=('The video ID of a YouTube video uploaded to your account')) + + +# Functions to inject a cachable HTTP client into a service client. +def configure_client(client, case_name, service_name, use_apps_auth=False): + """Sets up a mock client which will reuse a saved session. + + Should be called during setUp of each unit test. + + Handles authentication to allow the GDClient to make requests which + require an auth header. + + Args: + client: a gdata.GDClient whose http_client member should be replaced + with a atom.mock_http_core.MockHttpClient so that repeated + executions can used cached responses instead of contacting + the server. + case_name: str The name of the test case class. Examples: 'BloggerTest', + 'ContactsTest'. Used to save a session + for the ClientLogin auth token request, so the case_name + should be reused if and only if the same username, password, + and service are being used. + service_name: str The service name as used for ClientLogin to identify + the Google Data API being accessed. Example: 'blogger', + 'wise', etc. + use_apps_auth: bool (optional) If set to True, use appsusername and + appspassword command-line args instead of username and + password respectively. 
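  Example (a minimal illustrative sketch, not a prescribed pattern; the
  client class, case name, and service name below are placeholders, with
  'wise' used as the example service as above):

    class ExampleTest(unittest.TestCase):

      def setUp(self):
        self.client = gdata.spreadsheets.client.SpreadsheetsClient()
        gdata.test_config.configure_client(self.client, 'ExampleTest', 'wise')

      def test_something(self):
        gdata.test_config.configure_cache(self.client, 'ExampleTest.test_something')
        # ... make requests with self.client ...

      def tearDown(self):
        gdata.test_config.close_client(self.client)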
+ """ + # Use a mock HTTP client which will record and replay the HTTP traffic + # from these tests. + client.http_client = atom.mock_http_core.MockHttpClient() + client.http_client.cache_case_name = case_name + # Getting the auth token only needs to be done once in the course of test + # runs. + auth_token_key = '%s_auth_token' % service_name + if (auth_token_key not in options.values + and options.get_value('runlive') == 'true'): + client.http_client.cache_test_name = 'client_login' + cache_name = client.http_client.get_cache_file_name() + if options.get_value('clearcache') == 'true': + client.http_client.delete_session(cache_name) + client.http_client.use_cached_session(cache_name) + if not use_apps_auth: + username = options.get_value('username') + password = options.get_value('password') + else: + username = options.get_value('appsusername') + password = options.get_value('appspassword') + auth_token = client.client_login(username, password, case_name, + service=service_name) + options.values[auth_token_key] = gdata.gauth.token_to_blob(auth_token) + if client.alt_auth_service is not None: + options.values[client.alt_auth_service] = gdata.gauth.token_to_blob( + client.alt_auth_token) + client.http_client.close_session() + # Allow a config auth_token of False to prevent the client's auth header + # from being modified. + if auth_token_key in options.values: + client.auth_token = gdata.gauth.token_from_blob( + options.values[auth_token_key]) + if client.alt_auth_service is not None: + client.alt_auth_token = gdata.gauth.token_from_blob( + options.values[client.alt_auth_service]) + + +def configure_cache(client, test_name): + """Loads or begins a cached session to record HTTP traffic. + + Should be called at the beginning of each test method. + + Args: + client: a gdata.GDClient whose http_client member has been replaced + with a atom.mock_http_core.MockHttpClient so that repeated + executions can used cached responses instead of contacting + the server. + test_name: str The name of this test method. Examples: + 'TestClass.test_x_works', 'TestClass.test_crud_operations'. + This is used to name the recording of the HTTP requests and + responses, so it should be unique to each test method in the + test case. + """ + # Auth token is obtained in configure_client which is called as part of + # setUp. + client.http_client.cache_test_name = test_name + cache_name = client.http_client.get_cache_file_name() + if options.get_value('clearcache') == 'true': + client.http_client.delete_session(cache_name) + client.http_client.use_cached_session(cache_name) + + +def close_client(client): + """Saves the recoded responses to a temp file if the config file allows. + + This should be called in the unit test's tearDown method. + + Checks to see if the 'savecache' option is set to 'true', to make sure we + only save sessions to repeat if the user desires. + """ + if client and options.get_value('savecache') == 'true': + # If this was a live request, save the recording. + client.http_client.close_session() + + +def configure_service(service, case_name, service_name): + """Sets up a mock GDataService v1 client to reuse recorded sessions. + + Should be called during setUp of each unit test. This is a duplicate of + configure_client, modified to handle old v1 service classes. + """ + service.http_client.v2_http_client = atom.mock_http_core.MockHttpClient() + service.http_client.v2_http_client.cache_case_name = case_name + # Getting the auth token only needs to be done once in the course of test + # runs. 
+ auth_token_key = 'service_%s_auth_token' % service_name + if (auth_token_key not in options.values + and options.get_value('runlive') == 'true'): + service.http_client.v2_http_client.cache_test_name = 'client_login' + cache_name = service.http_client.v2_http_client.get_cache_file_name() + if options.get_value('clearcache') == 'true': + service.http_client.v2_http_client.delete_session(cache_name) + service.http_client.v2_http_client.use_cached_session(cache_name) + service.ClientLogin(options.get_value('username'), + options.get_value('password'), + service=service_name, source=case_name) + options.values[auth_token_key] = service.GetClientLoginToken() + service.http_client.v2_http_client.close_session() + if auth_token_key in options.values: + service.SetClientLoginToken(options.values[auth_token_key]) + + +def configure_service_cache(service, test_name): + """Loads or starts a session recording for a v1 Service object. + + Duplicates the behavior of configure_cache, but the target for this + function is a v1 Service object instead of a v2 Client. + """ + service.http_client.v2_http_client.cache_test_name = test_name + cache_name = service.http_client.v2_http_client.get_cache_file_name() + if options.get_value('clearcache') == 'true': + service.http_client.v2_http_client.delete_session(cache_name) + service.http_client.v2_http_client.use_cached_session(cache_name) + + +def close_service(service): + if service and options.get_value('savecache') == 'true': + # If this was a live request, save the recording. + service.http_client.v2_http_client.close_session() + + +def build_suite(classes): + """Creates a TestSuite for all unit test classes in the list. + + Assumes that each of the classes in the list has unit test methods which + begin with 'test'. Calls unittest.makeSuite. + + Returns: + A new unittest.TestSuite containing a test suite for all classes. + """ + suites = [unittest.makeSuite(a_class, 'test') for a_class in classes] + return unittest.TestSuite(suites) + + +def check_data_classes(test, classes): + import inspect + for data_class in classes: + test.assert_(data_class.__doc__ is not None, + 'The class %s should have a docstring' % data_class) + if hasattr(data_class, '_qname'): + qname_versions = None + if isinstance(data_class._qname, tuple): + qname_versions = data_class._qname + else: + qname_versions = (data_class._qname,) + for versioned_qname in qname_versions: + test.assert_(isinstance(versioned_qname, str), + 'The class %s has a non-string _qname' % data_class) + test.assert_(not versioned_qname.endswith('}'), + 'The _qname for class %s is only a namespace' % ( + data_class)) + + for attribute_name, value in data_class.__dict__.iteritems(): + # Ignore all elements that start with _ (private members) + if not attribute_name.startswith('_'): + try: + if not (isinstance(value, str) or inspect.isfunction(value) + or (isinstance(value, list) + and issubclass(value[0], atom.core.XmlElement)) + or type(value) == property # Allow properties. + or inspect.ismethod(value) # Allow methods. + or inspect.ismethoddescriptor(value) # Allow method descriptors. + # staticmethod et al. 
+ or issubclass(value, atom.core.XmlElement)): + test.fail( + 'XmlElement member should have an attribute, XML class,' + ' or list of XML classes as attributes.') + + except TypeError: + test.fail('Element %s in %s was of type %s' % ( + attribute_name, data_class._qname, type(value))) + + +def check_clients_with_auth(test, classes): + for client_class in classes: + test.assert_(hasattr(client_class, 'api_version')) + test.assert_(isinstance(client_class.auth_service, (str, unicode, int))) + test.assert_(hasattr(client_class, 'auth_service')) + test.assert_(isinstance(client_class.auth_service, (str, unicode))) + test.assert_(hasattr(client_class, 'auth_scopes')) + test.assert_(isinstance(client_class.auth_scopes, (list, tuple))) diff --git a/gam/gdata/analytics/test_data.py b/gam/gdata/analytics/test_data.py new file mode 100755 index 00000000000..d75a5c96f97 --- /dev/null +++ b/gam/gdata/analytics/test_data.py @@ -0,0 +1,5616 @@ +#!/usr/bin/python +# +# Copyright (C) 2006 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + + +XML_ENTRY_1 = """<?xml version='1.0'?> +<entry xmlns='http://www.w3.org/2005/Atom' + xmlns:g='http://base.google.com/ns/1.0'> + <category scheme="http://base.google.com/categories/itemtypes" + term="products"/> + <id> http://www.google.com/test/id/url </id> + <title type='text'>Testing 2000 series laptop + +
    A Testing Laptop
    +
    + + + Computer + Laptop + testing laptop + products +
    """ + + +TEST_BASE_ENTRY = """ + + + Testing 2000 series laptop + +
    A Testing Laptop
    +
    + + yes + + + + Computer + Laptop + testing laptop + products +
    """ + + +BIG_FEED = """ + + dive into mark + + A <em>lot</em> of effort + went into making this effortless + + 2005-07-31T12:29:29Z + tag:example.org,2003:3 + + + Copyright (c) 2003, Mark Pilgrim + + Example Toolkit + + + Atom draft-07 snapshot + + + tag:example.org,2003:3.2397 + 2005-07-31T12:29:29Z + 2003-12-13T08:29:29-04:00 + + Mark Pilgrim + http://example.org/ + f8dy@example.com + + + Sam Ruby + + + Joe Gregorio + + +
    +

    [Update: The Atom draft is finished.]

    +
    +
    +
    +
    +""" + +SMALL_FEED = """ + + Example Feed + + 2003-12-13T18:30:02Z + + John Doe + + urn:uuid:60a76c80-d399-11d9-b93C-0003939e0af6 + + Atom-Powered Robots Run Amok + + urn:uuid:1225c695-cfb8-4ebb-aaaa-80da344efa6a + 2003-12-13T18:30:02Z + Some text. + + +""" + +GBASE_FEED = """ + +http://www.google.com/base/feeds/snippets +2007-02-08T23:18:21.935Z +Items matching query: digital camera + + + + + + + + +GoogleBase +2171885 +1 +25 + +http://www.google.com/base/feeds/snippets/13246453826751927533 +2007-02-08T13:23:27.000Z +2007-02-08T16:40:57.000Z + + +Digital Camera Battery Notebook Computer 12v DC Power Cable - 5.5mm x 2.5mm (Center +) Camera Connecting Cables +Notebook Computer 12v DC Power Cable - 5.5mm x 2.1mm (Center +) This connection cable will allow any Digital Pursuits battery pack to power portable computers that operate with 12v power and have a 2.1mm power connector (center +) Digital ... + + + + + +B&H Photo-Video +anon-szot0wdsq0at@base.google.com + +PayPal & Bill Me Later credit available online only. +new +420 9th Ave. 10001 +305668-REG +Products +Digital Camera Battery +2007-03-10T13:23:27.000Z +1172711 +34.95 usd +Digital Photography>Camera Connecting Cables +EN +DCB5092 +US +1.0 +http://base.google.com/base_image?q=http%3A%2F%2Fwww.bhphotovideo.com%2Fimages%2Fitems%2F305668.jpg&dhm=ffffffff84c9a95e&size=6 + + +http://www.google.com/base/feeds/snippets/10145771037331858608 +2007-02-08T13:23:27.000Z +2007-02-08T16:40:57.000Z + + +Digital Camera Battery Electronic Device 5v DC Power Cable - 5.5mm x 2.5mm (Center +) Camera Connecting Cables +Electronic Device 5v DC Power Cable - 5.5mm x 2.5mm (Center +) This connection cable will allow any Digital Pursuits battery pack to power any electronic device that operates with 5v power and has a 2.5mm power connector (center +) Digital ... + + + + + +B&H Photo-Video +anon-szot0wdsq0at@base.google.com + +420 9th Ave. 10001 +new +0.18 +US +Digital Photography>Camera Connecting Cables +PayPal & Bill Me Later credit available online only. +305656-REG +http://base.google.com/base_image?q=http%3A%2F%2Fwww.bhphotovideo.com%2Fimages%2Fitems%2F305656.jpg&dhm=7315bdc8&size=6 +DCB5108 +838098005108 +34.95 usd +EN +Digital Camera Battery +1172711 +Products +2007-03-10T13:23:27.000Z + + +http://www.google.com/base/feeds/snippets/3128608193804768644 +2007-02-08T02:21:27.000Z +2007-02-08T15:40:13.000Z + + +Digital Camera Battery Power Cable for Kodak 645 Pro-Back ProBack & DCS-300 Series Camera Connecting Cables +Camera Connection Cable - to Power Kodak 645 Pro-Back DCS-300 Series Digital Cameras This connection cable will allow any Digital Pursuits battery pack to power the following digital cameras: Kodak DCS Pro Back 645 DCS-300 series Digital Photography ... + + + + + +B&H Photo-Video +anon-szot0wdsq0at@base.google.com + +0.3 +DCB6006 +http://base.google.com/base_image?q=http%3A%2F%2Fwww.bhphotovideo.com%2Fimages%2Fitems%2F305685.jpg&dhm=72f0ca0a&size=6 +420 9th Ave. 10001 +PayPal & Bill Me Later credit available online only. 
+Products +US +digital kodak camera +Digital Camera Battery +2007-03-10T02:21:27.000Z +EN +new +34.95 usd +1172711 +Digital Photography>Camera Connecting Cables +305685-REG + +""" + +EXTENSION_TREE = """ + + + John Doe + Bar + + + +""" + +TEST_AUTHOR = """ + + John Doe + johndoes@someemailadress.com + http://www.google.com + +""" + +TEST_LINK = """ + +""" + +TEST_GBASE_ATTRIBUTE = """ + Digital Camera Battery +""" + + +CALENDAR_FEED = """ + + http://www.google.com/calendar/feeds/default + 2007-03-20T22:48:57.833Z + GData Ops Demo's Calendar List + + + + + GData Ops Demo + gdata.ops.demo@gmail.com + + + Google Calendar + 1 + + + http://www.google.com/calendar/feeds/default/gdata.ops.demo%40gmail.com + 2007-03-20T22:48:57.837Z + 2007-03-20T22:48:52.000Z + GData Ops Demo + + + + + + GData Ops Demo + gdata.ops.demo@gmail.com + + + + + + + + + http://www.google.com/calendar/feeds/default/jnh21ovnjgfph21h32gvms2758%40group.calendar.google.com + 2007-03-20T22:48:57.837Z + 2007-03-20T22:48:53.000Z + GData Ops Demo Secondary Calendar + + + + + + + GData Ops Demo Secondary Calendar + + + + + + + + +""" + +CALENDAR_FULL_EVENT_FEED = """ + + + http://www.google.com/calendar/feeds/default/private/full + 2007-03-20T21:29:57.000Z + + GData Ops Demo + GData Ops Demo + + + + + + + + GData Ops Demo + gdata.ops.demo@gmail.com + + + Google Calendar + 10 + 1 + 25 + + + + http://www.google.com/calendar/feeds/default/private/full/o99flmgmkfkfrr8u745ghr3100 + 2007-03-20T21:29:52.000Z + 2007-03-20T21:29:57.000Z + + test deleted + + + + + + + + GData Ops Demo + gdata.ops.demo@gmail.com + + + + + + + + + + + + + + + + + + + + + http://www.google.com/calendar/feeds/default/private/full/2qt3ao5hbaq7m9igr5ak9esjo0 + 2007-03-20T21:26:04.000Z + 2007-03-20T21:28:46.000Z + + Afternoon at Dolores Park with Kim + + + + + + + + GData Ops Demo + gdata.ops.demo@gmail.com + + + + + + + + + + + + + + + + + + + + + + + + + + + + + http://www.google.com/calendar/feeds/default/private/full/uvsqhg7klnae40v50vihr1pvos + 2007-03-20T21:28:37.000Z + 2007-03-20T21:28:37.000Z + + Team meeting + + + + + + + + GData Ops Demo + gdata.ops.demo@gmail.com + + DTSTART;TZID=America/Los_Angeles:20070323T090000 + DTEND;TZID=America/Los_Angeles:20070323T100000 + RRULE:FREQ=WEEKLY;BYDAY=FR;UNTIL=20070817T160000Z;WKST=SU + BEGIN:VTIMEZONE TZID:America/Los_Angeles + X-LIC-LOCATION:America/Los_Angeles BEGIN:STANDARD + TZOFFSETFROM:-0700 TZOFFSETTO:-0800 TZNAME:PST + DTSTART:19701025T020000 RRULE:FREQ=YEARLY;BYMONTH=10;BYDAY=-1SU + END:STANDARD BEGIN:DAYLIGHT TZOFFSETFROM:-0800 TZOFFSETTO:-0700 + TZNAME:PDT DTSTART:19700405T020000 + RRULE:FREQ=YEARLY;BYMONTH=4;BYDAY=1SU END:DAYLIGHT + END:VTIMEZONE + + + + + + + + + + + + + + http://www.google.com/calendar/feeds/default/private/full/st4vk9kiffs6rasrl32e4a7alo + 2007-03-20T21:25:46.000Z + 2007-03-20T21:25:46.000Z + + Movie with Kim and danah + + + + + + + + GData Ops Demo + gdata.ops.demo@gmail.com + + + + + + + + + + + + + + + + + + + + + http://www.google.com/calendar/feeds/default/private/full/ofl1e45ubtsoh6gtu127cls2oo + 2007-03-20T21:24:43.000Z + 2007-03-20T21:25:08.000Z + + Dinner with Kim and Sarah + + + + + + + + GData Ops Demo + gdata.ops.demo@gmail.com + + + + + + + + + + + + + + + + + + + + + http://www.google.com/calendar/feeds/default/private/full/b69s2avfi2joigsclecvjlc91g + 2007-03-20T21:24:19.000Z + 2007-03-20T21:25:05.000Z + + Dinner with Jane and John + + + + + + + + GData Ops Demo + gdata.ops.demo@gmail.com + + + + + + + + + + + + + + + + + + + + + 
http://www.google.com/calendar/feeds/default/private/full/u9p66kkiotn8bqh9k7j4rcnjjc + 2007-03-20T21:24:33.000Z + 2007-03-20T21:24:33.000Z + + Tennis with Elizabeth + + + + + + + + GData Ops Demo + gdata.ops.demo@gmail.com + + + + + + + + + + + + + + + + + + + + + http://www.google.com/calendar/feeds/default/private/full/76oj2kceidob3s708tvfnuaq3c + 2007-03-20T21:24:00.000Z + 2007-03-20T21:24:00.000Z + + Lunch with Jenn + + + + + + + + GData Ops Demo + gdata.ops.demo@gmail.com + + + + + + + + + + + + + + + + + + + + + http://www.google.com/calendar/feeds/default/private/full/5np9ec8m7uoauk1vedh5mhodco + 2007-03-20T07:50:02.000Z + 2007-03-20T20:39:26.000Z + + test entry + test desc + + + + + + + GData Ops Demo + gdata.ops.demo@gmail.com + + + + + + + + + + + + + + + + + + + + + + + + + + + + + http://www.google.com/calendar/feeds/default/private/full/fu6sl0rqakf3o0a13oo1i1a1mg + 2007-02-14T23:23:37.000Z + 2007-02-14T23:25:30.000Z + + test + + + + + + + + + + + GData Ops Demo + gdata.ops.demo@gmail.com + + + + + + + + + + + + + + + + + + + + + http://www.google.com/calendar/feeds/default/private/full/h7a0haa4da8sil3rr19ia6luvc + 2007-07-16T22:13:28.000Z + 2007-07-16T22:13:29.000Z + + + + + + + + + + + + + GData Ops Demo + gdata.ops.demo@gmail.com + + + + + + + + + + + + + +""" + +CALENDAR_BATCH_REQUEST = """ + + + + 1 + + + Event inserted via batch + + + 2 + + http://www.google.com/calendar/feeds/default/private/full/glcs0kv2qqa0gf52qi1jo018gc + + Event queried via batch + + + 3 + + http://www.google.com/calendar/feeds/default/private/full/ujm0go5dtngdkr6u91dcqvj0qs + + Event updated via batch + + + + + + 4 + + http://www.google.com/calendar/feeds/default/private/full/d8qbg9egk1n6lhsgq1sjbqffqc + + Event deleted via batch + + + + + +""" + +CALENDAR_BATCH_RESPONSE = """ + + http://www.google.com/calendar/feeds/default/private/full + 2007-09-21T23:01:00.380Z + + Batch Feed + + + + + 1 + + + http://www.google.com/calendar/feeds/default/private/full/n9ug78gd9tv53ppn4hdjvk68ek + + Event inserted via batch + + + + + + 2 + + + http://www.google.com/calendar/feeds/default/private/full/glsc0kv2aqa0ff52qi1jo018gc + + Event queried via batch + + + + + + 3 + + + http://www.google.com/calendar/feeds/default/private/full/ujm0go5dtngdkr6u91dcqvj0qs + + Event updated via batch + + + + 3 + + + + + 4 + + + http://www.google.com/calendar/feeds/default/private/full/d8qbg9egk1n6lhsgq1sjbqffqc + + Event deleted via batch + Deleted + + +""" + +GBASE_ATTRIBUTE_FEED = """ + + http://www.google.com/base/feeds/attributes + 2006-11-01T20:35:59.578Z + + + Attribute histogram for query: [item type:jobs] + + + + GoogleBase + 16 + 1 + 16 + + http://www.google.com/base/feeds/attributes/job+industry%28text%29N%5Bitem+type%3Ajobs%5D + 2006-11-01T20:36:00.100Z + job industry(text) + Attribute"job industry" of type text. + + + + it internet + healthcare + information technology + accounting + clerical and administrative + other + sales and sales management + information systems + engineering and architecture + sales + + + +""" + + +GBASE_ATTRIBUTE_ENTRY = """ + + http://www.google.com/base/feeds/attributes/job+industry%28text%29N%5Bitem+type%3Ajobs%5D + 2006-11-01T20:36:00.100Z + job industry(text) + Attribute"job industry" of type text. 
+ + + + it internet + healthcare + information technology + accounting + clerical and administrative + other + sales and sales management + information systems + engineering and architecture + sales + + +""" + +GBASE_LOCALES_FEED = """ + + http://www.google.com/base/feeds/locales/ + 2006-06-13T18:11:40.120Z + Locales + + + + + Google Inc. + base@google.com + + GoogleBase + 3 + 25 + + + http://www.google.com/base/feeds/locales/en_US + 2006-03-27T22:27:36.658Z + + + en_US + en_US + + + + + + http://www.google.com/base/feeds/locales/en_GB + 2006-06-13T18:14:18.601Z + + en_GB + en_GB + + + + + http://www.google.com/base/feeds/locales/de_DE + 2006-06-13T18:14:18.601Z + + de_DE + de_DE + + + +""" + +GBASE_STRING_ENCODING_ENTRY = """ + + http://www.google.com/base/feeds/snippets/17495780256183230088 + 2007-12-09T03:13:07.000Z + 2008-01-07T03:26:46.000Z + + Digital Camera Cord Fits SONY Cybershot DSC-R1 S40 + SONY \xC2\xB7 Cybershot Digital Camera Usb Cable DESCRIPTION + This is a 2.5 USB 2.0 A to Mini B (5 Pin) high quality digital camera + cable used for connecting your Sony Digital Cameras and Camcoders. Backward + Compatible with USB 2.0, 1.0 and 1.1. Fully ... + + + + eBay + + Products + EN + US + 0.99 usd + http://thumbs.ebaystatic.com/pict/270195049057_1.jpg + Cameras & Photo>Digital Camera Accessories>Cables + Cords & Connectors>USB Cables>For Other Brands + 11729 + 270195049057 + 2008-02-06T03:26:46Z +""" + + +RECURRENCE_EXCEPTION_ENTRY = """ + + http://www.google.com/calendar/feeds/default/private/composite/i7lgfj69mjqjgnodklif3vbm7g + 2007-04-05T21:51:49.000Z + 2007-04-05T21:51:49.000Z + + testDavid + + + + + + gdata ops + gdata.ops.test@gmail.com + + + + + + + + + + DTSTART;TZID=America/Anchorage:20070403T100000 + DTEND;TZID=America/Anchorage:20070403T110000 + RRULE:FREQ=DAILY;UNTIL=20070408T180000Z;WKST=SU + EXDATE;TZID=America/Anchorage:20070407T100000 + EXDATE;TZID=America/Anchorage:20070405T100000 + EXDATE;TZID=America/Anchorage:20070404T100000 BEGIN:VTIMEZONE + TZID:America/Anchorage X-LIC-LOCATION:America/Anchorage + BEGIN:STANDARD TZOFFSETFROM:-0800 TZOFFSETTO:-0900 TZNAME:AKST + DTSTART:19701025T020000 RRULE:FREQ=YEARLY;BYMONTH=10;BYDAY=-1SU + END:STANDARD BEGIN:DAYLIGHT TZOFFSETFROM:-0900 TZOFFSETTO:-0800 + TZNAME:AKDT DTSTART:19700405T020000 + RRULE:FREQ=YEARLY;BYMONTH=4;BYDAY=1SU END:DAYLIGHT + END:VTIMEZONE + + + + + + i7lgfj69mjqjgnodklif3vbm7g_20070407T180000Z + 2007-04-05T21:51:49.000Z + 2007-04-05T21:52:58.000Z + + testDavid + + + + gdata ops + gdata.ops.test@gmail.com + + + + + + + + + + + + + + + + + + + 2007-04-05T21:54:09.285Z + + + Comments for: testDavid + + + + + + + + + + + + """ + +NICK_ENTRY = """ + + https://apps-apis.google.com/a/feeds/example.com/nickname/2.0/Foo + 1970-01-01T00:00:00.000Z + + Foo + + + + +""" + +NICK_FEED = """ + + + http://apps-apis.google.com/a/feeds/example.com/nickname/2.0 + + 1970-01-01T00:00:00.000Z + + Nicknames for user SusanJones + + + + 1 + 2 + + + http://apps-apis.google.com/a/feeds/example.com/nickname/2.0/Foo + + + Foo + + + + + + + + http://apps-apis.google.com/a/feeds/example.com/nickname/2.0/suse + + + suse + + + + + +""" + +USER_ENTRY = """ + + https://apps-apis.google.com/a/feeds/example.com/user/2.0/TestUser + 1970-01-01T00:00:00.000Z + + TestUser + + + + + + + +""" + +USER_FEED = """ + + + http://apps-apis.google.com/a/feeds/example.com/user/2.0 + + 1970-01-01T00:00:00.000Z + + Users + """ + +EMAIL_LIST_ENTRY = """ + + + https://apps-apis.google.com/a/feeds/example.com/emailList/2.0/testlist + + 1970-01-01T00:00:00.000Z 
+ + testlist + + + + +""" + +EMAIL_LIST_FEED = """ + + + http://apps-apis.google.com/a/feeds/example.com/emailList/2.0 + + 1970-01-01T00:00:00.000Z + + EmailLists + """ + +EMAIL_LIST_RECIPIENT_ENTRY = """ + + https://apps-apis.google.com/a/feeds/example.com/emailList/2.0/us-sales/recipient/TestUser%40example.com + 1970-01-01T00:00:00.000Z + + TestUser + + + +""" + +EMAIL_LIST_RECIPIENT_FEED = """ + + + http://apps-apis.google.com/a/feeds/example.com/emailList/2.0/us-sales/recipient + + 1970-01-01T00:00:00.000Z + + Recipients for email list us-sales + """ + +ACL_FEED = """ + + http://www.google.com/calendar/feeds/liz%40gmail.com/acl/full + 2007-04-21T00:52:04.000Z + Elizabeth Bennet's access control list + + + + + + + + + Google Calendar + 2 + 1 + + http://www.google.com/calendar/feeds/liz%40gmail.com/acl/full/user%3Aliz%40gmail.com + 2007-04-21T00:52:04.000Z + + + owner + + + + + + + Elizabeth Bennet + liz@gmail.com + + + + + + + http://www.google.com/calendar/feeds/liz%40gmail.com/acl/full/default + 2007-04-21T00:52:04.000Z + + + read + + + + + + + Elizabeth Bennet + liz@gmail.com + + + + + + """ + +ACL_ENTRY = """ + + http://www.google.com/calendar/feeds/liz%40gmail.com/acl/full/user%3Aliz%40gmail.com + 2007-04-21T00:52:04.000Z + + + owner + + + + + + + Elizabeth Bennet + liz@gmail.com + + + + + """ + +DOCUMENT_LIST_FEED = """ +21test.usertest.user@gmail.comhttps://docs.google.com/feeds/documents/private/full/spreadsheet%3AsupercalifragilisticexpeadociousTest Spreadsheet2007-07-03T18:03:32.045Z + +document:dfrkj84g_3348jbxpxcd + + test.user + test.user@gmail.com + +2009-03-05T07:48:21.493Z + +test.usertest.user@gmail.comhttp://docs.google.com/feeds/documents/private/full/document%3Agr00vyTest Document2007-07-03T18:02:50.338Z + + + test.user + test.user@gmail.com + + + 2009-03-05T07:48:21.493Z +http://docs.google.com/feeds/documents/private/fullAvailable +Documents - +test.user@gmail.com2007-07-09T23:07:21.898Z + +""" + +DOCUMENT_LIST_ENTRY = """ + +test.usertest.user@gmail.com +https://docs.google.com/feeds/documents/private/full/spreadsheet%3Asupercalifragilisticexpealidocious + +Test Spreadsheet2007-07-03T18:03:32.045Z +spreadsheet:supercalifragilisticexpealidocious + + test.user + test.user@gmail.com + +2009-03-05T07:48:21.493Z + + +""" + +DOCUMENT_LIST_ENTRY_V3 = """ + +test.usertest.user@gmail.com +https://docs.google.com/feeds/documents/private/full/spreadsheet%3Asupercalifragilisticexpealidocious + + +Test Spreadsheet2007-07-03T18:03:32.045Z +spreadsheet:supercalifragilisticexpealidocious + + test.user + test.user@gmail.com + +2009-03-05T07:48:21.493Z + +1000 + + + + + + + + + + + +""" + +DOCUMENT_LIST_ACL_ENTRY = """ + + + + +""" + +DOCUMENT_LIST_ACL_WITHKEY_ENTRY = """ + + + + +""" + +DOCUMENT_LIST_ACL_FEED = """ + +http://docs.google.com/feeds/acl/private/full/spreadsheet%3ApFrmMi8feTQYCgZpwUQ +2009-02-22T03:48:25.895Z + +Document Permissions + + + + +2 +1 + + http://docs.google.com/feeds/acl/private/full/spreadsheet%3ApFrmMi8feTQp4pwUwUQ/user%3Auser%40gmail.com + 2009-02-22T03:48:25.896Z + + Document Permission - user@gmail.com + + + + + + + http://docs.google.com/feeds/acl/private/full/spreadsheet%3ApFrmMi8fCgZp4pwUwUQ/user%3Auser2%40google.com + 2009-02-22T03:48:26.257Z + + Document Permission - user2@google.com + + + + + +""" + +DOCUMENT_LIST_REVISION_FEED = """ + +https://docs.google.com/feeds/default/private/full/resource_id/revisions +2009-08-17T04:22:10.378Z +Document Revisions + + + +6 +1 + + https://docs.google.com/feeds/id/resource_id/revisions/2 + 
2009-08-17T04:22:10.440Z + 2009-08-14T07:11:34.197Z + Revision 2 + + + + + + another_user + another_user@gmail.com + + + + + + +""" + +BATCH_ENTRY = """ + + http://www.google.com/base/feeds/items/2173859253842813008 + 2006-07-11T14:51:43.560Z + 2006-07-11T14:51: 43.560Z + title + content + + + recipes + + itemB + +""" + +BATCH_FEED_REQUEST = """ + + My Batch Feed + + http://www.google.com/base/feeds/items/13308004346459454600 + + + + http://www.google.com/base/feeds/items/17437536661927313949 + + + + ... + ... + itemA + + recipes + + + ... + ... + itemB + + recipes + +""" + +BATCH_FEED_RESULT = """ + + http://www.google.com/base/feeds/items + 2006-07-11T14:51:42.894Z + My Batch + + + + + http://www.google.com/base/feeds/items/2173859253842813008 + 2006-07-11T14:51:43.560Z + 2006-07-11T14:51: 43.560Z + ... + ... + + + recipes + + itemB + + + + http://www.google.com/base/feeds/items/11974645606383737963 + 2006-07-11T14:51:43.247Z + 2006-07-11T14:51: 43.247Z + ... + ... + + + recipes + + itemA + + + + http://www.google.com/base/feeds/items/13308004346459454600 + 2006-07-11T14:51:42.894Z + Error + Bad request + + + + + + + + http://www.google.com/base/feeds/items/17437536661927313949 + 2006-07-11T14:51:43.246Z + Deleted + + + +""" + +ALBUM_FEED = """ + + http://picasaweb.google.com/data/feed/api/user/sample.user/albumid/1 + 2007-09-21T18:23:05.000Z + + Test + + public + http://lh6.google.com/sample.user/Rt8WNoDZEJE/AAAAAAAAABk/HQGlDhpIgWo/s160-c/Test.jpg + + + + + + sample + http://picasaweb.google.com/sample.user + + Picasaweb 4 + 1 + 500 + 1 + Test + + public 1188975600000 + 2 + sample.user + sample + true + 0 + http://picasaweb.google.com/data/entry/api/user/sample.user/albumid/1/photoid/2 + 2007-09-05T20:49:23.000Z + 2007-09-21T18:23:05.000Z + + Aqua Blue.jpg + Blue + + + + 2 + 1190398985145172 + 0.0 + 1 2560 + 1600 + 883405 + + + 1189025362000 + true + c041ce17aaa637eb656c81d9cf526c24 + + true + 1 + + Aqua Blue.jpg Blue + tag, test + + + + + sample + + + + http://picasaweb.google.com/data/entry/api/user/sample.user/albumid/1/photoid/3 + 2007-09-05T20:49:24.000Z + 2007-09-21T18:19:38.000Z + + Aqua Graphite.jpg + Gray + + + + + 3 + 1190398778006402 + 1.0 + 1 + 2560 + 1600 + 798334 + + + 1189025363000 + + true + a5ce2e36b9df7d3cb081511c72e73926 + + true + 0 + + Aqua Graphite.jpg + Gray + + + + + + sample + + + + http://picasaweb.google.com/data/entry/api/user/sample.user/albumid/1/tag/tag + 2007-09-05T20:49:24.000Z + + tag + tag + + + + sample + http://picasaweb.google.com/sample.user + + + + http://picasaweb.google.com/data/entry/api/user/sample.user/albumid/1/tag/test + 2007-09-05T20:49:24.000Z + + test + test + + + + sample + http://picasaweb.google.com/sample.user + + +""" + +CODE_SEARCH_FEED = """ + +http://www.google.com/codesearch/feeds/search?q=malloc +2007-12-19T16:08:04Z +Google Code Search +Google Code Search +2530000 +1 + +Google Code Search + +http://www.google.com/codesearch + + + + + +http://www.google.com/codesearch?hl=en&q=+malloc+show:LDjwp-Iqc7U:84hEYaYsZk8:xDGReDhvNi0&sa=N&ct=rx&cd=1&cs_p=http://www.gnu.org&cs_f=software/autoconf/manual/autoconf-2.60/autoconf.html-002&cs_p=http://www.gnu.org&cs_f=software/autoconf/manual/autoconf-2.60/autoconf.html-002#first2007-12-19T16:08:04ZCode owned by external author.software/autoconf/manual/autoconf-2.60/autoconf.html<pre> 8: void *<b>malloc</b> (); + + +</pre><pre> #undef <b>malloc</b> +</pre><pre> void *<b>malloc</b> (); + +</pre><pre> rpl_<b>malloc</b> (size_t n) +</pre><pre> return <b>malloc</b> (n); + +</pre> 
+http://www.google.com/codesearch?hl=en&q=+malloc+show:h4hfh-fV-jI:niBq_bwWZNs:H0OhClf0HWQ&sa=N&ct=rx&cd=2&cs_p=ftp://ftp.gnu.org/gnu/guile/guile-1.6.8.tar.gz&cs_f=guile-1.6.8/libguile/mallocs.c&cs_p=ftp://ftp.gnu.org/gnu/guile/guile-1.6.8.tar.gz&cs_f=guile-1.6.8/libguile/mallocs.c#first2007-12-19T16:08:04ZCode owned by external author.guile-1.6.8/libguile/mallocs.c<pre> 86: { + scm_t_bits mem = n ? (scm_t_bits) <b>malloc</b> (n) : 0; + if (n &amp;&amp; !mem) + +</pre><pre>#include &lt;<b>malloc</b>.h&gt; +</pre><pre>scm_t_bits scm_tc16_<b>malloc</b>; + +</pre><pre><b>malloc</b>_free (SCM ptr) +</pre><pre><b>malloc</b>_print (SCM exp, SCM port, scm_print_state *pstate SCM_UNUSED) + +</pre><pre> scm_puts(&quot;#&lt;<b>malloc</b> &quot;, port); +</pre><pre> scm_t_bits mem = n ? (scm_t_bits) <b>malloc</b> (n) : 0; + +</pre><pre> SCM_RETURN_NEWSMOB (scm_tc16_<b>malloc</b>, mem); +</pre><pre> scm_tc16_<b>malloc</b> = scm_make_smob_type (&quot;<b>malloc</b>&quot;, 0); + +</pre><pre> scm_set_smob_free (scm_tc16_<b>malloc</b>, <b>malloc</b>_free); +</pre>GPL + +http://www.google.com/codesearch?hl=en&q=+malloc+show:9wyZUG-N_30:7_dFxoC1ZrY:C0_iYbFj90M&sa=N&ct=rx&cd=3&cs_p=http://ftp.gnu.org/gnu/bash/bash-3.0.tar.gz&cs_f=bash-3.0/lib/malloc/alloca.c&cs_p=http://ftp.gnu.org/gnu/bash/bash-3.0.tar.gz&cs_f=bash-3.0/lib/malloc/alloca.c#first2007-12-19T16:08:04ZCode owned by external author.bash-3.0/lib/malloc/alloca.c<pre> 78: #ifndef emacs + #define <b>malloc</b> x<b>malloc</b> + extern pointer x<b>malloc</b> (); + +</pre><pre> <b>malloc</b>. The Emacs executable needs alloca to call x<b>malloc</b>, because +</pre><pre> ordinary <b>malloc</b> isn&#39;t protected from input signals. On the other + +</pre><pre> hand, the utilities in lib-src need alloca to call <b>malloc</b>; some of +</pre><pre> them are very simple, and don&#39;t have an x<b>malloc</b> routine. + +</pre><pre> Callers below should use <b>malloc</b>. */ +</pre><pre>#define <b>malloc</b> x<b>malloc</b> + +</pre><pre>extern pointer x<b>malloc</b> (); +</pre><pre> It is very important that sizeof(header) agree with <b>malloc</b> + +</pre><pre> register pointer new = <b>malloc</b> (sizeof (header) + size); +</pre>GPL +http://www.google.com/codesearch?hl=en&q=+malloc+show:uhVCKyPcT6k:8juMxxzmUJw:H7_IDsTB2L4&sa=N&ct=rx&cd=4&cs_p=http://ftp.mozilla.org/pub/mozilla.org/mozilla/releases/mozilla1.7b/src/mozilla-source-1.7b-source.tar.bz2&cs_f=mozilla/xpcom/build/malloc.c&cs_p=http://ftp.mozilla.org/pub/mozilla.org/mozilla/releases/mozilla1.7b/src/mozilla-source-1.7b-source.tar.bz2&cs_f=mozilla/xpcom/build/malloc.c#first2007-12-19T16:08:04ZCode owned by external author.mozilla/xpcom/build/malloc.c<pre> 54: http://gee.cs.oswego.edu/dl/html/<b>malloc</b>.html + + You may already by default be using a c library containing a <b>malloc</b> + +</pre><pre>/* ---------- To make a <b>malloc</b>.h, start cutting here ------------ */ +</pre><pre> Note: There may be an updated version of this <b>malloc</b> obtainable at + +</pre><pre> ftp://gee.cs.oswego.edu/pub/misc/<b>malloc</b>.c +</pre><pre>* Why use this <b>malloc</b>? + +</pre><pre> most tunable <b>malloc</b> ever written. However it is among the fastest +</pre><pre> allocator for <b>malloc</b>-intensive programs. 
+ +</pre><pre> http://gee.cs.oswego.edu/dl/html/<b>malloc</b>.html +</pre><pre> You may already by default be using a c library containing a <b>malloc</b> + +</pre><pre> that is somehow based on some version of this <b>malloc</b> (for example in +</pre>Mozilla +http://www.google.com/codesearch?hl=en&q=+malloc+show:4n1P2HVOISs:Ybbpph0wR2M:OhIN_sDrG0U&sa=N&ct=rx&cd=5&cs_p=http://regexps.srparish.net/src/hackerlab/hackerlab-1.0pre2.tar.gz&cs_f=hackerlab-1.0pre2/src/hackerlab/tests/mem-tests/unit-must-malloc.sh&cs_p=http://regexps.srparish.net/src/hackerlab/hackerlab-1.0pre2.tar.gz&cs_f=hackerlab-1.0pre2/src/hackerlab/tests/mem-tests/unit-must-malloc.sh#first2007-12-19T16:08:04ZCode owned by external author.hackerlab-1.0pre2/src/hackerlab/tests/mem-tests/unit-must-malloc.sh<pre> 11: echo ================ unit-must-<b>malloc</b> tests ================ + ./unit-must-<b>malloc</b> + echo ...passed + +</pre><pre># tag: Tom Lord Tue Dec 4 14:54:29 2001 (mem-tests/unit-must-<b>malloc</b>.sh) +</pre><pre>echo ================ unit-must-<b>malloc</b> tests ================ + +</pre><pre>./unit-must-<b>malloc</b> +</pre>GPL +http://www.google.com/codesearch?hl=en&q=+malloc+show:GzkwiWG266M:ykuz3bG00ws:2sTvVSif08g&sa=N&ct=rx&cd=6&cs_p=http://ftp.gnu.org/gnu/tar/tar-1.14.tar.bz2&cs_f=tar-1.14/lib/malloc.c&cs_p=http://ftp.gnu.org/gnu/tar/tar-1.14.tar.bz2&cs_f=tar-1.14/lib/malloc.c#first2007-12-19T16:08:04ZCode owned by external author.tar-1.14/lib/malloc.c<pre> 22: #endif + #undef <b>malloc</b> + + +</pre><pre>/* Work around bug on some systems where <b>malloc</b> (0) fails. +</pre><pre>#undef <b>malloc</b> + +</pre><pre>rpl_<b>malloc</b> (size_t n) +</pre><pre> return <b>malloc</b> (n); + +</pre>GPL +http://www.google.com/codesearch?hl=en&q=+malloc+show:o_TFIeBY6dY:ktI_dt8wPao:AI03BD1Dz0Y&sa=N&ct=rx&cd=7&cs_p=http://ftp.gnu.org/gnu/tar/tar-1.16.1.tar.gz&cs_f=tar-1.16.1/lib/malloc.c&cs_p=http://ftp.gnu.org/gnu/tar/tar-1.16.1.tar.gz&cs_f=tar-1.16.1/lib/malloc.c#first2007-12-19T16:08:04ZCode owned by external author.tar-1.16.1/lib/malloc.c<pre> 21: #include &lt;config.h&gt; + #undef <b>malloc</b> + + +</pre><pre>/* <b>malloc</b>() function that is glibc compatible. +</pre><pre>#undef <b>malloc</b> + +</pre><pre>rpl_<b>malloc</b> (size_t n) +</pre><pre> return <b>malloc</b> (n); + +</pre>GPL +http://www.google.com/codesearch?hl=en&q=+malloc+show:_ibw-VLkMoI:jBOtIJSmFd4:-0NUEVeCwfY&sa=N&ct=rx&cd=8&cs_p=http://freshmeat.net/redir/uclibc/20616/url_bz2/uClibc-0.9.28.1.tar.bz2&cs_f=uClibc-0.9.29/include/malloc.h&cs_p=http://freshmeat.net/redir/uclibc/20616/url_bz2/uClibc-0.9.28.1.tar.bz2&cs_f=uClibc-0.9.29/include/malloc.h#first2007-12-19T16:08:04ZCode owned by external author.uClibc-0.9.29/include/malloc.h<pre> 1: /* Prototypes and definition for <b>malloc</b> implementation. + Copyright (C) 1996, 1997, 1999, 2000 Free Software Foundation, Inc. + +</pre><pre>/* Prototypes and definition for <b>malloc</b> implementation. +</pre><pre> `pt<b>malloc</b>&#39;, a <b>malloc</b> implementation for multiple threads without + +</pre><pre> See the files `pt<b>malloc</b>.c&#39; or `COPYRIGHT&#39; for copying conditions. +</pre><pre> This work is mainly derived from <b>malloc</b>-2.6.4 by Doug Lea + +</pre><pre> ftp://g.oswego.edu/pub/misc/<b>malloc</b>.c +</pre><pre> `pt<b>malloc</b>.c&#39;. 
+ +</pre><pre># define __<b>malloc</b>_ptr_t void * +</pre><pre># define __<b>malloc</b>_ptr_t char * + +</pre><pre># define __<b>malloc</b>_size_t size_t +</pre>LGPL +http://www.google.com/codesearch?hl=en&q=+malloc+show:F6qHcZ9vefo:bTX7o9gKfks:hECF4r_eKC0&sa=N&ct=rx&cd=9&cs_p=http://ftp.gnu.org/gnu/glibc/glibc-2.0.1.tar.gz&cs_f=glibc-2.0.1/hurd/hurdmalloc.h&cs_p=http://ftp.gnu.org/gnu/glibc/glibc-2.0.1.tar.gz&cs_f=glibc-2.0.1/hurd/hurdmalloc.h#first2007-12-19T16:08:04ZCode owned by external author.glibc-2.0.1/hurd/hurdmalloc.h<pre> 15: #define <b>malloc</b> _hurd_<b>malloc</b> + #define realloc _hurd_realloc + +</pre><pre> All hurd-internal code which uses <b>malloc</b> et al includes this file so it +</pre><pre> will use the internal <b>malloc</b> routines _hurd_{<b>malloc</b>,realloc,free} + +</pre><pre> of <b>malloc</b> et al is the unixoid one using sbrk. +</pre><pre>extern void *_hurd_<b>malloc</b> (size_t); + +</pre><pre>#define <b>malloc</b> _hurd_<b>malloc</b> +</pre>GPL + +http://www.google.com/codesearch?hl=en&q=+malloc+show:CHUvHYzyLc8:pdcAfzDA6lY:wjofHuNLTHg&sa=N&ct=rx&cd=10&cs_p=ftp://apache.mirrors.pair.com/httpd/httpd-2.2.4.tar.bz2&cs_f=httpd-2.2.4/srclib/apr/include/arch/netware/apr_private.h&cs_p=ftp://apache.mirrors.pair.com/httpd/httpd-2.2.4.tar.bz2&cs_f=httpd-2.2.4/srclib/apr/include/arch/netware/apr_private.h#first2007-12-19T16:08:04ZCode owned by external author.httpd-2.2.4/srclib/apr/include/arch/netware/apr_private.h<pre> 173: #undef <b>malloc</b> + #define <b>malloc</b>(x) library_<b>malloc</b>(gLibHandle,x) + +</pre><pre>/* Redefine <b>malloc</b> to use the library <b>malloc</b> call so +</pre><pre>#undef <b>malloc</b> + +</pre><pre>#define <b>malloc</b>(x) library_<b>malloc</b>(gLibHandle,x) +</pre>Apache + +""" + +YOUTUBE_VIDEO_FEED = """http://gdata.youtube.com/feeds/api/standardfeeds/top_rated2008-05-14T02:24:07.000-07:00Top Ratedhttp://www.youtube.com/img/pic_youtubelogo_123x63.gifYouTubehttp://www.youtube.com/YouTube data API100125 +http://gdata.youtube.com/feeds/api/videos/C71ypXYGho82008-03-20T10:17:27.000-07:002008-05-14T04:26:37.000-07:00Me odeio por te amar - KARYN GARCIAhttp://www.karyngarcia.com.brTvKarynGarciahttp://gdata.youtube.com/feeds/api/users/tvkaryngarciaMe odeio por te amar - KARYN GARCIAhttp://www.karyngarcia.com.bramar, boyfriend, garcia, karyn, me, odeio, por, teMusictest111test222 +http://gdata.youtube.com/feeds/api/videos/gsVaTyb1tBw2008-02-15T04:31:45.000-08:002008-05-14T05:09:42.000-07:00extreme helmet cam Kani, Keil and Patotrimmedperaltamagichttp://gdata.youtube.com/feeds/api/users/peraltamagicextreme helmet cam Kani, Keil and Patotrimmedalcala, cam, campillo, dirt, extreme, helmet, kani, patoSports +""" + +YOUTUBE_ENTRY_PRIVATE = """ + + http://gdata.youtube.com/feeds/videos/UMFI1hdm96E + 2007-01-07T01:50:15.000Z + 2007-01-07T01:50:15.000Z + + + + + + + + + "Crazy (Gnarles Barkley)" - Acoustic Cover + <div style="color: #000000;font-family: + Arial, Helvetica, sans-serif; font-size:12px; font-size: 12px; + width: 555px;"><table cellspacing="0" cellpadding="0" + border="0"><tbody><tr><td width="140" + valign="top" rowspan="2"><div style="border: 1px solid + #999999; margin: 0px 10px 5px 0px;"><a + href="http://www.youtube.com/watch?v=UMFI1hdm96E"><img + alt="" + src="http://img.youtube.com/vi/UMFI1hdm96E/2.jpg"></a></div></td> + <td width="256" valign="top"><div style="font-size: + 12px; font-weight: bold;"><a style="font-size: 15px; + font-weight: bold; font-decoration: none;" + 
href="http://www.youtube.com/watch?v=UMFI1hdm96E">&quot;Crazy + (Gnarles Barkley)&quot; - Acoustic Cover</a> + <br></div> <div style="font-size: 12px; margin: + 3px 0px;"><span>Gnarles Barkley acoustic cover + http://www.myspace.com/davidchoimusic</span></div></td> + <td style="font-size: 11px; line-height: 1.4em; padding-left: + 20px; padding-top: 1px;" width="146" + valign="top"><div><span style="color: #666666; + font-size: 11px;">From:</span> <a + href="http://www.youtube.com/profile?user=davidchoimusic">davidchoimusic</a></div> + <div><span style="color: #666666; font-size: + 11px;">Views:</span> 113321</div> <div + style="white-space: nowrap;text-align: left"><img + style="border: 0px none; margin: 0px; padding: 0px; + vertical-align: middle; font-size: 11px;" align="top" alt="" + src="http://gdata.youtube.com/static/images/icn_star_full_11x11.gif"> + <img style="border: 0px none; margin: 0px; padding: 0px; + vertical-align: middle; font-size: 11px;" align="top" alt="" + src="http://gdata.youtube.com/static/images/icn_star_full_11x11.gif"> + <img style="border: 0px none; margin: 0px; padding: 0px; + vertical-align: middle; font-size: 11px;" align="top" alt="" + src="http://gdata.youtube.com/static/images/icn_star_full_11x11.gif"> + <img style="border: 0px none; margin: 0px; padding: 0px; + vertical-align: middle; font-size: 11px;" align="top" alt="" + src="http://gdata.youtube.com/static/images/icn_star_full_11x11.gif"> + <img style="border: 0px none; margin: 0px; padding: 0px; + vertical-align: middle; font-size: 11px;" align="top" alt="" + src="http://gdata.youtube.com/static/images/icn_star_half_11x11.gif"></div> + <div style="font-size: 11px;">1005 <span style="color: + #666666; font-size: + 11px;">ratings</span></div></td></tr> + <tr><td><span style="color: #666666; font-size: + 11px;">Time:</span> <span style="color: #000000; + font-size: 11px; font-weight: + bold;">04:15</span></td> <td style="font-size: + 11px; padding-left: 20px;"><span style="color: #666666; + font-size: 11px;">More in</span> <a + href="http://www.youtube.com/categories_portal?c=10">Music</a></td></tr></tbody></table></div> + + + + + + davidchoimusic + http://gdata.youtube.com/feeds/users/davidchoimusic + + + "Crazy (Gnarles Barkley)" - Acoustic Cover + Gnarles Barkley acoustic cover http://www.myspace.com/davidchoimusic + music, singing, gnarls, barkley, acoustic, cover + + + Music + + DeveloperTag1 + + + + + + + + + + + + + 37.398529052734375 -122.0635986328125 + + + + + + + + yes + + The content of this video may violate the terms of use. 
+ +""" + +YOUTUBE_COMMENT_FEED = """ +http://gdata.youtube.com/feeds/videos/2Idhz9ef5oU/comments2008-05-19T21:45:45.261ZCommentshttp://www.youtube.com/img/pic_youtubelogo_123x63.gifYouTubehttp://www.youtube.com/YouTube data API0125 + + http://gdata.youtube.com/feeds/videos/2Idhz9ef5oU/comments/91F809A3DE2EB81B + 2008-02-22T15:27:15.000-08:002008-02-22T15:27:15.000-08:00 + + test66 + test66 + + + + apitestjhartmannhttp://gdata.youtube.com/feeds/users/apitestjhartmann + + + http://gdata.youtube.com/feeds/videos/2Idhz9ef5oU/comments/A261AEEFD23674AA + 2008-02-22T15:27:01.000-08:002008-02-22T15:27:01.000-08:00 + + test333 + test333 + + + + apitestjhartmannhttp://gdata.youtube.com/feeds/users/apitestjhartmann + + + http://gdata.youtube.com/feeds/videos/2Idhz9ef5oU/comments/0DCF1E3531B3FF85 + 2008-02-22T15:11:06.000-08:002008-02-22T15:11:06.000-08:00 + + test2 + test2 + + + + apitestjhartmannhttp://gdata.youtube.com/feeds/users/apitestjhartmann + +""" + +YOUTUBE_PLAYLIST_FEED = """ + + http://gdata.youtube.com/feeds/users/andyland74/playlists?start-index=1&max-results=25 + 2008-02-26T00:26:15.635Z + + andyland74's Playlists + http://www.youtube.com/img/pic_youtubelogo_123x63.gif + + + + + + andyland74 + http://gdata.youtube.com/feeds/users/andyland74 + + YouTube data API + 1 + 1 + 25 + + My new playlist Description + + http://gdata.youtube.com/feeds/users/andyland74/playlists/8BCDD04DE8F771B2 + 2007-11-04T17:30:27.000-08:00 + 2008-02-22T09:55:14.000-08:00 + + My New Playlist Title + My new playlist Description + + + + + andyland74 + http://gdata.youtube.com/feeds/users/andyland74 + + +""" + +YOUTUBE_PLAYLIST_VIDEO_FEED = """http://gdata.youtube.com/feeds/api/playlists/BCB3BB96DF51B5052008-05-16T12:03:17.000-07:00Test PlaylistTest playlist 1http://www.youtube.com/img/pic_youtubelogo_123x63.gifgdpythonhttp://gdata.youtube.com/feeds/api/users/gdpythonYouTube data API1125Test PlaylistTest playlist 1http://gdata.youtube.com/feeds/api/playlists/BCB3BB96DF51B505/B0F29389E537F8882008-05-16T20:54:08.520ZUploading YouTube Videos with the PHP Client LibraryJochen Hartmann demonstrates the basics of how to use the PHP Client Library with the YouTube Data API. + +PHP Developer's Guide: +http://code.google.com/apis/youtube/developers_guide_php.html + +Other documentation: +http://code.google.com/apis/youtube/GoogleDevelopershttp://gdata.youtube.com/feeds/api/users/googledevelopersUploading YouTube Videos with the PHP Client LibraryJochen Hartmann demonstrates the basics of how to use the PHP Client Library with the YouTube Data API. 
+ +PHP Developer's Guide: +http://code.google.com/apis/youtube/developers_guide_php.html + +Other documentation: +http://code.google.com/apis/youtube/api, data, demo, php, screencast, tutorial, uploading, walkthrough, youtubeEducationundefined1""" + +YOUTUBE_SUBSCRIPTION_FEED = """ + + http://gdata.youtube.com/feeds/users/andyland74/subscriptions?start-index=1&max-results=25 + 2008-02-26T00:26:15.635Z + + andyland74's Subscriptions + http://www.youtube.com/img/pic_youtubelogo_123x63.gif + + + + + + andyland74 + http://gdata.youtube.com/feeds/users/andyland74 + + YouTube data API + 1 + 1 + 25 + + http://gdata.youtube.com/feeds/users/andyland74/subscriptions/d411759045e2ad8c + 2007-11-04T17:30:27.000-08:00 + 2008-02-22T09:55:14.000-08:00 + + + Videos published by : NBC + + + + + andyland74 + http://gdata.youtube.com/feeds/users/andyland74 + + NBC + + +""" + +YOUTUBE_VIDEO_RESPONSE_FEED = """ + + http://gdata.youtube.com/feeds/videos/2c3q9K4cHzY/responses2008-05-19T22:37:34.076ZVideos responses to 'Giant NES controller coffee table'http://www.youtube.com/img/pic_youtubelogo_123x63.gifYouTubehttp://www.youtube.com/YouTube data API8125 + + http://gdata.youtube.com/feeds/videos/7b9EnRI9VbY2008-03-11T19:08:53.000-07:002008-05-18T21:33:10.000-07:00 + + + + + + + + + + + + Catnip Partysnipped + + + + + PismoBeachhttp://gdata.youtube.com/feeds/users/pismobeach + + Catnip Party + Uncle, Hillary, Hankette, and B4 all but overdose on the patioBrattman, cat, catmint, catnip, cats, chat, drug, gato, gatto, kat, kato, katt, Katze, kedi, kissa, OD, overdose, party, sex, Uncle + + Animals + + + + + + + + + + + + + + + + +""" + + +YOUTUBE_PROFILE = """ + + http://gdata.youtube.com/feeds/users/andyland74 + 2006-10-16T00:09:45.000-07:00 + 2008-02-26T11:48:21.000-08:00 + + + andyland74 Channel + + + + andyland74 + http://gdata.youtube.com/feeds/users/andyland74 + + 33 + andyland74 + andy + example + Catch-22 + m + Google + Testing YouTube APIs + Somewhere + US + Aqua Teen Hungerforce + Elliott Smith + Technical Writer + University of North Carolina + + + + + + + + +""" + +YOUTUBE_CONTACTS_FEED = """ + http://gdata.youtube.com/feeds/users/apitestjhartmann/contacts2008-05-16T19:24:34.916Zapitestjhartmann's Contactshttp://www.youtube.com/img/pic_youtubelogo_123x63.gifapitestjhartmannhttp://gdata.youtube.com/feeds/users/apitestjhartmannYouTube data API2125 + + http://gdata.youtube.com/feeds/users/apitestjhartmann/contacts/test898990902008-02-04T11:27:54.000-08:002008-05-16T19:24:34.916Ztest89899090apitestjhartmannhttp://gdata.youtube.com/feeds/users/apitestjhartmanntest89899090requested + + http://gdata.youtube.com/feeds/users/apitestjhartmann/contacts/testjfisher2008-02-26T14:13:03.000-08:002008-05-16T19:24:34.916Ztestjfisherapitestjhartmannhttp://gdata.youtube.com/feeds/users/apitestjhartmanntestjfisherpending +""" + +NEW_CONTACT = """ + + http://www.google.com/m8/feeds/contacts/liz%40gmail.com/base/8411573 + 2008-02-28T18:47:02.303Z + + Fitzgerald + Notes + + + + + (206)555-1212 + 456-123-2133 + (206)555-1213 + + + + + + + 1600 Amphitheatre Pkwy Mountain View +""" + +CONTACTS_FEED = """ + + http://www.google.com/m8/feeds/contacts/liz%40gmail.com/base + 2008-03-05T12:36:38.836Z + + Contacts + + + + + + Elizabeth Bennet + liz@gmail.com + + + Contacts + + 1 + 1 + 25 + + + http://www.google.com/m8/feeds/contacts/liz%40gmail.com/base/c9012de + + 2008-03-05T12:36:38.835Z + + Fitzgerald + + + + + + 456 + + + + +""" + + +CONTACT_GROUPS_FEED = """ + + jo@gmail.com + 2008-05-21T21:11:25.237Z + + Jo's Contact Groups + + + 
+ + + + Jo Brown + jo@gmail.com + + Contacts + 3 + 1 + 25 + + http://google.com/m8/feeds/groups/jo%40gmail.com/base/270f + 2008-05-14T13:10:19.070Z + + joggers + joggers + + + +""" + +CONTACT_GROUP_ENTRY = """ + + + http://www.google.com/feeds/groups/jo%40gmail.com/base/1234 + 2005-01-18T21:00:00Z + 2006-01-01T00:00:00Z + Salsa group + Salsa group + + + + Very nice people. + +""" + +CALENDAR_RESOURCE_ENTRY = """ + + + + + +""" + +CALENDAR_RESOURCES_FEED = """ + + https://apps-apis.google.com/a/feeds/calendar/resource/2.0/yourdomain.com + 2008-10-17T15:29:21.064Z + + + + + 1 + + https://apps-apis.google.com/a/feeds/calendar/resource/2.0/yourdomain.com/CR-NYC-14-12-BR + 2008-10-17T15:29:21.064Z + + + + + + + + + + https://apps-apis.google.com/a/feeds/calendar/resource/2.0/yourdomain.com/?start=(Bike)-London-43-Lobby-Bike-1 + 2008-10-17T15:29:21.064Z + + + + + + + + +""" + +BLOG_ENTRY = """ + tag:blogger.com,1999:blog-blogID.post-postID + 2006-08-02T18:44:43.089-07:00 + 2006-11-08T18:10:23.020-08:00 + Lizzy's Diary + Being the journal of Elizabeth Bennet + + + + + + + + + + + + Elizabeth Bennet + liz@gmail.com + +""" + +BLOG_POST = """ + Marriage! + +
    Mr. Darcy has proposed marriage to me!
    He is the last man on earth I would ever desire to marry.
    Whatever shall I do?
    + + Elizabeth Bennet + liz@gmail.com + +
    """ + +BLOG_POSTS_FEED = """ + tag:blogger.com,1999:blog-blogID + 2006-11-08T18:10:23.020-08:00 + Lizzy's Diary + + + + + + + + Elizabeth Bennet + liz@gmail.com + + Blogger + + tag:blogger.com,1999:blog-blogID.post-postID + 2006-11-08T18:10:00.000-08:00 + 2006-11-08T18:10:14.954-08:00 + Quite disagreeable + <p>I met Mr. Bingley's friend Mr. Darcy + this evening. I found him quite disagreeable.</p> + + + + + + + + Elizabeth Bennet + liz@gmail.com + + +""" + +BLOG_COMMENTS_FEED = """ + tag:blogger.com,1999:blog-blogID.postpostID..comments + 2007-04-04T21:56:29.803-07:00 + My Blog : Time to relax + + + + + Blog Author name + + Blogger + 1 + 1 + + tag:blogger.com,1999:blog-blogID.post-commentID + 2007-04-04T21:56:00.000-07:00 + 2007-04-04T21:56:29.803-07:00 + This is my first comment + This is my first comment + + + + + Blog Author name + + + +""" + + +SITES_FEED = """ + https://www.google.com/webmasters/tools/feeds/sites + Sites + 1 + + + + + 2008-10-02T07:26:51.833Z + + http://www.example.com + http://www.example.com + + + + 2007-11-17T18:27:32.543Z + + + + true + 2008-09-14T08:59:28.000 + US + none + normal + true + false + + + 456456-google.html + +""" + + +SITEMAPS_FEED = """ + http://www.example.com + http://www.example.com/ + 2006-11-17T18:27:32.543Z + + + + HTML + WAP + + + Value1 + Value2 + Value3 + + + http://www.example.com/sitemap-index.xml + http://www.example.com/sitemap-index.xml + + 2006-11-17T18:27:32.543Z + WEB + StatusValue + 2006-11-18T19:27:32.543Z + 102 + + + http://www.example.com/mobile/sitemap-index.xml + http://www.example.com/mobile/sitemap-index.xml + + 2006-11-17T18:27:32.543Z + StatusValue + 2006-11-18T19:27:32.543Z + 102 + HTML + + + http://www.example.com/news/sitemap-index.xml + http://www.example.com/news/sitemap-index.xml + + 2006-11-17T18:27:32.543Z + StatusValue + 2006-11-18T19:27:32.543Z + 102 + LabelValue + +""" + +HEALTH_CCR_NOTICE_PAYLOAD = """ + + + + + Start date + 2007-04-04T07:00:00Z + + + Aortic valve disorders + + 410.10 + ICD9 + 2004 + + + Active + + + +""" + +HEALTH_PROFILE_ENTRY_DIGEST = """ + + https://www.google.com/health/feeds/profile/default/vneCn5qdEIY_digest + 2008-09-29T07:52:17.176Z + + + + + + vneCn5qdEIY + + English + + en + ISO-639-1 + + + V1.0 + + 2008-09-29T07:52:17.176Z + + + Google Health Profile + + + + + + Pregnancy status + + + Not pregnant + + + + + user@google.com + + Patient + + + + + + + Breastfeeding status + + + Not breastfeeding + + + + + user@gmail.com + + Patient + + + + + + + + Hn0FE0IlcY-FMFFgSTxkvA/CONDITION/0 + + + Start date + + 2007-04-04T07:00:00Z + + + Aortic valve disorders + + 410.10 + ICD9 + 2004 + + + + Active + + + + example.com + + Information Provider + + + + + + + + Malaria + + 136.9 + ICD9_Broader + + + 084.6 + ICD9 + + + + ACTIVE + + + + user@gmail.com + + Patient + + + + + + + + + + + + Race + + S15814 + HL7 + + + + White + + + + + user@gmail.com + + Patient + + + + + + + + + + + + + + Allergy + + + A-Fil + + + ACTIVE + + + + user@gmail.com + + Patient + + + + + + + Severe + + + + + + Allergy + + + A.E.R Traveler + + + ACTIVE + + + + user@gmail.com + + Patient + + + + + + + Severe + + + + + + + + + + ACTIVE + + + + user@gmail.com + + Patient + + + + + + A& D + + + + 0 + + + + + + + + + + 0 + + + + To skin + + C38305 + FDA + + 0 + + + + + + + + + + + ACTIVE + + + + user@gmail.com + + Patient + + + + + + A-Fil + + + + 0 + + + + + + + + + + 0 + + + + To skin + + C38305 + FDA + + 0 + + + + + + + + + + + ACTIVE + + + + user@gmail.com + + Patient + + + + + + Lipitor + + + + 0 + + + + + + + + + + 0 + 
+ + + By mouth + + C38288 + FDA + + 0 + + + + + + + + + + + + + + + user@gmail.com + + Patient + + + + + + Chickenpox Vaccine + + 21 + HL7 + + + + + + + + + + + + + + + + + + + + user@gmail.com + + Patient + + + + + + + + Height + + + + 0 + + 70 + + inches + + + + + + + + + + + + user@gmail.com + + Patient + + + + + + + + Weight + + + + 0 + + 2480 + + ounces + + + + + + + + + + + + user@gmail.com + + Patient + + + + + + + + Blood Type + + + + 0 + + O+ + + + + + + + + + + + + + + user@gmail.com + + Patient + + + + + + + + Collection start date + + 2008-09-03 + + + + Acetaldehyde - Blood + + + + 0 + + + + + + + + + + + + Abdominal Ultrasound + + + + + user@gmail.com + + Patient + + + + + + + + Abdominoplasty + + + + + user@gmail.com + + Patient + + + + + + + + + Google Health Profile + + + + + + + + 1984-07-22 + + + Male + + + + + + user@gmail.com + + Patient + + + + + + +""" + +HEALTH_PROFILE_FEED = """ +https://www.google.com/health/feeds/profile/default +2008-09-30T01:07:17.888Z + +Profile Feed + + + + +1 + + https://www.google.com/health/feeds/profile/default/DysasdfARnFAao + 2008-09-29T03:12:50.850Z + 2008-09-29T03:12:50.850Z + + + + + <content type="html"/> + <link rel="http://schemas.google.com/health/data#complete" type="application/atom+xml" href="https://www.google.com/health/feeds/profile/default/-/MEDICATION/%7Bhttp%3A%2F%2Fschemas.google.com%2Fg%2F2005%23kind%7Dhttp%3A%2F%2Fschemas.google.com%2Fhealth%2Fkinds%23profile/%7Bhttp%3A%2F%2Fschemas.google.com%2Fhealth%2Fitem%7DA%26+D"/> + <link rel="self" type="application/atom+xml" href="https://www.google.com/health/feeds/profile/default/DysasdfARnFAao"/> + <link rel="edit" type="application/atom+xml" href="https://www.google.com/health/feeds/profile/default/DysasdfARnFAao"/> + <author> + <name>User Name</name> + <email>user@gmail.com</email> + </author> + <ContinuityOfCareRecord xmlns="urn:astm-org:CCR"> + <CCRDocumentObjectID>hiD9sEigSzdk8nNT0evR4g</CCRDocumentObjectID> + <Language/> + <DateTime> + <Type/> + </DateTime> + <Patient/> + <Body> + <Medications> + <Medication> + <Type/> + <Description/> + <Status> + <Text>ACTIVE</Text> + </Status> + <Source> + <Actor> + <ActorID>user@gmail.com</ActorID> + <ActorRole> + <Text>Patient</Text> + </ActorRole> + </Actor> + </Source> + <Product> + <ProductName> + <Text>A& D</Text> + </ProductName> + <Strength> + <Units/> + <StrengthSequencePosition>0</StrengthSequencePosition> + <VariableStrengthModifier/> + </Strength> + </Product> + <Directions> + <Direction> + <Description/> + <DeliveryMethod/> + <Dose> + <Units/> + <DoseSequencePosition>0</DoseSequencePosition> + <VariableDoseModifier/> + </Dose> + <Route> + <Text>To skin</Text> + <Code> + <Value>C38305</Value> + <CodingSystem>FDA</CodingSystem> + </Code> + <RouteSequencePosition>0</RouteSequencePosition> + <MultipleRouteModifier/> + </Route> + </Direction> + </Directions> + <Refills/> + </Medication> + </Medications> + </Body> + </ContinuityOfCareRecord> +</entry> +<entry> + <id>https://www.google.com/health/feeds/profile/default/7I1WQzZrgp4</id> + <published>2008-09-29T03:27:14.909Z</published> + <updated>2008-09-29T03:27:14.909Z</updated> + <category scheme="http://schemas.google.com/g/2005#kind" term="http://schemas.google.com/health/kinds#profile"/> + <category scheme="http://schemas.google.com/health/item" term="A-Fil"/> + <category term="ALLERGY"/> + <title type="text"/> + <content type="html"/> + <link rel="http://schemas.google.com/health/data#complete" type="application/atom+xml" 
href="https://www.google.com/health/feeds/profile/default/-/%7Bhttp%3A%2F%2Fschemas.google.com%2Fg%2F2005%23kind%7Dhttp%3A%2F%2Fschemas.google.com%2Fhealth%2Fkinds%23profile/%7Bhttp%3A%2F%2Fschemas.google.com%2Fhealth%2Fitem%7DA-Fil/ALLERGY"/> + <link rel="self" type="application/atom+xml" href="https://www.google.com/health/feeds/profile/default/7I1WQzZrgp4"/> + <link rel="edit" type="application/atom+xml" href="https://www.google.com/health/feeds/profile/default/7I1WQzZrgp4"/> + <author> + <name>User Name</name> + <email>user@gmail.com</email> + </author> + <ContinuityOfCareRecord xmlns="urn:astm-org:CCR"> + <CCRDocumentObjectID>YOyHDxQUiECCPgnsjV8SlQ</CCRDocumentObjectID> + <Language/> + <DateTime> + <Type/> + </DateTime> + <Patient/> + <Body> + <Alerts> + <Alert> + <Type> + <Text>Allergy</Text> + </Type> + <Description> + <Text>A-Fil</Text> + </Description> + <Status> + <Text>ACTIVE</Text> + </Status> + <Source> + <Actor> + <ActorID>user@gmail.com</ActorID> + <ActorRole> + <Text>Patient</Text> + </ActorRole> + </Actor> + </Source> + <Reaction> + <Description/> + <Severity> + <Text>Severe</Text> + </Severity> + </Reaction> + </Alert> + </Alerts> + </Body> + </ContinuityOfCareRecord> +</entry> +<entry> + <id>https://www.google.com/health/feeds/profile/default/Dz9wV83sKFg</id> + <published>2008-09-29T03:12:52.166Z</published> + <updated>2008-09-29T03:12:52.167Z</updated> + <category term="MEDICATION"/> + <category scheme="http://schemas.google.com/g/2005#kind" term="http://schemas.google.com/health/kinds#profile"/> + <category scheme="http://schemas.google.com/health/item" term="A-Fil"/> + <title type="text"/> + <content type="html"/> + <link rel="http://schemas.google.com/health/data#complete" type="application/atom+xml" href="https://www.google.com/health/feeds/profile/default/-/MEDICATION/%7Bhttp%3A%2F%2Fschemas.google.com%2Fg%2F2005%23kind%7Dhttp%3A%2F%2Fschemas.google.com%2Fhealth%2Fkinds%23profile/%7Bhttp%3A%2F%2Fschemas.google.com%2Fhealth%2Fitem%7DA-Fil"/> + <link rel="self" type="application/atom+xml" href="https://www.google.com/health/feeds/profile/default/Dz9wV83sKFg"/> + <link rel="edit" type="application/atom+xml" href="https://www.google.com/health/feeds/profile/default/Dz9wV83sKFg"/> + <author> + <name>User Name</name> + <email>user@gmail.com</email> + </author> + <ContinuityOfCareRecord xmlns="urn:astm-org:CCR"> + <CCRDocumentObjectID>7w.XFEPeuIYN3Rn32pUiUw</CCRDocumentObjectID> + <Language/> + <DateTime> + <Type/> + </DateTime> + <Patient/> + <Body> + <Medications> + <Medication> + <Type/> + <Description/> + <Status> + <Text>ACTIVE</Text> + </Status> + <Source> + <Actor> + <ActorID>user@gmail.com</ActorID> + <ActorRole> + <Text>Patient</Text> + </ActorRole> + </Actor> + </Source> + <Product> + <ProductName> + <Text>A-Fil</Text> + </ProductName> + <Strength> + <Units/> + <StrengthSequencePosition>0</StrengthSequencePosition> + <VariableStrengthModifier/> + </Strength> + </Product> + <Directions> + <Direction> + <Description/> + <DeliveryMethod/> + <Dose> + <Units/> + <DoseSequencePosition>0</DoseSequencePosition> + <VariableDoseModifier/> + </Dose> + <Route> + <Text>To skin</Text> + <Code> + <Value>C38305</Value> + <CodingSystem>FDA</CodingSystem> + </Code> + <RouteSequencePosition>0</RouteSequencePosition> + <MultipleRouteModifier/> + </Route> + </Direction> + </Directions> + <Refills/> + </Medication> + </Medications> + </Body> + </ContinuityOfCareRecord> +</entry> +<entry> + <id>https://www.google.com/health/feeds/profile/default/lzsxVzqZUyw</id> + 
<published>2008-09-29T03:13:07.496Z</published> + <updated>2008-09-29T03:13:07.497Z</updated> + <category scheme="http://schemas.google.com/health/item" term="A.E.R Traveler"/> + <category scheme="http://schemas.google.com/g/2005#kind" term="http://schemas.google.com/health/kinds#profile"/> + <category term="ALLERGY"/> + <title type="text"/> + <content type="html"/> + <link rel="http://schemas.google.com/health/data#complete" type="application/atom+xml" href="https://www.google.com/health/feeds/profile/default/-/%7Bhttp%3A%2F%2Fschemas.google.com%2Fhealth%2Fitem%7DA.E.R+Traveler/%7Bhttp%3A%2F%2Fschemas.google.com%2Fg%2F2005%23kind%7Dhttp%3A%2F%2Fschemas.google.com%2Fhealth%2Fkinds%23profile/ALLERGY"/> + <link rel="self" type="application/atom+xml" href="https://www.google.com/health/feeds/profile/default/lzsxVzqZUyw"/> + <link rel="edit" type="application/atom+xml" href="https://www.google.com/health/feeds/profile/default/lzsxVzqZUyw"/> + <author> + <name>User Name</name> + <email>user@gmail.com</email> + </author> + <ContinuityOfCareRecord xmlns="urn:astm-org:CCR"> + <CCRDocumentObjectID>5efFB0J2WgEHNUvk2z3A1A</CCRDocumentObjectID> + <Language/> + <DateTime> + <Type/> + </DateTime> + <Patient/> + <Body> + <Alerts> + <Alert> + <Type> + <Text>Allergy</Text> + </Type> + <Description> + <Text>A.E.R Traveler</Text> + </Description> + <Status> + <Text>ACTIVE</Text> + </Status> + <Source> + <Actor> + <ActorID>user@gmail.com</ActorID> + <ActorRole> + <Text>Patient</Text> + </ActorRole> + </Actor> + </Source> + <Reaction> + <Description/> + <Severity> + <Text>Severe</Text> + </Severity> + </Reaction> + </Alert> + </Alerts> + </Body> + </ContinuityOfCareRecord> +</entry> +<entry> + <id>https://www.google.com/health/feeds/profile/default/6PvhfKAXyYw</id> + <published>2008-09-29T03:13:02.123Z</published> + <updated>2008-09-29T03:13:02.124Z</updated> + <category scheme="http://schemas.google.com/g/2005#kind" term="http://schemas.google.com/health/kinds#profile"/> + <category term="PROCEDURE"/> + <category scheme="http://schemas.google.com/health/item" term="Abdominal Ultrasound"/> + <title type="text"/> + <content type="html"/> + <link rel="http://schemas.google.com/health/data#complete" type="application/atom+xml" href="https://www.google.com/health/feeds/profile/default/-/%7Bhttp%3A%2F%2Fschemas.google.com%2Fg%2F2005%23kind%7Dhttp%3A%2F%2Fschemas.google.com%2Fhealth%2Fkinds%23profile/PROCEDURE/%7Bhttp%3A%2F%2Fschemas.google.com%2Fhealth%2Fitem%7DAbdominal+Ultrasound"/> + <link rel="self" type="application/atom+xml" href="https://www.google.com/health/feeds/profile/default/6PvhfKAXyYw"/> + <link rel="edit" type="application/atom+xml" href="https://www.google.com/health/feeds/profile/default/6PvhfKAXyYw"/> + <author> + <name>User Name</name> + <email>user@gmail.com</email> + </author> + <ContinuityOfCareRecord xmlns="urn:astm-org:CCR"> + <CCRDocumentObjectID>W3Wbvx_QHwG5pxVchpuF1A</CCRDocumentObjectID> + <Language/> + <DateTime> + <Type/> + </DateTime> + <Patient/> + <Body> + <Procedures> + <Procedure> + <Type/> + <Description> + <Text>Abdominal Ultrasound</Text> + </Description> + <Status/> + <Source> + <Actor> + <ActorID>user@gmail.com</ActorID> + <ActorRole> + <Text>Patient</Text> + </ActorRole> + </Actor> + </Source> + </Procedure> + </Procedures> + </Body> + </ContinuityOfCareRecord> +</entry> +<entry> + <id>https://www.google.com/health/feeds/profile/default/r2zGPGewCeU</id> + <published>2008-09-29T03:13:03.434Z</published> + <updated>2008-09-29T03:13:03.435Z</updated> + <category 
scheme="http://schemas.google.com/health/item" term="Abdominoplasty"/> + <category scheme="http://schemas.google.com/g/2005#kind" term="http://schemas.google.com/health/kinds#profile"/> + <category term="PROCEDURE"/> + <title type="text"/> + <content type="html"/> + <link rel="http://schemas.google.com/health/data#complete" type="application/atom+xml" href="https://www.google.com/health/feeds/profile/default/-/%7Bhttp%3A%2F%2Fschemas.google.com%2Fhealth%2Fitem%7DAbdominoplasty/%7Bhttp%3A%2F%2Fschemas.google.com%2Fg%2F2005%23kind%7Dhttp%3A%2F%2Fschemas.google.com%2Fhealth%2Fkinds%23profile/PROCEDURE"/> + <link rel="self" type="application/atom+xml" href="https://www.google.com/health/feeds/profile/default/r2zGPGewCeU"/> + <link rel="edit" type="application/atom+xml" href="https://www.google.com/health/feeds/profile/default/r2zGPGewCeU"/> + <author> + <name>User Name</name> + <email>user@gmail.com</email> + </author> + <ContinuityOfCareRecord xmlns="urn:astm-org:CCR"> + <CCRDocumentObjectID>OUKgj5X0KMnbkC5sDL.yHA</CCRDocumentObjectID> + <Language/> + <DateTime> + <Type/> + </DateTime> + <Patient/> + <Body> + <Procedures> + <Procedure> + <Type/> + <Description> + <Text>Abdominoplasty</Text> + </Description> + <Status/> + <Source> + <Actor> + <ActorID>user@gmail.com</ActorID> + <ActorRole> + <Text>Patient</Text> + </ActorRole> + </Actor> + </Source> + </Procedure> + </Procedures> + </Body> + </ContinuityOfCareRecord> +</entry> +<entry> + <id>https://www.google.com/health/feeds/profile/default/_cCCbQ0O3ug</id> + <published>2008-09-29T03:13:29.041Z</published> + <updated>2008-09-29T03:13:29.042Z</updated> + <category scheme="http://schemas.google.com/g/2005#kind" term="http://schemas.google.com/health/kinds#profile"/> + <category scheme="http://schemas.google.com/health/item" term="Acetaldehyde - Blood"/> + <category term="LABTEST"/> + <title type="text"/> + <content type="html"/> + <link rel="http://schemas.google.com/health/data#complete" type="application/atom+xml" href="https://www.google.com/health/feeds/profile/default/-/%7Bhttp%3A%2F%2Fschemas.google.com%2Fg%2F2005%23kind%7Dhttp%3A%2F%2Fschemas.google.com%2Fhealth%2Fkinds%23profile/%7Bhttp%3A%2F%2Fschemas.google.com%2Fhealth%2Fitem%7DAcetaldehyde+-+Blood/LABTEST"/> + <link rel="self" type="application/atom+xml" href="https://www.google.com/health/feeds/profile/default/_cCCbQ0O3ug"/> + <link rel="edit" type="application/atom+xml" href="https://www.google.com/health/feeds/profile/default/_cCCbQ0O3ug"/> + <author> + <name>User Name</name> + <email>user@gmail.com</email> + </author> + <ContinuityOfCareRecord xmlns="urn:astm-org:CCR"> + <CCRDocumentObjectID>YWtomFb8aG.DueZ7z7fyug</CCRDocumentObjectID> + <Language/> + <DateTime> + <Type/> + </DateTime> + <Patient/> + <Body> + <Results> + <Result> + <Type/> + <Description/> + <Status/> + <Source> + <Actor> + <ActorID>user@gmail.com</ActorID> + <ActorRole> + <Text>Patient</Text> + </ActorRole> + </Actor> + </Source> + <Substance/> + <Test> + <DateTime> + <Type> + <Text>Collection start date</Text> + </Type> + <ExactDateTime>2008-09-03</ExactDateTime> + </DateTime> + <Type/> + <Description> + <Text>Acetaldehyde - Blood</Text> + </Description> + <Status/> + <TestResult> + <ResultSequencePosition>0</ResultSequencePosition> + <VariableResultModifier/> + <Units/> + </TestResult> + <ConfidenceValue/> + </Test> + </Result> + </Results> + </Body> + </ContinuityOfCareRecord> +</entry> +<entry> + <id>https://www.google.com/health/feeds/profile/default/BdyA3iJZyCc</id> + 
<published>2008-09-29T03:00:45.915Z</published> + <updated>2008-09-29T03:00:45.915Z</updated> + <category scheme="http://schemas.google.com/health/item" term="Aortic valve disorders"/> + <category scheme="http://schemas.google.com/g/2005#kind" term="http://schemas.google.com/health/kinds#profile"/> + <category term="CONDITION"/> + <title type="text">Aortic valve disorders + + + + + example.com + example.com + + + h1ljpoeKJ85li.1FHsG9Gw + + + + Hn0FE0IlcY-FMFFgSTxkvA/CONDITION/0 + + + Start date + + 2007-04-04T07:00:00Z + + + Aortic valve disorders + + 410.10 + ICD9 + 2004 + + + + Active + + + + example.com + + Information Provider + + + + + + + + + + https://www.google.com/health/feeds/profile/default/Cl.aMWIH5VA + 2008-09-29T03:13:34.996Z + 2008-09-29T03:13:34.997Z + + + + + <content type="html"/> + <link rel="http://schemas.google.com/health/data#complete" type="application/atom+xml" href="https://www.google.com/health/feeds/profile/default/-/%7Bhttp%3A%2F%2Fschemas.google.com%2Fg%2F2005%23kind%7Dhttp%3A%2F%2Fschemas.google.com%2Fhealth%2Fkinds%23profile/%7Bhttp%3A%2F%2Fschemas.google.com%2Fhealth%2Fitem%7DChickenpox+Vaccine/IMMUNIZATION"/> + <link rel="self" type="application/atom+xml" href="https://www.google.com/health/feeds/profile/default/Cl.aMWIH5VA"/> + <link rel="edit" type="application/atom+xml" href="https://www.google.com/health/feeds/profile/default/Cl.aMWIH5VA"/> + <author> + <name>User Name</name> + <email>user@gmail.com</email> + </author> + <ContinuityOfCareRecord xmlns="urn:astm-org:CCR"> + <CCRDocumentObjectID>KlhUqfftgELIitpKbqYalw</CCRDocumentObjectID> + <Language/> + <DateTime> + <Type/> + </DateTime> + <Patient/> + <Body> + <Immunizations> + <Immunization> + <Type/> + <Description/> + <Status/> + <Source> + <Actor> + <ActorID>user@gmail.com</ActorID> + <ActorRole> + <Text>Patient</Text> + </ActorRole> + </Actor> + </Source> + <Product> + <ProductName> + <Text>Chickenpox Vaccine</Text> + <Code> + <Value>21</Value> + <CodingSystem>HL7</CodingSystem> + </Code> + </ProductName> + </Product> + <Directions> + <Direction> + <Description/> + <DeliveryMethod/> + </Direction> + </Directions> + <Refills/> + </Immunization> + </Immunizations> + </Body> + </ContinuityOfCareRecord> +</entry> +<entry> + <id>https://www.google.com/health/feeds/profile/default/l0a7.FlX3_0</id> + <published>2008-09-29T03:14:47.461Z</published> + <updated>2008-09-29T03:14:47.461Z</updated> + <category scheme="http://schemas.google.com/g/2005#kind" term="http://schemas.google.com/health/kinds#profile"/> + <category term="DEMOGRAPHICS"/> + <category scheme="http://schemas.google.com/health/item" term="Demographics"/> + <title type="text">Demographics + + + + + + User Name + user@gmail.com + + + U5GDAVOxFbexQw3iyvqPYg + + + + + + + + + + + + + + + + 1984-07-22 + + + Male + + + + + + user@gmail.com + + Patient + + + + + + + + + https://www.google.com/health/feeds/profile/default/oIBDdgwFLyo + 2008-09-29T03:14:47.690Z + 2008-09-29T03:14:47.691Z + + + + FunctionalStatus + + + + + + User Name + user@gmail.com + + + W.EJcnhxb7W5M4eR4Tr1YA + + + + + + + + + + Pregnancy status + + + Not pregnant + + + + + user@gmail.com + + Patient + + + + + + + Breastfeeding status + + + Not breastfeeding + + + + + user@gmail.com + + Patient + + + + + + + + + + https://www.google.com/health/feeds/profile/default/wwljIlXuTVg + 2008-09-29T03:26:10.080Z + 2008-09-29T03:26:10.081Z + + + + + <content type="html"/> + <link rel="http://schemas.google.com/health/data#complete" type="application/atom+xml" 
href="https://www.google.com/health/feeds/profile/default/-/MEDICATION/%7Bhttp%3A%2F%2Fschemas.google.com%2Fg%2F2005%23kind%7Dhttp%3A%2F%2Fschemas.google.com%2Fhealth%2Fkinds%23profile/%7Bhttp%3A%2F%2Fschemas.google.com%2Fhealth%2Fitem%7DLipitor"/> + <link rel="self" type="application/atom+xml" href="https://www.google.com/health/feeds/profile/default/wwljIlXuTVg"/> + <link rel="edit" type="application/atom+xml" href="https://www.google.com/health/feeds/profile/default/wwljIlXuTVg"/> + <author> + <name>User Name</name> + <email>user@gmail.com</email> + </author> + <ContinuityOfCareRecord xmlns="urn:astm-org:CCR"> + <CCRDocumentObjectID>OrpghzvvbG_YaO5koqT2ug</CCRDocumentObjectID> + <Language/> + <DateTime> + <Type/> + </DateTime> + <Patient/> + <Body> + <Medications> + <Medication> + <Type/> + <Description/> + <Status> + <Text>ACTIVE</Text> + </Status> + <Source> + <Actor> + <ActorID>user@gmail.com</ActorID> + <ActorRole> + <Text>Patient</Text> + </ActorRole> + </Actor> + </Source> + <Product> + <ProductName> + <Text>Lipitor</Text> + </ProductName> + <Strength> + <Units/> + <StrengthSequencePosition>0</StrengthSequencePosition> + <VariableStrengthModifier/> + </Strength> + </Product> + <Directions> + <Direction> + <Description/> + <DeliveryMethod/> + <Dose> + <Units/> + <DoseSequencePosition>0</DoseSequencePosition> + <VariableDoseModifier/> + </Dose> + <Route> + <Text>By mouth</Text> + <Code> + <Value>C38288</Value> + <CodingSystem>FDA</CodingSystem> + </Code> + <RouteSequencePosition>0</RouteSequencePosition> + <MultipleRouteModifier/> + </Route> + </Direction> + </Directions> + <Refills/> + </Medication> + </Medications> + </Body> + </ContinuityOfCareRecord> +</entry> +<entry> + <id>https://www.google.com/health/feeds/profile/default/dd09TR12SiY</id> + <published>2008-09-29T07:52:17.175Z</published> + <updated>2008-09-29T07:52:17.176Z</updated> + <category scheme="http://schemas.google.com/g/2005#kind" term="http://schemas.google.com/health/kinds#profile"/> + <category scheme="http://schemas.google.com/health/item" term="Malaria"/> + <category term="CONDITION"/> + <title type="text"/> + <content type="html"/> + <link rel="http://schemas.google.com/health/data#complete" type="application/atom+xml" href="https://www.google.com/health/feeds/profile/default/-/%7Bhttp%3A%2F%2Fschemas.google.com%2Fg%2F2005%23kind%7Dhttp%3A%2F%2Fschemas.google.com%2Fhealth%2Fkinds%23profile/%7Bhttp%3A%2F%2Fschemas.google.com%2Fhealth%2Fitem%7DMalaria/CONDITION"/> + <link rel="self" type="application/atom+xml" href="https://www.google.com/health/feeds/profile/default/dd09TR12SiY"/> + <link rel="edit" type="application/atom+xml" href="https://www.google.com/health/feeds/profile/default/dd09TR12SiY"/> + <author> + <name>User Name</name> + <email>user@gmail.com</email> + </author> + <ContinuityOfCareRecord xmlns="urn:astm-org:CCR"> + <CCRDocumentObjectID>XF99N6X4lpy.jfPUPLMMSQ</CCRDocumentObjectID> + <Language/> + <DateTime> + <Type/> + </DateTime> + <Patient/> + <Body> + <Problems> + <Problem> + <Type/> + <Description> + <Text>Malaria</Text> + <Code> + <Value>136.9</Value> + <CodingSystem>ICD9_Broader</CodingSystem> + </Code> + <Code> + <Value>084.6</Value> + <CodingSystem>ICD9</CodingSystem> + </Code> + </Description> + <Status> + <Text>ACTIVE</Text> + </Status> + <Source> + <Actor> + <ActorID>user@gmail.com</ActorID> + <ActorRole> + <Text>Patient</Text> + </ActorRole> + </Actor> + </Source> + <HealthStatus> + <Description/> + </HealthStatus> + </Problem> + </Problems> + </Body> + </ContinuityOfCareRecord> 
+</entry> +<entry> + <id>https://www.google.com/health/feeds/profile/default/aS0Cf964DPs</id> + <published>2008-09-29T03:14:47.463Z</published> + <updated>2008-09-29T03:14:47.463Z</updated> + <category scheme="http://schemas.google.com/g/2005#kind" term="http://schemas.google.com/health/kinds#profile"/> + <category term="DEMOGRAPHICS"/> + <category scheme="http://schemas.google.com/health/item" term="SocialHistory (Drinking, Smoking)"/> + <title type="text">SocialHistory (Drinking, Smoking) + + + + + + User Name + user@gmail.com + + + kXylGU5YXLBzriv61xPGZQ + + + + + + + + + + Race + + S15814 + HL7 + + + + White + + + + + user@gmail.com + + Patient + + + + + + + + + + + + + + + https://www.google.com/health/feeds/profile/default/s5lII5xfj_g + 2008-09-29T03:14:47.544Z + 2008-09-29T03:14:47.545Z + + + + VitalSigns + + + + + + User Name + user@gmail.com + + + FTTIiY0TVVj35kZqFFjPjQ + + + + + + + + + + + + + + user@gmail.com + + Patient + + + + + + + + Height + + + + 0 + + 70 + + inches + + + + + + + + + + + + user@gmail.com + + Patient + + + + + + + + Weight + + + + 0 + + 2480 + + ounces + + + + + + + + + + + + user@gmail.com + + Patient + + + + + + + + Blood Type + + + + 0 + + O+ + + + + + + + + + +""" + +HEALTH_PROFILE_LIST_ENTRY = """ + + https://www.google.com/health/feeds/profile/list/vndCn5sdfwdEIY + 1970-01-01T00:00:00.000Z + profile name + vndCn5sdfwdEIY + + + + user@gmail.com + +""" + +BOOK_ENTRY = """"""\ + """"""\ + """http://www.google.com/books/feeds/volumes/b7GZr5Btp30C"""\ + """2009-04-24T23:35:16.000Z"""\ + """"""\ + """A theory of justice"""\ + """"""\ + """"""\ + """"""\ + """"""\ + """"""\ + """"""\ + """"""\ + """"""\ + """"""\ + """John Rawls"""\ + """1999"""\ + """p Since it appeared in 1971, John Rawls's i A Theory of Justice /i has become a classic. The author has now revised the original edition to clear up a number of difficulties he and others have found in the original book. /p p Rawls aims to express an essential part of the common core of the democratic tradition--justice as fairness--and to provide an alternative to utilitarianism, which had dominated the Anglo-Saxon tradition of political thought since the nineteenth century. Rawls substitutes the ideal of the social contract as a more satisfactory account of the basic rights and liberties of citizens as free and equal persons. "Each person," writes Rawls, "possesses an inviolability founded on justice that even the welfare of society as a whole cannot override." Advancing the ideas of Rousseau, Kant, Emerson, and Lincoln, Rawls's theory is as powerful today as it was when first published. /p"""\ + """538 pages"""\ + """b7GZr5Btp30C"""\ + """ISBN:0198250541"""\ + """ISBN:9780198250548"""\ + """en"""\ + """Oxford University Press"""\ + """A theory of justice"""\ +"""""" + +BOOK_FEED = """"""\ + """"""\ + """http://www.google.com/books/feeds/volumes"""\ + """2009-04-24T23:39:47.000Z"""\ + """"""\ + """Search results for 9780198250548"""\ + """"""\ + """"""\ + """"""\ + """"""\ + """Google Books Search"""\ + """http://www.google.com"""\ + """"""\ + """Google Book Search data API"""\ + """1"""\ + """1"""\ + """20"""\ + """"""\ + """http://www.google.com/books/feeds/volumes/b7GZr5Btp30C"""\ + """2009-04-24T23:39:47.000Z"""\ + """"""\ + """A theory of justice"""\ + """"""\ + """"""\ + """"""\ + """"""\ + """"""\ + """"""\ + """"""\ + """"""\ + """"""\ + """John Rawls"""\ + """1999"""\ + """... 
9780198250548 ..."""\ + """538 pages"""\ + """b7GZr5Btp30C"""\ + """ISBN:0198250541"""\ + """ISBN:9780198250548"""\ + """Law"""\ + """A theory of justice"""\ + """"""\ +"""""" + +MAP_FEED = """ + + http://maps.google.com/maps/feeds/maps/208825816854482607313 + 2009-07-27T18:48:29.631Z + + My maps + + + + + + + Roman + + 1 + 1 + 1 + + http://maps.google.com/maps/feeds/maps/208825816854482607313/00046fb45f88fa910bcea + 2009-07-27T18:46:34.451Z + 2009-07-27T18:48:29.631Z + 2009-07-27T18:48:29.631Z + + yes + + + Untitled +
    + + + + + + Roman + + + +""" + +MAP_ENTRY = """ + + http://maps.google.com/maps/feeds/maps/208825816854482607313/00046fb45f88fa910bcea + 2009-07-27T18:46:34.451Z + 2009-07-27T18:48:29.631Z + 2009-07-27T18:48:29.631Z + + yes + + + Untitled + + + + + + + Roman + + +""" + +MAP_FEATURE_FEED = """ + + http://maps.google.com/maps/feeds/features/208825816854482607313/00046fb45f88fa910bcea + 2009-07-27T18:48:29.631Z + + Untitled + + + + + 4 + 1 + 4 + + http://maps.google.com/maps/feeds/features/208825816854482607313/00046fb45f88fa910bcea/00046fb4632573b19e0b7 + 2009-07-27T18:47:35.037Z + 2009-07-27T18:47:35.037Z + 2009-07-27T18:47:35.037Z + + Some feature title + + + Some feature title + Some feature content]]> + + + -113.818359,41.442726,0.0 + + + + + + + Roman + + + Roman + + + + http://maps.google.com/maps/feeds/features/208825816854482607313/00046fb45f88fa910bcea/00046fb46325e839a11e6 + 2009-07-27T18:47:35.067Z + 2009-07-27T18:48:22.184Z + 2009-07-27T18:48:22.184Z + + A cool poly! + + + A cool poly! + And a description]]> + + + + + 1 + -109.775391,47.457809,0.0 -99.755859,51.508742,0.0 -92.900391,48.04871,0.0 -92.8125,44.339565,0.0 -95.273437,44.402392,0.0 -97.207031,46.619261,0.0 -100.898437,46.073231,0.0 -102.480469,43.068888,0.0 -110.742187,45.274886,0.0 -109.775391,47.457809,0.0 + + + + + + + + + Roman + + + Roman + + + + http://maps.google.com/maps/feeds/features/208825816854482607313/00046fb45f88fa910bcea/00046fb465f5002e56b7a + 2009-07-27T18:48:22.194Z + 2009-07-27T18:48:22.194Z + 2009-07-27T18:48:22.194Z + + New Mexico + + + New Mexico + Word.]]> + + + 1 + -110.039062,37.788081,0.0 -103.183594,37.926868,0.0 -103.183594,32.472695,0.0 -108.896484,32.026706,0.0 -109.863281,31.203405,0.0 -110.039062,37.788081,0.0 + + + + + + + Roman + + + Roman + + + +""" + +MAP_FEATURE_ENTRY = """ + + http://maps.google.com/maps/feeds/features/208825816854482607313/00046fb45f88fa910bcea/00046fb4632573b19e0b7 + 2009-07-27T18:47:35.037Z + 2009-07-27T18:47:35.037Z + 2009-07-27T18:47:35.037Z + + Some feature title + + + Some feature title + Some feature content]]> + + + -113.818359,41.442726,0.0 + + + + + + + Roman + + + Roman + + +""" + +MAP_FEATURE_KML = """ + Some feature title + Some feature content]]> + + + -113.818359,41.442726,0.0 + + +""" + +SITES_LISTPAGE_ENTRY = ''' + + http:///sites.google.com/feeds/content/site/gdatatestsite/1712987567114738703 + 2009-06-16T00:37:37.393Z + + ListPagesTitle + +
    stuff go here
    asdf
    sdf
    + + + + Test User + test@gmail.com + + + + + + + + + + + +
    ''' + +SITES_COMMENT_ENTRY = ''' + + http://sites.google.com/feeds/content/site/gdatatestsite/abc123 + 2009-06-15T18:40:22.407Z + + + <content type="xhtml"> + <div xmlns="http://www.w3.org/1999/xhtml">first comment</div> + </content> + <link rel="http://schemas.google.com/sites/2008#parent" type="application/atom+xml" href="http://sites.google.com/feeds/content/site/gdatatestsite/abc123parent"/> + <link rel="self" type="application/atom+xml" href="http://sites.google.com/feeds/content/site/gdatatestsite/abc123"/> + <link rel="edit" type="application/atom+xml" href="http://sites.google.com/feeds/content/site/gdatatestsite/abc123"/> + <author> + <name>Test User</name> + <email>test@gmail.com</email> + </author> + <thr:in-reply-to xmlns:thr="http://purl.org/syndication/thread/1.0" href="http://sites.google.com/site/gdatatestsite/annoucment/testpost" ref="http://sites.google.com/feeds/content/site/gdatatestsite/abc123" source="http://sites.google.com/feeds/content/site/gdatatestsite" type="text/html"/> +</entry>''' + +SITES_LISTITEM_ENTRY = '''<?xml version="1.0" encoding="UTF-8"?> +<entry xmlns="http://www.w3.org/2005/Atom"> + <id>http://sites.google.com/feeds/content/site/gdatatestsite/abc123</id> + <updated>2009-06-16T00:34:55.633Z</updated> + <category scheme="http://schemas.google.com/g/2005#kind" term="http://schemas.google.com/sites/2008#listitem"/> + <title type="text"/> + <link rel="http://schemas.google.com/sites/2008#parent" type="application/atom+xml" href="http://sites.google.com/feeds/content/site/gdatatestsite/abc123def"/> + <link rel="self" type="application/atom+xml" href="http://sites.google.com/feeds/content/site/gdatatestsite/abc123"/> + <link rel="edit" type="application/atom+xml" href="http://sites.google.com/feeds/content/site/gdatatestsite/abc123"/> + <author> + <name>Test User</name> + <email>test@gmail.com</email> + </author> + <gs:field xmlns:gs="http://schemas.google.com/spreadsheets/2006" index="A" name="Owner">test value</gs:field> + <gs:field xmlns:gs="http://schemas.google.com/spreadsheets/2006" index="B" name="Description">test</gs:field> + <gs:field xmlns:gs="http://schemas.google.com/spreadsheets/2006" index="C" name="Resolution">90</gs:field> + <gs:field xmlns:gs="http://schemas.google.com/spreadsheets/2006" index="D" name="Complete"/> + <gs:field xmlns:gs="http://schemas.google.com/spreadsheets/2006" index="E" name="MyCo">2009-05-31</gs:field> +</entry>''' + +SITES_CONTENT_FEED = '''<?xml version="1.0" encoding="UTF-8"?> +<feed xmlns="http://www.w3.org/2005/Atom" xmlns:openSearch="http://a9.com/-/spec/opensearch/1.1/" +xmlns:sites="http://schemas.google.com/sites/2008" xmlns:gs="http://schemas.google.com/spreadsheets/2006" +xmlns:dc="http://purl.org/dc/terms" xmlns:batch="http://schemas.google.com/gdata/batch" +xmlns:gd="http://schemas.google.com/g/2005" xmlns:thr="http://purl.org/syndication/thread/1.0"> +<id>http://sites.google.com/feeds/content/site/gdatatestsite</id> +<updated>2009-06-15T21:35:43.282Z</updated> +<link rel="http://schemas.google.com/g/2005#feed" type="application/atom+xml" href="http://sites.google.com/feeds/content/site/gdatatestsite"/> +<link rel="http://schemas.google.com/g/2005#post" type="application/atom+xml" href="http://sites.google.com/feeds/content/site/gdatatestsite"/> +<link rel="self" type="application/atom+xml" href="http://sites.google.com/feeds/content/site/gdatatestsite"/> +<generator version="1" uri="http://sites.google.com">Google Sites</generator> +<openSearch:startIndex>1</openSearch:startIndex> +<entry> + 
<id>http:///sites.google.com/feeds/content/site/gdatatestsite/1712987567114738703</id> + <updated>2009-06-16T00:37:37.393Z</updated> + <category scheme="http://schemas.google.com/g/2005#kind" term="http://schemas.google.com/sites/2008#listpage"/> + <title type="text">ListPagesTitle + +
    + +
    stuff go here
    asdf
    +
    sdf
    +
    +
    +
    +
    +
    +
    +
    + + + + + + Test User + test@gmail.com + + + + + + + + + + + 2 + + home + +
    + + http://sites.google.com/feeds/content/site/gdatatestsite/abc123 + 2009-06-17T00:40:37.082Z + + filecabinet + +
    + +
    sdf
    +
    +
    +
    + + + + + + Test User + test@gmail.com + + +
    + + http://sites.google.com/feeds/content/site/gdatatestsite/abc123 + 2009-06-16T00:34:55.633Z + + + <link rel="http://schemas.google.com/sites/2008#parent" type="application/atom+xml" href="http://sites.google.com/feeds/content/site/gdatatestsite/abc123def"/> + <link rel="self" type="application/atom+xml" href="http://sites.google.com/feeds/content/site/gdatatestsite/abc123"/> + <link rel="edit" type="application/atom+xml" href="http://sites.google.com/feeds/content/site/gdatatestsite/abc123"/> + <link rel="http://schemas.google.com/sites/2008#revision" type="application/atom+xml" href="http:///sites.google.com/feeds/content/site/gdatatestsite/abc123"/> + <author> + <name>Test User</name> + <email>test@gmail.com</email> + </author> + <gs:field xmlns:gs="http://schemas.google.com/spreadsheets/2006" index="A" name="Owner">test value</gs:field> + <gs:field xmlns:gs="http://schemas.google.com/spreadsheets/2006" index="B" name="Description">test</gs:field> + <gs:field xmlns:gs="http://schemas.google.com/spreadsheets/2006" index="C" name="Resolution">90</gs:field> + <gs:field xmlns:gs="http://schemas.google.com/spreadsheets/2006" index="D" name="Complete"/> + <gs:field xmlns:gs="http://schemas.google.com/spreadsheets/2006" index="E" name="MyCo">2009-05-31</gs:field> +</entry> +<entry> + <id>http://sites.google.com/feeds/content/site/gdatatestsite/abc123</id> + <updated>2009-06-15T18:40:32.922Z</updated> + <category scheme="http://schemas.google.com/g/2005#kind" term="http://schemas.google.com/sites/2008#attachment"/> + <title type="text">testFile.ods + + + + + + + Test User + test@gmail.com + + + something else + + + http://sites.google.com/feeds/content/site/gdatatestsite/abc123 + 2009-06-15T18:40:22.407Z + + + <content type="xhtml"> + <div xmlns="http://www.w3.org/1999/xhtml">first comment</div> + </content> + <link rel="http://schemas.google.com/sites/2008#parent" type="application/atom+xml" href="http://sites.google.com/feeds/content/site/gdatatestsite/abc123"/> + <link rel="self" type="application/atom+xml" href="http://sites.google.com/feeds/content/site/gdatatestsite/abc123"/> + <link rel="edit" type="application/atom+xml" href="http://sites.google.com/feeds/content/site/gdatatestsite/abc123"/> + <link rel="http://schemas.google.com/sites/2008#revision" type="application/atom+xml" href="http:///sites.google.com/feeds/content/site/gdatatestsite/abc123"/> + <author> + <name>Test User</name> + <email>test@gmail.com</email> + </author> + <thr:in-reply-to xmlns:thr="http://purl.org/syndication/thread/1.0" href="http://sites.google.com/site/gdatatestsite/annoucment/testpost" ref="http://sites.google.com/feeds/content/site/gdatatestsite/abc123" source="http://sites.google.com/feeds/content/site/gdatatestsite" type="text/html"/> +</entry> +<entry> + <id>http://sites.google.com/feeds/content/site/gdatatestsite/abc123</id> + <updated>2009-06-15T18:40:16.388Z</updated> + <category scheme="http://schemas.google.com/g/2005#kind" term="http://schemas.google.com/sites/2008#announcement"/> + <title type="text">TestPost + +
    + +
    content goes here
    +
    +
    +
    + + + + + + Test User + test@gmail.com + +
    + + http://sites.google.com/feeds/content/site/gdatatestsite/abc123 + 2009-06-12T23:37:59.417Z + + Home + +
    + +
    Some Content goes here
    +
    +
    +
    + +
    +
    +
    +
    +
    +
    + + + + + Test User + test@gmail.com + +
    + + http://sites.google.com/feeds/content/site/gdatatestsite/2639323850129333500 + 2009-06-12T23:32:09.191Z + + annoucment + +
    +
    +
    + + + + + Test User + test@gmail.com + + +
    +''' + +SITES_ACTIVITY_FEED = ''' + +http://sites.google.com/feeds/activity/site/siteName +2009-08-19T05:46:01.503Z +Activity + + + +Google Sites +1 + +http://sites.google.com/feeds/activity/site/siteName/197441951793148343 +2009-08-17T00:08:19.387Z + +NewWebpage3 +
    + + + + + + + User + user@gmail.com + + + +http://sites.google.com/feeds/activity/site/siteName/7299542210274956360 +2009-08-17T00:08:03.711Z + +NewWebpage3 + +
    User edited NewWebpage3 +
    +
    + + + + + User + user@gmail.com + +
    +''' + +SITES_REVISION_FEED = ''' + +http://sites.google.com/feeds/revision/site/siteName/2947510322163358574 +2009-08-19T06:20:18.151Z +Revisions + + +Google Sites +1 + +http://sites.google.com/feeds/revision/site/siteName/2947510322163358574/1 +2009-08-19T04:33:14.856Z + + +<content type="xhtml"> + <div xmlns="http://www.w3.org/1999/xhtml"> + <table cellspacing="0" class="sites-layout-name-one-column sites-layout-hbox"> + <tbody> + <tr> + <td class="sites-layout-tile sites-tile-name-content-1">testcomment</td> + </tr> + </tbody> + </table> +</div> +</content> +<link rel="http://schemas.google.com/sites/2008#parent" type="application/atom+xml" href="http://sites.google.com/feeds/content/site/siteName/54395424125706119"/> +<link rel="alternate" type="text" href="http://sites.google.com/site/system/app/pages/admin/compare?wuid=wuid%3Agx%3A28e7a9057c581b6e&rev1=1"/> +<link rel="self" type="application/atom+xml" href="http://sites.google.com/feeds/revision/site/siteName/2947510322163358574/1"/> +<author> + <name>User</name> + <email>user@gmail.com</email> +</author> +<thr:in-reply-to href="http://sites.google.com/site/siteName/code/js" ref="http://sites.google.com/feeds/content/site/siteName/54395424125706119" source="http://sites.google.com/feeds/content/google.com/siteName" type="text/html;charset=UTF-8"/> +<sites:revision>1</sites:revision> +</entry> +</feed>''' + +SITES_SITE_FEED = ''' +<feed xmlns="http://www.w3.org/2005/Atom" xmlns:openSearch="http://a9.com/-/spec/opensearch/1.1/" xmlns:gAcl="http://schemas.google.com/acl/2007" xmlns:sites="http://schemas.google.com/sites/2008" xmlns:gs="http://schemas.google.com/spreadsheets/2006" xmlns:dc="http://purl.org/dc/terms" xmlns:batch="http://schemas.google.com/gdata/batch" xmlns:gd="http://schemas.google.com/g/2005" xmlns:thr="http://purl.org/syndication/thread/1.0"> +<id>https://sites.google.com/feeds/site/example.com</id> +<updated>2009-12-09T01:05:54.631Z</updated> +<title>Site + + + +Google Sites +1 + +https://sites.google.com/feeds/site/example.com/new-test-site +2009-12-02T22:55:31.040Z +2009-12-02T22:55:31.040Z +New Test Site +A new site to hold memories + + + + + +new-test-site +iceberg + + +https://sites.google.com/feeds/site/example.com/newautosite2 +2009-12-05T00:28:01.077Z +2009-12-05T00:28:01.077Z +newAutoSite3 +A new site to hold memories2 + + + + +newautosite2 +default + +''' + +SITES_ACL_FEED = ''' + +https://sites.google.comsites.google.com/feeds/acl/site/example.com/new-test-site +2009-12-09T01:24:59.080Z + +Acl + + + +Google Sites +1 + + https://sites.google.com/feeds/acl/site/google.com/new-test-site/user%3Auser%40example.com + 2009-12-09T01:24:59.080Z + 2009-12-09T01:24:59.080Z + + + + + + +''' + +ANALYTICS_ACCOUNT_FEED_old = ''' + +http://www.google.com/analytics/feeds/accounts/abc@test.com +2009-06-25T03:55:22.000-07:00 +Profile list for abc@test.com + + +Google Analytics + +Google Analytics +12 +1 +12 + +http://www.google.com/analytics/feeds/accounts/ga:1174 +2009-06-25T03:55:22.000-07:00 +www.googlestore.com + +ga:1174 + + + + + + + +''' + +ANALYTICS_ACCOUNT_FEED = ''' + + http://www.google.com/analytics/feeds/accounts/api.nickm@google.com + 2009-10-14T09:14:25.000-07:00 + Profile list for abc@test.com + + + Google Analytics + + Google Analytics + 37 + 1 + 37 + + ga:operatingSystem==iPhone + + + http://www.google.com/analytics/feeds/accounts/ga:1174 + 2009-10-14T09:14:25.000-07:00 + www.googlestore.com + + + + + + + + + + + + + + + + + + + + + + ga:1174 + +''' + +ANALYTICS_DATA_FEED = ''' + + 
http://www.google.com/analytics/feeds/data?ids=ga:1174&dimensions=ga:medium,ga:source&metrics=ga:bounces,ga:visits&filters=ga:medium%3D%3Dreferral&start-date=2008-10-01&end-date=2008-10-31 + 2008-10-31T16:59:59.999-07:00 + Google Analytics Data for Profile 1174 + + + + Google Analytics + + Google Analytics + 6451 + 1 + 2 + 2008-10-01 + 2008-10-31 + + ga:operatingSystem==iPhone + + + + + + true + + ga:1174 + www.googlestore.com + + + + + + http://www.google.com/analytics/feeds/data?ids=ga:1174&ga:medium=referral&ga:source=blogger.com&filters=ga:medium%3D%3Dreferral&start-date=2008-10-01&end-date=2008-10-31 + 2008-10-30T17:00:00.001-07:00 + ga:source=blogger.com | ga:medium=referral + + + + + + +''' + + +ANALYTICS_MGMT_PROFILE_FEED = ''' + + https://www.google.com/analytics/feeds/datasources/ga/accounts/~all/webproperties/~all/profiles + 2010-06-14T22:18:48.676Z + Google Analytics Profiles for superman@gmail.com + + + Google Analytics + + Google Analytics + 1 + 1 + 1000 + + https://www.google.com/analytics/feeds/datasources/ga/accounts/30481/webproperties/UA-30481-1/profiles/1174 + 2010-06-09T05:58:15.436-07:00 + Google Analytics Profile www.googlestore.com + + + + + + + + + + + + +''' + +ANALYTICS_MGMT_GOAL_FEED = ''' + + https://www.google.com/analytics/feeds/datasources/ga/accounts/~all/webproperties/~all/profiles/~all/goals + 2010-06-14T22:21:18.485Z + Google Analytics Goals for superman@gmail.com + + + Google Analytics + + Google Analytics + 3 + 1 + 1000 + + https://www.google.com/analytics/feeds/datasources/ga/accounts/30481/webproperties/UA-30481-1/profiles/1174/goals/1 + 2010-02-07T13:12:43.377-08:00 + Google Analytics Goal 1 + + + + + + + + + + + https://www.google.com/analytics/feeds/datasources/ga/accounts/30481/webproperties/UA-30481-1/profiles/1174/goals/2 + 2010-02-07T13:12:43.376-08:00 + Google Analytics Goal 2 + + + + + + + + +''' + +ANALYTICS_MGMT_ADV_SEGMENT_FEED = ''' + + https://www.google.com/analytics/feeds/datasources/ga/segments + 2010-06-14T22:22:02.728Z + Google Analytics Advanced Segments for superman@gmail.com + + + Google Analytics + + Google Analytics + 2 + 1 + 1000 + + https://www.google.com/analytics/feeds/datasources/ga/segments/gaid::0 + 2009-10-26T13:00:44.915-07:00 + Google Analytics Advanced Segment Sources Form Google + + + ga:source=~^\Qgoogle\E + + + +''' + +MULTIDOMAIN_USER_ENTRY = """ + + + + + + + +""" + +MULTIDOMAIN_USER_FEED = """ + + https://apps-apis.google.com/a/feeds/user/2.0/example.com + 2010-01-26T23:38:13.215Z + + + + 1 + + https://apps-apis.google.com/a/feeds/user/2.0/example.com/admin%40example.com + 2010-01-26T23:38:13.210Z + + + + + + + + + + + + https://apps-apis.google.com/a/feeds/user/2.0/example.com/liz%40example.com + 2010-01-26T23:38:13.210Z + + + + + + + + + + +""" + +MULTIDOMAIN_USER_RENAME_REQUEST = """ + + +""" + +MULTIDOMAIN_ALIAS_ENTRY = """ + + https://apps-apis.google.com/a/feeds/alias/2.0/gethelp_example.com/helpdesk%40gethelp_example.com + 2008-10-17T15:02:45.646Z + + + + +""" + +MULTIDOMAIN_ALIAS_FEED = """ + + https://apps-apis.google.com/a/feeds/alias/2.0/gethelp_example.com + 2010-01-26T23:38:13.215Z + + + + 1 + + https://apps-apis.google.com/a/feeds/alias/2.0/gethelp_example.com/helpdesk%40gethelp_example.com + 2010-01-26T23:38:13.210Z + + + + + + + https://apps-apis.google.com/a/feeds/alias/2.0/gethelp_example.com/support%40gethelp_example.com + 2010-01-26T23:38:13.210Z + + + + + +""" diff --git a/gam/gdata/analytics/tlslite/BaseDB.py b/gam/gdata/analytics/tlslite/BaseDB.py new file mode 100755 index 
00000000000..ca8dff6b408 --- /dev/null +++ b/gam/gdata/analytics/tlslite/BaseDB.py @@ -0,0 +1,120 @@ +"""Base class for SharedKeyDB and VerifierDB.""" + +import anydbm +import thread + +class BaseDB: + def __init__(self, filename, type): + self.type = type + self.filename = filename + if self.filename: + self.db = None + else: + self.db = {} + self.lock = thread.allocate_lock() + + def create(self): + """Create a new on-disk database. + + @raise anydbm.error: If there's a problem creating the database. + """ + if self.filename: + self.db = anydbm.open(self.filename, "n") #raises anydbm.error + self.db["--Reserved--type"] = self.type + self.db.sync() + else: + self.db = {} + + def open(self): + """Open a pre-existing on-disk database. + + @raise anydbm.error: If there's a problem opening the database. + @raise ValueError: If the database is not of the right type. + """ + if not self.filename: + raise ValueError("Can only open on-disk databases") + self.db = anydbm.open(self.filename, "w") #raises anydbm.error + try: + if self.db["--Reserved--type"] != self.type: + raise ValueError("Not a %s database" % self.type) + except KeyError: + raise ValueError("Not a recognized database") + + def __getitem__(self, username): + if self.db == None: + raise AssertionError("DB not open") + + self.lock.acquire() + try: + valueStr = self.db[username] + finally: + self.lock.release() + + return self._getItem(username, valueStr) + + def __setitem__(self, username, value): + if self.db == None: + raise AssertionError("DB not open") + + valueStr = self._setItem(username, value) + + self.lock.acquire() + try: + self.db[username] = valueStr + if self.filename: + self.db.sync() + finally: + self.lock.release() + + def __delitem__(self, username): + if self.db == None: + raise AssertionError("DB not open") + + self.lock.acquire() + try: + del(self.db[username]) + if self.filename: + self.db.sync() + finally: + self.lock.release() + + def __contains__(self, username): + """Check if the database contains the specified username. + + @type username: str + @param username: The username to check for. + + @rtype: bool + @return: True if the database contains the username, False + otherwise. + + """ + if self.db == None: + raise AssertionError("DB not open") + + self.lock.acquire() + try: + return self.db.has_key(username) + finally: + self.lock.release() + + def check(self, username, param): + value = self.__getitem__(username) + return self._checkItem(value, username, param) + + def keys(self): + """Return a list of usernames in the database. + + @rtype: list + @return: The usernames in the database. + """ + if self.db == None: + raise AssertionError("DB not open") + + self.lock.acquire() + try: + usernames = self.db.keys() + finally: + self.lock.release() + usernames = [u for u in usernames if not u.startswith("--Reserved--")] + return usernames \ No newline at end of file diff --git a/gam/gdata/analytics/tlslite/Checker.py b/gam/gdata/analytics/tlslite/Checker.py new file mode 100755 index 00000000000..f978697628e --- /dev/null +++ b/gam/gdata/analytics/tlslite/Checker.py @@ -0,0 +1,146 @@ +"""Class for post-handshake certificate checking.""" + +from utils.cryptomath import hashAndBase64 +from X509 import X509 +from X509CertChain import X509CertChain +from errors import * + + +class Checker: + """This class is passed to a handshake function to check the other + party's certificate chain. 
+ + If a handshake function completes successfully, but the Checker + judges the other party's certificate chain to be missing or + inadequate, a subclass of + L{tlslite.errors.TLSAuthenticationError} will be raised. + + Currently, the Checker can check either an X.509 or a cryptoID + chain (for the latter, cryptoIDlib must be installed). + """ + + def __init__(self, cryptoID=None, protocol=None, + x509Fingerprint=None, + x509TrustList=None, x509CommonName=None, + checkResumedSession=False): + """Create a new Checker instance. + + You must pass in one of these argument combinations: + - cryptoID[, protocol] (requires cryptoIDlib) + - x509Fingerprint + - x509TrustList[, x509CommonName] (requires cryptlib_py) + + @type cryptoID: str + @param cryptoID: A cryptoID which the other party's certificate + chain must match. The cryptoIDlib module must be installed. + Mutually exclusive with all of the 'x509...' arguments. + + @type protocol: str + @param protocol: A cryptoID protocol URI which the other + party's certificate chain must match. Requires the 'cryptoID' + argument. + + @type x509Fingerprint: str + @param x509Fingerprint: A hex-encoded X.509 end-entity + fingerprint which the other party's end-entity certificate must + match. Mutually exclusive with the 'cryptoID' and + 'x509TrustList' arguments. + + @type x509TrustList: list of L{tlslite.X509.X509} + @param x509TrustList: A list of trusted root certificates. The + other party must present a certificate chain which extends to + one of these root certificates. The cryptlib_py module must be + installed. Mutually exclusive with the 'cryptoID' and + 'x509Fingerprint' arguments. + + @type x509CommonName: str + @param x509CommonName: The end-entity certificate's 'CN' field + must match this value. For a web server, this is typically a + server name such as 'www.amazon.com'. Mutually exclusive with + the 'cryptoID' and 'x509Fingerprint' arguments. Requires the + 'x509TrustList' argument. + + @type checkResumedSession: bool + @param checkResumedSession: If resumed sessions should be + checked. This defaults to False, on the theory that if the + session was checked once, we don't need to bother + re-checking it. + """ + + if cryptoID and (x509Fingerprint or x509TrustList): + raise ValueError() + if x509Fingerprint and x509TrustList: + raise ValueError() + if x509CommonName and not x509TrustList: + raise ValueError() + if protocol and not cryptoID: + raise ValueError() + if cryptoID: + import cryptoIDlib #So we raise an error here + if x509TrustList: + import cryptlib_py #So we raise an error here + self.cryptoID = cryptoID + self.protocol = protocol + self.x509Fingerprint = x509Fingerprint + self.x509TrustList = x509TrustList + self.x509CommonName = x509CommonName + self.checkResumedSession = checkResumedSession + + def __call__(self, connection): + """Check a TLSConnection. + + When a Checker is passed to a handshake function, this will + be called at the end of the function. + + @type connection: L{tlslite.TLSConnection.TLSConnection} + @param connection: The TLSConnection to examine. + + @raise tlslite.errors.TLSAuthenticationError: If the other + party's certificate chain is missing or bad. 
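For illustration only (not part of this patch), a minimal client-side sketch of wiring a Checker into a handshake. The vendored package is assumed to be importable as "tlslite", and the host, port, and fingerprint value are made-up placeholders.

import socket
from tlslite.TLSConnection import TLSConnection
from tlslite.Checker import Checker
from tlslite.errors import TLSAuthenticationError

sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect(("server.example.com", 443))

connection = TLSConnection(sock)
# Hex fingerprint of the server certificate we expect (placeholder value).
checker = Checker(x509Fingerprint="0123456789abcdef0123456789abcdef01234567")
try:
    # The checker is invoked automatically once the handshake completes.
    connection.handshakeClientCert(checker=checker)
except TLSAuthenticationError:
    # Fingerprint mismatch, missing chain, and similar failures land here.
    sock.close()
    raise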
+ """ + if not self.checkResumedSession and connection.resumed: + return + + if self.cryptoID or self.x509Fingerprint or self.x509TrustList: + if connection._client: + chain = connection.session.serverCertChain + else: + chain = connection.session.clientCertChain + + if self.x509Fingerprint or self.x509TrustList: + if isinstance(chain, X509CertChain): + if self.x509Fingerprint: + if chain.getFingerprint() != self.x509Fingerprint: + raise TLSFingerprintError(\ + "X.509 fingerprint mismatch: %s, %s" % \ + (chain.getFingerprint(), self.x509Fingerprint)) + else: #self.x509TrustList + if not chain.validate(self.x509TrustList): + raise TLSValidationError("X.509 validation failure") + if self.x509CommonName and \ + (chain.getCommonName() != self.x509CommonName): + raise TLSAuthorizationError(\ + "X.509 Common Name mismatch: %s, %s" % \ + (chain.getCommonName(), self.x509CommonName)) + elif chain: + raise TLSAuthenticationTypeError() + else: + raise TLSNoAuthenticationError() + elif self.cryptoID: + import cryptoIDlib.CertChain + if isinstance(chain, cryptoIDlib.CertChain.CertChain): + if chain.cryptoID != self.cryptoID: + raise TLSFingerprintError(\ + "cryptoID mismatch: %s, %s" % \ + (chain.cryptoID, self.cryptoID)) + if self.protocol: + if not chain.checkProtocol(self.protocol): + raise TLSAuthorizationError(\ + "cryptoID protocol mismatch") + if not chain.validate(): + raise TLSValidationError("cryptoID validation failure") + elif chain: + raise TLSAuthenticationTypeError() + else: + raise TLSNoAuthenticationError() + diff --git a/gam/gdata/analytics/tlslite/FileObject.py b/gam/gdata/analytics/tlslite/FileObject.py new file mode 100755 index 00000000000..6ee02b2436b --- /dev/null +++ b/gam/gdata/analytics/tlslite/FileObject.py @@ -0,0 +1,220 @@ +"""Class returned by TLSConnection.makefile().""" + +class FileObject: + """This class provides a file object interface to a + L{tlslite.TLSConnection.TLSConnection}. + + Call makefile() on a TLSConnection to create a FileObject instance. + + This class was copied, with minor modifications, from the + _fileobject class in socket.py. 
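As an aside, a small usage sketch of this file-object interface, assuming "connection" is a TLSConnection whose handshake has already completed; handle() is a placeholder for application logic.

# Sketch: wrap an open TLS connection in a file object and read line by line.
f = connection.makefile('rb')
line = f.readline()          # one decrypted line from the peer
while line:
    handle(line)             # placeholder for application logic
    line = f.readline()
f.close()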
Note that fileno() is not + implemented.""" + + default_bufsize = 16384 #TREV: changed from 8192 + + def __init__(self, sock, mode='rb', bufsize=-1): + self._sock = sock + self.mode = mode # Not actually used in this version + if bufsize < 0: + bufsize = self.default_bufsize + self.bufsize = bufsize + self.softspace = False + if bufsize == 0: + self._rbufsize = 1 + elif bufsize == 1: + self._rbufsize = self.default_bufsize + else: + self._rbufsize = bufsize + self._wbufsize = bufsize + self._rbuf = "" # A string + self._wbuf = [] # A list of strings + + def _getclosed(self): + return self._sock is not None + closed = property(_getclosed, doc="True if the file is closed") + + def close(self): + try: + if self._sock: + for result in self._sock._decrefAsync(): #TREV + pass + finally: + self._sock = None + + def __del__(self): + try: + self.close() + except: + # close() may fail if __init__ didn't complete + pass + + def flush(self): + if self._wbuf: + buffer = "".join(self._wbuf) + self._wbuf = [] + self._sock.sendall(buffer) + + #def fileno(self): + # raise NotImplementedError() #TREV + + def write(self, data): + data = str(data) # XXX Should really reject non-string non-buffers + if not data: + return + self._wbuf.append(data) + if (self._wbufsize == 0 or + self._wbufsize == 1 and '\n' in data or + self._get_wbuf_len() >= self._wbufsize): + self.flush() + + def writelines(self, list): + # XXX We could do better here for very long lists + # XXX Should really reject non-string non-buffers + self._wbuf.extend(filter(None, map(str, list))) + if (self._wbufsize <= 1 or + self._get_wbuf_len() >= self._wbufsize): + self.flush() + + def _get_wbuf_len(self): + buf_len = 0 + for x in self._wbuf: + buf_len += len(x) + return buf_len + + def read(self, size=-1): + data = self._rbuf + if size < 0: + # Read until EOF + buffers = [] + if data: + buffers.append(data) + self._rbuf = "" + if self._rbufsize <= 1: + recv_size = self.default_bufsize + else: + recv_size = self._rbufsize + while True: + data = self._sock.recv(recv_size) + if not data: + break + buffers.append(data) + return "".join(buffers) + else: + # Read until size bytes or EOF seen, whichever comes first + buf_len = len(data) + if buf_len >= size: + self._rbuf = data[size:] + return data[:size] + buffers = [] + if data: + buffers.append(data) + self._rbuf = "" + while True: + left = size - buf_len + recv_size = max(self._rbufsize, left) + data = self._sock.recv(recv_size) + if not data: + break + buffers.append(data) + n = len(data) + if n >= left: + self._rbuf = data[left:] + buffers[-1] = data[:left] + break + buf_len += n + return "".join(buffers) + + def readline(self, size=-1): + data = self._rbuf + if size < 0: + # Read until \n or EOF, whichever comes first + if self._rbufsize <= 1: + # Speed up unbuffered case + assert data == "" + buffers = [] + recv = self._sock.recv + while data != "\n": + data = recv(1) + if not data: + break + buffers.append(data) + return "".join(buffers) + nl = data.find('\n') + if nl >= 0: + nl += 1 + self._rbuf = data[nl:] + return data[:nl] + buffers = [] + if data: + buffers.append(data) + self._rbuf = "" + while True: + data = self._sock.recv(self._rbufsize) + if not data: + break + buffers.append(data) + nl = data.find('\n') + if nl >= 0: + nl += 1 + self._rbuf = data[nl:] + buffers[-1] = data[:nl] + break + return "".join(buffers) + else: + # Read until size bytes or \n or EOF seen, whichever comes first + nl = data.find('\n', 0, size) + if nl >= 0: + nl += 1 + self._rbuf = data[nl:] + return data[:nl] + 
buf_len = len(data) + if buf_len >= size: + self._rbuf = data[size:] + return data[:size] + buffers = [] + if data: + buffers.append(data) + self._rbuf = "" + while True: + data = self._sock.recv(self._rbufsize) + if not data: + break + buffers.append(data) + left = size - buf_len + nl = data.find('\n', 0, left) + if nl >= 0: + nl += 1 + self._rbuf = data[nl:] + buffers[-1] = data[:nl] + break + n = len(data) + if n >= left: + self._rbuf = data[left:] + buffers[-1] = data[:left] + break + buf_len += n + return "".join(buffers) + + def readlines(self, sizehint=0): + total = 0 + list = [] + while True: + line = self.readline() + if not line: + break + list.append(line) + total += len(line) + if sizehint and total >= sizehint: + break + return list + + # Iterator protocols + + def __iter__(self): + return self + + def next(self): + line = self.readline() + if not line: + raise StopIteration + return line diff --git a/gam/gdata/analytics/tlslite/HandshakeSettings.py b/gam/gdata/analytics/tlslite/HandshakeSettings.py new file mode 100755 index 00000000000..c7c3223e515 --- /dev/null +++ b/gam/gdata/analytics/tlslite/HandshakeSettings.py @@ -0,0 +1,159 @@ +"""Class for setting handshake parameters.""" + +from constants import CertificateType +from utils import cryptomath +from utils import cipherfactory + +class HandshakeSettings: + """This class encapsulates various parameters that can be used with + a TLS handshake. + @sort: minKeySize, maxKeySize, cipherNames, certificateTypes, + minVersion, maxVersion + + @type minKeySize: int + @ivar minKeySize: The minimum bit length for asymmetric keys. + + If the other party tries to use SRP, RSA, or Diffie-Hellman + parameters smaller than this length, an alert will be + signalled. The default is 1023. + + @type maxKeySize: int + @ivar maxKeySize: The maximum bit length for asymmetric keys. + + If the other party tries to use SRP, RSA, or Diffie-Hellman + parameters larger than this length, an alert will be signalled. + The default is 8193. + + @type cipherNames: list + @ivar cipherNames: The allowed ciphers, in order of preference. + + The allowed values in this list are 'aes256', 'aes128', '3des', and + 'rc4'. If these settings are used with a client handshake, they + determine the order of the ciphersuites offered in the ClientHello + message. + + If these settings are used with a server handshake, the server will + choose whichever ciphersuite matches the earliest entry in this + list. + + NOTE: If '3des' is used in this list, but TLS Lite can't find an + add-on library that supports 3DES, then '3des' will be silently + removed. + + The default value is ['aes256', 'aes128', '3des', 'rc4']. + + @type certificateTypes: list + @ivar certificateTypes: The allowed certificate types, in order of + preference. + + The allowed values in this list are 'x509' and 'cryptoID'. This + list is only used with a client handshake. The client will + advertise to the server which certificate types are supported, and + will check that the server uses one of the appropriate types. + + NOTE: If 'cryptoID' is used in this list, but cryptoIDlib is not + installed, then 'cryptoID' will be silently removed. + + @type minVersion: tuple + @ivar minVersion: The minimum allowed SSL/TLS version. + + This variable can be set to (3,0) for SSL 3.0, (3,1) for + TLS 1.0, or (3,2) for TLS 1.1. If the other party wishes to + use a lower version, a protocol_version alert will be signalled. + The default is (3,0). 
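For illustration, a sketch of tightening the knobs documented in this class before a handshake; the values are arbitrary but stay within the documented ranges, and the final handshake call is only indicated in a comment.

from tlslite.HandshakeSettings import HandshakeSettings

settings = HandshakeSettings()
settings.minVersion = (3, 1)                   # require at least TLS 1.0
settings.maxVersion = (3, 2)                   # allow up to TLS 1.1
settings.cipherNames = ["aes256", "aes128"]    # drop 3DES and RC4
settings.minKeySize = 2048                     # reject short asymmetric keys
# Later: connection.handshakeClientCert(settings=settings)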
+ + @type maxVersion: tuple + @ivar maxVersion: The maximum allowed SSL/TLS version. + + This variable can be set to (3,0) for SSL 3.0, (3,1) for + TLS 1.0, or (3,2) for TLS 1.1. If the other party wishes to + use a higher version, a protocol_version alert will be signalled. + The default is (3,2). (WARNING: Some servers may (improperly) + reject clients which offer support for TLS 1.1. In this case, + try lowering maxVersion to (3,1)). + """ + def __init__(self): + self.minKeySize = 1023 + self.maxKeySize = 8193 + self.cipherNames = ["aes256", "aes128", "3des", "rc4"] + self.cipherImplementations = ["cryptlib", "openssl", "pycrypto", + "python"] + self.certificateTypes = ["x509", "cryptoID"] + self.minVersion = (3,0) + self.maxVersion = (3,2) + + #Filters out options that are not supported + def _filter(self): + other = HandshakeSettings() + other.minKeySize = self.minKeySize + other.maxKeySize = self.maxKeySize + other.cipherNames = self.cipherNames + other.cipherImplementations = self.cipherImplementations + other.certificateTypes = self.certificateTypes + other.minVersion = self.minVersion + other.maxVersion = self.maxVersion + + if not cipherfactory.tripleDESPresent: + other.cipherNames = [e for e in self.cipherNames if e != "3des"] + if len(other.cipherNames)==0: + raise ValueError("No supported ciphers") + + try: + import cryptoIDlib + except ImportError: + other.certificateTypes = [e for e in self.certificateTypes \ + if e != "cryptoID"] + if len(other.certificateTypes)==0: + raise ValueError("No supported certificate types") + + if not cryptomath.cryptlibpyLoaded: + other.cipherImplementations = [e for e in \ + self.cipherImplementations if e != "cryptlib"] + if not cryptomath.m2cryptoLoaded: + other.cipherImplementations = [e for e in \ + other.cipherImplementations if e != "openssl"] + if not cryptomath.pycryptoLoaded: + other.cipherImplementations = [e for e in \ + other.cipherImplementations if e != "pycrypto"] + if len(other.cipherImplementations)==0: + raise ValueError("No supported cipher implementations") + + if other.minKeySize<512: + raise ValueError("minKeySize too small") + if other.minKeySize>16384: + raise ValueError("minKeySize too large") + if other.maxKeySize<512: + raise ValueError("maxKeySize too small") + if other.maxKeySize>16384: + raise ValueError("maxKeySize too large") + for s in other.cipherNames: + if s not in ("aes256", "aes128", "rc4", "3des"): + raise ValueError("Unknown cipher name: '%s'" % s) + for s in other.cipherImplementations: + if s not in ("cryptlib", "openssl", "python", "pycrypto"): + raise ValueError("Unknown cipher implementation: '%s'" % s) + for s in other.certificateTypes: + if s not in ("x509", "cryptoID"): + raise ValueError("Unknown certificate type: '%s'" % s) + + if other.minVersion > other.maxVersion: + raise ValueError("Versions set incorrectly") + + if not other.minVersion in ((3,0), (3,1), (3,2)): + raise ValueError("minVersion set incorrectly") + + if not other.maxVersion in ((3,0), (3,1), (3,2)): + raise ValueError("maxVersion set incorrectly") + + return other + + def _getCertificateTypes(self): + l = [] + for ct in self.certificateTypes: + if ct == "x509": + l.append(CertificateType.x509) + elif ct == "cryptoID": + l.append(CertificateType.cryptoID) + else: + raise AssertionError() + return l diff --git a/gam/gdata/analytics/tlslite/Session.py b/gam/gdata/analytics/tlslite/Session.py new file mode 100755 index 00000000000..a951f458942 --- /dev/null +++ b/gam/gdata/analytics/tlslite/Session.py @@ -0,0 +1,131 @@ +"""Class 
representing a TLS session.""" + +from utils.compat import * +from mathtls import * +from constants import * + +class Session: + """ + This class represents a TLS session. + + TLS distinguishes between connections and sessions. A new + handshake creates both a connection and a session. Data is + transmitted over the connection. + + The session contains a more permanent record of the handshake. The + session can be inspected to determine handshake results. The + session can also be used to create a new connection through + "session resumption". If the client and server both support this, + they can create a new connection based on an old session without + the overhead of a full handshake. + + The session for a L{tlslite.TLSConnection.TLSConnection} can be + retrieved from the connection's 'session' attribute. + + @type srpUsername: str + @ivar srpUsername: The client's SRP username (or None). + + @type sharedKeyUsername: str + @ivar sharedKeyUsername: The client's shared-key username (or + None). + + @type clientCertChain: L{tlslite.X509CertChain.X509CertChain} or + L{cryptoIDlib.CertChain.CertChain} + @ivar clientCertChain: The client's certificate chain (or None). + + @type serverCertChain: L{tlslite.X509CertChain.X509CertChain} or + L{cryptoIDlib.CertChain.CertChain} + @ivar serverCertChain: The server's certificate chain (or None). + """ + + def __init__(self): + self.masterSecret = createByteArraySequence([]) + self.sessionID = createByteArraySequence([]) + self.cipherSuite = 0 + self.srpUsername = None + self.sharedKeyUsername = None + self.clientCertChain = None + self.serverCertChain = None + self.resumable = False + self.sharedKey = False + + def _clone(self): + other = Session() + other.masterSecret = self.masterSecret + other.sessionID = self.sessionID + other.cipherSuite = self.cipherSuite + other.srpUsername = self.srpUsername + other.sharedKeyUsername = self.sharedKeyUsername + other.clientCertChain = self.clientCertChain + other.serverCertChain = self.serverCertChain + other.resumable = self.resumable + other.sharedKey = self.sharedKey + return other + + def _calcMasterSecret(self, version, premasterSecret, clientRandom, + serverRandom): + if version == (3,0): + self.masterSecret = PRF_SSL(premasterSecret, + concatArrays(clientRandom, serverRandom), 48) + elif version in ((3,1), (3,2)): + self.masterSecret = PRF(premasterSecret, "master secret", + concatArrays(clientRandom, serverRandom), 48) + else: + raise AssertionError() + + def valid(self): + """If this session can be used for session resumption. + + @rtype: bool + @return: If this session can be used for session resumption. + """ + return self.resumable or self.sharedKey + + def _setResumable(self, boolean): + #Only let it be set if this isn't a shared key + if not self.sharedKey: + #Only let it be set to True if the sessionID is non-null + if (not boolean) or (boolean and self.sessionID): + self.resumable = boolean + + def getCipherName(self): + """Get the name of the cipher used with this connection. + + @rtype: str + @return: The name of the cipher used with this connection. + Either 'aes128', 'aes256', 'rc4', or '3des'. 
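As a sketch of the resumption flow described here, the session negotiated on one connection can be fed back into a later handshake; the socket variables are placeholders, and connection.resumed is the flag the library sets on resumed connections.

# First connection: full handshake, then keep the Session object around.
connection = TLSConnection(first_socket)             # placeholder socket
connection.handshakeClientCert()
saved = connection.session
print saved.getCipherName()                          # e.g. 'aes128'

# Second connection: attempt resumption; a full handshake is performed
# if the server does not honour the old session.
connection2 = TLSConnection(second_socket)           # placeholder socket
connection2.handshakeClientCert(session=saved)
print connection2.resumed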
+ """ + if self.cipherSuite in CipherSuite.aes128Suites: + return "aes128" + elif self.cipherSuite in CipherSuite.aes256Suites: + return "aes256" + elif self.cipherSuite in CipherSuite.rc4Suites: + return "rc4" + elif self.cipherSuite in CipherSuite.tripleDESSuites: + return "3des" + else: + return None + + def _createSharedKey(self, sharedKeyUsername, sharedKey): + if len(sharedKeyUsername)>16: + raise ValueError() + if len(sharedKey)>47: + raise ValueError() + + self.sharedKeyUsername = sharedKeyUsername + + self.sessionID = createByteArrayZeros(16) + for x in range(len(sharedKeyUsername)): + self.sessionID[x] = ord(sharedKeyUsername[x]) + + premasterSecret = createByteArrayZeros(48) + sharedKey = chr(len(sharedKey)) + sharedKey + for x in range(48): + premasterSecret[x] = ord(sharedKey[x % len(sharedKey)]) + + self.masterSecret = PRF(premasterSecret, "shared secret", + createByteArraySequence([]), 48) + self.sharedKey = True + return self + + diff --git a/gam/gdata/analytics/tlslite/SessionCache.py b/gam/gdata/analytics/tlslite/SessionCache.py new file mode 100755 index 00000000000..34cf0b0ec4e --- /dev/null +++ b/gam/gdata/analytics/tlslite/SessionCache.py @@ -0,0 +1,103 @@ +"""Class for caching TLS sessions.""" + +import thread +import time + +class SessionCache: + """This class is used by the server to cache TLS sessions. + + Caching sessions allows the client to use TLS session resumption + and avoid the expense of a full handshake. To use this class, + simply pass a SessionCache instance into the server handshake + function. + + This class is thread-safe. + """ + + #References to these instances + #are also held by the caller, who may change the 'resumable' + #flag, so the SessionCache must return the same instances + #it was passed in. + + def __init__(self, maxEntries=10000, maxAge=14400): + """Create a new SessionCache. + + @type maxEntries: int + @param maxEntries: The maximum size of the cache. When this + limit is reached, the oldest sessions will be deleted as + necessary to make room for new ones. The default is 10000. + + @type maxAge: int + @param maxAge: The number of seconds before a session expires + from the cache. The default is 14400 (i.e. 4 hours).""" + + self.lock = thread.allocate_lock() + + # Maps sessionIDs to sessions + self.entriesDict = {} + + #Circular list of (sessionID, timestamp) pairs + self.entriesList = [(None,None)] * maxEntries + + self.firstIndex = 0 + self.lastIndex = 0 + self.maxAge = maxAge + + def __getitem__(self, sessionID): + self.lock.acquire() + try: + self._purge() #Delete old items, so we're assured of a new one + session = self.entriesDict[sessionID] + + #When we add sessions they're resumable, but it's possible + #for the session to be invalidated later on (if a fatal alert + #is returned), so we have to check for resumability before + #returning the session. 
+ + if session.valid(): + return session + else: + raise KeyError() + finally: + self.lock.release() + + + def __setitem__(self, sessionID, session): + self.lock.acquire() + try: + #Add the new element + self.entriesDict[sessionID] = session + self.entriesList[self.lastIndex] = (sessionID, time.time()) + self.lastIndex = (self.lastIndex+1) % len(self.entriesList) + + #If the cache is full, we delete the oldest element to make an + #empty space + if self.lastIndex == self.firstIndex: + del(self.entriesDict[self.entriesList[self.firstIndex][0]]) + self.firstIndex = (self.firstIndex+1) % len(self.entriesList) + finally: + self.lock.release() + + #Delete expired items + def _purge(self): + currentTime = time.time() + + #Search through the circular list, deleting expired elements until + #we reach a non-expired element. Since elements in list are + #ordered in time, we can break once we reach the first non-expired + #element + index = self.firstIndex + while index != self.lastIndex: + if currentTime - self.entriesList[index][1] > self.maxAge: + del(self.entriesDict[self.entriesList[index][0]]) + index = (index+1) % len(self.entriesList) + else: + break + self.firstIndex = index + +def _test(): + import doctest, SessionCache + return doctest.testmod(SessionCache) + +if __name__ == "__main__": + _test() diff --git a/gam/gdata/analytics/tlslite/SharedKeyDB.py b/gam/gdata/analytics/tlslite/SharedKeyDB.py new file mode 100755 index 00000000000..3246ec7f155 --- /dev/null +++ b/gam/gdata/analytics/tlslite/SharedKeyDB.py @@ -0,0 +1,58 @@ +"""Class for storing shared keys.""" + +from utils.cryptomath import * +from utils.compat import * +from mathtls import * +from Session import Session +from BaseDB import BaseDB + +class SharedKeyDB(BaseDB): + """This class represent an in-memory or on-disk database of shared + keys. + + A SharedKeyDB can be passed to a server handshake function to + authenticate a client based on one of the shared keys. + + This class is thread-safe. + """ + + def __init__(self, filename=None): + """Create a new SharedKeyDB. + + @type filename: str + @param filename: Filename for an on-disk database, or None for + an in-memory database. If the filename already exists, follow + this with a call to open(). To create a new on-disk database, + follow this with a call to create(). + """ + BaseDB.__init__(self, filename, "shared key") + + def _getItem(self, username, valueStr): + session = Session() + session._createSharedKey(username, valueStr) + return session + + def __setitem__(self, username, sharedKey): + """Add a shared key to the database. + + @type username: str + @param username: The username to associate the shared key with. + Must be less than or equal to 16 characters in length, and must + not already be in the database. + + @type sharedKey: str + @param sharedKey: The shared key to add. Must be less than 48 + characters in length. 
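A server-side sketch tying this shared-key database to the SessionCache defined above. handshakeServer() and its keyword argument names are not shown in this excerpt, so treat them as assumptions about the rest of the library; the username and key are placeholders.

from tlslite.SharedKeyDB import SharedKeyDB
from tlslite.SessionCache import SessionCache

sharedKeyDB = SharedKeyDB()                 # in-memory; pass a filename and
                                            # call create()/open() for on-disk
sharedKeyDB["alice"] = "0123456789abcdef"   # username <= 16 chars, key < 48 chars

sessionCache = SessionCache(maxEntries=5000, maxAge=3600)

# connection = TLSConnection(accepted_socket)
# connection.handshakeServer(sharedKeyDB=sharedKeyDB, sessionCache=sessionCache)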
+ """ + BaseDB.__setitem__(self, username, sharedKey) + + def _setItem(self, username, value): + if len(username)>16: + raise ValueError("username too long") + if len(value)>=48: + raise ValueError("shared key too long") + return value + + def _checkItem(self, value, username, param): + newSession = self._getItem(username, param) + return value.masterSecret == newSession.masterSecret \ No newline at end of file diff --git a/gam/gdata/analytics/tlslite/TLSConnection.py b/gam/gdata/analytics/tlslite/TLSConnection.py new file mode 100755 index 00000000000..d125f8f0a40 --- /dev/null +++ b/gam/gdata/analytics/tlslite/TLSConnection.py @@ -0,0 +1,1600 @@ +""" +MAIN CLASS FOR TLS LITE (START HERE!). +""" +from __future__ import generators + +import socket +from utils.compat import formatExceptionTrace +from TLSRecordLayer import TLSRecordLayer +from Session import Session +from constants import * +from utils.cryptomath import getRandomBytes +from errors import * +from messages import * +from mathtls import * +from HandshakeSettings import HandshakeSettings + + +class TLSConnection(TLSRecordLayer): + """ + This class wraps a socket and provides TLS handshaking and data + transfer. + + To use this class, create a new instance, passing a connected + socket into the constructor. Then call some handshake function. + If the handshake completes without raising an exception, then a TLS + connection has been negotiated. You can transfer data over this + connection as if it were a socket. + + This class provides both synchronous and asynchronous versions of + its key functions. The synchronous versions should be used when + writing single-or multi-threaded code using blocking sockets. The + asynchronous versions should be used when performing asynchronous, + event-based I/O with non-blocking sockets. + + Asynchronous I/O is a complicated subject; typically, you should + not use the asynchronous functions directly, but should use some + framework like asyncore or Twisted which TLS Lite integrates with + (see + L{tlslite.integration.TLSAsyncDispatcherMixIn.TLSAsyncDispatcherMixIn} or + L{tlslite.integration.TLSTwistedProtocolWrapper.TLSTwistedProtocolWrapper}). + """ + + + def __init__(self, sock): + """Create a new TLSConnection instance. + + @param sock: The socket data will be transmitted on. The + socket should already be connected. It may be in blocking or + non-blocking mode. + + @type sock: L{socket.socket} + """ + TLSRecordLayer.__init__(self, sock) + + def handshakeClientSRP(self, username, password, session=None, + settings=None, checker=None, async=False): + """Perform an SRP handshake in the role of client. + + This function performs a TLS/SRP handshake. SRP mutually + authenticates both parties to each other using only a + username and password. This function may also perform a + combined SRP and server-certificate handshake, if the server + chooses to authenticate itself with a certificate chain in + addition to doing SRP. + + TLS/SRP is non-standard. Most TLS implementations don't + support it. See + U{http://www.ietf.org/html.charters/tls-charter.html} or + U{http://trevp.net/tlssrp/} for the latest information on + TLS/SRP. + + Like any handshake function, this can be called on a closed + TLS connection, or on a TLS connection that is already open. + If called on an open connection it performs a re-handshake. + + If the function completes without raising an exception, the + TLS connection will be open and available for data transfer. 
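A minimal client-side sketch of this call, assuming the vendored package imports as "tlslite"; the host and credentials are placeholders, and the data-transfer calls mirror the socket API as the class docstring describes.

import socket
from tlslite.TLSConnection import TLSConnection

sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect(("srp.example.org", 443))

connection = TLSConnection(sock)
connection.handshakeClientSRP("alice", "mypassword")
connection.send("hello over TLS-SRP\n")
connection.close()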
+ + If an exception is raised, the connection will have been + automatically closed (if it was ever open). + + @type username: str + @param username: The SRP username. + + @type password: str + @param password: The SRP password. + + @type session: L{tlslite.Session.Session} + @param session: A TLS session to attempt to resume. This + session must be an SRP session performed with the same username + and password as were passed in. If the resumption does not + succeed, a full SRP handshake will be performed. + + @type settings: L{tlslite.HandshakeSettings.HandshakeSettings} + @param settings: Various settings which can be used to control + the ciphersuites, certificate types, and SSL/TLS versions + offered by the client. + + @type checker: L{tlslite.Checker.Checker} + @param checker: A Checker instance. This instance will be + invoked to examine the other party's authentication + credentials, if the handshake completes succesfully. + + @type async: bool + @param async: If False, this function will block until the + handshake is completed. If True, this function will return a + generator. Successive invocations of the generator will + return 0 if it is waiting to read from the socket, 1 if it is + waiting to write to the socket, or will raise StopIteration if + the handshake operation is completed. + + @rtype: None or an iterable + @return: If 'async' is True, a generator object will be + returned. + + @raise socket.error: If a socket error occurs. + @raise tlslite.errors.TLSAbruptCloseError: If the socket is closed + without a preceding alert. + @raise tlslite.errors.TLSAlert: If a TLS alert is signalled. + @raise tlslite.errors.TLSAuthenticationError: If the checker + doesn't like the other party's authentication credentials. + """ + handshaker = self._handshakeClientAsync(srpParams=(username, password), + session=session, settings=settings, checker=checker) + if async: + return handshaker + for result in handshaker: + pass + + def handshakeClientCert(self, certChain=None, privateKey=None, + session=None, settings=None, checker=None, + async=False): + """Perform a certificate-based handshake in the role of client. + + This function performs an SSL or TLS handshake. The server + will authenticate itself using an X.509 or cryptoID certificate + chain. If the handshake succeeds, the server's certificate + chain will be stored in the session's serverCertChain attribute. + Unless a checker object is passed in, this function does no + validation or checking of the server's certificate chain. + + If the server requests client authentication, the + client will send the passed-in certificate chain, and use the + passed-in private key to authenticate itself. If no + certificate chain and private key were passed in, the client + will attempt to proceed without client authentication. The + server may or may not allow this. + + Like any handshake function, this can be called on a closed + TLS connection, or on a TLS connection that is already open. + If called on an open connection it performs a re-handshake. + + If the function completes without raising an exception, the + TLS connection will be open and available for data transfer. + + If an exception is raised, the connection will have been + automatically closed (if it was ever open). + + @type certChain: L{tlslite.X509CertChain.X509CertChain} or + L{cryptoIDlib.CertChain.CertChain} + @param certChain: The certificate chain to be used if the + server requests client authentication. 
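A sketch of supplying these client-authentication parameters. The PEM-parsing helpers used below (X509.parse and parsePEMKey) belong to tlslite but are not shown in this excerpt, so treat them as assumptions; the file names and socket are placeholders.

from tlslite.TLSConnection import TLSConnection
from tlslite.X509 import X509
from tlslite.X509CertChain import X509CertChain
from tlslite.utils.keyfactory import parsePEMKey

cert = X509()
cert.parse(open("client-cert.pem").read())
certChain = X509CertChain([cert])
privateKey = parsePEMKey(open("client-key.pem").read(), private=True)

connection = TLSConnection(connected_socket)         # placeholder socket
connection.handshakeClientCert(certChain=certChain, privateKey=privateKey)
serverChain = connection.session.serverCertChain     # the server's chain, per above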
+ + @type privateKey: L{tlslite.utils.RSAKey.RSAKey} + @param privateKey: The private key to be used if the server + requests client authentication. + + @type session: L{tlslite.Session.Session} + @param session: A TLS session to attempt to resume. If the + resumption does not succeed, a full handshake will be + performed. + + @type settings: L{tlslite.HandshakeSettings.HandshakeSettings} + @param settings: Various settings which can be used to control + the ciphersuites, certificate types, and SSL/TLS versions + offered by the client. + + @type checker: L{tlslite.Checker.Checker} + @param checker: A Checker instance. This instance will be + invoked to examine the other party's authentication + credentials, if the handshake completes succesfully. + + @type async: bool + @param async: If False, this function will block until the + handshake is completed. If True, this function will return a + generator. Successive invocations of the generator will + return 0 if it is waiting to read from the socket, 1 if it is + waiting to write to the socket, or will raise StopIteration if + the handshake operation is completed. + + @rtype: None or an iterable + @return: If 'async' is True, a generator object will be + returned. + + @raise socket.error: If a socket error occurs. + @raise tlslite.errors.TLSAbruptCloseError: If the socket is closed + without a preceding alert. + @raise tlslite.errors.TLSAlert: If a TLS alert is signalled. + @raise tlslite.errors.TLSAuthenticationError: If the checker + doesn't like the other party's authentication credentials. + """ + handshaker = self._handshakeClientAsync(certParams=(certChain, + privateKey), session=session, settings=settings, + checker=checker) + if async: + return handshaker + for result in handshaker: + pass + + def handshakeClientUnknown(self, srpCallback=None, certCallback=None, + session=None, settings=None, checker=None, + async=False): + """Perform a to-be-determined type of handshake in the role of client. + + This function performs an SSL or TLS handshake. If the server + requests client certificate authentication, the + certCallback will be invoked and should return a (certChain, + privateKey) pair. If the callback returns None, the library + will attempt to proceed without client authentication. The + server may or may not allow this. + + If the server requests SRP authentication, the srpCallback + will be invoked and should return a (username, password) pair. + If the callback returns None, the local implementation will + signal a user_canceled error alert. + + After the handshake completes, the client can inspect the + connection's session attribute to determine what type of + authentication was performed. + + Like any handshake function, this can be called on a closed + TLS connection, or on a TLS connection that is already open. + If called on an open connection it performs a re-handshake. + + If the function completes without raising an exception, the + TLS connection will be open and available for data transfer. + + If an exception is raised, the connection will have been + automatically closed (if it was ever open). + + @type srpCallback: callable + @param srpCallback: The callback to be used if the server + requests SRP authentication. If None, the client will not + offer support for SRP ciphersuites. + + @type certCallback: callable + @param certCallback: The callback to be used if the server + requests client certificate authentication. 
+ + @type session: L{tlslite.Session.Session} + @param session: A TLS session to attempt to resume. If the + resumption does not succeed, a full handshake will be + performed. + + @type settings: L{tlslite.HandshakeSettings.HandshakeSettings} + @param settings: Various settings which can be used to control + the ciphersuites, certificate types, and SSL/TLS versions + offered by the client. + + @type checker: L{tlslite.Checker.Checker} + @param checker: A Checker instance. This instance will be + invoked to examine the other party's authentication + credentials, if the handshake completes succesfully. + + @type async: bool + @param async: If False, this function will block until the + handshake is completed. If True, this function will return a + generator. Successive invocations of the generator will + return 0 if it is waiting to read from the socket, 1 if it is + waiting to write to the socket, or will raise StopIteration if + the handshake operation is completed. + + @rtype: None or an iterable + @return: If 'async' is True, a generator object will be + returned. + + @raise socket.error: If a socket error occurs. + @raise tlslite.errors.TLSAbruptCloseError: If the socket is closed + without a preceding alert. + @raise tlslite.errors.TLSAlert: If a TLS alert is signalled. + @raise tlslite.errors.TLSAuthenticationError: If the checker + doesn't like the other party's authentication credentials. + """ + handshaker = self._handshakeClientAsync(unknownParams=(srpCallback, + certCallback), session=session, settings=settings, + checker=checker) + if async: + return handshaker + for result in handshaker: + pass + + def handshakeClientSharedKey(self, username, sharedKey, settings=None, + checker=None, async=False): + """Perform a shared-key handshake in the role of client. + + This function performs a shared-key handshake. Using shared + symmetric keys of high entropy (128 bits or greater) mutually + authenticates both parties to each other. + + TLS with shared-keys is non-standard. Most TLS + implementations don't support it. See + U{http://www.ietf.org/html.charters/tls-charter.html} for the + latest information on TLS with shared-keys. If the shared-keys + Internet-Draft changes or is superceded, TLS Lite will track + those changes, so the shared-key support in later versions of + TLS Lite may become incompatible with this version. + + Like any handshake function, this can be called on a closed + TLS connection, or on a TLS connection that is already open. + If called on an open connection it performs a re-handshake. + + If the function completes without raising an exception, the + TLS connection will be open and available for data transfer. + + If an exception is raised, the connection will have been + automatically closed (if it was ever open). + + @type username: str + @param username: The shared-key username. + + @type sharedKey: str + @param sharedKey: The shared key. + + @type settings: L{tlslite.HandshakeSettings.HandshakeSettings} + @param settings: Various settings which can be used to control + the ciphersuites, certificate types, and SSL/TLS versions + offered by the client. + + @type checker: L{tlslite.Checker.Checker} + @param checker: A Checker instance. This instance will be + invoked to examine the other party's authentication + credentials, if the handshake completes succesfully. + + @type async: bool + @param async: If False, this function will block until the + handshake is completed. If True, this function will return a + generator. 
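For illustration, a sketch of driving one of these handshake generators by hand with select(); the 0/1 yield protocol it relies on is spelled out just below, and "sock" is the plain socket originally passed to TLSConnection. (The async keyword argument matches the Python 2 era API this code targets.)

import select

handshaker = connection.handshakeClientCert(async=True)
for waitingFor in handshaker:
    if waitingFor == 0:
        select.select([sock], [], [])      # generator is waiting to read
    else:
        select.select([], [sock], [])      # generator is waiting to write
# The loop ends when the generator raises StopIteration: handshake complete.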
Successive invocations of the generator will + return 0 if it is waiting to read from the socket, 1 if it is + waiting to write to the socket, or will raise StopIteration if + the handshake operation is completed. + + @rtype: None or an iterable + @return: If 'async' is True, a generator object will be + returned. + + @raise socket.error: If a socket error occurs. + @raise tlslite.errors.TLSAbruptCloseError: If the socket is closed + without a preceding alert. + @raise tlslite.errors.TLSAlert: If a TLS alert is signalled. + @raise tlslite.errors.TLSAuthenticationError: If the checker + doesn't like the other party's authentication credentials. + """ + handshaker = self._handshakeClientAsync(sharedKeyParams=(username, + sharedKey), settings=settings, checker=checker) + if async: + return handshaker + for result in handshaker: + pass + + def _handshakeClientAsync(self, srpParams=(), certParams=(), + unknownParams=(), sharedKeyParams=(), + session=None, settings=None, checker=None, + recursive=False): + + handshaker = self._handshakeClientAsyncHelper(srpParams=srpParams, + certParams=certParams, unknownParams=unknownParams, + sharedKeyParams=sharedKeyParams, session=session, + settings=settings, recursive=recursive) + for result in self._handshakeWrapperAsync(handshaker, checker): + yield result + + + def _handshakeClientAsyncHelper(self, srpParams, certParams, unknownParams, + sharedKeyParams, session, settings, recursive): + if not recursive: + self._handshakeStart(client=True) + + #Unpack parameters + srpUsername = None # srpParams + password = None # srpParams + clientCertChain = None # certParams + privateKey = None # certParams + srpCallback = None # unknownParams + certCallback = None # unknownParams + #session # sharedKeyParams (or session) + #settings # settings + + if srpParams: + srpUsername, password = srpParams + elif certParams: + clientCertChain, privateKey = certParams + elif unknownParams: + srpCallback, certCallback = unknownParams + elif sharedKeyParams: + session = Session()._createSharedKey(*sharedKeyParams) + + if not settings: + settings = HandshakeSettings() + settings = settings._filter() + + #Validate parameters + if srpUsername and not password: + raise ValueError("Caller passed a username but no password") + if password and not srpUsername: + raise ValueError("Caller passed a password but no username") + + if clientCertChain and not privateKey: + raise ValueError("Caller passed a certChain but no privateKey") + if privateKey and not clientCertChain: + raise ValueError("Caller passed a privateKey but no certChain") + + if clientCertChain: + foundType = False + try: + import cryptoIDlib.CertChain + if isinstance(clientCertChain, cryptoIDlib.CertChain.CertChain): + if "cryptoID" not in settings.certificateTypes: + raise ValueError("Client certificate doesn't "\ + "match Handshake Settings") + settings.certificateTypes = ["cryptoID"] + foundType = True + except ImportError: + pass + if not foundType and isinstance(clientCertChain, + X509CertChain): + if "x509" not in settings.certificateTypes: + raise ValueError("Client certificate doesn't match "\ + "Handshake Settings") + settings.certificateTypes = ["x509"] + foundType = True + if not foundType: + raise ValueError("Unrecognized certificate type") + + + if session: + if not session.valid(): + session = None #ignore non-resumable sessions... 
+ elif session.resumable and \ + (session.srpUsername != srpUsername): + raise ValueError("Session username doesn't match") + + #Add Faults to parameters + if srpUsername and self.fault == Fault.badUsername: + srpUsername += "GARBAGE" + if password and self.fault == Fault.badPassword: + password += "GARBAGE" + if sharedKeyParams: + identifier = sharedKeyParams[0] + sharedKey = sharedKeyParams[1] + if self.fault == Fault.badIdentifier: + identifier += "GARBAGE" + session = Session()._createSharedKey(identifier, sharedKey) + elif self.fault == Fault.badSharedKey: + sharedKey += "GARBAGE" + session = Session()._createSharedKey(identifier, sharedKey) + + + #Initialize locals + serverCertChain = None + cipherSuite = 0 + certificateType = CertificateType.x509 + premasterSecret = None + + #Get client nonce + clientRandom = getRandomBytes(32) + + #Initialize acceptable ciphersuites + cipherSuites = [] + if srpParams: + cipherSuites += CipherSuite.getSrpRsaSuites(settings.cipherNames) + cipherSuites += CipherSuite.getSrpSuites(settings.cipherNames) + elif certParams: + cipherSuites += CipherSuite.getRsaSuites(settings.cipherNames) + elif unknownParams: + if srpCallback: + cipherSuites += \ + CipherSuite.getSrpRsaSuites(settings.cipherNames) + cipherSuites += \ + CipherSuite.getSrpSuites(settings.cipherNames) + cipherSuites += CipherSuite.getRsaSuites(settings.cipherNames) + elif sharedKeyParams: + cipherSuites += CipherSuite.getRsaSuites(settings.cipherNames) + else: + cipherSuites += CipherSuite.getRsaSuites(settings.cipherNames) + + #Initialize acceptable certificate types + certificateTypes = settings._getCertificateTypes() + + #Tentatively set the version to the client's minimum version. + #We'll use this for the ClientHello, and if an error occurs + #parsing the Server Hello, we'll use this version for the response + self.version = settings.maxVersion + + #Either send ClientHello (with a resumable session)... + if session: + #If it's a resumable (i.e. 
not a shared-key session), then its + #ciphersuite must be one of the acceptable ciphersuites + if (not sharedKeyParams) and \ + session.cipherSuite not in cipherSuites: + raise ValueError("Session's cipher suite not consistent "\ + "with parameters") + else: + clientHello = ClientHello() + clientHello.create(settings.maxVersion, clientRandom, + session.sessionID, cipherSuites, + certificateTypes, session.srpUsername) + + #Or send ClientHello (without) + else: + clientHello = ClientHello() + clientHello.create(settings.maxVersion, clientRandom, + createByteArraySequence([]), cipherSuites, + certificateTypes, srpUsername) + for result in self._sendMsg(clientHello): + yield result + + #Get ServerHello (or missing_srp_username) + for result in self._getMsg((ContentType.handshake, + ContentType.alert), + HandshakeType.server_hello): + if result in (0,1): + yield result + else: + break + msg = result + + if isinstance(msg, ServerHello): + serverHello = msg + elif isinstance(msg, Alert): + alert = msg + + #If it's not a missing_srp_username, re-raise + if alert.description != AlertDescription.missing_srp_username: + self._shutdown(False) + raise TLSRemoteAlert(alert) + + #If we're not in SRP callback mode, we won't have offered SRP + #without a username, so we shouldn't get this alert + if not srpCallback: + for result in self._sendError(\ + AlertDescription.unexpected_message): + yield result + srpParams = srpCallback() + #If the callback returns None, cancel the handshake + if srpParams == None: + for result in self._sendError(AlertDescription.user_canceled): + yield result + + #Recursively perform handshake + for result in self._handshakeClientAsyncHelper(srpParams, + None, None, None, None, settings, True): + yield result + return + + #Get the server version. 
Do this before anything else, so any + #error alerts will use the server's version + self.version = serverHello.server_version + + #Future responses from server must use this version + self._versionCheck = True + + #Check ServerHello + if serverHello.server_version < settings.minVersion: + for result in self._sendError(\ + AlertDescription.protocol_version, + "Too old version: %s" % str(serverHello.server_version)): + yield result + if serverHello.server_version > settings.maxVersion: + for result in self._sendError(\ + AlertDescription.protocol_version, + "Too new version: %s" % str(serverHello.server_version)): + yield result + if serverHello.cipher_suite not in cipherSuites: + for result in self._sendError(\ + AlertDescription.illegal_parameter, + "Server responded with incorrect ciphersuite"): + yield result + if serverHello.certificate_type not in certificateTypes: + for result in self._sendError(\ + AlertDescription.illegal_parameter, + "Server responded with incorrect certificate type"): + yield result + if serverHello.compression_method != 0: + for result in self._sendError(\ + AlertDescription.illegal_parameter, + "Server responded with incorrect compression method"): + yield result + + #Get the server nonce + serverRandom = serverHello.random + + #If the server agrees to resume + if session and session.sessionID and \ + serverHello.session_id == session.sessionID: + + #If a shared-key, we're flexible about suites; otherwise the + #server-chosen suite has to match the session's suite + if sharedKeyParams: + session.cipherSuite = serverHello.cipher_suite + elif serverHello.cipher_suite != session.cipherSuite: + for result in self._sendError(\ + AlertDescription.illegal_parameter,\ + "Server's ciphersuite doesn't match session"): + yield result + + #Set the session for this connection + self.session = session + + #Calculate pending connection states + self._calcPendingStates(clientRandom, serverRandom, + settings.cipherImplementations) + + #Exchange ChangeCipherSpec and Finished messages + for result in self._getFinished(): + yield result + for result in self._sendFinished(): + yield result + + #Mark the connection as open + self._handshakeDone(resumed=True) + + #If server DOES NOT agree to resume + else: + + if sharedKeyParams: + for result in self._sendError(\ + AlertDescription.user_canceled, + "Was expecting a shared-key resumption"): + yield result + + #We've already validated these + cipherSuite = serverHello.cipher_suite + certificateType = serverHello.certificate_type + + #If the server chose an SRP suite... + if cipherSuite in CipherSuite.srpSuites: + #Get ServerKeyExchange, ServerHelloDone + for result in self._getMsg(ContentType.handshake, + HandshakeType.server_key_exchange, cipherSuite): + if result in (0,1): + yield result + else: + break + serverKeyExchange = result + + for result in self._getMsg(ContentType.handshake, + HandshakeType.server_hello_done): + if result in (0,1): + yield result + else: + break + serverHelloDone = result + + #If the server chose an SRP+RSA suite... 
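# [Editorial note, not part of the original patch]
# For orientation, the server flight the client reads at this point depends
# on the negotiated suite family: plain SRP suites (handled just above)
# deliver ServerKeyExchange and ServerHelloDone; SRP+RSA suites (below)
# deliver Certificate, a signed ServerKeyExchange, and ServerHelloDone; RSA
# suites deliver Certificate, optionally CertificateRequest, and
# ServerHelloDone.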
+ elif cipherSuite in CipherSuite.srpRsaSuites: + #Get Certificate, ServerKeyExchange, ServerHelloDone + for result in self._getMsg(ContentType.handshake, + HandshakeType.certificate, certificateType): + if result in (0,1): + yield result + else: + break + serverCertificate = result + + for result in self._getMsg(ContentType.handshake, + HandshakeType.server_key_exchange, cipherSuite): + if result in (0,1): + yield result + else: + break + serverKeyExchange = result + + for result in self._getMsg(ContentType.handshake, + HandshakeType.server_hello_done): + if result in (0,1): + yield result + else: + break + serverHelloDone = result + + #If the server chose an RSA suite... + elif cipherSuite in CipherSuite.rsaSuites: + #Get Certificate[, CertificateRequest], ServerHelloDone + for result in self._getMsg(ContentType.handshake, + HandshakeType.certificate, certificateType): + if result in (0,1): + yield result + else: + break + serverCertificate = result + + for result in self._getMsg(ContentType.handshake, + (HandshakeType.server_hello_done, + HandshakeType.certificate_request)): + if result in (0,1): + yield result + else: + break + msg = result + + certificateRequest = None + if isinstance(msg, CertificateRequest): + certificateRequest = msg + for result in self._getMsg(ContentType.handshake, + HandshakeType.server_hello_done): + if result in (0,1): + yield result + else: + break + serverHelloDone = result + elif isinstance(msg, ServerHelloDone): + serverHelloDone = msg + else: + raise AssertionError() + + + #Calculate SRP premaster secret, if server chose an SRP or + #SRP+RSA suite + if cipherSuite in CipherSuite.srpSuites + \ + CipherSuite.srpRsaSuites: + #Get and check the server's group parameters and B value + N = serverKeyExchange.srp_N + g = serverKeyExchange.srp_g + s = serverKeyExchange.srp_s + B = serverKeyExchange.srp_B + + if (g,N) not in goodGroupParameters: + for result in self._sendError(\ + AlertDescription.untrusted_srp_parameters, + "Unknown group parameters"): + yield result + if numBits(N) < settings.minKeySize: + for result in self._sendError(\ + AlertDescription.untrusted_srp_parameters, + "N value is too small: %d" % numBits(N)): + yield result + if numBits(N) > settings.maxKeySize: + for result in self._sendError(\ + AlertDescription.untrusted_srp_parameters, + "N value is too large: %d" % numBits(N)): + yield result + if B % N == 0: + for result in self._sendError(\ + AlertDescription.illegal_parameter, + "Suspicious B value"): + yield result + + #Check the server's signature, if server chose an + #SRP+RSA suite + if cipherSuite in CipherSuite.srpRsaSuites: + #Hash ServerKeyExchange/ServerSRPParams + hashBytes = serverKeyExchange.hash(clientRandom, + serverRandom) + + #Extract signature bytes from ServerKeyExchange + sigBytes = serverKeyExchange.signature + if len(sigBytes) == 0: + for result in self._sendError(\ + AlertDescription.illegal_parameter, + "Server sent an SRP ServerKeyExchange "\ + "message without a signature"): + yield result + + #Get server's public key from the Certificate message + for result in self._getKeyFromChain(serverCertificate, + settings): + if result in (0,1): + yield result + else: + break + publicKey, serverCertChain = result + + #Verify signature + if not publicKey.verify(sigBytes, hashBytes): + for result in self._sendError(\ + AlertDescription.decrypt_error, + "Signature failed to verify"): + yield result + + + #Calculate client's ephemeral DH values (a, A) + a = bytesToNumber(getRandomBytes(32)) + A = powMod(g, a, N) + + #Calculate 
client's static DH values (x, v) + x = makeX(bytesToString(s), srpUsername, password) + v = powMod(g, x, N) + + #Calculate u + u = makeU(N, A, B) + + #Calculate premaster secret + k = makeK(N, g) + S = powMod((B - (k*v)) % N, a+(u*x), N) + + if self.fault == Fault.badA: + A = N + S = 0 + premasterSecret = numberToBytes(S) + + #Send ClientKeyExchange + for result in self._sendMsg(\ + ClientKeyExchange(cipherSuite).createSRP(A)): + yield result + + + #Calculate RSA premaster secret, if server chose an RSA suite + elif cipherSuite in CipherSuite.rsaSuites: + + #Handle the presence of a CertificateRequest + if certificateRequest: + if unknownParams and certCallback: + certParamsNew = certCallback() + if certParamsNew: + clientCertChain, privateKey = certParamsNew + + #Get server's public key from the Certificate message + for result in self._getKeyFromChain(serverCertificate, + settings): + if result in (0,1): + yield result + else: + break + publicKey, serverCertChain = result + + + #Calculate premaster secret + premasterSecret = getRandomBytes(48) + premasterSecret[0] = settings.maxVersion[0] + premasterSecret[1] = settings.maxVersion[1] + + if self.fault == Fault.badPremasterPadding: + premasterSecret[0] = 5 + if self.fault == Fault.shortPremasterSecret: + premasterSecret = premasterSecret[:-1] + + #Encrypt premaster secret to server's public key + encryptedPreMasterSecret = publicKey.encrypt(premasterSecret) + + #If client authentication was requested, send Certificate + #message, either with certificates or empty + if certificateRequest: + clientCertificate = Certificate(certificateType) + + if clientCertChain: + #Check to make sure we have the same type of + #certificates the server requested + wrongType = False + if certificateType == CertificateType.x509: + if not isinstance(clientCertChain, X509CertChain): + wrongType = True + elif certificateType == CertificateType.cryptoID: + if not isinstance(clientCertChain, + cryptoIDlib.CertChain.CertChain): + wrongType = True + if wrongType: + for result in self._sendError(\ + AlertDescription.handshake_failure, + "Client certificate is of wrong type"): + yield result + + clientCertificate.create(clientCertChain) + + for result in self._sendMsg(clientCertificate): + yield result + else: + #The server didn't request client auth, so we + #zeroize these so the clientCertChain won't be + #stored in the session. 
+ privateKey = None + clientCertChain = None + + #Send ClientKeyExchange + clientKeyExchange = ClientKeyExchange(cipherSuite, + self.version) + clientKeyExchange.createRSA(encryptedPreMasterSecret) + for result in self._sendMsg(clientKeyExchange): + yield result + + #If client authentication was requested and we have a + #private key, send CertificateVerify + if certificateRequest and privateKey: + if self.version == (3,0): + #Create a temporary session object, just for the + #purpose of creating the CertificateVerify + session = Session() + session._calcMasterSecret(self.version, + premasterSecret, + clientRandom, + serverRandom) + verifyBytes = self._calcSSLHandshakeHash(\ + session.masterSecret, "") + elif self.version in ((3,1), (3,2)): + verifyBytes = stringToBytes(\ + self._handshake_md5.digest() + \ + self._handshake_sha.digest()) + if self.fault == Fault.badVerifyMessage: + verifyBytes[0] = ((verifyBytes[0]+1) % 256) + signedBytes = privateKey.sign(verifyBytes) + certificateVerify = CertificateVerify() + certificateVerify.create(signedBytes) + for result in self._sendMsg(certificateVerify): + yield result + + + #Create the session object + self.session = Session() + self.session._calcMasterSecret(self.version, premasterSecret, + clientRandom, serverRandom) + self.session.sessionID = serverHello.session_id + self.session.cipherSuite = cipherSuite + self.session.srpUsername = srpUsername + self.session.clientCertChain = clientCertChain + self.session.serverCertChain = serverCertChain + + #Calculate pending connection states + self._calcPendingStates(clientRandom, serverRandom, + settings.cipherImplementations) + + #Exchange ChangeCipherSpec and Finished messages + for result in self._sendFinished(): + yield result + for result in self._getFinished(): + yield result + + #Mark the connection as open + self.session._setResumable(True) + self._handshakeDone(resumed=False) + + + + def handshakeServer(self, sharedKeyDB=None, verifierDB=None, + certChain=None, privateKey=None, reqCert=False, + sessionCache=None, settings=None, checker=None): + """Perform a handshake in the role of server. + + This function performs an SSL or TLS handshake. Depending on + the arguments and the behavior of the client, this function can + perform a shared-key, SRP, or certificate-based handshake. It + can also perform a combined SRP and server-certificate + handshake. + + Like any handshake function, this can be called on a closed + TLS connection, or on a TLS connection that is already open. + If called on an open connection it performs a re-handshake. + This function does not send a Hello Request message before + performing the handshake, so if re-handshaking is required, + the server must signal the client to begin the re-handshake + through some other means. + + If the function completes without raising an exception, the + TLS connection will be open and available for data transfer. + + If an exception is raised, the connection will have been + automatically closed (if it was ever open). + + @type sharedKeyDB: L{tlslite.SharedKeyDB.SharedKeyDB} + @param sharedKeyDB: A database of shared symmetric keys + associated with usernames. If the client performs a + shared-key handshake, the session's sharedKeyUsername + attribute will be set. + + @type verifierDB: L{tlslite.VerifierDB.VerifierDB} + @param verifierDB: A database of SRP password verifiers + associated with usernames. If the client performs an SRP + handshake, the session's srpUsername attribute will be set. 
+ + @type certChain: L{tlslite.X509CertChain.X509CertChain} or + L{cryptoIDlib.CertChain.CertChain} + @param certChain: The certificate chain to be used if the + client requests server certificate authentication. + + @type privateKey: L{tlslite.utils.RSAKey.RSAKey} + @param privateKey: The private key to be used if the client + requests server certificate authentication. + + @type reqCert: bool + @param reqCert: Whether to request client certificate + authentication. This only applies if the client chooses server + certificate authentication; if the client chooses SRP or + shared-key authentication, this will be ignored. If the client + performs a client certificate authentication, the sessions's + clientCertChain attribute will be set. + + @type sessionCache: L{tlslite.SessionCache.SessionCache} + @param sessionCache: An in-memory cache of resumable sessions. + The client can resume sessions from this cache. Alternatively, + if the client performs a full handshake, a new session will be + added to the cache. + + @type settings: L{tlslite.HandshakeSettings.HandshakeSettings} + @param settings: Various settings which can be used to control + the ciphersuites and SSL/TLS version chosen by the server. + + @type checker: L{tlslite.Checker.Checker} + @param checker: A Checker instance. This instance will be + invoked to examine the other party's authentication + credentials, if the handshake completes succesfully. + + @raise socket.error: If a socket error occurs. + @raise tlslite.errors.TLSAbruptCloseError: If the socket is closed + without a preceding alert. + @raise tlslite.errors.TLSAlert: If a TLS alert is signalled. + @raise tlslite.errors.TLSAuthenticationError: If the checker + doesn't like the other party's authentication credentials. + """ + for result in self.handshakeServerAsync(sharedKeyDB, verifierDB, + certChain, privateKey, reqCert, sessionCache, settings, + checker): + pass + + + def handshakeServerAsync(self, sharedKeyDB=None, verifierDB=None, + certChain=None, privateKey=None, reqCert=False, + sessionCache=None, settings=None, checker=None): + """Start a server handshake operation on the TLS connection. + + This function returns a generator which behaves similarly to + handshakeServer(). Successive invocations of the generator + will return 0 if it is waiting to read from the socket, 1 if it is + waiting to write to the socket, or it will raise StopIteration + if the handshake operation is complete. + + @rtype: iterable + @return: A generator; see above for details. 
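        An illustrative driving loop (editorial sketch, not part of the
        original patch; it assumes 'connection' wraps a non-blocking socket,
        that the standard select module is imported, and that verifierDB,
        certChain and privateKey were prepared by the caller):

            handshaker = connection.handshakeServerAsync(verifierDB=verifierDB,
                certChain=certChain, privateKey=privateKey)
            for waitingFor in handshaker:
                if waitingFor == 0:
                    select.select([connection.sock], [], [])  #wait until readable
                elif waitingFor == 1:
                    select.select([], [connection.sock], [])  #wait until writable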
+ """ + handshaker = self._handshakeServerAsyncHelper(\ + sharedKeyDB=sharedKeyDB, + verifierDB=verifierDB, certChain=certChain, + privateKey=privateKey, reqCert=reqCert, + sessionCache=sessionCache, settings=settings) + for result in self._handshakeWrapperAsync(handshaker, checker): + yield result + + + def _handshakeServerAsyncHelper(self, sharedKeyDB, verifierDB, + certChain, privateKey, reqCert, sessionCache, + settings): + + self._handshakeStart(client=False) + + if (not sharedKeyDB) and (not verifierDB) and (not certChain): + raise ValueError("Caller passed no authentication credentials") + if certChain and not privateKey: + raise ValueError("Caller passed a certChain but no privateKey") + if privateKey and not certChain: + raise ValueError("Caller passed a privateKey but no certChain") + + if not settings: + settings = HandshakeSettings() + settings = settings._filter() + + #Initialize acceptable cipher suites + cipherSuites = [] + if verifierDB: + if certChain: + cipherSuites += \ + CipherSuite.getSrpRsaSuites(settings.cipherNames) + cipherSuites += CipherSuite.getSrpSuites(settings.cipherNames) + if sharedKeyDB or certChain: + cipherSuites += CipherSuite.getRsaSuites(settings.cipherNames) + + #Initialize acceptable certificate type + certificateType = None + if certChain: + try: + import cryptoIDlib.CertChain + if isinstance(certChain, cryptoIDlib.CertChain.CertChain): + certificateType = CertificateType.cryptoID + except ImportError: + pass + if isinstance(certChain, X509CertChain): + certificateType = CertificateType.x509 + if certificateType == None: + raise ValueError("Unrecognized certificate type") + + #Initialize locals + clientCertChain = None + serverCertChain = None #We may set certChain to this later + postFinishedError = None + + #Tentatively set version to most-desirable version, so if an error + #occurs parsing the ClientHello, this is what we'll use for the + #error alert + self.version = settings.maxVersion + + #Get ClientHello + for result in self._getMsg(ContentType.handshake, + HandshakeType.client_hello): + if result in (0,1): + yield result + else: + break + clientHello = result + + #If client's version is too low, reject it + if clientHello.client_version < settings.minVersion: + self.version = settings.minVersion + for result in self._sendError(\ + AlertDescription.protocol_version, + "Too old version: %s" % str(clientHello.client_version)): + yield result + + #If client's version is too high, propose my highest version + elif clientHello.client_version > settings.maxVersion: + self.version = settings.maxVersion + + else: + #Set the version to the client's version + self.version = clientHello.client_version + + #Get the client nonce; create server nonce + clientRandom = clientHello.random + serverRandom = getRandomBytes(32) + + #Calculate the first cipher suite intersection. + #This is the 'privileged' ciphersuite. We'll use it if we're + #doing a shared-key resumption or a new negotiation. In fact, + #the only time we won't use it is if we're resuming a non-sharedkey + #session, in which case we use the ciphersuite from the session. + # + #Given the current ciphersuite ordering, this means we prefer SRP + #over non-SRP. + for cipherSuite in cipherSuites: + if cipherSuite in clientHello.cipher_suites: + break + else: + for result in self._sendError(\ + AlertDescription.handshake_failure): + yield result + + #If resumption was requested... 
+ if clientHello.session_id and (sharedKeyDB or sessionCache): + session = None + + #Check in the sharedKeys container + if sharedKeyDB and len(clientHello.session_id)==16: + try: + #Trim off zero padding, if any + for x in range(16): + if clientHello.session_id[x]==0: + break + self.allegedSharedKeyUsername = bytesToString(\ + clientHello.session_id[:x]) + session = sharedKeyDB[self.allegedSharedKeyUsername] + if not session.sharedKey: + raise AssertionError() + #use privileged ciphersuite + session.cipherSuite = cipherSuite + except KeyError: + pass + + #Then check in the session cache + if sessionCache and not session: + try: + session = sessionCache[bytesToString(\ + clientHello.session_id)] + if session.sharedKey: + raise AssertionError() + if not session.resumable: + raise AssertionError() + #Check for consistency with ClientHello + if session.cipherSuite not in cipherSuites: + for result in self._sendError(\ + AlertDescription.handshake_failure): + yield result + if session.cipherSuite not in clientHello.cipher_suites: + for result in self._sendError(\ + AlertDescription.handshake_failure): + yield result + if clientHello.srp_username: + if clientHello.srp_username != session.srpUsername: + for result in self._sendError(\ + AlertDescription.handshake_failure): + yield result + except KeyError: + pass + + #If a session is found.. + if session: + #Set the session + self.session = session + + #Send ServerHello + serverHello = ServerHello() + serverHello.create(self.version, serverRandom, + session.sessionID, session.cipherSuite, + certificateType) + for result in self._sendMsg(serverHello): + yield result + + #From here on, the client's messages must have the right version + self._versionCheck = True + + #Calculate pending connection states + self._calcPendingStates(clientRandom, serverRandom, + settings.cipherImplementations) + + #Exchange ChangeCipherSpec and Finished messages + for result in self._sendFinished(): + yield result + for result in self._getFinished(): + yield result + + #Mark the connection as open + self._handshakeDone(resumed=True) + return + + + #If not a resumption... + + #TRICKY: we might have chosen an RSA suite that was only deemed + #acceptable because of the shared-key resumption. If the shared- + #key resumption failed, because the identifier wasn't recognized, + #we might fall through to here, where we have an RSA suite + #chosen, but no certificate. + if cipherSuite in CipherSuite.rsaSuites and not certChain: + for result in self._sendError(\ + AlertDescription.handshake_failure): + yield result + + #If an RSA suite is chosen, check for certificate type intersection + #(We do this check down here because if the mismatch occurs but the + # client is using a shared-key session, it's okay) + if cipherSuite in CipherSuite.rsaSuites + \ + CipherSuite.srpRsaSuites: + if certificateType not in clientHello.certificate_types: + for result in self._sendError(\ + AlertDescription.handshake_failure, + "the client doesn't support my certificate type"): + yield result + + #Move certChain -> serverCertChain, now that we're using it + serverCertChain = certChain + + + #Create sessionID + if sessionCache: + sessionID = getRandomBytes(32) + else: + sessionID = createByteArraySequence([]) + + #If we've selected an SRP suite, exchange keys and calculate + #premaster secret: + if cipherSuite in CipherSuite.srpSuites + CipherSuite.srpRsaSuites: + + #If there's no SRP username... 
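# [Editorial note, not part of the original patch]
# For orientation, the SRP exchange implemented here and in the client code
# above yields the same premaster secret on both sides:
#   server: B = (g^b + k*v) mod N,  S = (A * v^u)^b mod N
#   client: A = g^a mod N,          S = (B - k*v)^(a + u*x) mod N
# with v = g^x mod N, so both sides arrive at g^(b*(a + u*x)) mod N.
# A toy check with plain integers (deliberately tiny, insecure parameters,
# illustration only):
#
#     N, g = 23, 5
#     a, b, x, u, k = 6, 7, 8, 9, 10
#     v = pow(g, x, N)
#     A = pow(g, a, N)
#     B = (pow(g, b, N) + k * v) % N
#     assert pow((B - k * v) % N, a + u * x, N) == \
#            pow((A * pow(v, u, N)) % N, b, N)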
+ if not clientHello.srp_username: + + #Ask the client to re-send ClientHello with one + for result in self._sendMsg(Alert().create(\ + AlertDescription.missing_srp_username, + AlertLevel.warning)): + yield result + + #Get ClientHello + for result in self._getMsg(ContentType.handshake, + HandshakeType.client_hello): + if result in (0,1): + yield result + else: + break + clientHello = result + + #Check ClientHello + #If client's version is too low, reject it (COPIED CODE; BAD!) + if clientHello.client_version < settings.minVersion: + self.version = settings.minVersion + for result in self._sendError(\ + AlertDescription.protocol_version, + "Too old version: %s" % str(clientHello.client_version)): + yield result + + #If client's version is too high, propose my highest version + elif clientHello.client_version > settings.maxVersion: + self.version = settings.maxVersion + + else: + #Set the version to the client's version + self.version = clientHello.client_version + + #Recalculate the privileged cipher suite, making sure to + #pick an SRP suite + cipherSuites = [c for c in cipherSuites if c in \ + CipherSuite.srpSuites + \ + CipherSuite.srpRsaSuites] + for cipherSuite in cipherSuites: + if cipherSuite in clientHello.cipher_suites: + break + else: + for result in self._sendError(\ + AlertDescription.handshake_failure): + yield result + + #Get the client nonce; create server nonce + clientRandom = clientHello.random + serverRandom = getRandomBytes(32) + + #The username better be there, this time + if not clientHello.srp_username: + for result in self._sendError(\ + AlertDescription.illegal_parameter, + "Client resent a hello, but without the SRP"\ + " username"): + yield result + + + #Get username + self.allegedSrpUsername = clientHello.srp_username + + #Get parameters from username + try: + entry = verifierDB[self.allegedSrpUsername] + except KeyError: + for result in self._sendError(\ + AlertDescription.unknown_srp_username): + yield result + (N, g, s, v) = entry + + #Calculate server's ephemeral DH values (b, B) + b = bytesToNumber(getRandomBytes(32)) + k = makeK(N, g) + B = (powMod(g, b, N) + (k*v)) % N + + #Create ServerKeyExchange, signing it if necessary + serverKeyExchange = ServerKeyExchange(cipherSuite) + serverKeyExchange.createSRP(N, g, stringToBytes(s), B) + if cipherSuite in CipherSuite.srpRsaSuites: + hashBytes = serverKeyExchange.hash(clientRandom, + serverRandom) + serverKeyExchange.signature = privateKey.sign(hashBytes) + + #Send ServerHello[, Certificate], ServerKeyExchange, + #ServerHelloDone + msgs = [] + serverHello = ServerHello() + serverHello.create(self.version, serverRandom, sessionID, + cipherSuite, certificateType) + msgs.append(serverHello) + if cipherSuite in CipherSuite.srpRsaSuites: + certificateMsg = Certificate(certificateType) + certificateMsg.create(serverCertChain) + msgs.append(certificateMsg) + msgs.append(serverKeyExchange) + msgs.append(ServerHelloDone()) + for result in self._sendMsgs(msgs): + yield result + + #From here on, the client's messages must have the right version + self._versionCheck = True + + #Get and check ClientKeyExchange + for result in self._getMsg(ContentType.handshake, + HandshakeType.client_key_exchange, + cipherSuite): + if result in (0,1): + yield result + else: + break + clientKeyExchange = result + A = clientKeyExchange.srp_A + if A % N == 0: + postFinishedError = (AlertDescription.illegal_parameter, + "Suspicious A value") + #Calculate u + u = makeU(N, A, B) + + #Calculate premaster secret + S = powMod((A * powMod(v,u,N)) % N, 
b, N) + premasterSecret = numberToBytes(S) + + + #If we've selected an RSA suite, exchange keys and calculate + #premaster secret: + elif cipherSuite in CipherSuite.rsaSuites: + + #Send ServerHello, Certificate[, CertificateRequest], + #ServerHelloDone + msgs = [] + msgs.append(ServerHello().create(self.version, serverRandom, + sessionID, cipherSuite, certificateType)) + msgs.append(Certificate(certificateType).create(serverCertChain)) + if reqCert: + msgs.append(CertificateRequest()) + msgs.append(ServerHelloDone()) + for result in self._sendMsgs(msgs): + yield result + + #From here on, the client's messages must have the right version + self._versionCheck = True + + #Get [Certificate,] (if was requested) + if reqCert: + if self.version == (3,0): + for result in self._getMsg((ContentType.handshake, + ContentType.alert), + HandshakeType.certificate, + certificateType): + if result in (0,1): + yield result + else: + break + msg = result + + if isinstance(msg, Alert): + #If it's not a no_certificate alert, re-raise + alert = msg + if alert.description != \ + AlertDescription.no_certificate: + self._shutdown(False) + raise TLSRemoteAlert(alert) + elif isinstance(msg, Certificate): + clientCertificate = msg + if clientCertificate.certChain and \ + clientCertificate.certChain.getNumCerts()!=0: + clientCertChain = clientCertificate.certChain + else: + raise AssertionError() + elif self.version in ((3,1), (3,2)): + for result in self._getMsg(ContentType.handshake, + HandshakeType.certificate, + certificateType): + if result in (0,1): + yield result + else: + break + clientCertificate = result + if clientCertificate.certChain and \ + clientCertificate.certChain.getNumCerts()!=0: + clientCertChain = clientCertificate.certChain + else: + raise AssertionError() + + #Get ClientKeyExchange + for result in self._getMsg(ContentType.handshake, + HandshakeType.client_key_exchange, + cipherSuite): + if result in (0,1): + yield result + else: + break + clientKeyExchange = result + + #Decrypt ClientKeyExchange + premasterSecret = privateKey.decrypt(\ + clientKeyExchange.encryptedPreMasterSecret) + + randomPreMasterSecret = getRandomBytes(48) + versionCheck = (premasterSecret[0], premasterSecret[1]) + if not premasterSecret: + premasterSecret = randomPreMasterSecret + elif len(premasterSecret)!=48: + premasterSecret = randomPreMasterSecret + elif versionCheck != clientHello.client_version: + if versionCheck != self.version: #Tolerate buggy IE clients + premasterSecret = randomPreMasterSecret + + #Get and check CertificateVerify, if relevant + if clientCertChain: + if self.version == (3,0): + #Create a temporary session object, just for the purpose + #of checking the CertificateVerify + session = Session() + session._calcMasterSecret(self.version, premasterSecret, + clientRandom, serverRandom) + verifyBytes = self._calcSSLHandshakeHash(\ + session.masterSecret, "") + elif self.version in ((3,1), (3,2)): + verifyBytes = stringToBytes(self._handshake_md5.digest() +\ + self._handshake_sha.digest()) + for result in self._getMsg(ContentType.handshake, + HandshakeType.certificate_verify): + if result in (0,1): + yield result + else: + break + certificateVerify = result + publicKey = clientCertChain.getEndEntityPublicKey() + if len(publicKey) < settings.minKeySize: + postFinishedError = (AlertDescription.handshake_failure, + "Client's public key too small: %d" % len(publicKey)) + if len(publicKey) > settings.maxKeySize: + postFinishedError = (AlertDescription.handshake_failure, + "Client's public key too large: %d" % 
len(publicKey)) + + if not publicKey.verify(certificateVerify.signature, + verifyBytes): + postFinishedError = (AlertDescription.decrypt_error, + "Signature failed to verify") + + + #Create the session object + self.session = Session() + self.session._calcMasterSecret(self.version, premasterSecret, + clientRandom, serverRandom) + self.session.sessionID = sessionID + self.session.cipherSuite = cipherSuite + self.session.srpUsername = self.allegedSrpUsername + self.session.clientCertChain = clientCertChain + self.session.serverCertChain = serverCertChain + + #Calculate pending connection states + self._calcPendingStates(clientRandom, serverRandom, + settings.cipherImplementations) + + #Exchange ChangeCipherSpec and Finished messages + for result in self._getFinished(): + yield result + + #If we were holding a post-finished error until receiving the client + #finished message, send it now. We delay the call until this point + #because calling sendError() throws an exception, and our caller might + #shut down the socket upon receiving the exception. If he did, and the + #client was still sending its ChangeCipherSpec or Finished messages, it + #would cause a socket error on the client side. This is a lot of + #consideration to show to misbehaving clients, but this would also + #cause problems with fault-testing. + if postFinishedError: + for result in self._sendError(*postFinishedError): + yield result + + for result in self._sendFinished(): + yield result + + #Add the session object to the session cache + if sessionCache and sessionID: + sessionCache[bytesToString(sessionID)] = self.session + + #Mark the connection as open + self.session._setResumable(True) + self._handshakeDone(resumed=False) + + + def _handshakeWrapperAsync(self, handshaker, checker): + if not self.fault: + try: + for result in handshaker: + yield result + if checker: + try: + checker(self) + except TLSAuthenticationError: + alert = Alert().create(AlertDescription.close_notify, + AlertLevel.fatal) + for result in self._sendMsg(alert): + yield result + raise + except: + self._shutdown(False) + raise + else: + try: + for result in handshaker: + yield result + if checker: + try: + checker(self) + except TLSAuthenticationError: + alert = Alert().create(AlertDescription.close_notify, + AlertLevel.fatal) + for result in self._sendMsg(alert): + yield result + raise + except socket.error, e: + raise TLSFaultError("socket error!") + except TLSAbruptCloseError, e: + raise TLSFaultError("abrupt close error!") + except TLSAlert, alert: + if alert.description not in Fault.faultAlerts[self.fault]: + raise TLSFaultError(str(alert)) + else: + pass + except: + self._shutdown(False) + raise + else: + raise TLSFaultError("No error!") + + + def _getKeyFromChain(self, certificate, settings): + #Get and check cert chain from the Certificate message + certChain = certificate.certChain + if not certChain or certChain.getNumCerts() == 0: + for result in self._sendError(AlertDescription.illegal_parameter, + "Other party sent a Certificate message without "\ + "certificates"): + yield result + + #Get and check public key from the cert chain + publicKey = certChain.getEndEntityPublicKey() + if len(publicKey) < settings.minKeySize: + for result in self._sendError(AlertDescription.handshake_failure, + "Other party's public key too small: %d" % len(publicKey)): + yield result + if len(publicKey) > settings.maxKeySize: + for result in self._sendError(AlertDescription.handshake_failure, + "Other party's public key too large: %d" % len(publicKey)): + yield 
result + + yield publicKey, certChain diff --git a/gam/gdata/analytics/tlslite/TLSRecordLayer.py b/gam/gdata/analytics/tlslite/TLSRecordLayer.py new file mode 100755 index 00000000000..875ce800700 --- /dev/null +++ b/gam/gdata/analytics/tlslite/TLSRecordLayer.py @@ -0,0 +1,1123 @@ +"""Helper class for TLSConnection.""" +from __future__ import generators + +from utils.compat import * +from utils.cryptomath import * +from utils.cipherfactory import createAES, createRC4, createTripleDES +from utils.codec import * +from errors import * +from messages import * +from mathtls import * +from constants import * +from utils.cryptomath import getRandomBytes +from utils import hmac +from FileObject import FileObject +import sha +import md5 +import socket +import errno +import traceback + +class _ConnectionState: + def __init__(self): + self.macContext = None + self.encContext = None + self.seqnum = 0 + + def getSeqNumStr(self): + w = Writer(8) + w.add(self.seqnum, 8) + seqnumStr = bytesToString(w.bytes) + self.seqnum += 1 + return seqnumStr + + +class TLSRecordLayer: + """ + This class handles data transmission for a TLS connection. + + Its only subclass is L{tlslite.TLSConnection.TLSConnection}. We've + separated the code in this class from TLSConnection to make things + more readable. + + + @type sock: socket.socket + @ivar sock: The underlying socket object. + + @type session: L{tlslite.Session.Session} + @ivar session: The session corresponding to this connection. + + Due to TLS session resumption, multiple connections can correspond + to the same underlying session. + + @type version: tuple + @ivar version: The TLS version being used for this connection. + + (3,0) means SSL 3.0, and (3,1) means TLS 1.0. + + @type closed: bool + @ivar closed: If this connection is closed. + + @type resumed: bool + @ivar resumed: If this connection is based on a resumed session. + + @type allegedSharedKeyUsername: str or None + @ivar allegedSharedKeyUsername: This is set to the shared-key + username asserted by the client, whether the handshake succeeded or + not. If the handshake fails, this can be inspected to + determine if a guessing attack is in progress against a particular + user account. + + @type allegedSrpUsername: str or None + @ivar allegedSrpUsername: This is set to the SRP username + asserted by the client, whether the handshake succeeded or not. + If the handshake fails, this can be inspected to determine + if a guessing attack is in progress against a particular user + account. + + @type closeSocket: bool + @ivar closeSocket: If the socket should be closed when the + connection is closed (writable). + + If you set this to True, TLS Lite will assume the responsibility of + closing the socket when the TLS Connection is shutdown (either + through an error or through the user calling close()). The default + is False. + + @type ignoreAbruptClose: bool + @ivar ignoreAbruptClose: If an abrupt close of the socket should + raise an error (writable). + + If you set this to True, TLS Lite will not raise a + L{tlslite.errors.TLSAbruptCloseError} exception if the underlying + socket is unexpectedly closed. Such an unexpected closure could be + caused by an attacker. However, it also occurs with some incorrect + TLS implementations. + + You should set this to True only if you're not worried about an + attacker truncating the connection, and only if necessary to avoid + spurious errors. The default is False. 
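    A note on the asynchronous variants (editorial addition, not part of the
    original patch): readAsync(), writeAsync(), closeAsync() and the
    handshake*Async() methods all return generators that yield 0 while
    waiting to read from the socket and 1 while waiting to write.  On a
    non-blocking socket they can be driven with select(); for example,
    assuming 'connection' is an instance of this class and the select
    module is imported:

        for result in connection.readAsync(max=1024):
            if result == 0:
                select.select([connection.sock], [], [])
            elif result == 1:
                select.select([], [connection.sock], [])
            else:
                data = result   #the completed read yields a string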
+ + @sort: __init__, read, readAsync, write, writeAsync, close, closeAsync, + getCipherImplementation, getCipherName + """ + + def __init__(self, sock): + self.sock = sock + + #My session object (Session instance; read-only) + self.session = None + + #Am I a client or server? + self._client = None + + #Buffers for processing messages + self._handshakeBuffer = [] + self._readBuffer = "" + + #Handshake digests + self._handshake_md5 = md5.md5() + self._handshake_sha = sha.sha() + + #TLS Protocol Version + self.version = (0,0) #read-only + self._versionCheck = False #Once we choose a version, this is True + + #Current and Pending connection states + self._writeState = _ConnectionState() + self._readState = _ConnectionState() + self._pendingWriteState = _ConnectionState() + self._pendingReadState = _ConnectionState() + + #Is the connection open? + self.closed = True #read-only + self._refCount = 0 #Used to trigger closure + + #Is this a resumed (or shared-key) session? + self.resumed = False #read-only + + #What username did the client claim in his handshake? + self.allegedSharedKeyUsername = None + self.allegedSrpUsername = None + + #On a call to close(), do we close the socket? (writeable) + self.closeSocket = False + + #If the socket is abruptly closed, do we ignore it + #and pretend the connection was shut down properly? (writeable) + self.ignoreAbruptClose = False + + #Fault we will induce, for testing purposes + self.fault = None + + #********************************************************* + # Public Functions START + #********************************************************* + + def read(self, max=None, min=1): + """Read some data from the TLS connection. + + This function will block until at least 'min' bytes are + available (or the connection is closed). + + If an exception is raised, the connection will have been + automatically closed. + + @type max: int + @param max: The maximum number of bytes to return. + + @type min: int + @param min: The minimum number of bytes to return + + @rtype: str + @return: A string of no more than 'max' bytes, and no fewer + than 'min' (unless the connection has been closed, in which + case fewer than 'min' bytes may be returned). + + @raise socket.error: If a socket error occurs. + @raise tlslite.errors.TLSAbruptCloseError: If the socket is closed + without a preceding alert. + @raise tlslite.errors.TLSAlert: If a TLS alert is signalled. + """ + for result in self.readAsync(max, min): + pass + return result + + def readAsync(self, max=None, min=1): + """Start a read operation on the TLS connection. + + This function returns a generator which behaves similarly to + read(). Successive invocations of the generator will return 0 + if it is waiting to read from the socket, 1 if it is waiting + to write to the socket, or a string if the read operation has + completed. + + @rtype: iterable + @return: A generator; see above for details. + """ + try: + while len(self._readBuffer)= len(s): + break + if endIndex > len(s): + endIndex = len(s) + block = stringToBytes(s[startIndex : endIndex]) + applicationData = ApplicationData().create(block) + for result in self._sendMsg(applicationData, skipEmptyFrag): + yield result + skipEmptyFrag = True #only send an empy fragment on 1st message + index += 1 + except: + self._shutdown(False) + raise + + def close(self): + """Close the TLS connection. + + This function will block until it has exchanged close_notify + alerts with the other party. After doing so, it will shut down the + TLS connection. 
Further attempts to read through this connection + will return "". Further attempts to write through this connection + will raise ValueError. + + If makefile() has been called on this connection, the connection + will be not be closed until the connection object and all file + objects have been closed. + + Even if an exception is raised, the connection will have been + closed. + + @raise socket.error: If a socket error occurs. + @raise tlslite.errors.TLSAbruptCloseError: If the socket is closed + without a preceding alert. + @raise tlslite.errors.TLSAlert: If a TLS alert is signalled. + """ + if not self.closed: + for result in self._decrefAsync(): + pass + + def closeAsync(self): + """Start a close operation on the TLS connection. + + This function returns a generator which behaves similarly to + close(). Successive invocations of the generator will return 0 + if it is waiting to read from the socket, 1 if it is waiting + to write to the socket, or will raise StopIteration if the + close operation has completed. + + @rtype: iterable + @return: A generator; see above for details. + """ + if not self.closed: + for result in self._decrefAsync(): + yield result + + def _decrefAsync(self): + self._refCount -= 1 + if self._refCount == 0 and not self.closed: + try: + for result in self._sendMsg(Alert().create(\ + AlertDescription.close_notify, AlertLevel.warning)): + yield result + alert = None + while not alert: + for result in self._getMsg((ContentType.alert, \ + ContentType.application_data)): + if result in (0,1): + yield result + if result.contentType == ContentType.alert: + alert = result + if alert.description == AlertDescription.close_notify: + self._shutdown(True) + else: + raise TLSRemoteAlert(alert) + except (socket.error, TLSAbruptCloseError): + #If the other side closes the socket, that's okay + self._shutdown(True) + except: + self._shutdown(False) + raise + + def getCipherName(self): + """Get the name of the cipher used with this connection. + + @rtype: str + @return: The name of the cipher used with this connection. + Either 'aes128', 'aes256', 'rc4', or '3des'. + """ + if not self._writeState.encContext: + return None + return self._writeState.encContext.name + + def getCipherImplementation(self): + """Get the name of the cipher implementation used with + this connection. + + @rtype: str + @return: The name of the cipher implementation used with + this connection. Either 'python', 'cryptlib', 'openssl', + or 'pycrypto'. + """ + if not self._writeState.encContext: + return None + return self._writeState.encContext.implementation + + + + #Emulate a socket, somewhat - + def send(self, s): + """Send data to the TLS connection (socket emulation). + + @raise socket.error: If a socket error occurs. + """ + self.write(s) + return len(s) + + def sendall(self, s): + """Send data to the TLS connection (socket emulation). + + @raise socket.error: If a socket error occurs. + """ + self.write(s) + + def recv(self, bufsize): + """Get some data from the TLS connection (socket emulation). + + @raise socket.error: If a socket error occurs. + @raise tlslite.errors.TLSAbruptCloseError: If the socket is closed + without a preceding alert. + @raise tlslite.errors.TLSAlert: If a TLS alert is signalled. + """ + return self.read(bufsize) + + def makefile(self, mode='r', bufsize=-1): + """Create a file object for the TLS connection (socket emulation). 
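        (Editorial note, not part of the original patch: this increments the
        connection's reference count, so the underlying TLS connection is not
        actually closed until this object and every file object created from
        it have been closed; see close() above.)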
+ + @rtype: L{tlslite.FileObject.FileObject} + """ + self._refCount += 1 + return FileObject(self, mode, bufsize) + + def getsockname(self): + """Return the socket's own address (socket emulation).""" + return self.sock.getsockname() + + def getpeername(self): + """Return the remote address to which the socket is connected + (socket emulation).""" + return self.sock.getpeername() + + def settimeout(self, value): + """Set a timeout on blocking socket operations (socket emulation).""" + return self.sock.settimeout(value) + + def gettimeout(self): + """Return the timeout associated with socket operations (socket + emulation).""" + return self.sock.gettimeout() + + def setsockopt(self, level, optname, value): + """Set the value of the given socket option (socket emulation).""" + return self.sock.setsockopt(level, optname, value) + + + #********************************************************* + # Public Functions END + #********************************************************* + + def _shutdown(self, resumable): + self._writeState = _ConnectionState() + self._readState = _ConnectionState() + #Don't do this: self._readBuffer = "" + self.version = (0,0) + self._versionCheck = False + self.closed = True + if self.closeSocket: + self.sock.close() + + #Even if resumable is False, we'll never toggle this on + if not resumable and self.session: + self.session.resumable = False + + + def _sendError(self, alertDescription, errorStr=None): + alert = Alert().create(alertDescription, AlertLevel.fatal) + for result in self._sendMsg(alert): + yield result + self._shutdown(False) + raise TLSLocalAlert(alert, errorStr) + + def _sendMsgs(self, msgs): + skipEmptyFrag = False + for msg in msgs: + for result in self._sendMsg(msg, skipEmptyFrag): + yield result + skipEmptyFrag = True + + def _sendMsg(self, msg, skipEmptyFrag=False): + bytes = msg.write() + contentType = msg.contentType + + #Whenever we're connected and asked to send a message, + #we first send an empty Application Data message. This prevents + #an attacker from launching a chosen-plaintext attack based on + #knowing the next IV. 
+ if not self.closed and not skipEmptyFrag and self.version == (3,1): + if self._writeState.encContext: + if self._writeState.encContext.isBlockCipher: + for result in self._sendMsg(ApplicationData(), + skipEmptyFrag=True): + yield result + + #Update handshake hashes + if contentType == ContentType.handshake: + bytesStr = bytesToString(bytes) + self._handshake_md5.update(bytesStr) + self._handshake_sha.update(bytesStr) + + #Calculate MAC + if self._writeState.macContext: + seqnumStr = self._writeState.getSeqNumStr() + bytesStr = bytesToString(bytes) + mac = self._writeState.macContext.copy() + mac.update(seqnumStr) + mac.update(chr(contentType)) + if self.version == (3,0): + mac.update( chr( int(len(bytes)/256) ) ) + mac.update( chr( int(len(bytes)%256) ) ) + elif self.version in ((3,1), (3,2)): + mac.update(chr(self.version[0])) + mac.update(chr(self.version[1])) + mac.update( chr( int(len(bytes)/256) ) ) + mac.update( chr( int(len(bytes)%256) ) ) + else: + raise AssertionError() + mac.update(bytesStr) + macString = mac.digest() + macBytes = stringToBytes(macString) + if self.fault == Fault.badMAC: + macBytes[0] = (macBytes[0]+1) % 256 + + #Encrypt for Block or Stream Cipher + if self._writeState.encContext: + #Add padding and encrypt (for Block Cipher): + if self._writeState.encContext.isBlockCipher: + + #Add TLS 1.1 fixed block + if self.version == (3,2): + bytes = self.fixedIVBlock + bytes + + #Add padding: bytes = bytes + (macBytes + paddingBytes) + currentLength = len(bytes) + len(macBytes) + 1 + blockLength = self._writeState.encContext.block_size + paddingLength = blockLength-(currentLength % blockLength) + + paddingBytes = createByteArraySequence([paddingLength] * \ + (paddingLength+1)) + if self.fault == Fault.badPadding: + paddingBytes[0] = (paddingBytes[0]+1) % 256 + endBytes = concatArrays(macBytes, paddingBytes) + bytes = concatArrays(bytes, endBytes) + #Encrypt + plaintext = stringToBytes(bytes) + ciphertext = self._writeState.encContext.encrypt(plaintext) + bytes = stringToBytes(ciphertext) + + #Encrypt (for Stream Cipher) + else: + bytes = concatArrays(bytes, macBytes) + plaintext = bytesToString(bytes) + ciphertext = self._writeState.encContext.encrypt(plaintext) + bytes = stringToBytes(ciphertext) + + #Add record header and send + r = RecordHeader3().create(self.version, contentType, len(bytes)) + s = bytesToString(concatArrays(r.write(), bytes)) + while 1: + try: + bytesSent = self.sock.send(s) #Might raise socket.error + except socket.error, why: + if why[0] == errno.EWOULDBLOCK: + yield 1 + continue + else: + raise + if bytesSent == len(s): + return + s = s[bytesSent:] + yield 1 + + + def _getMsg(self, expectedType, secondaryType=None, constructorType=None): + try: + if not isinstance(expectedType, tuple): + expectedType = (expectedType,) + + #Spin in a loop, until we've got a non-empty record of a type we + #expect. The loop will be repeated if: + # - we receive a renegotiation attempt; we send no_renegotiation, + # then try again + # - we receive an empty application-data fragment; we try again + while 1: + for result in self._getNextRecord(): + if result in (0,1): + yield result + recordHeader, p = result + + #If this is an empty application-data fragment, try again + if recordHeader.type == ContentType.application_data: + if p.index == len(p.bytes): + continue + + #If we received an unexpected record type... + if recordHeader.type not in expectedType: + + #If we received an alert... 
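# [Editorial note, not part of the original patch]
# Summary of the unexpected-record handling that follows: a warning-level
# alert or a close_notify is answered with our own close_notify (ignoring
# socket errors in case the peer already closed) and the connection is shut
# down, resumable only for close_notify; a fatal alert shuts the connection
# down as non-resumable; in every case the alert is then re-raised as
# TLSRemoteAlert.  A renegotiation attempt (hello_request when acting as a
# client, client_hello when acting as a server) is answered with a
# no_renegotiation warning and the read loop continues.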
+ if recordHeader.type == ContentType.alert: + alert = Alert().parse(p) + + #We either received a fatal error, a warning, or a + #close_notify. In any case, we're going to close the + #connection. In the latter two cases we respond with + #a close_notify, but ignore any socket errors, since + #the other side might have already closed the socket. + if alert.level == AlertLevel.warning or \ + alert.description == AlertDescription.close_notify: + + #If the sendMsg() call fails because the socket has + #already been closed, we will be forgiving and not + #report the error nor invalidate the "resumability" + #of the session. + try: + alertMsg = Alert() + alertMsg.create(AlertDescription.close_notify, + AlertLevel.warning) + for result in self._sendMsg(alertMsg): + yield result + except socket.error: + pass + + if alert.description == \ + AlertDescription.close_notify: + self._shutdown(True) + elif alert.level == AlertLevel.warning: + self._shutdown(False) + + else: #Fatal alert: + self._shutdown(False) + + #Raise the alert as an exception + raise TLSRemoteAlert(alert) + + #If we received a renegotiation attempt... + if recordHeader.type == ContentType.handshake: + subType = p.get(1) + reneg = False + if self._client: + if subType == HandshakeType.hello_request: + reneg = True + else: + if subType == HandshakeType.client_hello: + reneg = True + #Send no_renegotiation, then try again + if reneg: + alertMsg = Alert() + alertMsg.create(AlertDescription.no_renegotiation, + AlertLevel.warning) + for result in self._sendMsg(alertMsg): + yield result + continue + + #Otherwise: this is an unexpected record, but neither an + #alert nor renegotiation + for result in self._sendError(\ + AlertDescription.unexpected_message, + "received type=%d" % recordHeader.type): + yield result + + break + + #Parse based on content_type + if recordHeader.type == ContentType.change_cipher_spec: + yield ChangeCipherSpec().parse(p) + elif recordHeader.type == ContentType.alert: + yield Alert().parse(p) + elif recordHeader.type == ContentType.application_data: + yield ApplicationData().parse(p) + elif recordHeader.type == ContentType.handshake: + #Convert secondaryType to tuple, if it isn't already + if not isinstance(secondaryType, tuple): + secondaryType = (secondaryType,) + + #If it's a handshake message, check handshake header + if recordHeader.ssl2: + subType = p.get(1) + if subType != HandshakeType.client_hello: + for result in self._sendError(\ + AlertDescription.unexpected_message, + "Can only handle SSLv2 ClientHello messages"): + yield result + if HandshakeType.client_hello not in secondaryType: + for result in self._sendError(\ + AlertDescription.unexpected_message): + yield result + subType = HandshakeType.client_hello + else: + subType = p.get(1) + if subType not in secondaryType: + for result in self._sendError(\ + AlertDescription.unexpected_message, + "Expecting %s, got %s" % (str(secondaryType), subType)): + yield result + + #Update handshake hashes + sToHash = bytesToString(p.bytes) + self._handshake_md5.update(sToHash) + self._handshake_sha.update(sToHash) + + #Parse based on handshake type + if subType == HandshakeType.client_hello: + yield ClientHello(recordHeader.ssl2).parse(p) + elif subType == HandshakeType.server_hello: + yield ServerHello().parse(p) + elif subType == HandshakeType.certificate: + yield Certificate(constructorType).parse(p) + elif subType == HandshakeType.certificate_request: + yield CertificateRequest().parse(p) + elif subType == HandshakeType.certificate_verify: + yield 
CertificateVerify().parse(p) + elif subType == HandshakeType.server_key_exchange: + yield ServerKeyExchange(constructorType).parse(p) + elif subType == HandshakeType.server_hello_done: + yield ServerHelloDone().parse(p) + elif subType == HandshakeType.client_key_exchange: + yield ClientKeyExchange(constructorType, \ + self.version).parse(p) + elif subType == HandshakeType.finished: + yield Finished(self.version).parse(p) + else: + raise AssertionError() + + #If an exception was raised by a Parser or Message instance: + except SyntaxError, e: + for result in self._sendError(AlertDescription.decode_error, + formatExceptionTrace(e)): + yield result + + + #Returns next record or next handshake message + def _getNextRecord(self): + + #If there's a handshake message waiting, return it + if self._handshakeBuffer: + recordHeader, bytes = self._handshakeBuffer[0] + self._handshakeBuffer = self._handshakeBuffer[1:] + yield (recordHeader, Parser(bytes)) + return + + #Otherwise... + #Read the next record header + bytes = createByteArraySequence([]) + recordHeaderLength = 1 + ssl2 = False + while 1: + try: + s = self.sock.recv(recordHeaderLength-len(bytes)) + except socket.error, why: + if why[0] == errno.EWOULDBLOCK: + yield 0 + continue + else: + raise + + #If the connection was abruptly closed, raise an error + if len(s)==0: + raise TLSAbruptCloseError() + + bytes += stringToBytes(s) + if len(bytes)==1: + if bytes[0] in ContentType.all: + ssl2 = False + recordHeaderLength = 5 + elif bytes[0] == 128: + ssl2 = True + recordHeaderLength = 2 + else: + raise SyntaxError() + if len(bytes) == recordHeaderLength: + break + + #Parse the record header + if ssl2: + r = RecordHeader2().parse(Parser(bytes)) + else: + r = RecordHeader3().parse(Parser(bytes)) + + #Check the record header fields + if r.length > 18432: + for result in self._sendError(AlertDescription.record_overflow): + yield result + + #Read the record contents + bytes = createByteArraySequence([]) + while 1: + try: + s = self.sock.recv(r.length - len(bytes)) + except socket.error, why: + if why[0] == errno.EWOULDBLOCK: + yield 0 + continue + else: + raise + + #If the connection is closed, raise a socket error + if len(s)==0: + raise TLSAbruptCloseError() + + bytes += stringToBytes(s) + if len(bytes) == r.length: + break + + #Check the record header fields (2) + #We do this after reading the contents from the socket, so that + #if there's an error, we at least don't leave extra bytes in the + #socket.. + # + # THIS CHECK HAS NO SECURITY RELEVANCE (?), BUT COULD HURT INTEROP. + # SO WE LEAVE IT OUT FOR NOW. 
+ # + #if self._versionCheck and r.version != self.version: + # for result in self._sendError(AlertDescription.protocol_version, + # "Version in header field: %s, should be %s" % (str(r.version), + # str(self.version))): + # yield result + + #Decrypt the record + for result in self._decryptRecord(r.type, bytes): + if result in (0,1): + yield result + else: + break + bytes = result + p = Parser(bytes) + + #If it doesn't contain handshake messages, we can just return it + if r.type != ContentType.handshake: + yield (r, p) + #If it's an SSLv2 ClientHello, we can return it as well + elif r.ssl2: + yield (r, p) + else: + #Otherwise, we loop through and add the handshake messages to the + #handshake buffer + while 1: + if p.index == len(bytes): #If we're at the end + if not self._handshakeBuffer: + for result in self._sendError(\ + AlertDescription.decode_error, \ + "Received empty handshake record"): + yield result + break + #There needs to be at least 4 bytes to get a header + if p.index+4 > len(bytes): + for result in self._sendError(\ + AlertDescription.decode_error, + "A record has a partial handshake message (1)"): + yield result + p.get(1) # skip handshake type + msgLength = p.get(3) + if p.index+msgLength > len(bytes): + for result in self._sendError(\ + AlertDescription.decode_error, + "A record has a partial handshake message (2)"): + yield result + + handshakePair = (r, bytes[p.index-4 : p.index+msgLength]) + self._handshakeBuffer.append(handshakePair) + p.index += msgLength + + #We've moved at least one handshake message into the + #handshakeBuffer, return the first one + recordHeader, bytes = self._handshakeBuffer[0] + self._handshakeBuffer = self._handshakeBuffer[1:] + yield (recordHeader, Parser(bytes)) + + + def _decryptRecord(self, recordType, bytes): + if self._readState.encContext: + + #Decrypt if it's a block cipher + if self._readState.encContext.isBlockCipher: + blockLength = self._readState.encContext.block_size + if len(bytes) % blockLength != 0: + for result in self._sendError(\ + AlertDescription.decryption_failed, + "Encrypted data not a multiple of blocksize"): + yield result + ciphertext = bytesToString(bytes) + plaintext = self._readState.encContext.decrypt(ciphertext) + if self.version == (3,2): #For TLS 1.1, remove explicit IV + plaintext = plaintext[self._readState.encContext.block_size : ] + bytes = stringToBytes(plaintext) + + #Check padding + paddingGood = True + paddingLength = bytes[-1] + if (paddingLength+1) > len(bytes): + paddingGood=False + totalPaddingLength = 0 + else: + if self.version == (3,0): + totalPaddingLength = paddingLength+1 + elif self.version in ((3,1), (3,2)): + totalPaddingLength = paddingLength+1 + paddingBytes = bytes[-totalPaddingLength:-1] + for byte in paddingBytes: + if byte != paddingLength: + paddingGood = False + totalPaddingLength = 0 + else: + raise AssertionError() + + #Decrypt if it's a stream cipher + else: + paddingGood = True + ciphertext = bytesToString(bytes) + plaintext = self._readState.encContext.decrypt(ciphertext) + bytes = stringToBytes(plaintext) + totalPaddingLength = 0 + + #Check MAC + macGood = True + macLength = self._readState.macContext.digest_size + endLength = macLength + totalPaddingLength + if endLength > len(bytes): + macGood = False + else: + #Read MAC + startIndex = len(bytes) - endLength + endIndex = startIndex + macLength + checkBytes = bytes[startIndex : endIndex] + + #Calculate MAC + seqnumStr = self._readState.getSeqNumStr() + bytes = bytes[:-endLength] + bytesStr = bytesToString(bytes) + mac = 
self._readState.macContext.copy() + mac.update(seqnumStr) + mac.update(chr(recordType)) + if self.version == (3,0): + mac.update( chr( int(len(bytes)/256) ) ) + mac.update( chr( int(len(bytes)%256) ) ) + elif self.version in ((3,1), (3,2)): + mac.update(chr(self.version[0])) + mac.update(chr(self.version[1])) + mac.update( chr( int(len(bytes)/256) ) ) + mac.update( chr( int(len(bytes)%256) ) ) + else: + raise AssertionError() + mac.update(bytesStr) + macString = mac.digest() + macBytes = stringToBytes(macString) + + #Compare MACs + if macBytes != checkBytes: + macGood = False + + if not (paddingGood and macGood): + for result in self._sendError(AlertDescription.bad_record_mac, + "MAC failure (or padding failure)"): + yield result + + yield bytes + + def _handshakeStart(self, client): + self._client = client + self._handshake_md5 = md5.md5() + self._handshake_sha = sha.sha() + self._handshakeBuffer = [] + self.allegedSharedKeyUsername = None + self.allegedSrpUsername = None + self._refCount = 1 + + def _handshakeDone(self, resumed): + self.resumed = resumed + self.closed = False + + def _calcPendingStates(self, clientRandom, serverRandom, implementations): + if self.session.cipherSuite in CipherSuite.aes128Suites: + macLength = 20 + keyLength = 16 + ivLength = 16 + createCipherFunc = createAES + elif self.session.cipherSuite in CipherSuite.aes256Suites: + macLength = 20 + keyLength = 32 + ivLength = 16 + createCipherFunc = createAES + elif self.session.cipherSuite in CipherSuite.rc4Suites: + macLength = 20 + keyLength = 16 + ivLength = 0 + createCipherFunc = createRC4 + elif self.session.cipherSuite in CipherSuite.tripleDESSuites: + macLength = 20 + keyLength = 24 + ivLength = 8 + createCipherFunc = createTripleDES + else: + raise AssertionError() + + if self.version == (3,0): + createMACFunc = MAC_SSL + elif self.version in ((3,1), (3,2)): + createMACFunc = hmac.HMAC + + outputLength = (macLength*2) + (keyLength*2) + (ivLength*2) + + #Calculate Keying Material from Master Secret + if self.version == (3,0): + keyBlock = PRF_SSL(self.session.masterSecret, + concatArrays(serverRandom, clientRandom), + outputLength) + elif self.version in ((3,1), (3,2)): + keyBlock = PRF(self.session.masterSecret, + "key expansion", + concatArrays(serverRandom,clientRandom), + outputLength) + else: + raise AssertionError() + + #Slice up Keying Material + clientPendingState = _ConnectionState() + serverPendingState = _ConnectionState() + p = Parser(keyBlock) + clientMACBlock = bytesToString(p.getFixBytes(macLength)) + serverMACBlock = bytesToString(p.getFixBytes(macLength)) + clientKeyBlock = bytesToString(p.getFixBytes(keyLength)) + serverKeyBlock = bytesToString(p.getFixBytes(keyLength)) + clientIVBlock = bytesToString(p.getFixBytes(ivLength)) + serverIVBlock = bytesToString(p.getFixBytes(ivLength)) + clientPendingState.macContext = createMACFunc(clientMACBlock, + digestmod=sha) + serverPendingState.macContext = createMACFunc(serverMACBlock, + digestmod=sha) + clientPendingState.encContext = createCipherFunc(clientKeyBlock, + clientIVBlock, + implementations) + serverPendingState.encContext = createCipherFunc(serverKeyBlock, + serverIVBlock, + implementations) + + #Assign new connection states to pending states + if self._client: + self._pendingWriteState = clientPendingState + self._pendingReadState = serverPendingState + else: + self._pendingWriteState = serverPendingState + self._pendingReadState = clientPendingState + + if self.version == (3,2) and ivLength: + #Choose fixedIVBlock for TLS 1.1 (this is 
encrypted with the CBC + #residue to create the IV for each sent block) + self.fixedIVBlock = getRandomBytes(ivLength) + + def _changeWriteState(self): + self._writeState = self._pendingWriteState + self._pendingWriteState = _ConnectionState() + + def _changeReadState(self): + self._readState = self._pendingReadState + self._pendingReadState = _ConnectionState() + + def _sendFinished(self): + #Send ChangeCipherSpec + for result in self._sendMsg(ChangeCipherSpec()): + yield result + + #Switch to pending write state + self._changeWriteState() + + #Calculate verification data + verifyData = self._calcFinished(True) + if self.fault == Fault.badFinished: + verifyData[0] = (verifyData[0]+1)%256 + + #Send Finished message under new state + finished = Finished(self.version).create(verifyData) + for result in self._sendMsg(finished): + yield result + + def _getFinished(self): + #Get and check ChangeCipherSpec + for result in self._getMsg(ContentType.change_cipher_spec): + if result in (0,1): + yield result + changeCipherSpec = result + + if changeCipherSpec.type != 1: + for result in self._sendError(AlertDescription.illegal_parameter, + "ChangeCipherSpec type incorrect"): + yield result + + #Switch to pending read state + self._changeReadState() + + #Calculate verification data + verifyData = self._calcFinished(False) + + #Get and check Finished message under new state + for result in self._getMsg(ContentType.handshake, + HandshakeType.finished): + if result in (0,1): + yield result + finished = result + if finished.verify_data != verifyData: + for result in self._sendError(AlertDescription.decrypt_error, + "Finished message is incorrect"): + yield result + + def _calcFinished(self, send=True): + if self.version == (3,0): + if (self._client and send) or (not self._client and not send): + senderStr = "\x43\x4C\x4E\x54" + else: + senderStr = "\x53\x52\x56\x52" + + verifyData = self._calcSSLHandshakeHash(self.session.masterSecret, + senderStr) + return verifyData + + elif self.version in ((3,1), (3,2)): + if (self._client and send) or (not self._client and not send): + label = "client finished" + else: + label = "server finished" + + handshakeHashes = stringToBytes(self._handshake_md5.digest() + \ + self._handshake_sha.digest()) + verifyData = PRF(self.session.masterSecret, label, handshakeHashes, + 12) + return verifyData + else: + raise AssertionError() + + #Used for Finished messages and CertificateVerify messages in SSL v3 + def _calcSSLHandshakeHash(self, masterSecret, label): + masterSecretStr = bytesToString(masterSecret) + + imac_md5 = self._handshake_md5.copy() + imac_sha = self._handshake_sha.copy() + + imac_md5.update(label + masterSecretStr + '\x36'*48) + imac_sha.update(label + masterSecretStr + '\x36'*40) + + md5Str = md5.md5(masterSecretStr + ('\x5c'*48) + \ + imac_md5.digest()).digest() + shaStr = sha.sha(masterSecretStr + ('\x5c'*40) + \ + imac_sha.digest()).digest() + + return stringToBytes(md5Str + shaStr) + diff --git a/gam/gdata/analytics/tlslite/VerifierDB.py b/gam/gdata/analytics/tlslite/VerifierDB.py new file mode 100755 index 00000000000..f706b179672 --- /dev/null +++ b/gam/gdata/analytics/tlslite/VerifierDB.py @@ -0,0 +1,90 @@ +"""Class for storing SRP password verifiers.""" + +from utils.cryptomath import * +from utils.compat import * +import mathtls +from BaseDB import BaseDB + +class VerifierDB(BaseDB): + """This class represent an in-memory or on-disk database of SRP + password verifiers. 
+ + A VerifierDB can be passed to a server handshake to authenticate + a client based on one of the verifiers. + + This class is thread-safe. + """ + def __init__(self, filename=None): + """Create a new VerifierDB instance. + + @type filename: str + @param filename: Filename for an on-disk database, or None for + an in-memory database. If the filename already exists, follow + this with a call to open(). To create a new on-disk database, + follow this with a call to create(). + """ + BaseDB.__init__(self, filename, "verifier") + + def _getItem(self, username, valueStr): + (N, g, salt, verifier) = valueStr.split(" ") + N = base64ToNumber(N) + g = base64ToNumber(g) + salt = base64ToString(salt) + verifier = base64ToNumber(verifier) + return (N, g, salt, verifier) + + def __setitem__(self, username, verifierEntry): + """Add a verifier entry to the database. + + @type username: str + @param username: The username to associate the verifier with. + Must be less than 256 characters in length. Must not already + be in the database. + + @type verifierEntry: tuple + @param verifierEntry: The verifier entry to add. Use + L{tlslite.VerifierDB.VerifierDB.makeVerifier} to create a + verifier entry. + """ + BaseDB.__setitem__(self, username, verifierEntry) + + + def _setItem(self, username, value): + if len(username)>=256: + raise ValueError("username too long") + N, g, salt, verifier = value + N = numberToBase64(N) + g = numberToBase64(g) + salt = stringToBase64(salt) + verifier = numberToBase64(verifier) + valueStr = " ".join( (N, g, salt, verifier) ) + return valueStr + + def _checkItem(self, value, username, param): + (N, g, salt, verifier) = value + x = mathtls.makeX(salt, username, param) + v = powMod(g, x, N) + return (verifier == v) + + + def makeVerifier(username, password, bits): + """Create a verifier entry which can be stored in a VerifierDB. + + @type username: str + @param username: The username for this verifier. Must be less + than 256 characters in length. + + @type password: str + @param password: The password for this verifier. + + @type bits: int + @param bits: This values specifies which SRP group parameters + to use. It must be one of (1024, 1536, 2048, 3072, 4096, 6144, + 8192). Larger values are more secure but slower. 2048 is a + good compromise between safety and speed. + + @rtype: tuple + @return: A tuple which may be stored in a VerifierDB. + """ + return mathtls.makeVerifier(username, password, bits) + makeVerifier = staticmethod(makeVerifier) \ No newline at end of file diff --git a/gam/gdata/analytics/tlslite/X509.py b/gam/gdata/analytics/tlslite/X509.py new file mode 100755 index 00000000000..a47ddcfa2a2 --- /dev/null +++ b/gam/gdata/analytics/tlslite/X509.py @@ -0,0 +1,133 @@ +"""Class representing an X.509 certificate.""" + +from utils.ASN1Parser import ASN1Parser +from utils.cryptomath import * +from utils.keyfactory import _createPublicRSAKey + + +class X509: + """This class represents an X.509 certificate. + + @type bytes: L{array.array} of unsigned bytes + @ivar bytes: The DER-encoded ASN.1 certificate + + @type publicKey: L{tlslite.utils.RSAKey.RSAKey} + @ivar publicKey: The subject public key from the certificate. + """ + + def __init__(self): + self.bytes = createByteArraySequence([]) + self.publicKey = None + + def parse(self, s): + """Parse a PEM-encoded X.509 certificate. + + @type s: str + @param s: A PEM-encoded X.509 certificate (i.e. a base64-encoded + certificate wrapped with "-----BEGIN CERTIFICATE-----" and + "-----END CERTIFICATE-----" tags). 
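A short usage sketch for the PEM path described above; the certificate file name is hypothetical, and parse() returns the X509 instance itself so calls can be chained::

    from tlslite.X509 import X509

    pem = open("server-cert.pem").read()      # hypothetical PEM file
    cert = X509().parse(pem)                  # strips the BEGIN/END armor, DER-decodes
    print cert.getFingerprint()               # hex-encoded digest of the DER bytes
    print cert.publicKey                      # RSA key built from subjectPublicKeyInfo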
+ """ + + start = s.find("-----BEGIN CERTIFICATE-----") + end = s.find("-----END CERTIFICATE-----") + if start == -1: + raise SyntaxError("Missing PEM prefix") + if end == -1: + raise SyntaxError("Missing PEM postfix") + s = s[start+len("-----BEGIN CERTIFICATE-----") : end] + + bytes = base64ToBytes(s) + self.parseBinary(bytes) + return self + + def parseBinary(self, bytes): + """Parse a DER-encoded X.509 certificate. + + @type bytes: str or L{array.array} of unsigned bytes + @param bytes: A DER-encoded X.509 certificate. + """ + + if isinstance(bytes, type("")): + bytes = stringToBytes(bytes) + + self.bytes = bytes + p = ASN1Parser(bytes) + + #Get the tbsCertificate + tbsCertificateP = p.getChild(0) + + #Is the optional version field present? + #This determines which index the key is at. + if tbsCertificateP.value[0]==0xA0: + subjectPublicKeyInfoIndex = 6 + else: + subjectPublicKeyInfoIndex = 5 + + #Get the subjectPublicKeyInfo + subjectPublicKeyInfoP = tbsCertificateP.getChild(\ + subjectPublicKeyInfoIndex) + + #Get the algorithm + algorithmP = subjectPublicKeyInfoP.getChild(0) + rsaOID = algorithmP.value + if list(rsaOID) != [6, 9, 42, 134, 72, 134, 247, 13, 1, 1, 1, 5, 0]: + raise SyntaxError("Unrecognized AlgorithmIdentifier") + + #Get the subjectPublicKey + subjectPublicKeyP = subjectPublicKeyInfoP.getChild(1) + + #Adjust for BIT STRING encapsulation + if (subjectPublicKeyP.value[0] !=0): + raise SyntaxError() + subjectPublicKeyP = ASN1Parser(subjectPublicKeyP.value[1:]) + + #Get the modulus and exponent + modulusP = subjectPublicKeyP.getChild(0) + publicExponentP = subjectPublicKeyP.getChild(1) + + #Decode them into numbers + n = bytesToNumber(modulusP.value) + e = bytesToNumber(publicExponentP.value) + + #Create a public key instance + self.publicKey = _createPublicRSAKey(n, e) + + def getFingerprint(self): + """Get the hex-encoded fingerprint of this certificate. + + @rtype: str + @return: A hex-encoded fingerprint. + """ + return sha.sha(self.bytes).hexdigest() + + def getCommonName(self): + """Get the Subject's Common Name from the certificate. + + The cryptlib_py module must be installed in order to use this + function. + + @rtype: str or None + @return: The CN component of the certificate's subject DN, if + present. + """ + import cryptlib_py + import array + c = cryptlib_py.cryptImportCert(self.bytes, cryptlib_py.CRYPT_UNUSED) + name = cryptlib_py.CRYPT_CERTINFO_COMMONNAME + try: + try: + length = cryptlib_py.cryptGetAttributeString(c, name, None) + returnVal = array.array('B', [0] * length) + cryptlib_py.cryptGetAttributeString(c, name, returnVal) + returnVal = returnVal.tostring() + except cryptlib_py.CryptException, e: + if e[0] == cryptlib_py.CRYPT_ERROR_NOTFOUND: + returnVal = None + return returnVal + finally: + cryptlib_py.cryptDestroyCert(c) + + def writeBytes(self): + return self.bytes + + diff --git a/gam/gdata/analytics/tlslite/X509CertChain.py b/gam/gdata/analytics/tlslite/X509CertChain.py new file mode 100755 index 00000000000..d5f0b4d42a4 --- /dev/null +++ b/gam/gdata/analytics/tlslite/X509CertChain.py @@ -0,0 +1,181 @@ +"""Class representing an X.509 certificate chain.""" + +from utils import cryptomath + +class X509CertChain: + """This class represents a chain of X.509 certificates. + + @type x509List: list + @ivar x509List: A list of L{tlslite.X509.X509} instances, + starting with the end-entity certificate and with every + subsequent certificate certifying the previous. + """ + + def __init__(self, x509List=None): + """Create a new X509CertChain. 
+ + @type x509List: list + @param x509List: A list of L{tlslite.X509.X509} instances, + starting with the end-entity certificate and with every + subsequent certificate certifying the previous. + """ + if x509List: + self.x509List = x509List + else: + self.x509List = [] + + def getNumCerts(self): + """Get the number of certificates in this chain. + + @rtype: int + """ + return len(self.x509List) + + def getEndEntityPublicKey(self): + """Get the public key from the end-entity certificate. + + @rtype: L{tlslite.utils.RSAKey.RSAKey} + """ + if self.getNumCerts() == 0: + raise AssertionError() + return self.x509List[0].publicKey + + def getFingerprint(self): + """Get the hex-encoded fingerprint of the end-entity certificate. + + @rtype: str + @return: A hex-encoded fingerprint. + """ + if self.getNumCerts() == 0: + raise AssertionError() + return self.x509List[0].getFingerprint() + + def getCommonName(self): + """Get the Subject's Common Name from the end-entity certificate. + + The cryptlib_py module must be installed in order to use this + function. + + @rtype: str or None + @return: The CN component of the certificate's subject DN, if + present. + """ + if self.getNumCerts() == 0: + raise AssertionError() + return self.x509List[0].getCommonName() + + def validate(self, x509TrustList): + """Check the validity of the certificate chain. + + This checks that every certificate in the chain validates with + the subsequent one, until some certificate validates with (or + is identical to) one of the passed-in root certificates. + + The cryptlib_py module must be installed in order to use this + function. + + @type x509TrustList: list of L{tlslite.X509.X509} + @param x509TrustList: A list of trusted root certificates. The + certificate chain must extend to one of these certificates to + be considered valid. + """ + + import cryptlib_py + c1 = None + c2 = None + lastC = None + rootC = None + + try: + rootFingerprints = [c.getFingerprint() for c in x509TrustList] + + #Check that every certificate in the chain validates with the + #next one + for cert1, cert2 in zip(self.x509List, self.x509List[1:]): + + #If we come upon a root certificate, we're done. + if cert1.getFingerprint() in rootFingerprints: + return True + + c1 = cryptlib_py.cryptImportCert(cert1.writeBytes(), + cryptlib_py.CRYPT_UNUSED) + c2 = cryptlib_py.cryptImportCert(cert2.writeBytes(), + cryptlib_py.CRYPT_UNUSED) + try: + cryptlib_py.cryptCheckCert(c1, c2) + except: + return False + cryptlib_py.cryptDestroyCert(c1) + c1 = None + cryptlib_py.cryptDestroyCert(c2) + c2 = None + + #If the last certificate is one of the root certificates, we're + #done. + if self.x509List[-1].getFingerprint() in rootFingerprints: + return True + + #Otherwise, find a root certificate that the last certificate + #chains to, and validate them. 
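From the caller's side, the validation walk described here reduces to a single call; a sketch with hypothetical file names (cryptlib_py must be installed, as the docstring notes)::

    from tlslite.X509 import X509
    from tlslite.X509CertChain import X509CertChain

    # End-entity certificate first, then any intermediates.
    chain = X509CertChain([X509().parse(open(name).read())
                           for name in ("server.pem", "intermediate.pem")])
    root = X509().parse(open("root.pem").read())

    if chain.validate([root]):
        print "chain extends to a trusted root"
    else:
        print "validation failed"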
+ lastC = cryptlib_py.cryptImportCert(self.x509List[-1].writeBytes(), + cryptlib_py.CRYPT_UNUSED) + for rootCert in x509TrustList: + rootC = cryptlib_py.cryptImportCert(rootCert.writeBytes(), + cryptlib_py.CRYPT_UNUSED) + if self._checkChaining(lastC, rootC): + try: + cryptlib_py.cryptCheckCert(lastC, rootC) + return True + except: + return False + return False + finally: + if not (c1 is None): + cryptlib_py.cryptDestroyCert(c1) + if not (c2 is None): + cryptlib_py.cryptDestroyCert(c2) + if not (lastC is None): + cryptlib_py.cryptDestroyCert(lastC) + if not (rootC is None): + cryptlib_py.cryptDestroyCert(rootC) + + + + def _checkChaining(self, lastC, rootC): + import cryptlib_py + import array + def compareNames(name): + try: + length = cryptlib_py.cryptGetAttributeString(lastC, name, None) + lastName = array.array('B', [0] * length) + cryptlib_py.cryptGetAttributeString(lastC, name, lastName) + lastName = lastName.tostring() + except cryptlib_py.CryptException, e: + if e[0] == cryptlib_py.CRYPT_ERROR_NOTFOUND: + lastName = None + try: + length = cryptlib_py.cryptGetAttributeString(rootC, name, None) + rootName = array.array('B', [0] * length) + cryptlib_py.cryptGetAttributeString(rootC, name, rootName) + rootName = rootName.tostring() + except cryptlib_py.CryptException, e: + if e[0] == cryptlib_py.CRYPT_ERROR_NOTFOUND: + rootName = None + + return lastName == rootName + + cryptlib_py.cryptSetAttribute(lastC, + cryptlib_py.CRYPT_CERTINFO_ISSUERNAME, + cryptlib_py.CRYPT_UNUSED) + + if not compareNames(cryptlib_py.CRYPT_CERTINFO_COUNTRYNAME): + return False + if not compareNames(cryptlib_py.CRYPT_CERTINFO_LOCALITYNAME): + return False + if not compareNames(cryptlib_py.CRYPT_CERTINFO_ORGANIZATIONNAME): + return False + if not compareNames(cryptlib_py.CRYPT_CERTINFO_ORGANIZATIONALUNITNAME): + return False + if not compareNames(cryptlib_py.CRYPT_CERTINFO_COMMONNAME): + return False + return True \ No newline at end of file diff --git a/gam/gdata/analytics/tlslite/__init__.py b/gam/gdata/analytics/tlslite/__init__.py new file mode 100755 index 00000000000..47cfd1c6f18 --- /dev/null +++ b/gam/gdata/analytics/tlslite/__init__.py @@ -0,0 +1,39 @@ +""" +TLS Lite is a free python library that implements SSL v3, TLS v1, and +TLS v1.1. TLS Lite supports non-traditional authentication methods +such as SRP, shared keys, and cryptoIDs, in addition to X.509 +certificates. TLS Lite is pure python, however it can access OpenSSL, +cryptlib, pycrypto, and GMPY for faster crypto operations. TLS Lite +integrates with httplib, xmlrpclib, poplib, imaplib, smtplib, +SocketServer, asyncore, and Twisted. + +To use, do:: + + from tlslite.api import * + +Then use the L{tlslite.TLSConnection.TLSConnection} class with a socket, +or use one of the integration classes in L{tlslite.integration}. 
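A minimal synchronous client along the lines described above, with a hypothetical host; calling handshakeClientCert() with no arguments performs a plain certificate-based handshake in which only the server authenticates::

    import socket
    from tlslite.api import *

    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    sock.connect(("www.example.com", 443))        # hypothetical server

    connection = TLSConnection(sock)
    connection.handshakeClientCert()              # server-authenticated handshake
    connection.write("GET / HTTP/1.0\r\n\r\n")
    data = connection.read(max=16384)
    connection.close()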
+ +@version: 0.3.8 +""" +__version__ = "0.3.8" + +__all__ = ["api", + "BaseDB", + "Checker", + "constants", + "errors", + "FileObject", + "HandshakeSettings", + "mathtls", + "messages", + "Session", + "SessionCache", + "SharedKeyDB", + "TLSConnection", + "TLSRecordLayer", + "VerifierDB", + "X509", + "X509CertChain", + "integration", + "utils"] diff --git a/gam/gdata/analytics/tlslite/__init__.pyc b/gam/gdata/analytics/tlslite/__init__.pyc new file mode 100755 index 0000000000000000000000000000000000000000..84802f3116cdd35cc326dc9c690bff38c1171866 GIT binary patch literal 1214 zcmah|ZEqAu44%A@z(OcVD4+c?sxGTU2&s(_ia694aS+5s6^$aTh8^GSG`lma8DGMw z;sd|Af1uCICQA6!J?*URu^-!yeQW=26)&FY_qDoycJV#N8&7Z;rOt4!sIwJiR+U*( zW?h+k%4{gJsmy(49w<{N^H7;b%4{j~SeYM`d7{j=GCRubD)Us$chwnUe^k@GX9@V* zs#3-2WJ(h&Og56HSzxAqDUEm3+Rs6UB}t=&Y}dEkv6D#C>4Ywyk0?c69*#&mlTj`Y z%fphgIRnN;-}`_7=iR;pZLC=DbW3_64UlZDlYu%$Iunbys2kG2|M+1v#?Hh4m(G>veHl^(r3&s zT^ss(fcJyRc6tzEH#ikjUth3HIiO4=k4snkz(X0QR~G3_Srn(977lRIX~H2LSo2@@Q9=eSODD8JLbQr&70lk3+Mb4{ZH1SA;m z@Xj*YO|NN^IRs5(K(|TmIDAFj$LK1;`G^k6=jG2uyoXiHqO3te-Lir2YaRK`YuSW2 zZWx3Fc>uZgE{et$W+x88hbRRuezh&1eEu7)$Trm9v6NV$3!buwb@P~!IxP))m;3PO zFm^rX;bIB}HOV7fT+3r!H!Kg6A+O?FUZS2Y=-=@qj|a;~#*oZJFHvZ!=T8pUMh=Ks zhx!RCnuC)thR?XsASS`z;YSh|3$bl{zn93!MOT&abya2Gs=8(mdZMPPblX|~qw~4<^OpB@-^cwSrS K)wPxX|NRF%0$_mv literal 0 HcmV?d00001 diff --git a/gam/gdata/analytics/tlslite/api.py b/gam/gdata/analytics/tlslite/api.py new file mode 100755 index 00000000000..eebfbc6091e --- /dev/null +++ b/gam/gdata/analytics/tlslite/api.py @@ -0,0 +1,75 @@ +"""Import this module for easy access to TLS Lite objects. + +The TLS Lite API consists of classes, functions, and variables spread +throughout this package. Instead of importing them individually with:: + + from tlslite.TLSConnection import TLSConnection + from tlslite.HandshakeSettings import HandshakeSettings + from tlslite.errors import * + . + . + +It's easier to do:: + + from tlslite.api import * + +This imports all the important objects (TLSConnection, Checker, +HandshakeSettings, etc.) into the global namespace. 
In particular, it +imports:: + + from constants import AlertLevel, AlertDescription, Fault + from errors import * + from Checker import Checker + from HandshakeSettings import HandshakeSettings + from Session import Session + from SessionCache import SessionCache + from SharedKeyDB import SharedKeyDB + from TLSConnection import TLSConnection + from VerifierDB import VerifierDB + from X509 import X509 + from X509CertChain import X509CertChain + + from integration.HTTPTLSConnection import HTTPTLSConnection + from integration.POP3_TLS import POP3_TLS + from integration.IMAP4_TLS import IMAP4_TLS + from integration.SMTP_TLS import SMTP_TLS + from integration.XMLRPCTransport import XMLRPCTransport + from integration.TLSSocketServerMixIn import TLSSocketServerMixIn + from integration.TLSAsyncDispatcherMixIn import TLSAsyncDispatcherMixIn + from integration.TLSTwistedProtocolWrapper import TLSTwistedProtocolWrapper + from utils.cryptomath import cryptlibpyLoaded, m2cryptoLoaded, + gmpyLoaded, pycryptoLoaded, prngName + from utils.keyfactory import generateRSAKey, parsePEMKey, parseXMLKey, + parseAsPublicKey, parsePrivateKey +""" + +from constants import AlertLevel, AlertDescription, Fault +from errors import * +from Checker import Checker +from HandshakeSettings import HandshakeSettings +from Session import Session +from SessionCache import SessionCache +from SharedKeyDB import SharedKeyDB +from TLSConnection import TLSConnection +from VerifierDB import VerifierDB +from X509 import X509 +from X509CertChain import X509CertChain + +from integration.HTTPTLSConnection import HTTPTLSConnection +from integration.TLSSocketServerMixIn import TLSSocketServerMixIn +from integration.TLSAsyncDispatcherMixIn import TLSAsyncDispatcherMixIn +from integration.POP3_TLS import POP3_TLS +from integration.IMAP4_TLS import IMAP4_TLS +from integration.SMTP_TLS import SMTP_TLS +from integration.XMLRPCTransport import XMLRPCTransport +try: + import twisted + del(twisted) + from integration.TLSTwistedProtocolWrapper import TLSTwistedProtocolWrapper +except ImportError: + pass + +from utils.cryptomath import cryptlibpyLoaded, m2cryptoLoaded, gmpyLoaded, \ + pycryptoLoaded, prngName +from utils.keyfactory import generateRSAKey, parsePEMKey, parseXMLKey, \ + parseAsPublicKey, parsePrivateKey diff --git a/gam/gdata/analytics/tlslite/constants.py b/gam/gdata/analytics/tlslite/constants.py new file mode 100755 index 00000000000..8f2d5590e94 --- /dev/null +++ b/gam/gdata/analytics/tlslite/constants.py @@ -0,0 +1,225 @@ +"""Constants used in various places.""" + +class CertificateType: + x509 = 0 + openpgp = 1 + cryptoID = 2 + +class HandshakeType: + hello_request = 0 + client_hello = 1 + server_hello = 2 + certificate = 11 + server_key_exchange = 12 + certificate_request = 13 + server_hello_done = 14 + certificate_verify = 15 + client_key_exchange = 16 + finished = 20 + +class ContentType: + change_cipher_spec = 20 + alert = 21 + handshake = 22 + application_data = 23 + all = (20,21,22,23) + +class AlertLevel: + warning = 1 + fatal = 2 + +class AlertDescription: + """ + @cvar bad_record_mac: A TLS record failed to decrypt properly. + + If this occurs during a shared-key or SRP handshake it most likely + indicates a bad password. It may also indicate an implementation + error, or some tampering with the data in transit. + + This alert will be signalled by the server if the SRP password is bad. It + may also be signalled by the server if the SRP username is unknown to the + server, but it doesn't wish to reveal that fact. 
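In practice a client distinguishes these cases by catching TLSRemoteAlert and comparing its description against the constants defined here; a sketch with hypothetical credentials, where connection is a TLSConnection::

    from tlslite.api import *

    try:
        connection.handshakeClientSRP("alice", "wrong-password")   # hypothetical
    except TLSRemoteAlert, alert:
        if alert.description == AlertDescription.bad_record_mac:
            print "most likely a bad SRP password"
        elif alert.description == AlertDescription.unknown_srp_username:
            print "server does not recognize this username"
        else:
            raise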
+ + This alert will be signalled by the client if the shared-key username is + bad. + + @cvar handshake_failure: A problem occurred while handshaking. + + This typically indicates a lack of common ciphersuites between client and + server, or some other disagreement (about SRP parameters or key sizes, + for example). + + @cvar protocol_version: The other party's SSL/TLS version was unacceptable. + + This indicates that the client and server couldn't agree on which version + of SSL or TLS to use. + + @cvar user_canceled: The handshake is being cancelled for some reason. + + """ + + close_notify = 0 + unexpected_message = 10 + bad_record_mac = 20 + decryption_failed = 21 + record_overflow = 22 + decompression_failure = 30 + handshake_failure = 40 + no_certificate = 41 #SSLv3 + bad_certificate = 42 + unsupported_certificate = 43 + certificate_revoked = 44 + certificate_expired = 45 + certificate_unknown = 46 + illegal_parameter = 47 + unknown_ca = 48 + access_denied = 49 + decode_error = 50 + decrypt_error = 51 + export_restriction = 60 + protocol_version = 70 + insufficient_security = 71 + internal_error = 80 + user_canceled = 90 + no_renegotiation = 100 + unknown_srp_username = 120 + missing_srp_username = 121 + untrusted_srp_parameters = 122 + +class CipherSuite: + TLS_SRP_SHA_WITH_3DES_EDE_CBC_SHA = 0x0050 + TLS_SRP_SHA_WITH_AES_128_CBC_SHA = 0x0053 + TLS_SRP_SHA_WITH_AES_256_CBC_SHA = 0x0056 + + TLS_SRP_SHA_RSA_WITH_3DES_EDE_CBC_SHA = 0x0051 + TLS_SRP_SHA_RSA_WITH_AES_128_CBC_SHA = 0x0054 + TLS_SRP_SHA_RSA_WITH_AES_256_CBC_SHA = 0x0057 + + TLS_RSA_WITH_3DES_EDE_CBC_SHA = 0x000A + TLS_RSA_WITH_AES_128_CBC_SHA = 0x002F + TLS_RSA_WITH_AES_256_CBC_SHA = 0x0035 + TLS_RSA_WITH_RC4_128_SHA = 0x0005 + + srpSuites = [] + srpSuites.append(TLS_SRP_SHA_WITH_3DES_EDE_CBC_SHA) + srpSuites.append(TLS_SRP_SHA_WITH_AES_128_CBC_SHA) + srpSuites.append(TLS_SRP_SHA_WITH_AES_256_CBC_SHA) + def getSrpSuites(ciphers): + suites = [] + for cipher in ciphers: + if cipher == "aes128": + suites.append(CipherSuite.TLS_SRP_SHA_WITH_AES_128_CBC_SHA) + elif cipher == "aes256": + suites.append(CipherSuite.TLS_SRP_SHA_WITH_AES_256_CBC_SHA) + elif cipher == "3des": + suites.append(CipherSuite.TLS_SRP_SHA_WITH_3DES_EDE_CBC_SHA) + return suites + getSrpSuites = staticmethod(getSrpSuites) + + srpRsaSuites = [] + srpRsaSuites.append(TLS_SRP_SHA_RSA_WITH_3DES_EDE_CBC_SHA) + srpRsaSuites.append(TLS_SRP_SHA_RSA_WITH_AES_128_CBC_SHA) + srpRsaSuites.append(TLS_SRP_SHA_RSA_WITH_AES_256_CBC_SHA) + def getSrpRsaSuites(ciphers): + suites = [] + for cipher in ciphers: + if cipher == "aes128": + suites.append(CipherSuite.TLS_SRP_SHA_RSA_WITH_AES_128_CBC_SHA) + elif cipher == "aes256": + suites.append(CipherSuite.TLS_SRP_SHA_RSA_WITH_AES_256_CBC_SHA) + elif cipher == "3des": + suites.append(CipherSuite.TLS_SRP_SHA_RSA_WITH_3DES_EDE_CBC_SHA) + return suites + getSrpRsaSuites = staticmethod(getSrpRsaSuites) + + rsaSuites = [] + rsaSuites.append(TLS_RSA_WITH_3DES_EDE_CBC_SHA) + rsaSuites.append(TLS_RSA_WITH_AES_128_CBC_SHA) + rsaSuites.append(TLS_RSA_WITH_AES_256_CBC_SHA) + rsaSuites.append(TLS_RSA_WITH_RC4_128_SHA) + def getRsaSuites(ciphers): + suites = [] + for cipher in ciphers: + if cipher == "aes128": + suites.append(CipherSuite.TLS_RSA_WITH_AES_128_CBC_SHA) + elif cipher == "aes256": + suites.append(CipherSuite.TLS_RSA_WITH_AES_256_CBC_SHA) + elif cipher == "rc4": + suites.append(CipherSuite.TLS_RSA_WITH_RC4_128_SHA) + elif cipher == "3des": + suites.append(CipherSuite.TLS_RSA_WITH_3DES_EDE_CBC_SHA) + return suites + getRsaSuites = 
staticmethod(getRsaSuites) + + tripleDESSuites = [] + tripleDESSuites.append(TLS_SRP_SHA_WITH_3DES_EDE_CBC_SHA) + tripleDESSuites.append(TLS_SRP_SHA_RSA_WITH_3DES_EDE_CBC_SHA) + tripleDESSuites.append(TLS_RSA_WITH_3DES_EDE_CBC_SHA) + + aes128Suites = [] + aes128Suites.append(TLS_SRP_SHA_WITH_AES_128_CBC_SHA) + aes128Suites.append(TLS_SRP_SHA_RSA_WITH_AES_128_CBC_SHA) + aes128Suites.append(TLS_RSA_WITH_AES_128_CBC_SHA) + + aes256Suites = [] + aes256Suites.append(TLS_SRP_SHA_WITH_AES_256_CBC_SHA) + aes256Suites.append(TLS_SRP_SHA_RSA_WITH_AES_256_CBC_SHA) + aes256Suites.append(TLS_RSA_WITH_AES_256_CBC_SHA) + + rc4Suites = [] + rc4Suites.append(TLS_RSA_WITH_RC4_128_SHA) + + +class Fault: + badUsername = 101 + badPassword = 102 + badA = 103 + clientSrpFaults = range(101,104) + + badVerifyMessage = 601 + clientCertFaults = range(601,602) + + badPremasterPadding = 501 + shortPremasterSecret = 502 + clientNoAuthFaults = range(501,503) + + badIdentifier = 401 + badSharedKey = 402 + clientSharedKeyFaults = range(401,403) + + badB = 201 + serverFaults = range(201,202) + + badFinished = 300 + badMAC = 301 + badPadding = 302 + genericFaults = range(300,303) + + faultAlerts = {\ + badUsername: (AlertDescription.unknown_srp_username, \ + AlertDescription.bad_record_mac),\ + badPassword: (AlertDescription.bad_record_mac,),\ + badA: (AlertDescription.illegal_parameter,),\ + badIdentifier: (AlertDescription.handshake_failure,),\ + badSharedKey: (AlertDescription.bad_record_mac,),\ + badPremasterPadding: (AlertDescription.bad_record_mac,),\ + shortPremasterSecret: (AlertDescription.bad_record_mac,),\ + badVerifyMessage: (AlertDescription.decrypt_error,),\ + badFinished: (AlertDescription.decrypt_error,),\ + badMAC: (AlertDescription.bad_record_mac,),\ + badPadding: (AlertDescription.bad_record_mac,) + } + + faultNames = {\ + badUsername: "bad username",\ + badPassword: "bad password",\ + badA: "bad A",\ + badIdentifier: "bad identifier",\ + badSharedKey: "bad sharedkey",\ + badPremasterPadding: "bad premaster padding",\ + shortPremasterSecret: "short premaster secret",\ + badVerifyMessage: "bad verify message",\ + badFinished: "bad finished message",\ + badMAC: "bad MAC",\ + badPadding: "bad padding" + } diff --git a/gam/gdata/analytics/tlslite/errors.py b/gam/gdata/analytics/tlslite/errors.py new file mode 100755 index 00000000000..c7f7ba81d4d --- /dev/null +++ b/gam/gdata/analytics/tlslite/errors.py @@ -0,0 +1,149 @@ +"""Exception classes. +@sort: TLSError, TLSAbruptCloseError, TLSAlert, TLSLocalAlert, TLSRemoteAlert, +TLSAuthenticationError, TLSNoAuthenticationError, TLSAuthenticationTypeError, +TLSFingerprintError, TLSAuthorizationError, TLSValidationError, TLSFaultError +""" + +from constants import AlertDescription, AlertLevel + +class TLSError(Exception): + """Base class for all TLS Lite exceptions.""" + pass + +class TLSAbruptCloseError(TLSError): + """The socket was closed without a proper TLS shutdown. + + The TLS specification mandates that an alert of some sort + must be sent before the underlying socket is closed. If the socket + is closed without this, it could signify that an attacker is trying + to truncate the connection. It could also signify a misbehaving + TLS implementation, or a random network failure. 
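The CipherSuite.get*Suites() helpers defined in constants.py above translate the cipher names accepted by HandshakeSettings into numeric suite identifiers; for example::

    from tlslite.constants import CipherSuite

    suites = CipherSuite.getRsaSuites(["aes128", "rc4"])
    # -> [TLS_RSA_WITH_AES_128_CBC_SHA (0x002F), TLS_RSA_WITH_RC4_128_SHA (0x0005)]
    assert CipherSuite.TLS_RSA_WITH_AES_128_CBC_SHA in suites
    assert CipherSuite.TLS_RSA_WITH_RC4_128_SHA in suites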
+ """ + pass + +class TLSAlert(TLSError): + """A TLS alert has been signalled.""" + pass + + _descriptionStr = {\ + AlertDescription.close_notify: "close_notify",\ + AlertDescription.unexpected_message: "unexpected_message",\ + AlertDescription.bad_record_mac: "bad_record_mac",\ + AlertDescription.decryption_failed: "decryption_failed",\ + AlertDescription.record_overflow: "record_overflow",\ + AlertDescription.decompression_failure: "decompression_failure",\ + AlertDescription.handshake_failure: "handshake_failure",\ + AlertDescription.no_certificate: "no certificate",\ + AlertDescription.bad_certificate: "bad_certificate",\ + AlertDescription.unsupported_certificate: "unsupported_certificate",\ + AlertDescription.certificate_revoked: "certificate_revoked",\ + AlertDescription.certificate_expired: "certificate_expired",\ + AlertDescription.certificate_unknown: "certificate_unknown",\ + AlertDescription.illegal_parameter: "illegal_parameter",\ + AlertDescription.unknown_ca: "unknown_ca",\ + AlertDescription.access_denied: "access_denied",\ + AlertDescription.decode_error: "decode_error",\ + AlertDescription.decrypt_error: "decrypt_error",\ + AlertDescription.export_restriction: "export_restriction",\ + AlertDescription.protocol_version: "protocol_version",\ + AlertDescription.insufficient_security: "insufficient_security",\ + AlertDescription.internal_error: "internal_error",\ + AlertDescription.user_canceled: "user_canceled",\ + AlertDescription.no_renegotiation: "no_renegotiation",\ + AlertDescription.unknown_srp_username: "unknown_srp_username",\ + AlertDescription.missing_srp_username: "missing_srp_username"} + +class TLSLocalAlert(TLSAlert): + """A TLS alert has been signalled by the local implementation. + + @type description: int + @ivar description: Set to one of the constants in + L{tlslite.constants.AlertDescription} + + @type level: int + @ivar level: Set to one of the constants in + L{tlslite.constants.AlertLevel} + + @type message: str + @ivar message: Description of what went wrong. + """ + def __init__(self, alert, message=None): + self.description = alert.description + self.level = alert.level + self.message = message + + def __str__(self): + alertStr = TLSAlert._descriptionStr.get(self.description) + if alertStr == None: + alertStr = str(self.description) + if self.message: + return alertStr + ": " + self.message + else: + return alertStr + +class TLSRemoteAlert(TLSAlert): + """A TLS alert has been signalled by the remote implementation. + + @type description: int + @ivar description: Set to one of the constants in + L{tlslite.constants.AlertDescription} + + @type level: int + @ivar level: Set to one of the constants in + L{tlslite.constants.AlertLevel} + """ + def __init__(self, alert): + self.description = alert.description + self.level = alert.level + + def __str__(self): + alertStr = TLSAlert._descriptionStr.get(self.description) + if alertStr == None: + alertStr = str(self.description) + return alertStr + +class TLSAuthenticationError(TLSError): + """The handshake succeeded, but the other party's authentication + was inadequate. + + This exception will only be raised when a + L{tlslite.Checker.Checker} has been passed to a handshake function. + The Checker will be invoked once the handshake completes, and if + the Checker objects to how the other party authenticated, a + subclass of this exception will be raised. 
+ """ + pass + +class TLSNoAuthenticationError(TLSAuthenticationError): + """The Checker was expecting the other party to authenticate with a + certificate chain, but this did not occur.""" + pass + +class TLSAuthenticationTypeError(TLSAuthenticationError): + """The Checker was expecting the other party to authenticate with a + different type of certificate chain.""" + pass + +class TLSFingerprintError(TLSAuthenticationError): + """The Checker was expecting the other party to authenticate with a + certificate chain that matches a different fingerprint.""" + pass + +class TLSAuthorizationError(TLSAuthenticationError): + """The Checker was expecting the other party to authenticate with a + certificate chain that has a different authorization.""" + pass + +class TLSValidationError(TLSAuthenticationError): + """The Checker has determined that the other party's certificate + chain is invalid.""" + pass + +class TLSFaultError(TLSError): + """The other party responded incorrectly to an induced fault. + + This exception will only occur during fault testing, when a + TLSConnection's fault variable is set to induce some sort of + faulty behavior, and the other party doesn't respond appropriately. + """ + pass diff --git a/gam/gdata/analytics/tlslite/integration/AsyncStateMachine.py b/gam/gdata/analytics/tlslite/integration/AsyncStateMachine.py new file mode 100755 index 00000000000..abed604321a --- /dev/null +++ b/gam/gdata/analytics/tlslite/integration/AsyncStateMachine.py @@ -0,0 +1,235 @@ +""" +A state machine for using TLS Lite with asynchronous I/O. +""" + +class AsyncStateMachine: + """ + This is an abstract class that's used to integrate TLS Lite with + asyncore and Twisted. + + This class signals wantsReadsEvent() and wantsWriteEvent(). When + the underlying socket has become readable or writeable, the event + should be passed to this class by calling inReadEvent() or + inWriteEvent(). This class will then try to read or write through + the socket, and will update its state appropriately. + + This class will forward higher-level events to its subclass. For + example, when a complete TLS record has been received, + outReadEvent() will be called with the decrypted data. + """ + + def __init__(self): + self._clear() + + def _clear(self): + #These store the various asynchronous operations (i.e. + #generators). Only one of them, at most, is ever active at a + #time. + self.handshaker = None + self.closer = None + self.reader = None + self.writer = None + + #This stores the result from the last call to the + #currently active operation. If 0 it indicates that the + #operation wants to read, if 1 it indicates that the + #operation wants to write. If None, there is no active + #operation. + self.result = None + + def _checkAssert(self, maxActive=1): + #This checks that only one operation, at most, is + #active, and that self.result is set appropriately. + activeOps = 0 + if self.handshaker: + activeOps += 1 + if self.closer: + activeOps += 1 + if self.reader: + activeOps += 1 + if self.writer: + activeOps += 1 + + if self.result == None: + if activeOps != 0: + raise AssertionError() + elif self.result in (0,1): + if activeOps != 1: + raise AssertionError() + else: + raise AssertionError() + if activeOps > maxActive: + raise AssertionError() + + def wantsReadEvent(self): + """If the state machine wants to read. + + If an operation is active, this returns whether or not the + operation wants to read from the socket. If an operation is + not active, this returns None. 
+ + @rtype: bool or None + @return: If the state machine wants to read. + """ + if self.result != None: + return self.result == 0 + return None + + def wantsWriteEvent(self): + """If the state machine wants to write. + + If an operation is active, this returns whether or not the + operation wants to write to the socket. If an operation is + not active, this returns None. + + @rtype: bool or None + @return: If the state machine wants to write. + """ + if self.result != None: + return self.result == 1 + return None + + def outConnectEvent(self): + """Called when a handshake operation completes. + + May be overridden in subclass. + """ + pass + + def outCloseEvent(self): + """Called when a close operation completes. + + May be overridden in subclass. + """ + pass + + def outReadEvent(self, readBuffer): + """Called when a read operation completes. + + May be overridden in subclass.""" + pass + + def outWriteEvent(self): + """Called when a write operation completes. + + May be overridden in subclass.""" + pass + + def inReadEvent(self): + """Tell the state machine it can read from the socket.""" + try: + self._checkAssert() + if self.handshaker: + self._doHandshakeOp() + elif self.closer: + self._doCloseOp() + elif self.reader: + self._doReadOp() + elif self.writer: + self._doWriteOp() + else: + self.reader = self.tlsConnection.readAsync(16384) + self._doReadOp() + except: + self._clear() + raise + + def inWriteEvent(self): + """Tell the state machine it can write to the socket.""" + try: + self._checkAssert() + if self.handshaker: + self._doHandshakeOp() + elif self.closer: + self._doCloseOp() + elif self.reader: + self._doReadOp() + elif self.writer: + self._doWriteOp() + else: + self.outWriteEvent() + except: + self._clear() + raise + + def _doHandshakeOp(self): + try: + self.result = self.handshaker.next() + except StopIteration: + self.handshaker = None + self.result = None + self.outConnectEvent() + + def _doCloseOp(self): + try: + self.result = self.closer.next() + except StopIteration: + self.closer = None + self.result = None + self.outCloseEvent() + + def _doReadOp(self): + self.result = self.reader.next() + if not self.result in (0,1): + readBuffer = self.result + self.reader = None + self.result = None + self.outReadEvent(readBuffer) + + def _doWriteOp(self): + try: + self.result = self.writer.next() + except StopIteration: + self.writer = None + self.result = None + + def setHandshakeOp(self, handshaker): + """Start a handshake operation. + + @type handshaker: generator + @param handshaker: A generator created by using one of the + asynchronous handshake functions (i.e. handshakeServerAsync, or + handshakeClientxxx(..., async=True). + """ + try: + self._checkAssert(0) + self.handshaker = handshaker + self._doHandshakeOp() + except: + self._clear() + raise + + def setServerHandshakeOp(self, **args): + """Start a handshake operation. + + The arguments passed to this function will be forwarded to + L{tlslite.TLSConnection.TLSConnection.handshakeServerAsync}. + """ + handshaker = self.tlsConnection.handshakeServerAsync(**args) + self.setHandshakeOp(handshaker) + + def setCloseOp(self): + """Start a close operation. + """ + try: + self._checkAssert(0) + self.closer = self.tlsConnection.closeAsync() + self._doCloseOp() + except: + self._clear() + raise + + def setWriteOp(self, writeBuffer): + """Start a write operation. + + @type writeBuffer: str + @param writeBuffer: The string to transmit. 
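Outside of asyncore or Twisted, the same state machine can be driven from a plain select() loop; a sketch assuming machine is an AsyncStateMachine subclass whose tlsConnection attribute has been set, as the mix-in classes in this package do::

    import select

    def pump(machine, sock):
        # Drive the currently active operation (handshake, read, write,
        # or close) until it finishes and the machine goes idle.
        while 1:
            rlist, wlist = [], []
            if machine.wantsReadEvent():
                rlist = [sock]
            if machine.wantsWriteEvent():
                wlist = [sock]
            if not rlist and not wlist:
                break                        # no operation in progress
            r, w, _ = select.select(rlist, wlist, [])
            if r:
                machine.inReadEvent()
            if w:
                machine.inWriteEvent()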
+ """ + try: + self._checkAssert(0) + self.writer = self.tlsConnection.writeAsync(writeBuffer) + self._doWriteOp() + except: + self._clear() + raise + diff --git a/gam/gdata/analytics/tlslite/integration/ClientHelper.py b/gam/gdata/analytics/tlslite/integration/ClientHelper.py new file mode 100755 index 00000000000..58e0152f9f5 --- /dev/null +++ b/gam/gdata/analytics/tlslite/integration/ClientHelper.py @@ -0,0 +1,163 @@ +""" +A helper class for using TLS Lite with stdlib clients +(httplib, xmlrpclib, imaplib, poplib). +""" + +from gdata.tlslite.Checker import Checker + +class ClientHelper: + """This is a helper class used to integrate TLS Lite with various + TLS clients (e.g. poplib, smtplib, httplib, etc.)""" + + def __init__(self, + username=None, password=None, sharedKey=None, + certChain=None, privateKey=None, + cryptoID=None, protocol=None, + x509Fingerprint=None, + x509TrustList=None, x509CommonName=None, + settings = None): + """ + For client authentication, use one of these argument + combinations: + - username, password (SRP) + - username, sharedKey (shared-key) + - certChain, privateKey (certificate) + + For server authentication, you can either rely on the + implicit mutual authentication performed by SRP or + shared-keys, or you can do certificate-based server + authentication with one of these argument combinations: + - cryptoID[, protocol] (requires cryptoIDlib) + - x509Fingerprint + - x509TrustList[, x509CommonName] (requires cryptlib_py) + + Certificate-based server authentication is compatible with + SRP or certificate-based client authentication. It is + not compatible with shared-keys. + + The constructor does not perform the TLS handshake itself, but + simply stores these arguments for later. The handshake is + performed only when this class needs to connect with the + server. Then you should be prepared to handle TLS-specific + exceptions. See the client handshake functions in + L{tlslite.TLSConnection.TLSConnection} for details on which + exceptions might be raised. + + @type username: str + @param username: SRP or shared-key username. Requires the + 'password' or 'sharedKey' argument. + + @type password: str + @param password: SRP password for mutual authentication. + Requires the 'username' argument. + + @type sharedKey: str + @param sharedKey: Shared key for mutual authentication. + Requires the 'username' argument. + + @type certChain: L{tlslite.X509CertChain.X509CertChain} or + L{cryptoIDlib.CertChain.CertChain} + @param certChain: Certificate chain for client authentication. + Requires the 'privateKey' argument. Excludes the SRP or + shared-key related arguments. + + @type privateKey: L{tlslite.utils.RSAKey.RSAKey} + @param privateKey: Private key for client authentication. + Requires the 'certChain' argument. Excludes the SRP or + shared-key related arguments. + + @type cryptoID: str + @param cryptoID: cryptoID for server authentication. Mutually + exclusive with the 'x509...' arguments. + + @type protocol: str + @param protocol: cryptoID protocol URI for server + authentication. Requires the 'cryptoID' argument. + + @type x509Fingerprint: str + @param x509Fingerprint: Hex-encoded X.509 fingerprint for + server authentication. Mutually exclusive with the 'cryptoID' + and 'x509TrustList' arguments. + + @type x509TrustList: list of L{tlslite.X509.X509} + @param x509TrustList: A list of trusted root certificates. The + other party must present a certificate chain which extends to + one of these root certificates. 
The cryptlib_py module must be + installed to use this parameter. Mutually exclusive with the + 'cryptoID' and 'x509Fingerprint' arguments. + + @type x509CommonName: str + @param x509CommonName: The end-entity certificate's 'CN' field + must match this value. For a web server, this is typically a + server name such as 'www.amazon.com'. Mutually exclusive with + the 'cryptoID' and 'x509Fingerprint' arguments. Requires the + 'x509TrustList' argument. + + @type settings: L{tlslite.HandshakeSettings.HandshakeSettings} + @param settings: Various settings which can be used to control + the ciphersuites, certificate types, and SSL/TLS versions + offered by the client. + """ + + self.username = None + self.password = None + self.sharedKey = None + self.certChain = None + self.privateKey = None + self.checker = None + + #SRP Authentication + if username and password and not \ + (sharedKey or certChain or privateKey): + self.username = username + self.password = password + + #Shared Key Authentication + elif username and sharedKey and not \ + (password or certChain or privateKey): + self.username = username + self.sharedKey = sharedKey + + #Certificate Chain Authentication + elif certChain and privateKey and not \ + (username or password or sharedKey): + self.certChain = certChain + self.privateKey = privateKey + + #No Authentication + elif not password and not username and not \ + sharedKey and not certChain and not privateKey: + pass + + else: + raise ValueError("Bad parameters") + + #Authenticate the server based on its cryptoID or fingerprint + if sharedKey and (cryptoID or protocol or x509Fingerprint): + raise ValueError("Can't use shared keys with other forms of"\ + "authentication") + + self.checker = Checker(cryptoID, protocol, x509Fingerprint, + x509TrustList, x509CommonName) + self.settings = settings + + self.tlsSession = None + + def _handshake(self, tlsConnection): + if self.username and self.password: + tlsConnection.handshakeClientSRP(username=self.username, + password=self.password, + checker=self.checker, + settings=self.settings, + session=self.tlsSession) + elif self.username and self.sharedKey: + tlsConnection.handshakeClientSharedKey(username=self.username, + sharedKey=self.sharedKey, + settings=self.settings) + else: + tlsConnection.handshakeClientCert(certChain=self.certChain, + privateKey=self.privateKey, + checker=self.checker, + settings=self.settings, + session=self.tlsSession) + self.tlsSession = tlsConnection.session diff --git a/gam/gdata/analytics/tlslite/integration/HTTPTLSConnection.py b/gam/gdata/analytics/tlslite/integration/HTTPTLSConnection.py new file mode 100755 index 00000000000..58e31a10805 --- /dev/null +++ b/gam/gdata/analytics/tlslite/integration/HTTPTLSConnection.py @@ -0,0 +1,169 @@ +"""TLS Lite + httplib.""" + +import socket +import httplib +from gdata.tlslite.TLSConnection import TLSConnection +from gdata.tlslite.integration.ClientHelper import ClientHelper + + +class HTTPBaseTLSConnection(httplib.HTTPConnection): + """This abstract class provides a framework for adding TLS support + to httplib.""" + + default_port = 443 + + def __init__(self, host, port=None, strict=None): + if strict == None: + #Python 2.2 doesn't support strict + httplib.HTTPConnection.__init__(self, host, port) + else: + httplib.HTTPConnection.__init__(self, host, port, strict) + + def connect(self): + sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) + if hasattr(sock, 'settimeout'): + sock.settimeout(10) + sock.connect((self.host, self.port)) + + #Use a TLSConnection to 
emulate a socket + self.sock = TLSConnection(sock) + + #When httplib closes this, close the socket + self.sock.closeSocket = True + self._handshake(self.sock) + + def _handshake(self, tlsConnection): + """Called to perform some sort of handshake. + + This method must be overridden in a subclass to do some type of + handshake. This method will be called after the socket has + been connected but before any data has been sent. If this + method does not raise an exception, the TLS connection will be + considered valid. + + This method may (or may not) be called every time an HTTP + request is performed, depending on whether the underlying HTTP + connection is persistent. + + @type tlsConnection: L{tlslite.TLSConnection.TLSConnection} + @param tlsConnection: The connection to perform the handshake + on. + """ + raise NotImplementedError() + + +class HTTPTLSConnection(HTTPBaseTLSConnection, ClientHelper): + """This class extends L{HTTPBaseTLSConnection} to support the + common types of handshaking.""" + + def __init__(self, host, port=None, + username=None, password=None, sharedKey=None, + certChain=None, privateKey=None, + cryptoID=None, protocol=None, + x509Fingerprint=None, + x509TrustList=None, x509CommonName=None, + settings = None): + """Create a new HTTPTLSConnection. + + For client authentication, use one of these argument + combinations: + - username, password (SRP) + - username, sharedKey (shared-key) + - certChain, privateKey (certificate) + + For server authentication, you can either rely on the + implicit mutual authentication performed by SRP or + shared-keys, or you can do certificate-based server + authentication with one of these argument combinations: + - cryptoID[, protocol] (requires cryptoIDlib) + - x509Fingerprint + - x509TrustList[, x509CommonName] (requires cryptlib_py) + + Certificate-based server authentication is compatible with + SRP or certificate-based client authentication. It is + not compatible with shared-keys. + + The constructor does not perform the TLS handshake itself, but + simply stores these arguments for later. The handshake is + performed only when this class needs to connect with the + server. Thus you should be prepared to handle TLS-specific + exceptions when calling methods inherited from + L{httplib.HTTPConnection} such as request(), connect(), and + send(). See the client handshake functions in + L{tlslite.TLSConnection.TLSConnection} for details on which + exceptions might be raised. + + @type host: str + @param host: Server to connect to. + + @type port: int + @param port: Port to connect to. + + @type username: str + @param username: SRP or shared-key username. Requires the + 'password' or 'sharedKey' argument. + + @type password: str + @param password: SRP password for mutual authentication. + Requires the 'username' argument. + + @type sharedKey: str + @param sharedKey: Shared key for mutual authentication. + Requires the 'username' argument. + + @type certChain: L{tlslite.X509CertChain.X509CertChain} or + L{cryptoIDlib.CertChain.CertChain} + @param certChain: Certificate chain for client authentication. + Requires the 'privateKey' argument. Excludes the SRP or + shared-key related arguments. + + @type privateKey: L{tlslite.utils.RSAKey.RSAKey} + @param privateKey: Private key for client authentication. + Requires the 'certChain' argument. Excludes the SRP or + shared-key related arguments. + + @type cryptoID: str + @param cryptoID: cryptoID for server authentication. Mutually + exclusive with the 'x509...' arguments. 
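The argument combinations described above map directly onto the constructor; for example, pinning the server by certificate fingerprint (host and fingerprint are hypothetical)::

    from tlslite.api import HTTPTLSConnection

    h = HTTPTLSConnection("www.example.com", 443,
                          x509Fingerprint="0d40...ab12")   # hypothetical pin
    h.request("GET", "/index.html")
    response = h.getresponse()
    print response.status
    print response.read()[:80]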
+ + @type protocol: str + @param protocol: cryptoID protocol URI for server + authentication. Requires the 'cryptoID' argument. + + @type x509Fingerprint: str + @param x509Fingerprint: Hex-encoded X.509 fingerprint for + server authentication. Mutually exclusive with the 'cryptoID' + and 'x509TrustList' arguments. + + @type x509TrustList: list of L{tlslite.X509.X509} + @param x509TrustList: A list of trusted root certificates. The + other party must present a certificate chain which extends to + one of these root certificates. The cryptlib_py module must be + installed to use this parameter. Mutually exclusive with the + 'cryptoID' and 'x509Fingerprint' arguments. + + @type x509CommonName: str + @param x509CommonName: The end-entity certificate's 'CN' field + must match this value. For a web server, this is typically a + server name such as 'www.amazon.com'. Mutually exclusive with + the 'cryptoID' and 'x509Fingerprint' arguments. Requires the + 'x509TrustList' argument. + + @type settings: L{tlslite.HandshakeSettings.HandshakeSettings} + @param settings: Various settings which can be used to control + the ciphersuites, certificate types, and SSL/TLS versions + offered by the client. + """ + + HTTPBaseTLSConnection.__init__(self, host, port) + + ClientHelper.__init__(self, + username, password, sharedKey, + certChain, privateKey, + cryptoID, protocol, + x509Fingerprint, + x509TrustList, x509CommonName, + settings) + + def _handshake(self, tlsConnection): + ClientHelper._handshake(self, tlsConnection) diff --git a/gam/gdata/analytics/tlslite/integration/IMAP4_TLS.py b/gam/gdata/analytics/tlslite/integration/IMAP4_TLS.py new file mode 100755 index 00000000000..e47076ccc8a --- /dev/null +++ b/gam/gdata/analytics/tlslite/integration/IMAP4_TLS.py @@ -0,0 +1,132 @@ +"""TLS Lite + imaplib.""" + +import socket +from imaplib import IMAP4 +from gdata.tlslite.TLSConnection import TLSConnection +from gdata.tlslite.integration.ClientHelper import ClientHelper + +# IMAP TLS PORT +IMAP4_TLS_PORT = 993 + +class IMAP4_TLS(IMAP4, ClientHelper): + """This class extends L{imaplib.IMAP4} with TLS support.""" + + def __init__(self, host = '', port = IMAP4_TLS_PORT, + username=None, password=None, sharedKey=None, + certChain=None, privateKey=None, + cryptoID=None, protocol=None, + x509Fingerprint=None, + x509TrustList=None, x509CommonName=None, + settings=None): + """Create a new IMAP4_TLS. + + For client authentication, use one of these argument + combinations: + - username, password (SRP) + - username, sharedKey (shared-key) + - certChain, privateKey (certificate) + + For server authentication, you can either rely on the + implicit mutual authentication performed by SRP or + shared-keys, or you can do certificate-based server + authentication with one of these argument combinations: + - cryptoID[, protocol] (requires cryptoIDlib) + - x509Fingerprint + - x509TrustList[, x509CommonName] (requires cryptlib_py) + + Certificate-based server authentication is compatible with + SRP or certificate-based client authentication. It is + not compatible with shared-keys. + + The caller should be prepared to handle TLS-specific + exceptions. See the client handshake functions in + L{tlslite.TLSConnection.TLSConnection} for details on which + exceptions might be raised. + + @type host: str + @param host: Server to connect to. + + @type port: int + @param port: Port to connect to. + + @type username: str + @param username: SRP or shared-key username. Requires the + 'password' or 'sharedKey' argument. 
+ + @type password: str + @param password: SRP password for mutual authentication. + Requires the 'username' argument. + + @type sharedKey: str + @param sharedKey: Shared key for mutual authentication. + Requires the 'username' argument. + + @type certChain: L{tlslite.X509CertChain.X509CertChain} or + L{cryptoIDlib.CertChain.CertChain} + @param certChain: Certificate chain for client authentication. + Requires the 'privateKey' argument. Excludes the SRP or + shared-key related arguments. + + @type privateKey: L{tlslite.utils.RSAKey.RSAKey} + @param privateKey: Private key for client authentication. + Requires the 'certChain' argument. Excludes the SRP or + shared-key related arguments. + + @type cryptoID: str + @param cryptoID: cryptoID for server authentication. Mutually + exclusive with the 'x509...' arguments. + + @type protocol: str + @param protocol: cryptoID protocol URI for server + authentication. Requires the 'cryptoID' argument. + + @type x509Fingerprint: str + @param x509Fingerprint: Hex-encoded X.509 fingerprint for + server authentication. Mutually exclusive with the 'cryptoID' + and 'x509TrustList' arguments. + + @type x509TrustList: list of L{tlslite.X509.X509} + @param x509TrustList: A list of trusted root certificates. The + other party must present a certificate chain which extends to + one of these root certificates. The cryptlib_py module must be + installed to use this parameter. Mutually exclusive with the + 'cryptoID' and 'x509Fingerprint' arguments. + + @type x509CommonName: str + @param x509CommonName: The end-entity certificate's 'CN' field + must match this value. For a web server, this is typically a + server name such as 'www.amazon.com'. Mutually exclusive with + the 'cryptoID' and 'x509Fingerprint' arguments. Requires the + 'x509TrustList' argument. + + @type settings: L{tlslite.HandshakeSettings.HandshakeSettings} + @param settings: Various settings which can be used to control + the ciphersuites, certificate types, and SSL/TLS versions + offered by the client. + """ + + ClientHelper.__init__(self, + username, password, sharedKey, + certChain, privateKey, + cryptoID, protocol, + x509Fingerprint, + x509TrustList, x509CommonName, + settings) + + IMAP4.__init__(self, host, port) + + + def open(self, host = '', port = IMAP4_TLS_PORT): + """Setup connection to remote server on "host:port". + + This connection will be used by the routines: + read, readline, send, shutdown. 
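+
+        As an illustrative sketch only (the server name and SRP
+        credentials below are placeholders), a typical session might be::
+
+            from tlslite.api import IMAP4_TLS
+
+            imap = IMAP4_TLS("imap.example.com",
+                             username="alice", password="secret123")
+            imap.select("INBOX")
+            typ, data = imap.search(None, "ALL")
+            imap.logout()
+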
+ """ + self.host = host + self.port = port + self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) + self.sock.connect((host, port)) + self.sock = TLSConnection(self.sock) + self.sock.closeSocket = True + ClientHelper._handshake(self, self.sock) + self.file = self.sock.makefile('rb') diff --git a/gam/gdata/analytics/tlslite/integration/IntegrationHelper.py b/gam/gdata/analytics/tlslite/integration/IntegrationHelper.py new file mode 100755 index 00000000000..af5193b480e --- /dev/null +++ b/gam/gdata/analytics/tlslite/integration/IntegrationHelper.py @@ -0,0 +1,52 @@ + +class IntegrationHelper: + + def __init__(self, + username=None, password=None, sharedKey=None, + certChain=None, privateKey=None, + cryptoID=None, protocol=None, + x509Fingerprint=None, + x509TrustList=None, x509CommonName=None, + settings = None): + + self.username = None + self.password = None + self.sharedKey = None + self.certChain = None + self.privateKey = None + self.checker = None + + #SRP Authentication + if username and password and not \ + (sharedKey or certChain or privateKey): + self.username = username + self.password = password + + #Shared Key Authentication + elif username and sharedKey and not \ + (password or certChain or privateKey): + self.username = username + self.sharedKey = sharedKey + + #Certificate Chain Authentication + elif certChain and privateKey and not \ + (username or password or sharedKey): + self.certChain = certChain + self.privateKey = privateKey + + #No Authentication + elif not password and not username and not \ + sharedKey and not certChain and not privateKey: + pass + + else: + raise ValueError("Bad parameters") + + #Authenticate the server based on its cryptoID or fingerprint + if sharedKey and (cryptoID or protocol or x509Fingerprint): + raise ValueError("Can't use shared keys with other forms of"\ + "authentication") + + self.checker = Checker(cryptoID, protocol, x509Fingerprint, + x509TrustList, x509CommonName) + self.settings = settings \ No newline at end of file diff --git a/gam/gdata/analytics/tlslite/integration/POP3_TLS.py b/gam/gdata/analytics/tlslite/integration/POP3_TLS.py new file mode 100755 index 00000000000..26b37fdd84c --- /dev/null +++ b/gam/gdata/analytics/tlslite/integration/POP3_TLS.py @@ -0,0 +1,142 @@ +"""TLS Lite + poplib.""" + +import socket +from poplib import POP3 +from gdata.tlslite.TLSConnection import TLSConnection +from gdata.tlslite.integration.ClientHelper import ClientHelper + +# POP TLS PORT +POP3_TLS_PORT = 995 + +class POP3_TLS(POP3, ClientHelper): + """This class extends L{poplib.POP3} with TLS support.""" + + def __init__(self, host, port = POP3_TLS_PORT, + username=None, password=None, sharedKey=None, + certChain=None, privateKey=None, + cryptoID=None, protocol=None, + x509Fingerprint=None, + x509TrustList=None, x509CommonName=None, + settings=None): + """Create a new POP3_TLS. + + For client authentication, use one of these argument + combinations: + - username, password (SRP) + - username, sharedKey (shared-key) + - certChain, privateKey (certificate) + + For server authentication, you can either rely on the + implicit mutual authentication performed by SRP or + shared-keys, or you can do certificate-based server + authentication with one of these argument combinations: + - cryptoID[, protocol] (requires cryptoIDlib) + - x509Fingerprint + - x509TrustList[, x509CommonName] (requires cryptlib_py) + + Certificate-based server authentication is compatible with + SRP or certificate-based client authentication. 
It is + not compatible with shared-keys. + + The caller should be prepared to handle TLS-specific + exceptions. See the client handshake functions in + L{tlslite.TLSConnection.TLSConnection} for details on which + exceptions might be raised. + + @type host: str + @param host: Server to connect to. + + @type port: int + @param port: Port to connect to. + + @type username: str + @param username: SRP or shared-key username. Requires the + 'password' or 'sharedKey' argument. + + @type password: str + @param password: SRP password for mutual authentication. + Requires the 'username' argument. + + @type sharedKey: str + @param sharedKey: Shared key for mutual authentication. + Requires the 'username' argument. + + @type certChain: L{tlslite.X509CertChain.X509CertChain} or + L{cryptoIDlib.CertChain.CertChain} + @param certChain: Certificate chain for client authentication. + Requires the 'privateKey' argument. Excludes the SRP or + shared-key related arguments. + + @type privateKey: L{tlslite.utils.RSAKey.RSAKey} + @param privateKey: Private key for client authentication. + Requires the 'certChain' argument. Excludes the SRP or + shared-key related arguments. + + @type cryptoID: str + @param cryptoID: cryptoID for server authentication. Mutually + exclusive with the 'x509...' arguments. + + @type protocol: str + @param protocol: cryptoID protocol URI for server + authentication. Requires the 'cryptoID' argument. + + @type x509Fingerprint: str + @param x509Fingerprint: Hex-encoded X.509 fingerprint for + server authentication. Mutually exclusive with the 'cryptoID' + and 'x509TrustList' arguments. + + @type x509TrustList: list of L{tlslite.X509.X509} + @param x509TrustList: A list of trusted root certificates. The + other party must present a certificate chain which extends to + one of these root certificates. The cryptlib_py module must be + installed to use this parameter. Mutually exclusive with the + 'cryptoID' and 'x509Fingerprint' arguments. + + @type x509CommonName: str + @param x509CommonName: The end-entity certificate's 'CN' field + must match this value. For a web server, this is typically a + server name such as 'www.amazon.com'. Mutually exclusive with + the 'cryptoID' and 'x509Fingerprint' arguments. Requires the + 'x509TrustList' argument. + + @type settings: L{tlslite.HandshakeSettings.HandshakeSettings} + @param settings: Various settings which can be used to control + the ciphersuites, certificate types, and SSL/TLS versions + offered by the client. 
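+
+        As an illustrative sketch only (the server name, fingerprint, and
+        credentials below are placeholders), certificate-checked use with
+        ordinary POP3 authentication might look like::
+
+            from tlslite.api import POP3_TLS
+
+            pop = POP3_TLS("pop.example.com",
+                           x509Fingerprint="<hex fingerprint of server cert>")
+            pop.user("alice")
+            pop.pass_("secret123")
+            print pop.stat()
+            pop.quit()
+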
+ """ + + self.host = host + self.port = port + msg = "getaddrinfo returns an empty list" + self.sock = None + for res in socket.getaddrinfo(self.host, self.port, 0, socket.SOCK_STREAM): + af, socktype, proto, canonname, sa = res + try: + self.sock = socket.socket(af, socktype, proto) + self.sock.connect(sa) + except socket.error, msg: + if self.sock: + self.sock.close() + self.sock = None + continue + break + if not self.sock: + raise socket.error, msg + + ### New code below (all else copied from poplib) + ClientHelper.__init__(self, + username, password, sharedKey, + certChain, privateKey, + cryptoID, protocol, + x509Fingerprint, + x509TrustList, x509CommonName, + settings) + + self.sock = TLSConnection(self.sock) + self.sock.closeSocket = True + ClientHelper._handshake(self, self.sock) + ### + + self.file = self.sock.makefile('rb') + self._debugging = 0 + self.welcome = self._getresp() diff --git a/gam/gdata/analytics/tlslite/integration/SMTP_TLS.py b/gam/gdata/analytics/tlslite/integration/SMTP_TLS.py new file mode 100755 index 00000000000..67e0febed61 --- /dev/null +++ b/gam/gdata/analytics/tlslite/integration/SMTP_TLS.py @@ -0,0 +1,114 @@ +"""TLS Lite + smtplib.""" + +from smtplib import SMTP +from gdata.tlslite.TLSConnection import TLSConnection +from gdata.tlslite.integration.ClientHelper import ClientHelper + +class SMTP_TLS(SMTP): + """This class extends L{smtplib.SMTP} with TLS support.""" + + def starttls(self, + username=None, password=None, sharedKey=None, + certChain=None, privateKey=None, + cryptoID=None, protocol=None, + x509Fingerprint=None, + x509TrustList=None, x509CommonName=None, + settings=None): + """Puts the connection to the SMTP server into TLS mode. + + If the server supports TLS, this will encrypt the rest of the SMTP + session. + + For client authentication, use one of these argument + combinations: + - username, password (SRP) + - username, sharedKey (shared-key) + - certChain, privateKey (certificate) + + For server authentication, you can either rely on the + implicit mutual authentication performed by SRP or + shared-keys, or you can do certificate-based server + authentication with one of these argument combinations: + - cryptoID[, protocol] (requires cryptoIDlib) + - x509Fingerprint + - x509TrustList[, x509CommonName] (requires cryptlib_py) + + Certificate-based server authentication is compatible with + SRP or certificate-based client authentication. It is + not compatible with shared-keys. + + The caller should be prepared to handle TLS-specific + exceptions. See the client handshake functions in + L{tlslite.TLSConnection.TLSConnection} for details on which + exceptions might be raised. + + @type username: str + @param username: SRP or shared-key username. Requires the + 'password' or 'sharedKey' argument. + + @type password: str + @param password: SRP password for mutual authentication. + Requires the 'username' argument. + + @type sharedKey: str + @param sharedKey: Shared key for mutual authentication. + Requires the 'username' argument. + + @type certChain: L{tlslite.X509CertChain.X509CertChain} or + L{cryptoIDlib.CertChain.CertChain} + @param certChain: Certificate chain for client authentication. + Requires the 'privateKey' argument. Excludes the SRP or + shared-key related arguments. + + @type privateKey: L{tlslite.utils.RSAKey.RSAKey} + @param privateKey: Private key for client authentication. + Requires the 'certChain' argument. Excludes the SRP or + shared-key related arguments. 
+ + @type cryptoID: str + @param cryptoID: cryptoID for server authentication. Mutually + exclusive with the 'x509...' arguments. + + @type protocol: str + @param protocol: cryptoID protocol URI for server + authentication. Requires the 'cryptoID' argument. + + @type x509Fingerprint: str + @param x509Fingerprint: Hex-encoded X.509 fingerprint for + server authentication. Mutually exclusive with the 'cryptoID' + and 'x509TrustList' arguments. + + @type x509TrustList: list of L{tlslite.X509.X509} + @param x509TrustList: A list of trusted root certificates. The + other party must present a certificate chain which extends to + one of these root certificates. The cryptlib_py module must be + installed to use this parameter. Mutually exclusive with the + 'cryptoID' and 'x509Fingerprint' arguments. + + @type x509CommonName: str + @param x509CommonName: The end-entity certificate's 'CN' field + must match this value. For a web server, this is typically a + server name such as 'www.amazon.com'. Mutually exclusive with + the 'cryptoID' and 'x509Fingerprint' arguments. Requires the + 'x509TrustList' argument. + + @type settings: L{tlslite.HandshakeSettings.HandshakeSettings} + @param settings: Various settings which can be used to control + the ciphersuites, certificate types, and SSL/TLS versions + offered by the client. + """ + (resp, reply) = self.docmd("STARTTLS") + if resp == 220: + helper = ClientHelper( + username, password, sharedKey, + certChain, privateKey, + cryptoID, protocol, + x509Fingerprint, + x509TrustList, x509CommonName, + settings) + conn = TLSConnection(self.sock) + conn.closeSocket = True + helper._handshake(conn) + self.sock = conn + self.file = conn.makefile('rb') + return (resp, reply) diff --git a/gam/gdata/analytics/tlslite/integration/TLSAsyncDispatcherMixIn.py b/gam/gdata/analytics/tlslite/integration/TLSAsyncDispatcherMixIn.py new file mode 100755 index 00000000000..f732f62e663 --- /dev/null +++ b/gam/gdata/analytics/tlslite/integration/TLSAsyncDispatcherMixIn.py @@ -0,0 +1,139 @@ +"""TLS Lite + asyncore.""" + + +import asyncore +from gdata.tlslite.TLSConnection import TLSConnection +from AsyncStateMachine import AsyncStateMachine + + +class TLSAsyncDispatcherMixIn(AsyncStateMachine): + """This class can be "mixed in" with an + L{asyncore.dispatcher} to add TLS support. + + This class essentially sits between the dispatcher and the select + loop, intercepting events and only calling the dispatcher when + applicable. + + In the case of handle_read(), a read operation will be activated, + and when it completes, the bytes will be placed in a buffer where + the dispatcher can retrieve them by calling recv(), and the + dispatcher's handle_read() will be called. + + In the case of handle_write(), the dispatcher's handle_write() will + be called, and when it calls send(), a write operation will be + activated. + + To use this class, you must combine it with an asyncore.dispatcher, + and pass in a handshake operation with setServerHandshakeOp(). + + Below is an example of using this class with medusa. This class is + mixed in with http_channel to create http_tls_channel. Note: + 1. the mix-in is listed first in the inheritance list + + 2. the input buffer size must be at least 16K, otherwise the + dispatcher might not read all the bytes from the TLS layer, + leaving some bytes in limbo. + + 3. IE seems to have a problem receiving a whole HTTP response in a + single TLS record, so HTML pages containing '\\r\\n\\r\\n' won't + be displayed on IE. 
+ + Add the following text into 'start_medusa.py', in the 'HTTP Server' + section:: + + from tlslite.api import * + s = open("./serverX509Cert.pem").read() + x509 = X509() + x509.parse(s) + certChain = X509CertChain([x509]) + + s = open("./serverX509Key.pem").read() + privateKey = parsePEMKey(s, private=True) + + class http_tls_channel(TLSAsyncDispatcherMixIn, + http_server.http_channel): + ac_in_buffer_size = 16384 + + def __init__ (self, server, conn, addr): + http_server.http_channel.__init__(self, server, conn, addr) + TLSAsyncDispatcherMixIn.__init__(self, conn) + self.tlsConnection.ignoreAbruptClose = True + self.setServerHandshakeOp(certChain=certChain, + privateKey=privateKey) + + hs.channel_class = http_tls_channel + + If the TLS layer raises an exception, the exception will be caught + in asyncore.dispatcher, which will call close() on this class. The + TLS layer always closes the TLS connection before raising an + exception, so the close operation will complete right away, causing + asyncore.dispatcher.close() to be called, which closes the socket + and removes this instance from the asyncore loop. + + """ + + + def __init__(self, sock=None): + AsyncStateMachine.__init__(self) + + if sock: + self.tlsConnection = TLSConnection(sock) + + #Calculate the sibling I'm being mixed in with. + #This is necessary since we override functions + #like readable(), handle_read(), etc., but we + #also want to call the sibling's versions. + for cl in self.__class__.__bases__: + if cl != TLSAsyncDispatcherMixIn and cl != AsyncStateMachine: + self.siblingClass = cl + break + else: + raise AssertionError() + + def readable(self): + result = self.wantsReadEvent() + if result != None: + return result + return self.siblingClass.readable(self) + + def writable(self): + result = self.wantsWriteEvent() + if result != None: + return result + return self.siblingClass.writable(self) + + def handle_read(self): + self.inReadEvent() + + def handle_write(self): + self.inWriteEvent() + + def outConnectEvent(self): + self.siblingClass.handle_connect(self) + + def outCloseEvent(self): + asyncore.dispatcher.close(self) + + def outReadEvent(self, readBuffer): + self.readBuffer = readBuffer + self.siblingClass.handle_read(self) + + def outWriteEvent(self): + self.siblingClass.handle_write(self) + + def recv(self, bufferSize=16384): + if bufferSize < 16384 or self.readBuffer == None: + raise AssertionError() + returnValue = self.readBuffer + self.readBuffer = None + return returnValue + + def send(self, writeBuffer): + self.setWriteOp(writeBuffer) + return len(writeBuffer) + + def close(self): + if hasattr(self, "tlsConnection"): + self.setCloseOp() + else: + asyncore.dispatcher.close(self) diff --git a/gam/gdata/analytics/tlslite/integration/TLSSocketServerMixIn.py b/gam/gdata/analytics/tlslite/integration/TLSSocketServerMixIn.py new file mode 100755 index 00000000000..10224b688b5 --- /dev/null +++ b/gam/gdata/analytics/tlslite/integration/TLSSocketServerMixIn.py @@ -0,0 +1,59 @@ +"""TLS Lite + SocketServer.""" + +from gdata.tlslite.TLSConnection import TLSConnection + +class TLSSocketServerMixIn: + """ + This class can be mixed in with any L{SocketServer.TCPServer} to + add TLS support. + + To use this class, define a new class that inherits from it and + some L{SocketServer.TCPServer} (with the mix-in first). Then + implement the handshake() method, doing some sort of server + handshake on the connection argument. If the handshake method + returns True, the RequestHandler will be triggered. 
Below is a + complete example of a threaded HTTPS server:: + + from SocketServer import * + from BaseHTTPServer import * + from SimpleHTTPServer import * + from tlslite.api import * + + s = open("./serverX509Cert.pem").read() + x509 = X509() + x509.parse(s) + certChain = X509CertChain([x509]) + + s = open("./serverX509Key.pem").read() + privateKey = parsePEMKey(s, private=True) + + sessionCache = SessionCache() + + class MyHTTPServer(ThreadingMixIn, TLSSocketServerMixIn, + HTTPServer): + def handshake(self, tlsConnection): + try: + tlsConnection.handshakeServer(certChain=certChain, + privateKey=privateKey, + sessionCache=sessionCache) + tlsConnection.ignoreAbruptClose = True + return True + except TLSError, error: + print "Handshake failure:", str(error) + return False + + httpd = MyHTTPServer(('localhost', 443), SimpleHTTPRequestHandler) + httpd.serve_forever() + """ + + + def finish_request(self, sock, client_address): + tlsConnection = TLSConnection(sock) + if self.handshake(tlsConnection) == True: + self.RequestHandlerClass(tlsConnection, client_address, self) + tlsConnection.close() + + #Implement this method to do some form of handshaking. Return True + #if the handshake finishes properly and the request is authorized. + def handshake(self, tlsConnection): + raise NotImplementedError() diff --git a/gam/gdata/analytics/tlslite/integration/TLSTwistedProtocolWrapper.py b/gam/gdata/analytics/tlslite/integration/TLSTwistedProtocolWrapper.py new file mode 100755 index 00000000000..c88703cacf3 --- /dev/null +++ b/gam/gdata/analytics/tlslite/integration/TLSTwistedProtocolWrapper.py @@ -0,0 +1,196 @@ +"""TLS Lite + Twisted.""" + +from twisted.protocols.policies import ProtocolWrapper, WrappingFactory +from twisted.python.failure import Failure + +from AsyncStateMachine import AsyncStateMachine +from gdata.tlslite.TLSConnection import TLSConnection +from gdata.tlslite.errors import * + +import socket +import errno + + +#The TLSConnection is created around a "fake socket" that +#plugs it into the underlying Twisted transport +class _FakeSocket: + def __init__(self, wrapper): + self.wrapper = wrapper + self.data = "" + + def send(self, data): + ProtocolWrapper.write(self.wrapper, data) + return len(data) + + def recv(self, numBytes): + if self.data == "": + raise socket.error, (errno.EWOULDBLOCK, "") + returnData = self.data[:numBytes] + self.data = self.data[numBytes:] + return returnData + +class TLSTwistedProtocolWrapper(ProtocolWrapper, AsyncStateMachine): + """This class can wrap Twisted protocols to add TLS support. + + Below is a complete example of using TLS Lite with a Twisted echo + server. + + There are two server implementations below. Echo is the original + protocol, which is oblivious to TLS. Echo1 subclasses Echo and + negotiates TLS when the client connects. 
Echo2 subclasses Echo and + negotiates TLS when the client sends "STARTTLS":: + + from twisted.internet.protocol import Protocol, Factory + from twisted.internet import reactor + from twisted.protocols.policies import WrappingFactory + from twisted.protocols.basic import LineReceiver + from twisted.python import log + from twisted.python.failure import Failure + import sys + from tlslite.api import * + + s = open("./serverX509Cert.pem").read() + x509 = X509() + x509.parse(s) + certChain = X509CertChain([x509]) + + s = open("./serverX509Key.pem").read() + privateKey = parsePEMKey(s, private=True) + + verifierDB = VerifierDB("verifierDB") + verifierDB.open() + + class Echo(LineReceiver): + def connectionMade(self): + self.transport.write("Welcome to the echo server!\\r\\n") + + def lineReceived(self, line): + self.transport.write(line + "\\r\\n") + + class Echo1(Echo): + def connectionMade(self): + if not self.transport.tlsStarted: + self.transport.setServerHandshakeOp(certChain=certChain, + privateKey=privateKey, + verifierDB=verifierDB) + else: + Echo.connectionMade(self) + + def connectionLost(self, reason): + pass #Handle any TLS exceptions here + + class Echo2(Echo): + def lineReceived(self, data): + if data == "STARTTLS": + self.transport.setServerHandshakeOp(certChain=certChain, + privateKey=privateKey, + verifierDB=verifierDB) + else: + Echo.lineReceived(self, data) + + def connectionLost(self, reason): + pass #Handle any TLS exceptions here + + factory = Factory() + factory.protocol = Echo1 + #factory.protocol = Echo2 + + wrappingFactory = WrappingFactory(factory) + wrappingFactory.protocol = TLSTwistedProtocolWrapper + + log.startLogging(sys.stdout) + reactor.listenTCP(1079, wrappingFactory) + reactor.run() + + This class works as follows: + + Data comes in and is given to the AsyncStateMachine for handling. + AsyncStateMachine will forward events to this class, and we'll + pass them on to the ProtocolHandler, which will proxy them to the + wrapped protocol. The wrapped protocol may then call back into + this class, and these calls will be proxied into the + AsyncStateMachine. 
+ + The call graph looks like this: + - self.dataReceived + - AsyncStateMachine.inReadEvent + - self.out(Connect|Close|Read)Event + - ProtocolWrapper.(connectionMade|loseConnection|dataReceived) + - self.(loseConnection|write|writeSequence) + - AsyncStateMachine.(setCloseOp|setWriteOp) + """ + + #WARNING: IF YOU COPY-AND-PASTE THE ABOVE CODE, BE SURE TO REMOVE + #THE EXTRA ESCAPING AROUND "\\r\\n" + + def __init__(self, factory, wrappedProtocol): + ProtocolWrapper.__init__(self, factory, wrappedProtocol) + AsyncStateMachine.__init__(self) + self.fakeSocket = _FakeSocket(self) + self.tlsConnection = TLSConnection(self.fakeSocket) + self.tlsStarted = False + self.connectionLostCalled = False + + def connectionMade(self): + try: + ProtocolWrapper.connectionMade(self) + except TLSError, e: + self.connectionLost(Failure(e)) + ProtocolWrapper.loseConnection(self) + + def dataReceived(self, data): + try: + if not self.tlsStarted: + ProtocolWrapper.dataReceived(self, data) + else: + self.fakeSocket.data += data + while self.fakeSocket.data: + AsyncStateMachine.inReadEvent(self) + except TLSError, e: + self.connectionLost(Failure(e)) + ProtocolWrapper.loseConnection(self) + + def connectionLost(self, reason): + if not self.connectionLostCalled: + ProtocolWrapper.connectionLost(self, reason) + self.connectionLostCalled = True + + + def outConnectEvent(self): + ProtocolWrapper.connectionMade(self) + + def outCloseEvent(self): + ProtocolWrapper.loseConnection(self) + + def outReadEvent(self, data): + if data == "": + ProtocolWrapper.loseConnection(self) + else: + ProtocolWrapper.dataReceived(self, data) + + + def setServerHandshakeOp(self, **args): + self.tlsStarted = True + AsyncStateMachine.setServerHandshakeOp(self, **args) + + def loseConnection(self): + if not self.tlsStarted: + ProtocolWrapper.loseConnection(self) + else: + AsyncStateMachine.setCloseOp(self) + + def write(self, data): + if not self.tlsStarted: + ProtocolWrapper.write(self, data) + else: + #Because of the FakeSocket, write operations are guaranteed to + #terminate immediately. + AsyncStateMachine.setWriteOp(self, data) + + def writeSequence(self, seq): + if not self.tlsStarted: + ProtocolWrapper.writeSequence(self, seq) + else: + #Because of the FakeSocket, write operations are guaranteed to + #terminate immediately. + AsyncStateMachine.setWriteOp(self, "".join(seq)) diff --git a/gam/gdata/analytics/tlslite/integration/XMLRPCTransport.py b/gam/gdata/analytics/tlslite/integration/XMLRPCTransport.py new file mode 100755 index 00000000000..3f025e46e7b --- /dev/null +++ b/gam/gdata/analytics/tlslite/integration/XMLRPCTransport.py @@ -0,0 +1,137 @@ +"""TLS Lite + xmlrpclib.""" + +import xmlrpclib +import httplib +from gdata.tlslite.integration.HTTPTLSConnection import HTTPTLSConnection +from gdata.tlslite.integration.ClientHelper import ClientHelper + + +class XMLRPCTransport(xmlrpclib.Transport, ClientHelper): + """Handles an HTTPS transaction to an XML-RPC server.""" + + def __init__(self, + username=None, password=None, sharedKey=None, + certChain=None, privateKey=None, + cryptoID=None, protocol=None, + x509Fingerprint=None, + x509TrustList=None, x509CommonName=None, + settings=None): + """Create a new XMLRPCTransport. 
+ + An instance of this class can be passed to L{xmlrpclib.ServerProxy} + to use TLS with XML-RPC calls:: + + from tlslite.api import XMLRPCTransport + from xmlrpclib import ServerProxy + + transport = XMLRPCTransport(user="alice", password="abra123") + server = ServerProxy("https://localhost", transport) + + For client authentication, use one of these argument + combinations: + - username, password (SRP) + - username, sharedKey (shared-key) + - certChain, privateKey (certificate) + + For server authentication, you can either rely on the + implicit mutual authentication performed by SRP or + shared-keys, or you can do certificate-based server + authentication with one of these argument combinations: + - cryptoID[, protocol] (requires cryptoIDlib) + - x509Fingerprint + - x509TrustList[, x509CommonName] (requires cryptlib_py) + + Certificate-based server authentication is compatible with + SRP or certificate-based client authentication. It is + not compatible with shared-keys. + + The constructor does not perform the TLS handshake itself, but + simply stores these arguments for later. The handshake is + performed only when this class needs to connect with the + server. Thus you should be prepared to handle TLS-specific + exceptions when calling methods of L{xmlrpclib.ServerProxy}. See the + client handshake functions in + L{tlslite.TLSConnection.TLSConnection} for details on which + exceptions might be raised. + + @type username: str + @param username: SRP or shared-key username. Requires the + 'password' or 'sharedKey' argument. + + @type password: str + @param password: SRP password for mutual authentication. + Requires the 'username' argument. + + @type sharedKey: str + @param sharedKey: Shared key for mutual authentication. + Requires the 'username' argument. + + @type certChain: L{tlslite.X509CertChain.X509CertChain} or + L{cryptoIDlib.CertChain.CertChain} + @param certChain: Certificate chain for client authentication. + Requires the 'privateKey' argument. Excludes the SRP or + shared-key related arguments. + + @type privateKey: L{tlslite.utils.RSAKey.RSAKey} + @param privateKey: Private key for client authentication. + Requires the 'certChain' argument. Excludes the SRP or + shared-key related arguments. + + @type cryptoID: str + @param cryptoID: cryptoID for server authentication. Mutually + exclusive with the 'x509...' arguments. + + @type protocol: str + @param protocol: cryptoID protocol URI for server + authentication. Requires the 'cryptoID' argument. + + @type x509Fingerprint: str + @param x509Fingerprint: Hex-encoded X.509 fingerprint for + server authentication. Mutually exclusive with the 'cryptoID' + and 'x509TrustList' arguments. + + @type x509TrustList: list of L{tlslite.X509.X509} + @param x509TrustList: A list of trusted root certificates. The + other party must present a certificate chain which extends to + one of these root certificates. The cryptlib_py module must be + installed to use this parameter. Mutually exclusive with the + 'cryptoID' and 'x509Fingerprint' arguments. + + @type x509CommonName: str + @param x509CommonName: The end-entity certificate's 'CN' field + must match this value. For a web server, this is typically a + server name such as 'www.amazon.com'. Mutually exclusive with + the 'cryptoID' and 'x509Fingerprint' arguments. Requires the + 'x509TrustList' argument. 
+ + @type settings: L{tlslite.HandshakeSettings.HandshakeSettings} + @param settings: Various settings which can be used to control + the ciphersuites, certificate types, and SSL/TLS versions + offered by the client. + """ + + ClientHelper.__init__(self, + username, password, sharedKey, + certChain, privateKey, + cryptoID, protocol, + x509Fingerprint, + x509TrustList, x509CommonName, + settings) + + + def make_connection(self, host): + # create a HTTPS connection object from a host descriptor + host, extra_headers, x509 = self.get_host_info(host) + http = HTTPTLSConnection(host, None, + self.username, self.password, + self.sharedKey, + self.certChain, self.privateKey, + self.checker.cryptoID, + self.checker.protocol, + self.checker.x509Fingerprint, + self.checker.x509TrustList, + self.checker.x509CommonName, + self.settings) + http2 = httplib.HTTP() + http2._setup(http) + return http2 diff --git a/gam/gdata/analytics/tlslite/integration/__init__.py b/gam/gdata/analytics/tlslite/integration/__init__.py new file mode 100755 index 00000000000..960f4065f26 --- /dev/null +++ b/gam/gdata/analytics/tlslite/integration/__init__.py @@ -0,0 +1,17 @@ +"""Classes for integrating TLS Lite with other packages.""" + +__all__ = ["AsyncStateMachine", + "HTTPTLSConnection", + "POP3_TLS", + "IMAP4_TLS", + "SMTP_TLS", + "XMLRPCTransport", + "TLSSocketServerMixIn", + "TLSAsyncDispatcherMixIn", + "TLSTwistedProtocolWrapper"] + +try: + import twisted + del twisted +except ImportError: + del __all__[__all__.index("TLSTwistedProtocolWrapper")] diff --git a/gam/gdata/analytics/tlslite/mathtls.py b/gam/gdata/analytics/tlslite/mathtls.py new file mode 100755 index 00000000000..3b8ede60126 --- /dev/null +++ b/gam/gdata/analytics/tlslite/mathtls.py @@ -0,0 +1,170 @@ +"""Miscellaneous helper functions.""" + +from utils.compat import * +from utils.cryptomath import * + +import hmac +import md5 +import sha + +#1024, 1536, 2048, 3072, 4096, 6144, and 8192 bit groups] +goodGroupParameters = [(2,0xEEAF0AB9ADB38DD69C33F80AFA8FC5E86072618775FF3C0B9EA2314C9C256576D674DF7496EA81D3383B4813D692C6E0E0D5D8E250B98BE48E495C1D6089DAD15DC7D7B46154D6B6CE8EF4AD69B15D4982559B297BCF1885C529F566660E57EC68EDBC3C05726CC02FD4CBF4976EAA9AFD5138FE8376435B9FC61D2FC0EB06E3),\ + (2,0x9DEF3CAFB939277AB1F12A8617A47BBBDBA51DF499AC4C80BEEEA9614B19CC4D5F4F5F556E27CBDE51C6A94BE4607A291558903BA0D0F84380B655BB9A22E8DCDF028A7CEC67F0D08134B1C8B97989149B609E0BE3BAB63D47548381DBC5B1FC764E3F4B53DD9DA1158BFD3E2B9C8CF56EDF019539349627DB2FD53D24B7C48665772E437D6C7F8CE442734AF7CCB7AE837C264AE3A9BEB87F8A2FE9B8B5292E5A021FFF5E91479E8CE7A28C2442C6F315180F93499A234DCF76E3FED135F9BB),\ + (2,0xAC6BDB41324A9A9BF166DE5E1389582FAF72B6651987EE07FC3192943DB56050A37329CBB4A099ED8193E0757767A13DD52312AB4B03310DCD7F48A9DA04FD50E8083969EDB767B0CF6095179A163AB3661A05FBD5FAAAE82918A9962F0B93B855F97993EC975EEAA80D740ADBF4FF747359D041D5C33EA71D281E446B14773BCA97B43A23FB801676BD207A436C6481F1D2B9078717461A5B9D32E688F87748544523B524B0D57D5EA77A2775D2ECFA032CFBDBF52FB3786160279004E57AE6AF874E7303CE53299CCC041C7BC308D82A5698F3A8D0C38271AE35F8E9DBFBB694B5C803D89F7AE435DE236D525F54759B65E372FCD68EF20FA7111F9E4AFF73),\ + 
(2,0xFFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF6955817183995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200CBBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFCE0FD108E4B82D120A93AD2CAFFFFFFFFFFFFFFFF),\ + (5,0xFFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF6955817183995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200CBBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D788719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA993B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934063199FFFFFFFFFFFFFFFF),\ + (5,0xFFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF6955817183995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200CBBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D788719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA993B4EA988D8FDDC186FFB7DC90A6C08F4DF435C93402849236C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BDF8FF9406AD9E530EE5DB382F413001AEB06A53ED9027D831179727B0865A8918DA3EDBEBCF9B14ED44CE6CBACED4BB1BDB7F1447E6CC254B332051512BD7AF426FB8F401378CD2BF5983CA01C64B92ECF032EA15D1721D03F482D7CE6E74FEF6D55E702F46980C82B5A84031900B1C9E59E7C97FBEC7E8F323A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AACC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE32806A1D58BB7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55CDA56C9EC2EF29632387FE8D76E3C0468043E8F663F4860EE12BF2D5B0B7474D6E694F91E6DCC4024FFFFFFFFFFFFFFFF),\ + 
(5,0xFFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF6955817183995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200CBBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D788719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA993B4EA988D8FDDC186FFB7DC90A6C08F4DF435C93402849236C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BDF8FF9406AD9E530EE5DB382F413001AEB06A53ED9027D831179727B0865A8918DA3EDBEBCF9B14ED44CE6CBACED4BB1BDB7F1447E6CC254B332051512BD7AF426FB8F401378CD2BF5983CA01C64B92ECF032EA15D1721D03F482D7CE6E74FEF6D55E702F46980C82B5A84031900B1C9E59E7C97FBEC7E8F323A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AACC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE32806A1D58BB7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55CDA56C9EC2EF29632387FE8D76E3C0468043E8F663F4860EE12BF2D5B0B7474D6E694F91E6DBE115974A3926F12FEE5E438777CB6A932DF8CD8BEC4D073B931BA3BC832B68D9DD300741FA7BF8AFC47ED2576F6936BA424663AAB639C5AE4F5683423B4742BF1C978238F16CBE39D652DE3FDB8BEFC848AD922222E04A4037C0713EB57A81A23F0C73473FC646CEA306B4BCBC8862F8385DDFA9D4B7FA2C087E879683303ED5BDD3A062B3CF5B3A278A66D2A13F83F44F82DDF310EE074AB6A364597E899A0255DC164F31CC50846851DF9AB48195DED7EA1B1D510BD7EE74D73FAF36BC31ECFA268359046F4EB879F924009438B481C6CD7889A002ED5EE382BC9190DA6FC026E479558E4475677E9AA9E3050E2765694DFC81F56E880B96E7160C980DD98EDD3DFFFFFFFFFFFFFFFFF)] + +def P_hash(hashModule, secret, seed, length): + bytes = createByteArrayZeros(length) + secret = bytesToString(secret) + seed = bytesToString(seed) + A = seed + index = 0 + while 1: + A = hmac.HMAC(secret, A, hashModule).digest() + output = hmac.HMAC(secret, A+seed, hashModule).digest() + for c in output: + if index >= length: + return bytes + bytes[index] = ord(c) + index += 1 + return bytes + +def PRF(secret, label, seed, length): + #Split the secret into left and right halves + S1 = secret[ : int(math.ceil(len(secret)/2.0))] + S2 = secret[ int(math.floor(len(secret)/2.0)) : ] + + #Run the left half through P_MD5 and the right half through P_SHA1 + p_md5 = P_hash(md5, S1, concatArrays(stringToBytes(label), seed), length) + p_sha1 = P_hash(sha, S2, concatArrays(stringToBytes(label), seed), length) + + #XOR the output values and return the result + for x in range(length): + p_md5[x] ^= p_sha1[x] + return p_md5 + + +def PRF_SSL(secret, seed, length): + secretStr = bytesToString(secret) + seedStr = bytesToString(seed) + bytes = createByteArrayZeros(length) + index = 0 + for x in range(26): + A = chr(ord('A')+x) * (x+1) # 'A', 'BB', 'CCC', etc.. 
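+        # (Illustrative note, not part of the original source: the TLS
+        # PRF() above splits the secret in half, expands each half with
+        # P_MD5 and P_SHA1 respectively, and XORs the two streams, e.g.
+        #     key_block = PRF(master_secret, "key expansion",
+        #                     server_random + client_random, 104)
+        # This SSLv3 routine instead hashes MD5(secret + SHA('A'*n + ...))
+        # per block, with no HMAC and no label string involved.)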
+ input = secretStr + sha.sha(A + secretStr + seedStr).digest() + output = md5.md5(input).digest() + for c in output: + if index >= length: + return bytes + bytes[index] = ord(c) + index += 1 + return bytes + +def makeX(salt, username, password): + if len(username)>=256: + raise ValueError("username too long") + if len(salt)>=256: + raise ValueError("salt too long") + return stringToNumber(sha.sha(salt + sha.sha(username + ":" + password)\ + .digest()).digest()) + +#This function is used by VerifierDB.makeVerifier +def makeVerifier(username, password, bits): + bitsIndex = {1024:0, 1536:1, 2048:2, 3072:3, 4096:4, 6144:5, 8192:6}[bits] + g,N = goodGroupParameters[bitsIndex] + salt = bytesToString(getRandomBytes(16)) + x = makeX(salt, username, password) + verifier = powMod(g, x, N) + return N, g, salt, verifier + +def PAD(n, x): + nLength = len(numberToString(n)) + s = numberToString(x) + if len(s) < nLength: + s = ("\0" * (nLength-len(s))) + s + return s + +def makeU(N, A, B): + return stringToNumber(sha.sha(PAD(N, A) + PAD(N, B)).digest()) + +def makeK(N, g): + return stringToNumber(sha.sha(numberToString(N) + PAD(N, g)).digest()) + + +""" +MAC_SSL +Modified from Python HMAC by Trevor +""" + +class MAC_SSL: + """MAC_SSL class. + + This supports the API for Cryptographic Hash Functions (PEP 247). + """ + + def __init__(self, key, msg = None, digestmod = None): + """Create a new MAC_SSL object. + + key: key for the keyed hash object. + msg: Initial input for the hash, if provided. + digestmod: A module supporting PEP 247. Defaults to the md5 module. + """ + if digestmod is None: + import md5 + digestmod = md5 + + if key == None: #TREVNEW - for faster copying + return #TREVNEW + + self.digestmod = digestmod + self.outer = digestmod.new() + self.inner = digestmod.new() + self.digest_size = digestmod.digest_size + + ipad = "\x36" * 40 + opad = "\x5C" * 40 + + self.inner.update(key) + self.inner.update(ipad) + self.outer.update(key) + self.outer.update(opad) + if msg is not None: + self.update(msg) + + + def update(self, msg): + """Update this hashing object with the string msg. + """ + self.inner.update(msg) + + def copy(self): + """Return a separate copy of this hashing object. + + An update to this copy won't affect the original object. + """ + other = MAC_SSL(None) #TREVNEW - for faster copying + other.digest_size = self.digest_size #TREVNEW + other.digestmod = self.digestmod + other.inner = self.inner.copy() + other.outer = self.outer.copy() + return other + + def digest(self): + """Return the hash value of this hashing object. + + This returns a string containing 8-bit data. The object is + not altered in any way by this function; you can continue + updating the object after calling this function. + """ + h = self.outer.copy() + h.update(self.inner.digest()) + return h.digest() + + def hexdigest(self): + """Like digest(), but returns a string of hexadecimal digits instead. 
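+
+        As an illustrative sketch only (the key and message below are
+        placeholders), typical PEP 247 style use of MAC_SSL is::
+
+            import md5
+            mac = MAC_SSL("a secret key", digestmod=md5)
+            mac.update("some record data")
+            print mac.hexdigest()
+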
+ """ + return "".join([hex(ord(x))[2:].zfill(2) + for x in tuple(self.digest())]) diff --git a/gam/gdata/analytics/tlslite/messages.py b/gam/gdata/analytics/tlslite/messages.py new file mode 100755 index 00000000000..afccc793a56 --- /dev/null +++ b/gam/gdata/analytics/tlslite/messages.py @@ -0,0 +1,561 @@ +"""Classes representing TLS messages.""" + +from utils.compat import * +from utils.cryptomath import * +from errors import * +from utils.codec import * +from constants import * +from X509 import X509 +from X509CertChain import X509CertChain + +import sha +import md5 + +class RecordHeader3: + def __init__(self): + self.type = 0 + self.version = (0,0) + self.length = 0 + self.ssl2 = False + + def create(self, version, type, length): + self.type = type + self.version = version + self.length = length + return self + + def write(self): + w = Writer(5) + w.add(self.type, 1) + w.add(self.version[0], 1) + w.add(self.version[1], 1) + w.add(self.length, 2) + return w.bytes + + def parse(self, p): + self.type = p.get(1) + self.version = (p.get(1), p.get(1)) + self.length = p.get(2) + self.ssl2 = False + return self + +class RecordHeader2: + def __init__(self): + self.type = 0 + self.version = (0,0) + self.length = 0 + self.ssl2 = True + + def parse(self, p): + if p.get(1)!=128: + raise SyntaxError() + self.type = ContentType.handshake + self.version = (2,0) + #We don't support 2-byte-length-headers; could be a problem + self.length = p.get(1) + return self + + +class Msg: + def preWrite(self, trial): + if trial: + w = Writer() + else: + length = self.write(True) + w = Writer(length) + return w + + def postWrite(self, w, trial): + if trial: + return w.index + else: + return w.bytes + +class Alert(Msg): + def __init__(self): + self.contentType = ContentType.alert + self.level = 0 + self.description = 0 + + def create(self, description, level=AlertLevel.fatal): + self.level = level + self.description = description + return self + + def parse(self, p): + p.setLengthCheck(2) + self.level = p.get(1) + self.description = p.get(1) + p.stopLengthCheck() + return self + + def write(self): + w = Writer(2) + w.add(self.level, 1) + w.add(self.description, 1) + return w.bytes + + +class HandshakeMsg(Msg): + def preWrite(self, handshakeType, trial): + if trial: + w = Writer() + w.add(handshakeType, 1) + w.add(0, 3) + else: + length = self.write(True) + w = Writer(length) + w.add(handshakeType, 1) + w.add(length-4, 3) + return w + + +class ClientHello(HandshakeMsg): + def __init__(self, ssl2=False): + self.contentType = ContentType.handshake + self.ssl2 = ssl2 + self.client_version = (0,0) + self.random = createByteArrayZeros(32) + self.session_id = createByteArraySequence([]) + self.cipher_suites = [] # a list of 16-bit values + self.certificate_types = [CertificateType.x509] + self.compression_methods = [] # a list of 8-bit values + self.srp_username = None # a string + + def create(self, version, random, session_id, cipher_suites, + certificate_types=None, srp_username=None): + self.client_version = version + self.random = random + self.session_id = session_id + self.cipher_suites = cipher_suites + self.certificate_types = certificate_types + self.compression_methods = [0] + self.srp_username = srp_username + return self + + def parse(self, p): + if self.ssl2: + self.client_version = (p.get(1), p.get(1)) + cipherSpecsLength = p.get(2) + sessionIDLength = p.get(2) + randomLength = p.get(2) + self.cipher_suites = p.getFixList(3, int(cipherSpecsLength/3)) + self.session_id = p.getFixBytes(sessionIDLength) + 
self.random = p.getFixBytes(randomLength) + if len(self.random) < 32: + zeroBytes = 32-len(self.random) + self.random = createByteArrayZeros(zeroBytes) + self.random + self.compression_methods = [0]#Fake this value + + #We're not doing a stopLengthCheck() for SSLv2, oh well.. + else: + p.startLengthCheck(3) + self.client_version = (p.get(1), p.get(1)) + self.random = p.getFixBytes(32) + self.session_id = p.getVarBytes(1) + self.cipher_suites = p.getVarList(2, 2) + self.compression_methods = p.getVarList(1, 1) + if not p.atLengthCheck(): + totalExtLength = p.get(2) + soFar = 0 + while soFar != totalExtLength: + extType = p.get(2) + extLength = p.get(2) + if extType == 6: + self.srp_username = bytesToString(p.getVarBytes(1)) + elif extType == 7: + self.certificate_types = p.getVarList(1, 1) + else: + p.getFixBytes(extLength) + soFar += 4 + extLength + p.stopLengthCheck() + return self + + def write(self, trial=False): + w = HandshakeMsg.preWrite(self, HandshakeType.client_hello, trial) + w.add(self.client_version[0], 1) + w.add(self.client_version[1], 1) + w.addFixSeq(self.random, 1) + w.addVarSeq(self.session_id, 1, 1) + w.addVarSeq(self.cipher_suites, 2, 2) + w.addVarSeq(self.compression_methods, 1, 1) + + extLength = 0 + if self.certificate_types and self.certificate_types != \ + [CertificateType.x509]: + extLength += 5 + len(self.certificate_types) + if self.srp_username: + extLength += 5 + len(self.srp_username) + if extLength > 0: + w.add(extLength, 2) + + if self.certificate_types and self.certificate_types != \ + [CertificateType.x509]: + w.add(7, 2) + w.add(len(self.certificate_types)+1, 2) + w.addVarSeq(self.certificate_types, 1, 1) + if self.srp_username: + w.add(6, 2) + w.add(len(self.srp_username)+1, 2) + w.addVarSeq(stringToBytes(self.srp_username), 1, 1) + + return HandshakeMsg.postWrite(self, w, trial) + + +class ServerHello(HandshakeMsg): + def __init__(self): + self.contentType = ContentType.handshake + self.server_version = (0,0) + self.random = createByteArrayZeros(32) + self.session_id = createByteArraySequence([]) + self.cipher_suite = 0 + self.certificate_type = CertificateType.x509 + self.compression_method = 0 + + def create(self, version, random, session_id, cipher_suite, + certificate_type): + self.server_version = version + self.random = random + self.session_id = session_id + self.cipher_suite = cipher_suite + self.certificate_type = certificate_type + self.compression_method = 0 + return self + + def parse(self, p): + p.startLengthCheck(3) + self.server_version = (p.get(1), p.get(1)) + self.random = p.getFixBytes(32) + self.session_id = p.getVarBytes(1) + self.cipher_suite = p.get(2) + self.compression_method = p.get(1) + if not p.atLengthCheck(): + totalExtLength = p.get(2) + soFar = 0 + while soFar != totalExtLength: + extType = p.get(2) + extLength = p.get(2) + if extType == 7: + self.certificate_type = p.get(1) + else: + p.getFixBytes(extLength) + soFar += 4 + extLength + p.stopLengthCheck() + return self + + def write(self, trial=False): + w = HandshakeMsg.preWrite(self, HandshakeType.server_hello, trial) + w.add(self.server_version[0], 1) + w.add(self.server_version[1], 1) + w.addFixSeq(self.random, 1) + w.addVarSeq(self.session_id, 1, 1) + w.add(self.cipher_suite, 2) + w.add(self.compression_method, 1) + + extLength = 0 + if self.certificate_type and self.certificate_type != \ + CertificateType.x509: + extLength += 5 + + if extLength != 0: + w.add(extLength, 2) + + if self.certificate_type and self.certificate_type != \ + CertificateType.x509: + w.add(7, 
2) + w.add(1, 2) + w.add(self.certificate_type, 1) + + return HandshakeMsg.postWrite(self, w, trial) + +class Certificate(HandshakeMsg): + def __init__(self, certificateType): + self.certificateType = certificateType + self.contentType = ContentType.handshake + self.certChain = None + + def create(self, certChain): + self.certChain = certChain + return self + + def parse(self, p): + p.startLengthCheck(3) + if self.certificateType == CertificateType.x509: + chainLength = p.get(3) + index = 0 + certificate_list = [] + while index != chainLength: + certBytes = p.getVarBytes(3) + x509 = X509() + x509.parseBinary(certBytes) + certificate_list.append(x509) + index += len(certBytes)+3 + if certificate_list: + self.certChain = X509CertChain(certificate_list) + elif self.certificateType == CertificateType.cryptoID: + s = bytesToString(p.getVarBytes(2)) + if s: + try: + import cryptoIDlib.CertChain + except ImportError: + raise SyntaxError(\ + "cryptoID cert chain received, cryptoIDlib not present") + self.certChain = cryptoIDlib.CertChain.CertChain().parse(s) + else: + raise AssertionError() + + p.stopLengthCheck() + return self + + def write(self, trial=False): + w = HandshakeMsg.preWrite(self, HandshakeType.certificate, trial) + if self.certificateType == CertificateType.x509: + chainLength = 0 + if self.certChain: + certificate_list = self.certChain.x509List + else: + certificate_list = [] + #determine length + for cert in certificate_list: + bytes = cert.writeBytes() + chainLength += len(bytes)+3 + #add bytes + w.add(chainLength, 3) + for cert in certificate_list: + bytes = cert.writeBytes() + w.addVarSeq(bytes, 1, 3) + elif self.certificateType == CertificateType.cryptoID: + if self.certChain: + bytes = stringToBytes(self.certChain.write()) + else: + bytes = createByteArraySequence([]) + w.addVarSeq(bytes, 1, 2) + else: + raise AssertionError() + return HandshakeMsg.postWrite(self, w, trial) + +class CertificateRequest(HandshakeMsg): + def __init__(self): + self.contentType = ContentType.handshake + self.certificate_types = [] + #treat as opaque bytes for now + self.certificate_authorities = createByteArraySequence([]) + + def create(self, certificate_types, certificate_authorities): + self.certificate_types = certificate_types + self.certificate_authorities = certificate_authorities + return self + + def parse(self, p): + p.startLengthCheck(3) + self.certificate_types = p.getVarList(1, 1) + self.certificate_authorities = p.getVarBytes(2) + p.stopLengthCheck() + return self + + def write(self, trial=False): + w = HandshakeMsg.preWrite(self, HandshakeType.certificate_request, + trial) + w.addVarSeq(self.certificate_types, 1, 1) + w.addVarSeq(self.certificate_authorities, 1, 2) + return HandshakeMsg.postWrite(self, w, trial) + +class ServerKeyExchange(HandshakeMsg): + def __init__(self, cipherSuite): + self.cipherSuite = cipherSuite + self.contentType = ContentType.handshake + self.srp_N = 0L + self.srp_g = 0L + self.srp_s = createByteArraySequence([]) + self.srp_B = 0L + self.signature = createByteArraySequence([]) + + def createSRP(self, srp_N, srp_g, srp_s, srp_B): + self.srp_N = srp_N + self.srp_g = srp_g + self.srp_s = srp_s + self.srp_B = srp_B + return self + + def parse(self, p): + p.startLengthCheck(3) + self.srp_N = bytesToNumber(p.getVarBytes(2)) + self.srp_g = bytesToNumber(p.getVarBytes(2)) + self.srp_s = p.getVarBytes(1) + self.srp_B = bytesToNumber(p.getVarBytes(2)) + if self.cipherSuite in CipherSuite.srpRsaSuites: + self.signature = p.getVarBytes(2) + p.stopLengthCheck() + 
return self + + def write(self, trial=False): + w = HandshakeMsg.preWrite(self, HandshakeType.server_key_exchange, + trial) + w.addVarSeq(numberToBytes(self.srp_N), 1, 2) + w.addVarSeq(numberToBytes(self.srp_g), 1, 2) + w.addVarSeq(self.srp_s, 1, 1) + w.addVarSeq(numberToBytes(self.srp_B), 1, 2) + if self.cipherSuite in CipherSuite.srpRsaSuites: + w.addVarSeq(self.signature, 1, 2) + return HandshakeMsg.postWrite(self, w, trial) + + def hash(self, clientRandom, serverRandom): + oldCipherSuite = self.cipherSuite + self.cipherSuite = None + try: + bytes = clientRandom + serverRandom + self.write()[4:] + s = bytesToString(bytes) + return stringToBytes(md5.md5(s).digest() + sha.sha(s).digest()) + finally: + self.cipherSuite = oldCipherSuite + +class ServerHelloDone(HandshakeMsg): + def __init__(self): + self.contentType = ContentType.handshake + + def create(self): + return self + + def parse(self, p): + p.startLengthCheck(3) + p.stopLengthCheck() + return self + + def write(self, trial=False): + w = HandshakeMsg.preWrite(self, HandshakeType.server_hello_done, trial) + return HandshakeMsg.postWrite(self, w, trial) + +class ClientKeyExchange(HandshakeMsg): + def __init__(self, cipherSuite, version=None): + self.cipherSuite = cipherSuite + self.version = version + self.contentType = ContentType.handshake + self.srp_A = 0 + self.encryptedPreMasterSecret = createByteArraySequence([]) + + def createSRP(self, srp_A): + self.srp_A = srp_A + return self + + def createRSA(self, encryptedPreMasterSecret): + self.encryptedPreMasterSecret = encryptedPreMasterSecret + return self + + def parse(self, p): + p.startLengthCheck(3) + if self.cipherSuite in CipherSuite.srpSuites + \ + CipherSuite.srpRsaSuites: + self.srp_A = bytesToNumber(p.getVarBytes(2)) + elif self.cipherSuite in CipherSuite.rsaSuites: + if self.version in ((3,1), (3,2)): + self.encryptedPreMasterSecret = p.getVarBytes(2) + elif self.version == (3,0): + self.encryptedPreMasterSecret = \ + p.getFixBytes(len(p.bytes)-p.index) + else: + raise AssertionError() + else: + raise AssertionError() + p.stopLengthCheck() + return self + + def write(self, trial=False): + w = HandshakeMsg.preWrite(self, HandshakeType.client_key_exchange, + trial) + if self.cipherSuite in CipherSuite.srpSuites + \ + CipherSuite.srpRsaSuites: + w.addVarSeq(numberToBytes(self.srp_A), 1, 2) + elif self.cipherSuite in CipherSuite.rsaSuites: + if self.version in ((3,1), (3,2)): + w.addVarSeq(self.encryptedPreMasterSecret, 1, 2) + elif self.version == (3,0): + w.addFixSeq(self.encryptedPreMasterSecret, 1) + else: + raise AssertionError() + else: + raise AssertionError() + return HandshakeMsg.postWrite(self, w, trial) + +class CertificateVerify(HandshakeMsg): + def __init__(self): + self.contentType = ContentType.handshake + self.signature = createByteArraySequence([]) + + def create(self, signature): + self.signature = signature + return self + + def parse(self, p): + p.startLengthCheck(3) + self.signature = p.getVarBytes(2) + p.stopLengthCheck() + return self + + def write(self, trial=False): + w = HandshakeMsg.preWrite(self, HandshakeType.certificate_verify, + trial) + w.addVarSeq(self.signature, 1, 2) + return HandshakeMsg.postWrite(self, w, trial) + +class ChangeCipherSpec(Msg): + def __init__(self): + self.contentType = ContentType.change_cipher_spec + self.type = 1 + + def create(self): + self.type = 1 + return self + + def parse(self, p): + p.setLengthCheck(1) + self.type = p.get(1) + p.stopLengthCheck() + return self + + def write(self, trial=False): + w = 
Msg.preWrite(self, trial) + w.add(self.type,1) + return Msg.postWrite(self, w, trial) + + +class Finished(HandshakeMsg): + def __init__(self, version): + self.contentType = ContentType.handshake + self.version = version + self.verify_data = createByteArraySequence([]) + + def create(self, verify_data): + self.verify_data = verify_data + return self + + def parse(self, p): + p.startLengthCheck(3) + if self.version == (3,0): + self.verify_data = p.getFixBytes(36) + elif self.version in ((3,1), (3,2)): + self.verify_data = p.getFixBytes(12) + else: + raise AssertionError() + p.stopLengthCheck() + return self + + def write(self, trial=False): + w = HandshakeMsg.preWrite(self, HandshakeType.finished, trial) + w.addFixSeq(self.verify_data, 1) + return HandshakeMsg.postWrite(self, w, trial) + +class ApplicationData(Msg): + def __init__(self): + self.contentType = ContentType.application_data + self.bytes = createByteArraySequence([]) + + def create(self, bytes): + self.bytes = bytes + return self + + def parse(self, p): + self.bytes = p.bytes + return self + + def write(self): + return self.bytes \ No newline at end of file diff --git a/gam/gdata/analytics/tlslite/utils/AES.py b/gam/gdata/analytics/tlslite/utils/AES.py new file mode 100755 index 00000000000..8413f4c1093 --- /dev/null +++ b/gam/gdata/analytics/tlslite/utils/AES.py @@ -0,0 +1,31 @@ +"""Abstract class for AES.""" + +class AES: + def __init__(self, key, mode, IV, implementation): + if len(key) not in (16, 24, 32): + raise AssertionError() + if mode != 2: + raise AssertionError() + if len(IV) != 16: + raise AssertionError() + self.isBlockCipher = True + self.block_size = 16 + self.implementation = implementation + if len(key)==16: + self.name = "aes128" + elif len(key)==24: + self.name = "aes192" + elif len(key)==32: + self.name = "aes256" + else: + raise AssertionError() + + #CBC-Mode encryption, returns ciphertext + #WARNING: *MAY* modify the input as well + def encrypt(self, plaintext): + assert(len(plaintext) % 16 == 0) + + #CBC-Mode decryption, returns plaintext + #WARNING: *MAY* modify the input as well + def decrypt(self, ciphertext): + assert(len(ciphertext) % 16 == 0) \ No newline at end of file diff --git a/gam/gdata/analytics/tlslite/utils/ASN1Parser.py b/gam/gdata/analytics/tlslite/utils/ASN1Parser.py new file mode 100755 index 00000000000..16b50f29cde --- /dev/null +++ b/gam/gdata/analytics/tlslite/utils/ASN1Parser.py @@ -0,0 +1,34 @@ +"""Class for parsing ASN.1""" +from compat import * +from codec import * + +#Takes a byte array which has a DER TLV field at its head +class ASN1Parser: + def __init__(self, bytes): + p = Parser(bytes) + p.get(1) #skip Type + + #Get Length + self.length = self._getASN1Length(p) + + #Get Value + self.value = p.getFixBytes(self.length) + + #Assuming this is a sequence... 
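+    #Each child of the SEQUENCE is itself a DER TLV record; getChild() below
+    #walks past `which` earlier siblings and wraps the chosen one in a new
+    #ASN1Parser.  Rough usage sketch (hypothetical DER-encoded bytes `seq`):
+    #  outer = ASN1Parser(seq)
+    #  first = outer.getChild(0)        #an ASN1Parser over the first child
+    #  firstValue = outer.getChild(0).value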
+ def getChild(self, which): + p = Parser(self.value) + for x in range(which+1): + markIndex = p.index + p.get(1) #skip Type + length = self._getASN1Length(p) + p.getFixBytes(length) + return ASN1Parser(p.bytes[markIndex : p.index]) + + #Decode the ASN.1 DER length field + def _getASN1Length(self, p): + firstLength = p.get(1) + if firstLength<=127: + return firstLength + else: + lengthLength = firstLength & 0x7F + return p.get(lengthLength) diff --git a/gam/gdata/analytics/tlslite/utils/ASN1Parser.pyc b/gam/gdata/analytics/tlslite/utils/ASN1Parser.pyc new file mode 100755 index 0000000000000000000000000000000000000000..db8a84cd43813860d7aa20b0987e0eb6139a2b72 GIT binary patch literal 1428 zcmb`GQEL-H6ov2XCTWwVmiEEE2u6HagckZBlp<1_A}A7MBZwigjJwm#rkmZkGu0GQ ze5n7}AE5W#jjcYn3w!s@%$=Dt-<>+3Uqj6f6X>RuP zd~WwDGC26lF!`qDbxt9i+Ny)8Fxh8%_ij z!p(95ZcO4Yy2V>^OiSqPGMv-ThjdjV^XZDIG1|tzXq)H)c1fl!S~k&CUwib?tx5el zE$fLsJ>jAMFt=Gk2<##B3YSV3%opUcA~A~)FzfZDw62fTW zxrIZ`OjqY`WEHF@geBY^GfV7GvOL-0buCOmYD?{^w%X+Hwi4J7ePhxiZZOxW{AmCt zret%UE2MlAHqM3MabQ8X))i=3Mw1cuuPP<|`t?iVB8fJrPu yOSL!Jok6_;wqiTt_ejc^MWG@tXLIeuJ#m?sSmxiSAmQ+$KKV-+cO|T&I{qIExf`wk literal 0 HcmV?d00001 diff --git a/gam/gdata/analytics/tlslite/utils/Cryptlib_AES.py b/gam/gdata/analytics/tlslite/utils/Cryptlib_AES.py new file mode 100755 index 00000000000..9e101fc626d --- /dev/null +++ b/gam/gdata/analytics/tlslite/utils/Cryptlib_AES.py @@ -0,0 +1,34 @@ +"""Cryptlib AES implementation.""" + +from cryptomath import * +from AES import * + +if cryptlibpyLoaded: + + def new(key, mode, IV): + return Cryptlib_AES(key, mode, IV) + + class Cryptlib_AES(AES): + + def __init__(self, key, mode, IV): + AES.__init__(self, key, mode, IV, "cryptlib") + self.context = cryptlib_py.cryptCreateContext(cryptlib_py.CRYPT_UNUSED, cryptlib_py.CRYPT_ALGO_AES) + cryptlib_py.cryptSetAttribute(self.context, cryptlib_py.CRYPT_CTXINFO_MODE, cryptlib_py.CRYPT_MODE_CBC) + cryptlib_py.cryptSetAttribute(self.context, cryptlib_py.CRYPT_CTXINFO_KEYSIZE, len(key)) + cryptlib_py.cryptSetAttributeString(self.context, cryptlib_py.CRYPT_CTXINFO_KEY, key) + cryptlib_py.cryptSetAttributeString(self.context, cryptlib_py.CRYPT_CTXINFO_IV, IV) + + def __del__(self): + cryptlib_py.cryptDestroyContext(self.context) + + def encrypt(self, plaintext): + AES.encrypt(self, plaintext) + bytes = stringToBytes(plaintext) + cryptlib_py.cryptEncrypt(self.context, bytes) + return bytesToString(bytes) + + def decrypt(self, ciphertext): + AES.decrypt(self, ciphertext) + bytes = stringToBytes(ciphertext) + cryptlib_py.cryptDecrypt(self.context, bytes) + return bytesToString(bytes) diff --git a/gam/gdata/analytics/tlslite/utils/Cryptlib_RC4.py b/gam/gdata/analytics/tlslite/utils/Cryptlib_RC4.py new file mode 100755 index 00000000000..7c6d087b8d6 --- /dev/null +++ b/gam/gdata/analytics/tlslite/utils/Cryptlib_RC4.py @@ -0,0 +1,28 @@ +"""Cryptlib RC4 implementation.""" + +from cryptomath import * +from RC4 import RC4 + +if cryptlibpyLoaded: + + def new(key): + return Cryptlib_RC4(key) + + class Cryptlib_RC4(RC4): + + def __init__(self, key): + RC4.__init__(self, key, "cryptlib") + self.context = cryptlib_py.cryptCreateContext(cryptlib_py.CRYPT_UNUSED, cryptlib_py.CRYPT_ALGO_RC4) + cryptlib_py.cryptSetAttribute(self.context, cryptlib_py.CRYPT_CTXINFO_KEYSIZE, len(key)) + cryptlib_py.cryptSetAttributeString(self.context, cryptlib_py.CRYPT_CTXINFO_KEY, key) + + def __del__(self): + cryptlib_py.cryptDestroyContext(self.context) + + def encrypt(self, plaintext): + bytes = stringToBytes(plaintext) + 
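+            #cryptlib transforms the byte array in place, so the round trip is
+            #string -> byte array -> cryptEncrypt(context, bytes) -> string.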
cryptlib_py.cryptEncrypt(self.context, bytes) + return bytesToString(bytes) + + def decrypt(self, ciphertext): + return self.encrypt(ciphertext) \ No newline at end of file diff --git a/gam/gdata/analytics/tlslite/utils/Cryptlib_TripleDES.py b/gam/gdata/analytics/tlslite/utils/Cryptlib_TripleDES.py new file mode 100755 index 00000000000..a4f8155a099 --- /dev/null +++ b/gam/gdata/analytics/tlslite/utils/Cryptlib_TripleDES.py @@ -0,0 +1,35 @@ +"""Cryptlib 3DES implementation.""" + +from cryptomath import * + +from TripleDES import * + +if cryptlibpyLoaded: + + def new(key, mode, IV): + return Cryptlib_TripleDES(key, mode, IV) + + class Cryptlib_TripleDES(TripleDES): + + def __init__(self, key, mode, IV): + TripleDES.__init__(self, key, mode, IV, "cryptlib") + self.context = cryptlib_py.cryptCreateContext(cryptlib_py.CRYPT_UNUSED, cryptlib_py.CRYPT_ALGO_3DES) + cryptlib_py.cryptSetAttribute(self.context, cryptlib_py.CRYPT_CTXINFO_MODE, cryptlib_py.CRYPT_MODE_CBC) + cryptlib_py.cryptSetAttribute(self.context, cryptlib_py.CRYPT_CTXINFO_KEYSIZE, len(key)) + cryptlib_py.cryptSetAttributeString(self.context, cryptlib_py.CRYPT_CTXINFO_KEY, key) + cryptlib_py.cryptSetAttributeString(self.context, cryptlib_py.CRYPT_CTXINFO_IV, IV) + + def __del__(self): + cryptlib_py.cryptDestroyContext(self.context) + + def encrypt(self, plaintext): + TripleDES.encrypt(self, plaintext) + bytes = stringToBytes(plaintext) + cryptlib_py.cryptEncrypt(self.context, bytes) + return bytesToString(bytes) + + def decrypt(self, ciphertext): + TripleDES.decrypt(self, ciphertext) + bytes = stringToBytes(ciphertext) + cryptlib_py.cryptDecrypt(self.context, bytes) + return bytesToString(bytes) \ No newline at end of file diff --git a/gam/gdata/analytics/tlslite/utils/OpenSSL_AES.py b/gam/gdata/analytics/tlslite/utils/OpenSSL_AES.py new file mode 100755 index 00000000000..e60679bf509 --- /dev/null +++ b/gam/gdata/analytics/tlslite/utils/OpenSSL_AES.py @@ -0,0 +1,49 @@ +"""OpenSSL/M2Crypto AES implementation.""" + +from cryptomath import * +from AES import * + +if m2cryptoLoaded: + + def new(key, mode, IV): + return OpenSSL_AES(key, mode, IV) + + class OpenSSL_AES(AES): + + def __init__(self, key, mode, IV): + AES.__init__(self, key, mode, IV, "openssl") + self.key = key + self.IV = IV + + def _createContext(self, encrypt): + context = m2.cipher_ctx_new() + if len(self.key)==16: + cipherType = m2.aes_128_cbc() + if len(self.key)==24: + cipherType = m2.aes_192_cbc() + if len(self.key)==32: + cipherType = m2.aes_256_cbc() + m2.cipher_init(context, cipherType, self.key, self.IV, encrypt) + return context + + def encrypt(self, plaintext): + AES.encrypt(self, plaintext) + context = self._createContext(1) + ciphertext = m2.cipher_update(context, plaintext) + m2.cipher_ctx_free(context) + self.IV = ciphertext[-self.block_size:] + return ciphertext + + def decrypt(self, ciphertext): + AES.decrypt(self, ciphertext) + context = self._createContext(0) + #I think M2Crypto has a bug - it fails to decrypt and return the last block passed in. + #To work around this, we append sixteen zeros to the string, below: + plaintext = m2.cipher_update(context, ciphertext+('\0'*16)) + + #If this bug is ever fixed, then plaintext will end up having a garbage + #plaintext block on the end. That's okay - the below code will discard it. 
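+            #In other words: N+16 bytes go in, and only the first N bytes of
+            #the returned plaintext are kept; the slice below drops the extra
+            #block produced by the appended zero bytes.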
+ plaintext = plaintext[:len(ciphertext)] + m2.cipher_ctx_free(context) + self.IV = ciphertext[-self.block_size:] + return plaintext diff --git a/gam/gdata/analytics/tlslite/utils/OpenSSL_RC4.py b/gam/gdata/analytics/tlslite/utils/OpenSSL_RC4.py new file mode 100755 index 00000000000..ac433aad761 --- /dev/null +++ b/gam/gdata/analytics/tlslite/utils/OpenSSL_RC4.py @@ -0,0 +1,25 @@ +"""OpenSSL/M2Crypto RC4 implementation.""" + +from cryptomath import * +from RC4 import RC4 + +if m2cryptoLoaded: + + def new(key): + return OpenSSL_RC4(key) + + class OpenSSL_RC4(RC4): + + def __init__(self, key): + RC4.__init__(self, key, "openssl") + self.rc4 = m2.rc4_new() + m2.rc4_set_key(self.rc4, key) + + def __del__(self): + m2.rc4_free(self.rc4) + + def encrypt(self, plaintext): + return m2.rc4_update(self.rc4, plaintext) + + def decrypt(self, ciphertext): + return self.encrypt(ciphertext) diff --git a/gam/gdata/analytics/tlslite/utils/OpenSSL_RSAKey.py b/gam/gdata/analytics/tlslite/utils/OpenSSL_RSAKey.py new file mode 100755 index 00000000000..fe1a3cd74d2 --- /dev/null +++ b/gam/gdata/analytics/tlslite/utils/OpenSSL_RSAKey.py @@ -0,0 +1,148 @@ +"""OpenSSL/M2Crypto RSA implementation.""" + +from cryptomath import * + +from RSAKey import * +from Python_RSAKey import Python_RSAKey + +#copied from M2Crypto.util.py, so when we load the local copy of m2 +#we can still use it +def password_callback(v, prompt1='Enter private key passphrase:', + prompt2='Verify passphrase:'): + from getpass import getpass + while 1: + try: + p1=getpass(prompt1) + if v: + p2=getpass(prompt2) + if p1==p2: + break + else: + break + except KeyboardInterrupt: + return None + return p1 + + +if m2cryptoLoaded: + class OpenSSL_RSAKey(RSAKey): + def __init__(self, n=0, e=0): + self.rsa = None + self._hasPrivateKey = False + if (n and not e) or (e and not n): + raise AssertionError() + if n and e: + self.rsa = m2.rsa_new() + m2.rsa_set_n(self.rsa, numberToMPI(n)) + m2.rsa_set_e(self.rsa, numberToMPI(e)) + + def __del__(self): + if self.rsa: + m2.rsa_free(self.rsa) + + def __getattr__(self, name): + if name == 'e': + if not self.rsa: + return 0 + return mpiToNumber(m2.rsa_get_e(self.rsa)) + elif name == 'n': + if not self.rsa: + return 0 + return mpiToNumber(m2.rsa_get_n(self.rsa)) + else: + raise AttributeError + + def hasPrivateKey(self): + return self._hasPrivateKey + + def hash(self): + return Python_RSAKey(self.n, self.e).hash() + + def _rawPrivateKeyOp(self, m): + s = numberToString(m) + byteLength = numBytes(self.n) + if len(s)== byteLength: + pass + elif len(s) == byteLength-1: + s = '\0' + s + else: + raise AssertionError() + c = stringToNumber(m2.rsa_private_encrypt(self.rsa, s, + m2.no_padding)) + return c + + def _rawPublicKeyOp(self, c): + s = numberToString(c) + byteLength = numBytes(self.n) + if len(s)== byteLength: + pass + elif len(s) == byteLength-1: + s = '\0' + s + else: + raise AssertionError() + m = stringToNumber(m2.rsa_public_decrypt(self.rsa, s, + m2.no_padding)) + return m + + def acceptsPassword(self): return True + + def write(self, password=None): + bio = m2.bio_new(m2.bio_s_mem()) + if self._hasPrivateKey: + if password: + def f(v): return password + m2.rsa_write_key(self.rsa, bio, m2.des_ede_cbc(), f) + else: + def f(): pass + m2.rsa_write_key_no_cipher(self.rsa, bio, f) + else: + if password: + raise AssertionError() + m2.rsa_write_pub_key(self.rsa, bio) + s = m2.bio_read(bio, m2.bio_ctrl_pending(bio)) + m2.bio_free(bio) + return s + + def writeXMLPublicKey(self, indent=''): + return Python_RSAKey(self.n, 
self.e).write(indent) + + def generate(bits): + key = OpenSSL_RSAKey() + def f():pass + key.rsa = m2.rsa_generate_key(bits, 3, f) + key._hasPrivateKey = True + return key + generate = staticmethod(generate) + + def parse(s, passwordCallback=None): + if s.startswith("-----BEGIN "): + if passwordCallback==None: + callback = password_callback + else: + def f(v, prompt1=None, prompt2=None): + return passwordCallback() + callback = f + bio = m2.bio_new(m2.bio_s_mem()) + try: + m2.bio_write(bio, s) + key = OpenSSL_RSAKey() + if s.startswith("-----BEGIN RSA PRIVATE KEY-----"): + def f():pass + key.rsa = m2.rsa_read_key(bio, callback) + if key.rsa == None: + raise SyntaxError() + key._hasPrivateKey = True + elif s.startswith("-----BEGIN PUBLIC KEY-----"): + key.rsa = m2.rsa_read_pub_key(bio) + if key.rsa == None: + raise SyntaxError() + key._hasPrivateKey = False + else: + raise SyntaxError() + return key + finally: + m2.bio_free(bio) + else: + raise SyntaxError() + + parse = staticmethod(parse) diff --git a/gam/gdata/analytics/tlslite/utils/OpenSSL_TripleDES.py b/gam/gdata/analytics/tlslite/utils/OpenSSL_TripleDES.py new file mode 100755 index 00000000000..f5ba1656521 --- /dev/null +++ b/gam/gdata/analytics/tlslite/utils/OpenSSL_TripleDES.py @@ -0,0 +1,44 @@ +"""OpenSSL/M2Crypto 3DES implementation.""" + +from cryptomath import * +from TripleDES import * + +if m2cryptoLoaded: + + def new(key, mode, IV): + return OpenSSL_TripleDES(key, mode, IV) + + class OpenSSL_TripleDES(TripleDES): + + def __init__(self, key, mode, IV): + TripleDES.__init__(self, key, mode, IV, "openssl") + self.key = key + self.IV = IV + + def _createContext(self, encrypt): + context = m2.cipher_ctx_new() + cipherType = m2.des_ede3_cbc() + m2.cipher_init(context, cipherType, self.key, self.IV, encrypt) + return context + + def encrypt(self, plaintext): + TripleDES.encrypt(self, plaintext) + context = self._createContext(1) + ciphertext = m2.cipher_update(context, plaintext) + m2.cipher_ctx_free(context) + self.IV = ciphertext[-self.block_size:] + return ciphertext + + def decrypt(self, ciphertext): + TripleDES.decrypt(self, ciphertext) + context = self._createContext(0) + #I think M2Crypto has a bug - it fails to decrypt and return the last block passed in. + #To work around this, we append sixteen zeros to the string, below: + plaintext = m2.cipher_update(context, ciphertext+('\0'*16)) + + #If this bug is ever fixed, then plaintext will end up having a garbage + #plaintext block on the end. That's okay - the below code will ignore it. 
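+            #Same zero-padding workaround as in OpenSSL_AES above; the slice
+            #below keeps len(ciphertext) bytes, and self.IV keeps only the last
+            #block_size (8 for 3DES) bytes of the ciphertext.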
+ plaintext = plaintext[:len(ciphertext)] + m2.cipher_ctx_free(context) + self.IV = ciphertext[-self.block_size:] + return plaintext \ No newline at end of file diff --git a/gam/gdata/analytics/tlslite/utils/PyCrypto_AES.py b/gam/gdata/analytics/tlslite/utils/PyCrypto_AES.py new file mode 100755 index 00000000000..e38b19d6fb7 --- /dev/null +++ b/gam/gdata/analytics/tlslite/utils/PyCrypto_AES.py @@ -0,0 +1,22 @@ +"""PyCrypto AES implementation.""" + +from cryptomath import * +from AES import * + +if pycryptoLoaded: + import Crypto.Cipher.AES + + def new(key, mode, IV): + return PyCrypto_AES(key, mode, IV) + + class PyCrypto_AES(AES): + + def __init__(self, key, mode, IV): + AES.__init__(self, key, mode, IV, "pycrypto") + self.context = Crypto.Cipher.AES.new(key, mode, IV) + + def encrypt(self, plaintext): + return self.context.encrypt(plaintext) + + def decrypt(self, ciphertext): + return self.context.decrypt(ciphertext) \ No newline at end of file diff --git a/gam/gdata/analytics/tlslite/utils/PyCrypto_RC4.py b/gam/gdata/analytics/tlslite/utils/PyCrypto_RC4.py new file mode 100755 index 00000000000..6c6d86afde4 --- /dev/null +++ b/gam/gdata/analytics/tlslite/utils/PyCrypto_RC4.py @@ -0,0 +1,22 @@ +"""PyCrypto RC4 implementation.""" + +from cryptomath import * +from RC4 import * + +if pycryptoLoaded: + import Crypto.Cipher.ARC4 + + def new(key): + return PyCrypto_RC4(key) + + class PyCrypto_RC4(RC4): + + def __init__(self, key): + RC4.__init__(self, key, "pycrypto") + self.context = Crypto.Cipher.ARC4.new(key) + + def encrypt(self, plaintext): + return self.context.encrypt(plaintext) + + def decrypt(self, ciphertext): + return self.context.decrypt(ciphertext) \ No newline at end of file diff --git a/gam/gdata/analytics/tlslite/utils/PyCrypto_RSAKey.py b/gam/gdata/analytics/tlslite/utils/PyCrypto_RSAKey.py new file mode 100755 index 00000000000..48b5cef03fb --- /dev/null +++ b/gam/gdata/analytics/tlslite/utils/PyCrypto_RSAKey.py @@ -0,0 +1,61 @@ +"""PyCrypto RSA implementation.""" + +from cryptomath import * + +from RSAKey import * +from Python_RSAKey import Python_RSAKey + +if pycryptoLoaded: + + from Crypto.PublicKey import RSA + + class PyCrypto_RSAKey(RSAKey): + def __init__(self, n=0, e=0, d=0, p=0, q=0, dP=0, dQ=0, qInv=0): + if not d: + self.rsa = RSA.construct( (n, e) ) + else: + self.rsa = RSA.construct( (n, e, d, p, q) ) + + def __getattr__(self, name): + return getattr(self.rsa, name) + + def hasPrivateKey(self): + return self.rsa.has_private() + + def hash(self): + return Python_RSAKey(self.n, self.e).hash() + + def _rawPrivateKeyOp(self, m): + s = numberToString(m) + byteLength = numBytes(self.n) + if len(s)== byteLength: + pass + elif len(s) == byteLength-1: + s = '\0' + s + else: + raise AssertionError() + c = stringToNumber(self.rsa.decrypt((s,))) + return c + + def _rawPublicKeyOp(self, c): + s = numberToString(c) + byteLength = numBytes(self.n) + if len(s)== byteLength: + pass + elif len(s) == byteLength-1: + s = '\0' + s + else: + raise AssertionError() + m = stringToNumber(self.rsa.encrypt(s, None)[0]) + return m + + def writeXMLPublicKey(self, indent=''): + return Python_RSAKey(self.n, self.e).write(indent) + + def generate(bits): + key = PyCrypto_RSAKey() + def f(numBytes): + return bytesToString(getRandomBytes(numBytes)) + key.rsa = RSA.generate(bits, f) + return key + generate = staticmethod(generate) diff --git a/gam/gdata/analytics/tlslite/utils/PyCrypto_TripleDES.py b/gam/gdata/analytics/tlslite/utils/PyCrypto_TripleDES.py new file mode 100755 index 
00000000000..8c22bb80a57 --- /dev/null +++ b/gam/gdata/analytics/tlslite/utils/PyCrypto_TripleDES.py @@ -0,0 +1,22 @@ +"""PyCrypto 3DES implementation.""" + +from cryptomath import * +from TripleDES import * + +if pycryptoLoaded: + import Crypto.Cipher.DES3 + + def new(key, mode, IV): + return PyCrypto_TripleDES(key, mode, IV) + + class PyCrypto_TripleDES(TripleDES): + + def __init__(self, key, mode, IV): + TripleDES.__init__(self, key, mode, IV, "pycrypto") + self.context = Crypto.Cipher.DES3.new(key, mode, IV) + + def encrypt(self, plaintext): + return self.context.encrypt(plaintext) + + def decrypt(self, ciphertext): + return self.context.decrypt(ciphertext) \ No newline at end of file diff --git a/gam/gdata/analytics/tlslite/utils/Python_AES.py b/gam/gdata/analytics/tlslite/utils/Python_AES.py new file mode 100755 index 00000000000..657152f8921 --- /dev/null +++ b/gam/gdata/analytics/tlslite/utils/Python_AES.py @@ -0,0 +1,68 @@ +"""Pure-Python AES implementation.""" + +from cryptomath import * + +from AES import * +from rijndael import rijndael + +def new(key, mode, IV): + return Python_AES(key, mode, IV) + +class Python_AES(AES): + def __init__(self, key, mode, IV): + AES.__init__(self, key, mode, IV, "python") + self.rijndael = rijndael(key, 16) + self.IV = IV + + def encrypt(self, plaintext): + AES.encrypt(self, plaintext) + + plaintextBytes = stringToBytes(plaintext) + chainBytes = stringToBytes(self.IV) + + #CBC Mode: For each block... + for x in range(len(plaintextBytes)/16): + + #XOR with the chaining block + blockBytes = plaintextBytes[x*16 : (x*16)+16] + for y in range(16): + blockBytes[y] ^= chainBytes[y] + blockString = bytesToString(blockBytes) + + #Encrypt it + encryptedBytes = stringToBytes(self.rijndael.encrypt(blockString)) + + #Overwrite the input with the output + for y in range(16): + plaintextBytes[(x*16)+y] = encryptedBytes[y] + + #Set the next chaining block + chainBytes = encryptedBytes + + self.IV = bytesToString(chainBytes) + return bytesToString(plaintextBytes) + + def decrypt(self, ciphertext): + AES.decrypt(self, ciphertext) + + ciphertextBytes = stringToBytes(ciphertext) + chainBytes = stringToBytes(self.IV) + + #CBC Mode: For each block... 
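+        #(i.e. plaintext[i] = decrypt(ciphertext[i]) XOR ciphertext[i-1], with
+        # the IV standing in for "ciphertext[-1]" -- the mirror image of the
+        # encrypt() loop above)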
+ for x in range(len(ciphertextBytes)/16): + + #Decrypt it + blockBytes = ciphertextBytes[x*16 : (x*16)+16] + blockString = bytesToString(blockBytes) + decryptedBytes = stringToBytes(self.rijndael.decrypt(blockString)) + + #XOR with the chaining block and overwrite the input with output + for y in range(16): + decryptedBytes[y] ^= chainBytes[y] + ciphertextBytes[(x*16)+y] = decryptedBytes[y] + + #Set the next chaining block + chainBytes = blockBytes + + self.IV = bytesToString(chainBytes) + return bytesToString(ciphertextBytes) diff --git a/gam/gdata/analytics/tlslite/utils/Python_RC4.py b/gam/gdata/analytics/tlslite/utils/Python_RC4.py new file mode 100755 index 00000000000..56ce5fb2fc2 --- /dev/null +++ b/gam/gdata/analytics/tlslite/utils/Python_RC4.py @@ -0,0 +1,39 @@ +"""Pure-Python RC4 implementation.""" + +from RC4 import RC4 +from cryptomath import * + +def new(key): + return Python_RC4(key) + +class Python_RC4(RC4): + def __init__(self, key): + RC4.__init__(self, key, "python") + keyBytes = stringToBytes(key) + S = [i for i in range(256)] + j = 0 + for i in range(256): + j = (j + S[i] + keyBytes[i % len(keyBytes)]) % 256 + S[i], S[j] = S[j], S[i] + + self.S = S + self.i = 0 + self.j = 0 + + def encrypt(self, plaintext): + plaintextBytes = stringToBytes(plaintext) + S = self.S + i = self.i + j = self.j + for x in range(len(plaintextBytes)): + i = (i + 1) % 256 + j = (j + S[i]) % 256 + S[i], S[j] = S[j], S[i] + t = (S[i] + S[j]) % 256 + plaintextBytes[x] ^= S[t] + self.i = i + self.j = j + return bytesToString(plaintextBytes) + + def decrypt(self, ciphertext): + return self.encrypt(ciphertext) diff --git a/gam/gdata/analytics/tlslite/utils/Python_RSAKey.py b/gam/gdata/analytics/tlslite/utils/Python_RSAKey.py new file mode 100755 index 00000000000..2c469b572c7 --- /dev/null +++ b/gam/gdata/analytics/tlslite/utils/Python_RSAKey.py @@ -0,0 +1,209 @@ +"""Pure-Python RSA implementation.""" + +from cryptomath import * +import xmltools +from ASN1Parser import ASN1Parser +from RSAKey import * + +class Python_RSAKey(RSAKey): + def __init__(self, n=0, e=0, d=0, p=0, q=0, dP=0, dQ=0, qInv=0): + if (n and not e) or (e and not n): + raise AssertionError() + self.n = n + self.e = e + self.d = d + self.p = p + self.q = q + self.dP = dP + self.dQ = dQ + self.qInv = qInv + self.blinder = 0 + self.unblinder = 0 + + def hasPrivateKey(self): + return self.d != 0 + + def hash(self): + s = self.writeXMLPublicKey('\t\t') + return hashAndBase64(s.strip()) + + def _rawPrivateKeyOp(self, m): + #Create blinding values, on the first pass: + if not self.blinder: + self.unblinder = getRandomNumber(2, self.n) + self.blinder = powMod(invMod(self.unblinder, self.n), self.e, + self.n) + + #Blind the input + m = (m * self.blinder) % self.n + + #Perform the RSA operation + c = self._rawPrivateKeyOpHelper(m) + + #Unblind the output + c = (c * self.unblinder) % self.n + + #Update blinding values + self.blinder = (self.blinder * self.blinder) % self.n + self.unblinder = (self.unblinder * self.unblinder) % self.n + + #Return the output + return c + + + def _rawPrivateKeyOpHelper(self, m): + #Non-CRT version + #c = powMod(m, self.d, self.n) + + #CRT version (~3x faster) + s1 = powMod(m, self.dP, self.p) + s2 = powMod(m, self.dQ, self.q) + h = ((s1 - s2) * self.qInv) % self.p + c = s2 + self.q * h + return c + + def _rawPublicKeyOp(self, c): + m = powMod(c, self.e, self.n) + return m + + def acceptsPassword(self): return False + + def write(self, indent=''): + if self.d: + s = indent+'\n' + else: + s = indent+'\n' + s += 
indent+'\t<n>%s</n>\n' % numberToBase64(self.n) + s += indent+'\t<e>%s</e>\n' % numberToBase64(self.e) + if self.d: +
s += indent+'\t<d>%s</d>\n' % numberToBase64(self.d) + s += indent+'\t<p>%s</p>\n' % numberToBase64(self.p) + s += indent+'\t<q>%s</q>\n' % numberToBase64(self.q) +
s += indent+'\t<dP>%s</dP>\n' % numberToBase64(self.dP) + s += indent+'\t<dQ>%s</dQ>\n' % numberToBase64(self.dQ) + s += indent+'\t<qInv>%s</qInv>\n' % numberToBase64(self.qInv) + s += indent+'</privateKey>
    ' + else: + s += indent+'' + #Only add \n if part of a larger structure + if indent != '': + s += '\n' + return s + + def writeXMLPublicKey(self, indent=''): + return Python_RSAKey(self.n, self.e).write(indent) + + def generate(bits): + key = Python_RSAKey() + p = getRandomPrime(bits/2, False) + q = getRandomPrime(bits/2, False) + t = lcm(p-1, q-1) + key.n = p * q + key.e = 3L #Needed to be long, for Java + key.d = invMod(key.e, t) + key.p = p + key.q = q + key.dP = key.d % (p-1) + key.dQ = key.d % (q-1) + key.qInv = invMod(q, p) + return key + generate = staticmethod(generate) + + def parsePEM(s, passwordCallback=None): + """Parse a string containing a or , or + PEM-encoded key.""" + + start = s.find("-----BEGIN PRIVATE KEY-----") + if start != -1: + end = s.find("-----END PRIVATE KEY-----") + if end == -1: + raise SyntaxError("Missing PEM Postfix") + s = s[start+len("-----BEGIN PRIVATE KEY -----") : end] + bytes = base64ToBytes(s) + return Python_RSAKey._parsePKCS8(bytes) + else: + start = s.find("-----BEGIN RSA PRIVATE KEY-----") + if start != -1: + end = s.find("-----END RSA PRIVATE KEY-----") + if end == -1: + raise SyntaxError("Missing PEM Postfix") + s = s[start+len("-----BEGIN RSA PRIVATE KEY -----") : end] + bytes = base64ToBytes(s) + return Python_RSAKey._parseSSLeay(bytes) + raise SyntaxError("Missing PEM Prefix") + parsePEM = staticmethod(parsePEM) + + def parseXML(s): + element = xmltools.parseAndStripWhitespace(s) + return Python_RSAKey._parseXML(element) + parseXML = staticmethod(parseXML) + + def _parsePKCS8(bytes): + p = ASN1Parser(bytes) + + version = p.getChild(0).value[0] + if version != 0: + raise SyntaxError("Unrecognized PKCS8 version") + + rsaOID = p.getChild(1).value + if list(rsaOID) != [6, 9, 42, 134, 72, 134, 247, 13, 1, 1, 1, 5, 0]: + raise SyntaxError("Unrecognized AlgorithmIdentifier") + + #Get the privateKey + privateKeyP = p.getChild(2) + + #Adjust for OCTET STRING encapsulation + privateKeyP = ASN1Parser(privateKeyP.value) + + return Python_RSAKey._parseASN1PrivateKey(privateKeyP) + _parsePKCS8 = staticmethod(_parsePKCS8) + + def _parseSSLeay(bytes): + privateKeyP = ASN1Parser(bytes) + return Python_RSAKey._parseASN1PrivateKey(privateKeyP) + _parseSSLeay = staticmethod(_parseSSLeay) + + def _parseASN1PrivateKey(privateKeyP): + version = privateKeyP.getChild(0).value[0] + if version != 0: + raise SyntaxError("Unrecognized RSAPrivateKey version") + n = bytesToNumber(privateKeyP.getChild(1).value) + e = bytesToNumber(privateKeyP.getChild(2).value) + d = bytesToNumber(privateKeyP.getChild(3).value) + p = bytesToNumber(privateKeyP.getChild(4).value) + q = bytesToNumber(privateKeyP.getChild(5).value) + dP = bytesToNumber(privateKeyP.getChild(6).value) + dQ = bytesToNumber(privateKeyP.getChild(7).value) + qInv = bytesToNumber(privateKeyP.getChild(8).value) + return Python_RSAKey(n, e, d, p, q, dP, dQ, qInv) + _parseASN1PrivateKey = staticmethod(_parseASN1PrivateKey) + + def _parseXML(element): + try: + xmltools.checkName(element, "privateKey") + except SyntaxError: + xmltools.checkName(element, "publicKey") + + #Parse attributes + xmltools.getReqAttribute(element, "xmlns", "http://trevp.net/rsa\Z") + xmltools.checkNoMoreAttributes(element) + + #Parse public values ( and ) + n = base64ToNumber(xmltools.getText(xmltools.getChild(element, 0, "n"), xmltools.base64RegEx)) + e = base64ToNumber(xmltools.getText(xmltools.getChild(element, 1, "e"), xmltools.base64RegEx)) + d = 0 + p = 0 + q = 0 + dP = 0 + dQ = 0 + qInv = 0 + #Parse private values, if present + if 
element.childNodes.length>=3: + d = base64ToNumber(xmltools.getText(xmltools.getChild(element, 2, "d"), xmltools.base64RegEx)) + p = base64ToNumber(xmltools.getText(xmltools.getChild(element, 3, "p"), xmltools.base64RegEx)) + q = base64ToNumber(xmltools.getText(xmltools.getChild(element, 4, "q"), xmltools.base64RegEx)) + dP = base64ToNumber(xmltools.getText(xmltools.getChild(element, 5, "dP"), xmltools.base64RegEx)) + dQ = base64ToNumber(xmltools.getText(xmltools.getChild(element, 6, "dQ"), xmltools.base64RegEx)) + qInv = base64ToNumber(xmltools.getText(xmltools.getLastChild(element, 7, "qInv"), xmltools.base64RegEx)) + return Python_RSAKey(n, e, d, p, q, dP, dQ, qInv) + _parseXML = staticmethod(_parseXML) diff --git a/gam/gdata/analytics/tlslite/utils/Python_RSAKey.pyc b/gam/gdata/analytics/tlslite/utils/Python_RSAKey.pyc new file mode 100755 index 0000000000000000000000000000000000000000..55dc48f33865f31e0175bb8f0355347ac9ccdb6c GIT binary patch literal 7978 zcmc&(OLH5?5uOD|iU0w=P0F&9KuXz#s4S`EO8g4NGA%Q`@Y!)_)_^`Iw82dGd(?z?&;Uemj79= z{%j>^bxr5 zEm*-yS(q^4gbG;uqzY3xN89I2cutsU6XL&OzVgg=Mdhl>RTIvLV8%o>6U~?}K`>*& zMHALUFf075@UteYi+E1RIU(mvI4k75kn=*$n{ZCZ1tAxNTrlChkc&bt3WFz@!Z5?!L|9qGilv?Q8G1qfv@ycqH1_$+REQ~CK1&a#&kSV zumD(A0dPVEz)2MVr&Iturvl)#3J@JeKzE4F6(BbQH7Q{sEiJM+gJL!duwl97P>#(s z9i&o=L@*Kz1&4x7-RqNV0<}lClE)dJY0Ax0wS`$=AFvL-qk_3r!Z;8QIA z6al;W(*z{*sf9k*)x_g~{W(fE-?_It80_|lErR|Rv3Zr=D(ZZo&4@z_tz~oEU z2$f2+j1JiXz>^f>`|Qp~?S1T77xT#cG4tL=622GM=#8Id+%lV`@lfoGQz?W!Lw>0n zob>|dV#RoKUY87VsYGZ;zDB_@YHyQ8r{Lt$SxlMgh6R&GrZZ}KAah~WnUonD9QJpa z9ZF^ef@2A` zl5o&(?e}-0)R_u(Afu@a2T$${LKWlWF^aOHYz0l7Gu6`crSU2<>m4mo(tZEvbK`EjJEREsKEI_(3H4?+8qdejCaDE z;I2KB5aAKev%}~;5gU839bUqZv#$j7e9hn{Y6zP{^dJN=X&E1hl}{jP}DbE30qj z-U3bVL9VGxFOoO8Q5U_u`(!~Q|9qtTRM7;zA9Z(=o-3LrPc%&zO;abDDn-+|6HV2k zY5GLdj1f=Cteha&syPIiuN%a}^ldVOjEbx?f1!_~&|CI0;M#B$y&6yZy~N(Qw3lVW zcdlK_(&+K{X>h zN*R=TC35w52QJDU%8s6!7|F`%r}9t`BD43GQ9nbwrX?sT_y7fa3BVvzs+X#8=r5r} zSo{`Bv!$9>cW$dNC7Ds-whnvtl0vfLYT*Ou5`1<%#Qm$NjuQ?46;|;$@Le-PpWDdD zb2ybE%cHRL4Jz;rUz#F`Qu6l#q5)D2h9(&{+(LnZ8&F`vVeUNQ5MQK3P=duW3-C+_ zhaS+N0Ha49g1ihN3dSv)?jYN61?2CJtRfoWIo=Ap$^$VoadQYaqv14tK30uAx0+<9 zr9>m9>fw~hM5;+em%OEbqoe}~L_hK`q7kfsy4ydrA559QC1IxkmFDlnnbi;vql0mN zuMnD@-6krn-;I(eMQZ#T%#AY@{-B&o_^m=AnSU_BU&e3IsppYR{x`^-m&Znq@;r!| zPHn$vlJjQPA&Ec4scn(X`SnuDO!0IjllE=r8SE0G;?7L4(&%kmogwdG+P80Ry_GSVxcb)CM^SLlq#DcRXH3C|c~)~X==FAj?!)}37NnW1 zFv0@ZVTTi(vHiJ4Zr&MUd>1Ar=5VZb&nimw(y{s}=T`0~oUP0LX)ZXoJ||4b=#fQa zgF(+`Om(#aoKm-VH2riB=U6)ox{(tkPy0@AINZ8$!a%9JipbrGeIEkBWPiXk;}a|$ zFSmeE`zBld1^||G_kdu6U;z7+8^Q?`g%@+j;K>`^pihRA7`q(TLCy&s;3}YP@H-%Y zXM!KX{UGkqc!LIrJ0RTtU_9OBjH_LATk6IKJA?60h)Nxslvoab+JPqKq`UXn=I)MyAafjjH z?jXNv;y_Jqlg~Z!HwZZ7|MaF-y7?E(n%~e$|8*(iu0|Y`6;PW-8by0JSRTg^`q7f{ zK20u~3Fy(os!888p69_*up!VRY8D6*&wNIl_Kdy}I)tw+L}8bdx_NMTtV0t3v1RBi z=L$k%pcT$FB4(YWlmk9asWcHPigDmj3H1@=85}ms=mMxa{09~D0B*Zt9!{F{@29Oq zpF)n0kAeykPJ>AE@XAOR+Q=K6Gp7S#5~TOw%$TXi|Ot~-G{9J2Q3+5aqAmB z+Q^W=?CfVzMn)Xj;Lae8Mh#ZSwIW?~cB>$T6WrbCY39?&6`mZ8AMI{Fl|Z@-iY?^k zR)MS+CA-<4vPyLQD6lSSDm=K`34U?;KbZeJnD(!D>}vqVE92Z&_m;~`UcG$TyX;Mk ze+Y`ixZf48)m%o0{6kLy>2^9|ywm9q!u=kqJ|*olTlOhm|78M3PrpHMiQrWNUJdx1 zU-Gg1X4>sX_?8+fjc5&$$bXYGYXl70K4YF-mrn-9!BnO1d{;rWqa(Lem?`h#Y^7Y8 zsGP6N0G4ViwQ^;)#&@Yw#v+GcTW_gIrxOmkxlp?4!7v;2gKW>IS}HY=ULelk i(~naFG#ujOyE37jS#x`_&QwG<-cz_vs(JHXwe(+~zW-wY literal 0 HcmV?d00001 diff --git a/gam/gdata/analytics/tlslite/utils/RC4.py b/gam/gdata/analytics/tlslite/utils/RC4.py new file mode 100755 index 00000000000..550692327ce --- /dev/null +++ b/gam/gdata/analytics/tlslite/utils/RC4.py @@ -0,0 +1,17 @@ 
+"""Abstract class for RC4.""" + +from compat import * #For False + +class RC4: + def __init__(self, keyBytes, implementation): + if len(keyBytes) < 16 or len(keyBytes) > 256: + raise ValueError() + self.isBlockCipher = False + self.name = "rc4" + self.implementation = implementation + + def encrypt(self, plaintext): + raise NotImplementedError() + + def decrypt(self, ciphertext): + raise NotImplementedError() \ No newline at end of file diff --git a/gam/gdata/analytics/tlslite/utils/RSAKey.py b/gam/gdata/analytics/tlslite/utils/RSAKey.py new file mode 100755 index 00000000000..2f5d28680a8 --- /dev/null +++ b/gam/gdata/analytics/tlslite/utils/RSAKey.py @@ -0,0 +1,264 @@ +"""Abstract class for RSA.""" + +from cryptomath import * + + +class RSAKey: + """This is an abstract base class for RSA keys. + + Particular implementations of RSA keys, such as + L{OpenSSL_RSAKey.OpenSSL_RSAKey}, + L{Python_RSAKey.Python_RSAKey}, and + L{PyCrypto_RSAKey.PyCrypto_RSAKey}, + inherit from this. + + To create or parse an RSA key, don't use one of these classes + directly. Instead, use the factory functions in + L{tlslite.utils.keyfactory}. + """ + + def __init__(self, n=0, e=0): + """Create a new RSA key. + + If n and e are passed in, the new key will be initialized. + + @type n: int + @param n: RSA modulus. + + @type e: int + @param e: RSA public exponent. + """ + raise NotImplementedError() + + def __len__(self): + """Return the length of this key in bits. + + @rtype: int + """ + return numBits(self.n) + + def hasPrivateKey(self): + """Return whether or not this key has a private component. + + @rtype: bool + """ + raise NotImplementedError() + + def hash(self): + """Return the cryptoID value corresponding to this + key. + + @rtype: str + """ + raise NotImplementedError() + + def getSigningAlgorithm(self): + """Return the cryptoID sigAlgo value corresponding to this key. + + @rtype: str + """ + return "pkcs1-sha1" + + def hashAndSign(self, bytes): + """Hash and sign the passed-in bytes. + + This requires the key to have a private component. It performs + a PKCS1-SHA1 signature on the passed-in data. + + @type bytes: str or L{array.array} of unsigned bytes + @param bytes: The value which will be hashed and signed. + + @rtype: L{array.array} of unsigned bytes. + @return: A PKCS1-SHA1 signature on the passed-in data. + """ + if not isinstance(bytes, type("")): + bytes = bytesToString(bytes) + hashBytes = stringToBytes(sha1(bytes).digest()) + prefixedHashBytes = self._addPKCS1SHA1Prefix(hashBytes) + sigBytes = self.sign(prefixedHashBytes) + return sigBytes + + def hashAndVerify(self, sigBytes, bytes): + """Hash and verify the passed-in bytes with the signature. + + This verifies a PKCS1-SHA1 signature on the passed-in data. + + @type sigBytes: L{array.array} of unsigned bytes + @param sigBytes: A PKCS1-SHA1 signature. + + @type bytes: str or L{array.array} of unsigned bytes + @param bytes: The value which will be hashed and verified. + + @rtype: bool + @return: Whether the signature matches the passed-in data. + """ + if not isinstance(bytes, type("")): + bytes = bytesToString(bytes) + hashBytes = stringToBytes(sha1(bytes).digest()) + prefixedHashBytes = self._addPKCS1SHA1Prefix(hashBytes) + return self.verify(sigBytes, prefixedHashBytes) + + def sign(self, bytes): + """Sign the passed-in bytes. + + This requires the key to have a private component. It performs + a PKCS1 signature on the passed-in data. + + @type bytes: L{array.array} of unsigned bytes + @param bytes: The value which will be signed. 
+ + @rtype: L{array.array} of unsigned bytes. + @return: A PKCS1 signature on the passed-in data. + """ + if not self.hasPrivateKey(): + raise AssertionError() + paddedBytes = self._addPKCS1Padding(bytes, 1) + m = bytesToNumber(paddedBytes) + if m >= self.n: + raise ValueError() + c = self._rawPrivateKeyOp(m) + sigBytes = numberToBytes(c) + return sigBytes + + def verify(self, sigBytes, bytes): + """Verify the passed-in bytes with the signature. + + This verifies a PKCS1 signature on the passed-in data. + + @type sigBytes: L{array.array} of unsigned bytes + @param sigBytes: A PKCS1 signature. + + @type bytes: L{array.array} of unsigned bytes + @param bytes: The value which will be verified. + + @rtype: bool + @return: Whether the signature matches the passed-in data. + """ + paddedBytes = self._addPKCS1Padding(bytes, 1) + c = bytesToNumber(sigBytes) + if c >= self.n: + return False + m = self._rawPublicKeyOp(c) + checkBytes = numberToBytes(m) + return checkBytes == paddedBytes + + def encrypt(self, bytes): + """Encrypt the passed-in bytes. + + This performs PKCS1 encryption of the passed-in data. + + @type bytes: L{array.array} of unsigned bytes + @param bytes: The value which will be encrypted. + + @rtype: L{array.array} of unsigned bytes. + @return: A PKCS1 encryption of the passed-in data. + """ + paddedBytes = self._addPKCS1Padding(bytes, 2) + m = bytesToNumber(paddedBytes) + if m >= self.n: + raise ValueError() + c = self._rawPublicKeyOp(m) + encBytes = numberToBytes(c) + return encBytes + + def decrypt(self, encBytes): + """Decrypt the passed-in bytes. + + This requires the key to have a private component. It performs + PKCS1 decryption of the passed-in data. + + @type encBytes: L{array.array} of unsigned bytes + @param encBytes: The value which will be decrypted. + + @rtype: L{array.array} of unsigned bytes or None. + @return: A PKCS1 decryption of the passed-in data or None if + the data is not properly formatted. + """ + if not self.hasPrivateKey(): + raise AssertionError() + c = bytesToNumber(encBytes) + if c >= self.n: + return None + m = self._rawPrivateKeyOp(c) + decBytes = numberToBytes(m) + if (len(decBytes) != numBytes(self.n)-1): #Check first byte + return None + if decBytes[0] != 2: #Check second byte + return None + for x in range(len(decBytes)-1): #Scan through for zero separator + if decBytes[x]== 0: + break + else: + return None + return decBytes[x+1:] #Return everything after the separator + + def _rawPrivateKeyOp(self, m): + raise NotImplementedError() + + def _rawPublicKeyOp(self, c): + raise NotImplementedError() + + def acceptsPassword(self): + """Return True if the write() method accepts a password for use + in encrypting the private key. + + @rtype: bool + """ + raise NotImplementedError() + + def write(self, password=None): + """Return a string containing the key. + + @rtype: str + @return: A string describing the key, in whichever format (PEM + or XML) is native to the implementation. + """ + raise NotImplementedError() + + def writeXMLPublicKey(self, indent=''): + """Return a string containing the key. + + @rtype: str + @return: A string describing the public key, in XML format. + """ + return Python_RSAKey(self.n, self.e).write(indent) + + def generate(bits): + """Generate a new key with the specified bit length. 
+ + @rtype: L{tlslite.utils.RSAKey.RSAKey} + """ + raise NotImplementedError() + generate = staticmethod(generate) + + + # ************************************************************************** + # Helper Functions for RSA Keys + # ************************************************************************** + + def _addPKCS1SHA1Prefix(self, bytes): + prefixBytes = createByteArraySequence(\ + [48,33,48,9,6,5,43,14,3,2,26,5,0,4,20]) + prefixedBytes = prefixBytes + bytes + return prefixedBytes + + def _addPKCS1Padding(self, bytes, blockType): + padLength = (numBytes(self.n) - (len(bytes)+3)) + if blockType == 1: #Signature padding + pad = [0xFF] * padLength + elif blockType == 2: #Encryption padding + pad = createByteArraySequence([]) + while len(pad) < padLength: + padBytes = getRandomBytes(padLength * 2) + pad = [b for b in padBytes if b != 0] + pad = pad[:padLength] + else: + raise AssertionError() + + #NOTE: To be proper, we should add [0,blockType]. However, + #the zero is lost when the returned padding is converted + #to a number, so we don't even bother with it. Also, + #adding it would cause a misalignment in verify() + padding = createByteArraySequence([blockType] + pad + [0]) + paddedBytes = padding + bytes + return paddedBytes diff --git a/gam/gdata/analytics/tlslite/utils/RSAKey.pyc b/gam/gdata/analytics/tlslite/utils/RSAKey.pyc new file mode 100755 index 0000000000000000000000000000000000000000..9e81a9b5cc46bceec59d156ec80c63fc03020acd GIT binary patch literal 9727 zcmcgyOK%*<5$@gPTcoH5^{^91xdjLiAUOmukW+wM@&|GVa(2G2dR}YMrYUbka+*Co z-91&+Up=~dYX6y=`O!&lwWZRZ8vb9zmAr_;SL!y(it?+fTT$zNP5BebA5$MG^-!sw z-mWTtTs7*G9B>N7@+t3xTbf8UVI77 z$)_kZUW~V*#2Zi%3O=f;+aRFEx8EL1$+$gE64(O5CZsZ{{7I=yDSt{T)5@Qg%8c@7 zq;g34homyA{8_2YDSu8Xhn0U=D)Y*pm&y_4pH%)4^-)FLKBoMm%0DLc6 zq4a`!hz<+NUyw!&IU$u(V6{%^B)zyER@aKh&mq}~GOZ2eFoGrbREX~X~_*wH} z6SM|hFLr`nzpHyXG@c2fFma+y%2zrsI?15balAyvT>s!kUx$sx^`;f`((vu>V%lqM z$8@4F>ojz?y9gTnyx&T^(>GDkbLc*uF9(hkZIa?cS$+~SQOPFvmJE1&W1*NU;?ij{9OB`IWC%= zKKfq3VcaTX}p35M;=ODClP=Bz|4O))A--nb#V9L~Z55opuzpyZWrx?Vom|Fq zSDQf`iiEp5Y@3dS5Oj>f4nk)mFhjD6DXd{p$qF0Vv2f74g#Io$mUp-mz$kd-?9C=f zY&M@o)v`LCv$`MrZemQz>UKv1PcgMAjErcS@Y?ZUSDbzv+@r3wqF%Pt1&&hQH=?MU zH(CI=l=WyirXqo+HA~Pny+bh`lyAS_n@ZFYJ9PE3^BPw7O)u%Z?%eab1F{>(IswOi z5Vjo?iCA(T(YdT+sTkm`@*e*;DG*7JLg#xq^AnFQIy=G5rKSd6H(To=o|B-x+-*l+ z#xa4g52witrtfdHl5=O1j(5&toA4VM)M-oF+BAZ8h{Yhua0{K@D`?J^pJ*G$MaD(L z-$Mmf_d$h00z?5Ne^^z?^U74&h}+$Y3el%Vb7WiIA7>oGN9Y67Frgk)<+diblj=cD zZqa`%lkIQ7DRCoq8UnSJ!{#C&zy&2@WgubVtK;3%d#W4!fN}ad zM)=dS?u%yDbJnh{G|ruEyt#bN;d?!pExJ6Jvv@!4WqAR`vI|ic9N?8ofBge5j=h~F zDR*h^1|esJuaS;9rc<|(j$ViQSY6rf1c((fKSXe1DE*e5rTqF-Q}%mh;Z82bVvjF4 z%LihZBH|Aq05e3tUvYh1-| zD;R*GfhD~_lt_($&ly(_xcem)$vx-=b@g-N7R;28G-78`XxG8~$bQ!RfDVK@KD>wd z+FoOS&N2)Kj9e^+X{!w}yo`MpHqB8eTNpzy=Z1d`9w74>vOMe2>Y%rwV+r}i>D**M zSsEI9+eHj}qc83T`vit+ z%(D)S5T)44UBhp|rPbBFl;ANu!u}X2a4Jz!VbTu@@M2`@42pM`w0Fo49P|~b`}m?D z>h3gf;fSdA>KW#=a`X{#D}q zVcAOn?^zH;ygzE}F9GRSLfMJ#1@#hQ#NKCH0UHlH+Kz;EN^@PM;OWbP{}BT&C1ccf zdw=e7!%fF&E$1){-7|RLzQ95tH*8#8K^uW4kdqV=UOFnQNfIb!;@5bIh1{Vb(@Mfg zR1Mb(`}_@sg&ctNxwxa)z9Rat+tiu>?-i!7#lQk&88i`O72wMv4e-d>1(Yz{r?+Y< z9@x>ygICp4K4wJQFQx7Nd4*j|9;R>r4EE)BpO%OP;2y^SV85#3kJZ}Uj}`uI2`Tan z{ZE}~NQ9UC5kSAJzcSEAuuGP`nd94cesvYe0C4baKr!-*EOScCV<`%E>#NAOe)0Br zUtY5*oM1C|cZ~FDDtM5<3}8QwU}U<;fg|PM8ADogHNzSz$#e(A?P{IdYL}ss`%M;w z^`{FUv97~*svaz(Qpi|}k})$X(F}K;u@|;=DeR$@FS&pz+(K@)KMA-$0yiM4nXbks zm@{!us5wSHjC-sgJ*e*%Wa{7FJ`JqUJ8xZoNz4U~4w1K%{5X~_JMEDJ#lmRpsSRQUnCRhRjsSoXZVGBX 
z&E-2ODT!RjH9Qa~(o{id=>`$V$yWYhl`he4Uee>BUpx_+rzdGxJrO-5n+m;)CL9?~1fdU8I|{)vaU+5SZFvO$4>Vf)v&Z?vuu;rGj7*Ap zLx(!%fs#G-1ly&(Twk|>&47t#ocr42w^I0fG0y`GpTwpIfcD>R4%0H~Xow^;%hS72 z$Ycr=wo_LAm5X9rpvKi=l0cyL1}cCW;vhsxEb-9+!Yq7ffKLqYaRELnzy}5RlmIRG zjsRZ}%&GRgY>I7!hX869z{Kvk&i%@2@O_lQvn=R#16gZ=Aowl|_6Z1d@y`MGI@Vp2 zs$ctppp~(HzrOf zSyic3&R1q^TAEyvcoFA6QOPwIfoL8NGtfLuK|#tk9DS9tZbQFoYGHRwg%9Kza&ib& zf6ri~Z4Z0K)O}Cpk*yheZy_N0og53vS}oXQ1$raaz5MR!G+TiMW0bth%*A_Eb$3_c zCD`6!0tdCb>*`jBz&T6(;e8;Dn8|PK>Br^5Um<5tdJ3k-46$b_I#r;4>DC zA!HeCkhF+ftNmPa_f-^sj zxJxZ_DciIm!4h*&@D#FT8pV=xpY~r|2|=n5W6dJUV)S(a|G!f?TAiMF8X#D$Gb=1a zHJhQ=)6J$N1)5Fya)GzZOf{Q+)M_?e%FiW;Y*Z@oDKlv9Q!Kv0f?DA6rS3~Cs8BXa zeSx)eEM8_otLmO-@naN*>1~`I;qw_Aln7>Ba#2V_YL9(Z<8sBxK*IM=%uZD2=cXs| zJB91`^uqK>{2re^GhM^))a>-^)bzyc OUnji@m2;vxQ~Mtor#{R8 literal 0 HcmV?d00001 diff --git a/gam/gdata/analytics/tlslite/utils/TripleDES.py b/gam/gdata/analytics/tlslite/utils/TripleDES.py new file mode 100755 index 00000000000..2db45888bde --- /dev/null +++ b/gam/gdata/analytics/tlslite/utils/TripleDES.py @@ -0,0 +1,26 @@ +"""Abstract class for 3DES.""" + +from compat import * #For True + +class TripleDES: + def __init__(self, key, mode, IV, implementation): + if len(key) != 24: + raise ValueError() + if mode != 2: + raise ValueError() + if len(IV) != 8: + raise ValueError() + self.isBlockCipher = True + self.block_size = 8 + self.implementation = implementation + self.name = "3des" + + #CBC-Mode encryption, returns ciphertext + #WARNING: *MAY* modify the input as well + def encrypt(self, plaintext): + assert(len(plaintext) % 8 == 0) + + #CBC-Mode decryption, returns plaintext + #WARNING: *MAY* modify the input as well + def decrypt(self, ciphertext): + assert(len(ciphertext) % 8 == 0) diff --git a/gam/gdata/analytics/tlslite/utils/__init__.py b/gam/gdata/analytics/tlslite/utils/__init__.py new file mode 100755 index 00000000000..e96b4bef8a5 --- /dev/null +++ b/gam/gdata/analytics/tlslite/utils/__init__.py @@ -0,0 +1,31 @@ +"""Toolkit for crypto and other stuff.""" + +__all__ = ["AES", + "ASN1Parser", + "cipherfactory", + "codec", + "Cryptlib_AES", + "Cryptlib_RC4", + "Cryptlib_TripleDES", + "cryptomath: cryptomath module", + "dateFuncs", + "hmac", + "JCE_RSAKey", + "compat", + "keyfactory", + "OpenSSL_AES", + "OpenSSL_RC4", + "OpenSSL_RSAKey", + "OpenSSL_TripleDES", + "PyCrypto_AES", + "PyCrypto_RC4", + "PyCrypto_RSAKey", + "PyCrypto_TripleDES", + "Python_AES", + "Python_RC4", + "Python_RSAKey", + "RC4", + "rijndael", + "RSAKey", + "TripleDES", + "xmltools"] diff --git a/gam/gdata/analytics/tlslite/utils/__init__.pyc b/gam/gdata/analytics/tlslite/utils/__init__.pyc new file mode 100755 index 0000000000000000000000000000000000000000..1d088d9e7951e5ef80d5c11905504065a5df5a13 GIT binary patch literal 825 zcmbVJO>fgc5S=7#+NMoQ3zW}*B`&$N5)yET5TXj<04PLtPnD(B;@LQ+Yp>;cXSBf!UiPXM0+J_CFXIDljTx6J$k!r@DK z@D-aI=j>fd!Q7QWTvi3SK<5}ZT9`8Mw4Bc`sl$0!laP9{http}_eI>7@kCvRS6@vI6jjfAr;?kuJyJY>!Eb*}Ybl7H(_Sh(eE>@~JYipGnG7KBz-?2L$MTv8XH5a-l y{6(g7otQF8cnUfq>upLVT2g9#q*R)xq|{|m)zaRthW4SwG=9XU+wQdfZ~XyzpUEKr literal 0 HcmV?d00001 diff --git a/gam/gdata/analytics/tlslite/utils/cipherfactory.py b/gam/gdata/analytics/tlslite/utils/cipherfactory.py new file mode 100755 index 00000000000..ccbb6b5ff9b --- /dev/null +++ b/gam/gdata/analytics/tlslite/utils/cipherfactory.py @@ -0,0 +1,111 @@ +"""Factory functions for symmetric cryptography.""" + +import os + +import Python_AES +import Python_RC4 + +import cryptomath + +tripleDESPresent = False + +if cryptomath.m2cryptoLoaded: + import OpenSSL_AES 
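+    #Each optional backend that imports successfully contributes its cipher
+    #wrappers here; note tripleDESPresent only becomes True when one of the
+    #non-pure-Python backends (M2Crypto, cryptlib, or pycrypto) loaded, since
+    #this package ships no pure-Python 3DES implementation.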
+ import OpenSSL_RC4 + import OpenSSL_TripleDES + tripleDESPresent = True + +if cryptomath.cryptlibpyLoaded: + import Cryptlib_AES + import Cryptlib_RC4 + import Cryptlib_TripleDES + tripleDESPresent = True + +if cryptomath.pycryptoLoaded: + import PyCrypto_AES + import PyCrypto_RC4 + import PyCrypto_TripleDES + tripleDESPresent = True + +# ************************************************************************** +# Factory Functions for AES +# ************************************************************************** + +def createAES(key, IV, implList=None): + """Create a new AES object. + + @type key: str + @param key: A 16, 24, or 32 byte string. + + @type IV: str + @param IV: A 16 byte string + + @rtype: L{tlslite.utils.AES} + @return: An AES object. + """ + if implList == None: + implList = ["cryptlib", "openssl", "pycrypto", "python"] + + for impl in implList: + if impl == "cryptlib" and cryptomath.cryptlibpyLoaded: + return Cryptlib_AES.new(key, 2, IV) + elif impl == "openssl" and cryptomath.m2cryptoLoaded: + return OpenSSL_AES.new(key, 2, IV) + elif impl == "pycrypto" and cryptomath.pycryptoLoaded: + return PyCrypto_AES.new(key, 2, IV) + elif impl == "python": + return Python_AES.new(key, 2, IV) + raise NotImplementedError() + +def createRC4(key, IV, implList=None): + """Create a new RC4 object. + + @type key: str + @param key: A 16 to 32 byte string. + + @type IV: object + @param IV: Ignored, whatever it is. + + @rtype: L{tlslite.utils.RC4} + @return: An RC4 object. + """ + if implList == None: + implList = ["cryptlib", "openssl", "pycrypto", "python"] + + if len(IV) != 0: + raise AssertionError() + for impl in implList: + if impl == "cryptlib" and cryptomath.cryptlibpyLoaded: + return Cryptlib_RC4.new(key) + elif impl == "openssl" and cryptomath.m2cryptoLoaded: + return OpenSSL_RC4.new(key) + elif impl == "pycrypto" and cryptomath.pycryptoLoaded: + return PyCrypto_RC4.new(key) + elif impl == "python": + return Python_RC4.new(key) + raise NotImplementedError() + +#Create a new TripleDES instance +def createTripleDES(key, IV, implList=None): + """Create a new 3DES object. + + @type key: str + @param key: A 24 byte string. + + @type IV: str + @param IV: An 8 byte string + + @rtype: L{tlslite.utils.TripleDES} + @return: A 3DES object. 
+ """ + if implList == None: + implList = ["cryptlib", "openssl", "pycrypto"] + + for impl in implList: + if impl == "cryptlib" and cryptomath.cryptlibpyLoaded: + return Cryptlib_TripleDES.new(key, 2, IV) + elif impl == "openssl" and cryptomath.m2cryptoLoaded: + return OpenSSL_TripleDES.new(key, 2, IV) + elif impl == "pycrypto" and cryptomath.pycryptoLoaded: + return PyCrypto_TripleDES.new(key, 2, IV) + raise NotImplementedError() \ No newline at end of file diff --git a/gam/gdata/analytics/tlslite/utils/codec.py b/gam/gdata/analytics/tlslite/utils/codec.py new file mode 100755 index 00000000000..13022a0b932 --- /dev/null +++ b/gam/gdata/analytics/tlslite/utils/codec.py @@ -0,0 +1,94 @@ +"""Classes for reading/writing binary data (such as TLS records).""" + +from compat import * + +class Writer: + def __init__(self, length=0): + #If length is zero, then this is just a "trial run" to determine length + self.index = 0 + self.bytes = createByteArrayZeros(length) + + def add(self, x, length): + if self.bytes: + newIndex = self.index+length-1 + while newIndex >= self.index: + self.bytes[newIndex] = x & 0xFF + x >>= 8 + newIndex -= 1 + self.index += length + + def addFixSeq(self, seq, length): + if self.bytes: + for e in seq: + self.add(e, length) + else: + self.index += len(seq)*length + + def addVarSeq(self, seq, length, lengthLength): + if self.bytes: + self.add(len(seq)*length, lengthLength) + for e in seq: + self.add(e, length) + else: + self.index += lengthLength + (len(seq)*length) + + +class Parser: + def __init__(self, bytes): + self.bytes = bytes + self.index = 0 + + def get(self, length): + if self.index + length > len(self.bytes): + raise SyntaxError() + x = 0 + for count in range(length): + x <<= 8 + x |= self.bytes[self.index] + self.index += 1 + return x + + def getFixBytes(self, lengthBytes): + bytes = self.bytes[self.index : self.index+lengthBytes] + self.index += lengthBytes + return bytes + + def getVarBytes(self, lengthLength): + lengthBytes = self.get(lengthLength) + return self.getFixBytes(lengthBytes) + + def getFixList(self, length, lengthList): + l = [0] * lengthList + for x in range(lengthList): + l[x] = self.get(length) + return l + + def getVarList(self, length, lengthLength): + lengthList = self.get(lengthLength) + if lengthList % length != 0: + raise SyntaxError() + lengthList = int(lengthList/length) + l = [0] * lengthList + for x in range(lengthList): + l[x] = self.get(length) + return l + + def startLengthCheck(self, lengthLength): + self.lengthCheck = self.get(lengthLength) + self.indexCheck = self.index + + def setLengthCheck(self, length): + self.lengthCheck = length + self.indexCheck = self.index + + def stopLengthCheck(self): + if (self.index - self.indexCheck) != self.lengthCheck: + raise SyntaxError() + + def atLengthCheck(self): + if (self.index - self.indexCheck) < self.lengthCheck: + return False + elif (self.index - self.indexCheck) == self.lengthCheck: + return True + else: + raise SyntaxError() \ No newline at end of file diff --git a/gam/gdata/analytics/tlslite/utils/codec.pyc b/gam/gdata/analytics/tlslite/utils/codec.pyc new file mode 100755 index 0000000000000000000000000000000000000000..d1c3688ba6a848233caa97cf3daf80dcff5f618f GIT binary patch literal 4285 zcmb_f?{5=F5S=^!NE}EBzY;=;Fti9m75IRvq7WfSD5!0eqH|hk6m&X#hjYZSliN*k zq~uG(f80NSzBhYzY)HQZgKoUHw>LNM&D)tx;or%LAE%?$hUEVW_#&~0 zzO+19iwhEuOI(zXB1a;Bt@{#}B(6wY?oVOKnAB?%^zj&GxfL0cnCiPuR?U(qPTS4< 
zAF|ZqxB511N7;Tgj%-w|nZ3qlH8Rz;l{ywSI$3P)+)F>SPxet>dWW=jcfAoYA`iyNIaTRn>%mJ*IB&gR)cqhdr4Z7)F%NUz$Fu;Bw66)s< zJUMc=a4Nva)vNk)jkH?Wmb#8%=VyVOypJDmM0P+AYZ1fbT`J*E#?|UuFRN%3(pekK~|`Pj8hZ zTN7{qKA*T(@NAJ21z9=7dtlg;hj?vwPF}UuZ}GkQ`^gVAC-oXXzM3+X=_eY7s#FAg z5|B|{6Oc^-pa-Qa; zyFebEy_pt(<*b4SZ%4j(yHfx^%jx2KUn~|l+4yn^G~~)50jmzE;^uBq@K`SbAqL;l3;T ze?}R2`W8M7Q$?PxDpR$-IK!|VZ6{%9xgZR;JMmr%x4{^01I;gW7EHk$$EBmw?(={= zkqpgU**jaRR4QXyN?t~p8K{t4xWt8TVUk{j{D=Yc==I8@tCJ{?u1=z`YoT8+v&eJt zSYGGX(F4YG29mxX~!%HcpU&|A%T5f6@; zJ2h#{$1Pm0C?b&>D4p|CvHy*M6Fb74K^5W+H0;S%QL-90^^wRwo}kD9$DuFXr-EAU zcKWk7540|5=lKl}A%Ew<`JDZob1V;dyC9Ig$nGO~g=M|^i0`}7sFhNCluAF)O~5!U zNA>--jk-T)StrxRl11%ia$GrdX9VM1O#&O8y|(>&0<}Ul6MF|P4b?2(HSerf_Aa7y zUV>FVS+x3m&f~^mr*(9~g4-KCD)d&{gwo(Dz-LmRPuCX+-tBg_SEF(n0W!J;G89F1 z%l2!yzXhB9+%YbOr8>EWL5@pnyaVG}9>mfsWC~*jYlB%BJu0~Jb&hjaIETxb)5==) zfY?!}H1>yZm@6ESlQ~MDpOHtnfpVLm9es(N{UB{TcdtUG zh2fKrqPq-#0UVDPA$V!mc<7T34wIMiFsF0w2&l3l<`Ia4mC4vBv+g{g zUs>TC=@*>zzU)ujo&EkqREll*$i|%%RB6aea06x_b|!5EvGbr{lEKJda>hlDhQ;d0 ziy>%YSc~(-XxLX=PAKVH!`@0whePv7McPB&>1DL76=P%8UPj zftwkz(fwG#X6eYa*xa z#hdkty!@J;V9#SJxIm+a#eS;@u5xaU<~q$yn(t`7r@0MtoFg5bwtWN_;fqcUB~bP% z#mYpb1Y5>$k^ff*zC_L}Lk+{Y)4-9J6gN8CJCW5hsd^h&kEe3l$n+1tMR%HhOwnsc LF8dSy#q0k8G*lq4 literal 0 HcmV?d00001 diff --git a/gam/gdata/analytics/tlslite/utils/compat.py b/gam/gdata/analytics/tlslite/utils/compat.py new file mode 100755 index 00000000000..7d2d9250d84 --- /dev/null +++ b/gam/gdata/analytics/tlslite/utils/compat.py @@ -0,0 +1,140 @@ +"""Miscellaneous functions to mask Python version differences.""" + +import sys +import os + +if sys.version_info < (2,2): + raise AssertionError("Python 2.2 or later required") + +if sys.version_info < (2,3): + + def enumerate(collection): + return zip(range(len(collection)), collection) + + class Set: + def __init__(self, seq=None): + self.values = {} + if seq: + for e in seq: + self.values[e] = None + + def add(self, e): + self.values[e] = None + + def discard(self, e): + if e in self.values.keys(): + del(self.values[e]) + + def union(self, s): + ret = Set() + for e in self.values.keys(): + ret.values[e] = None + for e in s.values.keys(): + ret.values[e] = None + return ret + + def issubset(self, other): + for e in self.values.keys(): + if e not in other.values.keys(): + return False + return True + + def __nonzero__( self): + return len(self.values.keys()) + + def __contains__(self, e): + return e in self.values.keys() + + def __iter__(self): + return iter(set.values.keys()) + + +if os.name != "java": + + import array + def createByteArraySequence(seq): + return array.array('B', seq) + def createByteArrayZeros(howMany): + return array.array('B', [0] * howMany) + def concatArrays(a1, a2): + return a1+a2 + + def bytesToString(bytes): + return bytes.tostring() + def stringToBytes(s): + bytes = createByteArrayZeros(0) + bytes.fromstring(s) + return bytes + + import math + def numBits(n): + if n==0: + return 0 + s = "%x" % n + return ((len(s)-1)*4) + \ + {'0':0, '1':1, '2':2, '3':2, + '4':3, '5':3, '6':3, '7':3, + '8':4, '9':4, 'a':4, 'b':4, + 'c':4, 'd':4, 'e':4, 'f':4, + }[s[0]] + return int(math.floor(math.log(n, 2))+1) + + BaseException = Exception + import sys + import traceback + def formatExceptionTrace(e): + newStr = "".join(traceback.format_exception(sys.exc_type, sys.exc_value, sys.exc_traceback)) + return newStr + +else: + #Jython 2.1 is missing lots of python 2.3 stuff, + #which we have to emulate here: + #NOTE: JYTHON SUPPORT NO LONGER WORKS, DUE TO USE OF GENERATORS. 
+ #THIS CODE IS LEFT IN SO THAT ONE JYTHON UPDATES TO 2.2, IT HAS A + #CHANCE OF WORKING AGAIN. + + import java + import jarray + + def createByteArraySequence(seq): + if isinstance(seq, type("")): #If it's a string, convert + seq = [ord(c) for c in seq] + return jarray.array(seq, 'h') #use short instead of bytes, cause bytes are signed + def createByteArrayZeros(howMany): + return jarray.zeros(howMany, 'h') #use short instead of bytes, cause bytes are signed + def concatArrays(a1, a2): + l = list(a1)+list(a2) + return createByteArraySequence(l) + + #WAY TOO SLOW - MUST BE REPLACED------------ + def bytesToString(bytes): + return "".join([chr(b) for b in bytes]) + + def stringToBytes(s): + bytes = createByteArrayZeros(len(s)) + for count, c in enumerate(s): + bytes[count] = ord(c) + return bytes + #WAY TOO SLOW - MUST BE REPLACED------------ + + def numBits(n): + if n==0: + return 0 + n= 1L * n; #convert to long, if it isn't already + return n.__tojava__(java.math.BigInteger).bitLength() + + #Adjust the string to an array of bytes + def stringToJavaByteArray(s): + bytes = jarray.zeros(len(s), 'b') + for count, c in enumerate(s): + x = ord(c) + if x >= 128: x -= 256 + bytes[count] = x + return bytes + + BaseException = java.lang.Exception + import sys + import traceback + def formatExceptionTrace(e): + newStr = "".join(traceback.format_exception(sys.exc_type, sys.exc_value, sys.exc_traceback)) + return newStr \ No newline at end of file diff --git a/gam/gdata/analytics/tlslite/utils/compat.pyc b/gam/gdata/analytics/tlslite/utils/compat.pyc new file mode 100755 index 0000000000000000000000000000000000000000..055a640be90e832a0fedf00ca73833470649c408 GIT binary patch literal 6584 zcmcIoYjYIG6}_{oH+rGR3OU4V2oDu0D2#2eW8w$_c5sDFYL_kz`hxQA-w;N|B@7elJ zFKfFr&urc0^@G^%*MEQFcJr+M%oG;w^(5WdF@?$6#$HSRgTF>v;Z&TjKlH}68}+=X zcVlOYdSQ-wXSCMbwuz>$$C?tnAHu$C^`6fMd__14{&KsM=bf&(8Xq3os|RrwcTBO}Nn#goyRPk~ z&TRKw+O^y5{NONl*A7qIG+s1W@4yt$;}jk&^^`!@Z5r3+6aUz}h08WU3?K*u15_d} zfds%eP_qNO6=eX>sxlKqGa(CDjw^$=Jct2tu(?4=+FXS;7+8*qr9x9hpn(+E z<@kLv_ns`v^9lt(-yUd1ZXea;F+v7yrBOMbn*ezhcYDS<&fCZ{c*=}!q(z5IOOrSks;3<3~h12e}LRBWW(3R@IOK(Jy6x_w&(lGf!hhhfYQ z4%?K+0A7hUE*CO%Rr7F;9A(_l4mwQ{W)*LRaU*jf*X(6TTxY@9SrB5a7M#{=5LH#Q zt2u6gIllgyJMTR?N83OfDvglxPM(xvkx8q0v?z}u_iL&WkgBETt1sfNHR|q1MbAf= z0#!S6w`+=#=}S|l*7lxa!3*H~;Z=k0r-DKH9iVp^L*RX)xAZ2`Z>b8c5^7it^wDgq z!jB@rm{y}G%d>Axkw?)L3jDtUjiP=(&`MF?kWwFMi;)?Y(21gUp1C;9Y!qF6Mb@&a zaaBc8->kqi0ts7~7h2d#S&4Z?1IkX(&#-P77^5{YjevZMm7?hSQ0B%4ZSQ<<79W@> z(i$2?2YJ%#;?ZYTjtZ%JI8VZK95Nqh#$3 z)HLr^HQHYMEFJ)v2vv)7IDm}sM7;%^b^AbnG&Hen`CS6DL0lB^iMKHe>rnD{7TVDa z?ZRN6uyx|h9WKzswuhS}=93cAB~0jl6`Jo2gvR!uqoS{9?2c&_9e zljm;!^}{$j8HENbnC=Y?S_nz|DcVc%Y0dId@u$!LQzIzXN@%0S7h)tvnEC^MyDDNlgn|DI9wh@h-QnNVCom6i?wdI|?aT*>ti;c~jT; z91X9E`I-fPN{JM*$hyzreIyJI-#`L9G<9px@Ce|W;d#D6?RJX%ps$5?a4!{J1{swW ze)2~-A#FD?v1JjEV3zwdnv%;hpw!_10t161`1UI-j9{8TG@5GA3pzp@=$KUH40gRM zsgA(5_a<7f>AlHA+mydef{C#|=B_1%F%gx6}T%zYT zB_+`ag1ulkmZb?TQ?>$*!dH`O182z;(M`z_Zyo1-&w2VCXk-Doe z(wvc6Y+YQ8vZMx9LveIoBx;5|t&P6QXi}l|Lzb`+m?o@ao;I$29$5{}dqYr2N^zHc zWgXWnNJCH~%F+nG0`-1tfYS)zw$b|1_%rt6^5mUsxh7Z`eUP)cHl-U#SjU5R6`Vvy zy|soD0?ds=f~F7Z)bJrmA9qvhRDX3&_;WOc7dQ{cbw?rgMNGsp(Z^VAh_OM6zJzWc zldpqH%MPR_sOG%*-r$>f!5jPzIE}ORZsA)lqb0n_0l0pw%{d2oGsvqvTOh&Wt0=$h zRPmlKTKJZVOfAFE$M!zw&Kdi9ZaPAK`O}=Rm3AIv&ft5P=FF$5`>n}3Zr4W@cQ2#x z@UJn?F|NMizJ#eR752hiIO)USPSfA)&EQNn#O0i&PU_Q$38 zqw-bxCtqGaJmxTrmHZFn_1g8*XA^Exq9#aN!mSu^8SX zxlh6^K=>ty&j2i*;cxhe>&Iq*2bkiYxCSLs*U;USl2;xHa=#?MZ0SKLijuqyWK&iE z2L;VK9vAxGtnsyRCwH{n9czJeY#QztMP7Kv(x+Muc?#CrrPe9xmw83)u;)H 
zWsQ|Vh1dPXR&33n#T$@X`EXN2{fRFUO!(N|8Cp%bcP?1E?YZp_?VmHCEPXdo0YEAX o!9r!BGF`1*TEO#k1-~k;aqzP^Tl05{<8Z=8EBYq!o6`2+zo^La>i_@% literal 0 HcmV?d00001 diff --git a/gam/gdata/analytics/tlslite/utils/cryptomath.py b/gam/gdata/analytics/tlslite/utils/cryptomath.py new file mode 100755 index 00000000000..92fb77437bd --- /dev/null +++ b/gam/gdata/analytics/tlslite/utils/cryptomath.py @@ -0,0 +1,404 @@ +"""cryptomath module + +This module has basic math/crypto code.""" + +import os +import sys +import math +import base64 +import binascii +if sys.version_info[:2] <= (2, 4): + from sha import sha as sha1 +else: + from hashlib import sha1 + +from compat import * + + +# ************************************************************************** +# Load Optional Modules +# ************************************************************************** + +# Try to load M2Crypto/OpenSSL +try: + from M2Crypto import m2 + m2cryptoLoaded = True + +except ImportError: + m2cryptoLoaded = False + + +# Try to load cryptlib +try: + import cryptlib_py + try: + cryptlib_py.cryptInit() + except cryptlib_py.CryptException, e: + #If tlslite and cryptoIDlib are both present, + #they might each try to re-initialize this, + #so we're tolerant of that. + if e[0] != cryptlib_py.CRYPT_ERROR_INITED: + raise + cryptlibpyLoaded = True + +except ImportError: + cryptlibpyLoaded = False + +#Try to load GMPY +try: + import gmpy + gmpyLoaded = True +except ImportError: + gmpyLoaded = False + +#Try to load pycrypto +try: + import Crypto.Cipher.AES + pycryptoLoaded = True +except ImportError: + pycryptoLoaded = False + + +# ************************************************************************** +# PRNG Functions +# ************************************************************************** + +# Get os.urandom PRNG +try: + os.urandom(1) + def getRandomBytes(howMany): + return stringToBytes(os.urandom(howMany)) + prngName = "os.urandom" + +except: + # Else get cryptlib PRNG + if cryptlibpyLoaded: + def getRandomBytes(howMany): + randomKey = cryptlib_py.cryptCreateContext(cryptlib_py.CRYPT_UNUSED, + cryptlib_py.CRYPT_ALGO_AES) + cryptlib_py.cryptSetAttribute(randomKey, + cryptlib_py.CRYPT_CTXINFO_MODE, + cryptlib_py.CRYPT_MODE_OFB) + cryptlib_py.cryptGenerateKey(randomKey) + bytes = createByteArrayZeros(howMany) + cryptlib_py.cryptEncrypt(randomKey, bytes) + return bytes + prngName = "cryptlib" + + else: + #Else get UNIX /dev/urandom PRNG + try: + devRandomFile = open("/dev/urandom", "rb") + def getRandomBytes(howMany): + return stringToBytes(devRandomFile.read(howMany)) + prngName = "/dev/urandom" + except IOError: + #Else get Win32 CryptoAPI PRNG + try: + import win32prng + def getRandomBytes(howMany): + s = win32prng.getRandomBytes(howMany) + if len(s) != howMany: + raise AssertionError() + return stringToBytes(s) + prngName ="CryptoAPI" + except ImportError: + #Else no PRNG :-( + def getRandomBytes(howMany): + raise NotImplementedError("No Random Number Generator "\ + "available.") + prngName = "None" + +# ************************************************************************** +# Converter Functions +# ************************************************************************** + +def bytesToNumber(bytes): + total = 0L + multiplier = 1L + for count in range(len(bytes)-1, -1, -1): + byte = bytes[count] + total += multiplier * byte + multiplier *= 256 + return total + +def numberToBytes(n): + howManyBytes = numBytes(n) + bytes = createByteArrayZeros(howManyBytes) + for count in range(howManyBytes-1, -1, -1): + bytes[count] = int(n % 256) + 
n >>= 8 + return bytes + +def bytesToBase64(bytes): + s = bytesToString(bytes) + return stringToBase64(s) + +def base64ToBytes(s): + s = base64ToString(s) + return stringToBytes(s) + +def numberToBase64(n): + bytes = numberToBytes(n) + return bytesToBase64(bytes) + +def base64ToNumber(s): + bytes = base64ToBytes(s) + return bytesToNumber(bytes) + +def stringToNumber(s): + bytes = stringToBytes(s) + return bytesToNumber(bytes) + +def numberToString(s): + bytes = numberToBytes(s) + return bytesToString(bytes) + +def base64ToString(s): + try: + return base64.decodestring(s) + except binascii.Error, e: + raise SyntaxError(e) + except binascii.Incomplete, e: + raise SyntaxError(e) + +def stringToBase64(s): + return base64.encodestring(s).replace("\n", "") + +def mpiToNumber(mpi): #mpi is an openssl-format bignum string + if (ord(mpi[4]) & 0x80) !=0: #Make sure this is a positive number + raise AssertionError() + bytes = stringToBytes(mpi[4:]) + return bytesToNumber(bytes) + +def numberToMPI(n): + bytes = numberToBytes(n) + ext = 0 + #If the high-order bit is going to be set, + #add an extra byte of zeros + if (numBits(n) & 0x7)==0: + ext = 1 + length = numBytes(n) + ext + bytes = concatArrays(createByteArrayZeros(4+ext), bytes) + bytes[0] = (length >> 24) & 0xFF + bytes[1] = (length >> 16) & 0xFF + bytes[2] = (length >> 8) & 0xFF + bytes[3] = length & 0xFF + return bytesToString(bytes) + + + +# ************************************************************************** +# Misc. Utility Functions +# ************************************************************************** + +def numBytes(n): + if n==0: + return 0 + bits = numBits(n) + return int(math.ceil(bits / 8.0)) + +def hashAndBase64(s): + return stringToBase64(sha1(s).digest()) + +def getBase64Nonce(numChars=22): #defaults to an 132 bit nonce + bytes = getRandomBytes(numChars) + bytesStr = "".join([chr(b) for b in bytes]) + return stringToBase64(bytesStr)[:numChars] + + +# ************************************************************************** +# Big Number Math +# ************************************************************************** + +def getRandomNumber(low, high): + if low >= high: + raise AssertionError() + howManyBits = numBits(high) + howManyBytes = numBytes(high) + lastBits = howManyBits % 8 + while 1: + bytes = getRandomBytes(howManyBytes) + if lastBits: + bytes[0] = bytes[0] % (1 << lastBits) + n = bytesToNumber(bytes) + if n >= low and n < high: + return n + +def gcd(a,b): + a, b = max(a,b), min(a,b) + while b: + a, b = b, a % b + return a + +def lcm(a, b): + #This will break when python division changes, but we can't use // cause + #of Jython + return (a * b) / gcd(a, b) + +#Returns inverse of a mod b, zero if none +#Uses Extended Euclidean Algorithm +def invMod(a, b): + c, d = a, b + uc, ud = 1, 0 + while c != 0: + #This will break when python division changes, but we can't use // + #cause of Jython + q = d / c + c, d = d-(q*c), c + uc, ud = ud - (q * uc), uc + if d == 1: + return ud % b + return 0 + + +if gmpyLoaded: + def powMod(base, power, modulus): + base = gmpy.mpz(base) + power = gmpy.mpz(power) + modulus = gmpy.mpz(modulus) + result = pow(base, power, modulus) + return long(result) + +else: + #Copied from Bryan G. 
Olson's post to comp.lang.python + #Does left-to-right instead of pow()'s right-to-left, + #thus about 30% faster than the python built-in with small bases + def powMod(base, power, modulus): + nBitScan = 5 + + """ Return base**power mod modulus, using multi bit scanning + with nBitScan bits at a time.""" + + #TREV - Added support for negative exponents + negativeResult = False + if (power < 0): + power *= -1 + negativeResult = True + + exp2 = 2**nBitScan + mask = exp2 - 1 + + # Break power into a list of digits of nBitScan bits. + # The list is recursive so easy to read in reverse direction. + nibbles = None + while power: + nibbles = int(power & mask), nibbles + power = power >> nBitScan + + # Make a table of powers of base up to 2**nBitScan - 1 + lowPowers = [1] + for i in xrange(1, exp2): + lowPowers.append((lowPowers[i-1] * base) % modulus) + + # To exponentiate by the first nibble, look it up in the table + nib, nibbles = nibbles + prod = lowPowers[nib] + + # For the rest, square nBitScan times, then multiply by + # base^nibble + while nibbles: + nib, nibbles = nibbles + for i in xrange(nBitScan): + prod = (prod * prod) % modulus + if nib: prod = (prod * lowPowers[nib]) % modulus + + #TREV - Added support for negative exponents + if negativeResult: + prodInv = invMod(prod, modulus) + #Check to make sure the inverse is correct + if (prod * prodInv) % modulus != 1: + raise AssertionError() + return prodInv + return prod + + +#Pre-calculate a sieve of the ~100 primes < 1000: +def makeSieve(n): + sieve = range(n) + for count in range(2, int(math.sqrt(n))): + if sieve[count] == 0: + continue + x = sieve[count] * 2 + while x < len(sieve): + sieve[x] = 0 + x += sieve[count] + sieve = [x for x in sieve[2:] if x] + return sieve + +sieve = makeSieve(1000) + +def isPrime(n, iterations=5, display=False): + #Trial division with sieve + for x in sieve: + if x >= n: return True + if n % x == 0: return False + #Passed trial division, proceed to Rabin-Miller + #Rabin-Miller implemented per Ferguson & Schneier + #Compute s, t for Rabin-Miller + if display: print "*", + s, t = n-1, 0 + while s % 2 == 0: + s, t = s/2, t+1 + #Repeat Rabin-Miller x times + a = 2 #Use 2 as a base for first iteration speedup, per HAC + for count in range(iterations): + v = powMod(a, s, n) + if v==1: + continue + i = 0 + while v != n-1: + if i == t-1: + return False + else: + v, i = powMod(v, 2, n), i+1 + a = getRandomNumber(2, n) + return True + +def getRandomPrime(bits, display=False): + if bits < 10: + raise AssertionError() + #The 1.5 ensures the 2 MSBs are set + #Thus, when used for p,q in RSA, n will have its MSB set + # + #Since 30 is lcm(2,3,5), we'll set our test numbers to + #29 % 30 and keep them there + low = (2L ** (bits-1)) * 3/2 + high = 2L ** bits - 30 + p = getRandomNumber(low, high) + p += 29 - (p % 30) + while 1: + if display: print ".", + p += 30 + if p >= high: + p = getRandomNumber(low, high) + p += 29 - (p % 30) + if isPrime(p, display=display): + return p + +#Unused at the moment... 
+def getRandomSafePrime(bits, display=False): + if bits < 10: + raise AssertionError() + #The 1.5 ensures the 2 MSBs are set + #Thus, when used for p,q in RSA, n will have its MSB set + # + #Since 30 is lcm(2,3,5), we'll set our test numbers to + #29 % 30 and keep them there + low = (2 ** (bits-2)) * 3/2 + high = (2 ** (bits-1)) - 30 + q = getRandomNumber(low, high) + q += 29 - (q % 30) + while 1: + if display: print ".", + q += 30 + if (q >= high): + q = getRandomNumber(low, high) + q += 29 - (q % 30) + #Ideas from Tom Wu's SRP code + #Do trial division on p and q before Rabin-Miller + if isPrime(q, 0, display=display): + p = (2 * q) + 1 + if isPrime(p, display=display): + if isPrime(q, display=display): + return p diff --git a/gam/gdata/analytics/tlslite/utils/cryptomath.pyc b/gam/gdata/analytics/tlslite/utils/cryptomath.pyc new file mode 100755 index 0000000000000000000000000000000000000000..309f7f4ed9f86ced99684b55fd747f40122320a9 GIT binary patch literal 11026 zcmcIq+ix4`bw5K=6iLgHY+Wq*vZn2|SGL#kT6^(cyoqgD-Y8kq?oewvjJ?_sXH1Pv z4#}C3mrCSfyV^xj6n$ucMNkxdDNwXPfB<>uTi=>K6zOySfGWgZ-$aK z4uT+DTRi;cyMO2MJLfyY?EfAty*cev>XQ5$!uNH2qUEfJD^dfs#LY<4k{Sx8oRL~4 zEoY^cP0Kl{<p``YCyum2k-VT%aXPLyy>Gpm8BW5n=zCef>X5`_Fu>|E$!GYX4*V`hS`BKW6%u#k1teadAtsV~OP+kS`#Z z6XJp)FJa`5_59Nk{@ij8rfs8a>t4@nofhw;xMSju%MRpL7WYuvXH3VVQ_ef3{CF>e zrMJ-Ci21bCPK$du?LJ|aJKAUb6?S(glXf!8ol2H{pik4FHk!7vU~xyzN6*Wbh&r6d_~Le(RQkHuhXR71*wNyow(g{;*D~v?QS-`QfXzw zkCKP-h7*<7oyf13+2yk7U9PuXZ{Ghk{#E=8Ch>D1a~4PpE0t)&F%PW9!xi(y_6w%S zL9GQJRD4#~N%-OLMSP+OkQjeZw2W*EkBGmj4zJ9yTYQlH;?NHoEA5+Gu@}XhwjIU8 z_}UDez-_n8a;(^Be|pCWw&qY2CDGd`Ex!A((QY@I-X*8gi7vIAz-f5l!-ng`&cnDF zHT~FoxEcG+=;1D><~v*QCMxOVsU&3HT=|A7~HAr}9hw-rxnCAUIG-Uvfy ztLB9eBzL46Ukda~S4I9V*fpr+NO%AW@A31T(|wE7qc3|a#f#|1A{#2L(HD2u5OxI z%#Z5oRTPY_8Z)Gd!X#Air zfFr`ea86fnJhoP%v!v~^kyg2~*;@C)a-!euuES4oJ{)}?4>%bl3+Dhp>D}g8fxSlJ2$yDD#8ElI(5**v z@;u9y?X1i`S6KsawsSIh6%(N74#_*1qMW;FL&n7TvL)?y?W=t?^Z8l@S)AXS%>{4%U2C<%u=06~tQ2SFjS!o+_f*TSM z@&zD}x6S|vM)6-cNiOy|`2#$l^RA_UBdRjw>b1mh!0H(=8h^^qm&s2U0|tT9)6g&S zGb`6vk&_RfBlNorUJ})L=E~Rj=Tnd8xCMo!V6#OSjcj7IU1oI_#1CTAhcXBhS`(LO zdXv}o8IVCT5qp6#B<>mK5XDM^rzfn*l!dgTzNJm$;e{+r;eI8ZGTMmT1r*^gPK-8{voo=OVPGmrkQ`rOG zezK5?{?D+&{_X$%@~|&(o0mWg)5&W!-$5~*?CEQh?*1=O^9OJHGWU5F#B7^<9{9FX zsTwHx%Ry|L%JxlfJGCXV?XL#0ZB>$snCTnjcB&-ChW`46+a`)GvW0NHb%=0-P#kaz z?5(Z_i;_cx^CYWR&^N_`TeJE>4P%_sro)hSxc`CM}pz&7J0mk%#MiUf!U5}S2 z!()Z*bw6;Ty6>x7(}+Oh_Ucv;J3aGE|F;~}+l(CG>}aKm3;-07K{-Er9+UkoJ_%H3 ztwRWq7$o(PjqXI*H;ckv-r=eS-$uViU&U^fhTpRb3w{c~JLBenyxoN4g#k9bUC-J^PY zQ9%+y_*rBtyBG$TW8r|jSEe?h?6Bl%Mna@6vTO@5KzV@yh)j-{5C&F)h=tFr24nO_ zi&zFR;kVtqTt^+|zQ&s2-5SO^s448$6irPsq9oT~7Ad27yo$*kP;E>!#H08#B)=wM zxUO-L$xl!MYKH2kG(iBIxar4{eGX0LqS+4WPOP`jBz97y;>32}ovFNdS*AJT?$U@i z2Ib*+tQ5TN+*|%B9wWv=3@Y;3Y1}`@ty!2M|0kg*5uh(argV%3Q%;)1tnQU5q-ju% zGRVjjB!s`jOqHDtt|n5xu4-%FK-qo;MB{7bf3#5d{N}Ec*Q#|eazJcQdHf3&O*D4I z%IyuRC`i@VBueb7D4=J@;CBML?5UP-6M#a|;wy-|^hU!(QRQDByu000`&Sqr+-_FfSa+l8T{-X5$)Yq zI9B#N2VCTs-QOOfvd;pv@cgZ5^LUNu1)LgEuI6PMf5>ED&G)s8_2G`RTK(qF|a%6e}c6avF+Z zP@)*aT>WHaMek()fvSjjX)a?-aFwhqp#~r`V$B%o>;o7c(daChpSlNA&fszfci1~d z!GzaHjUcC!mhNfXV0S|zPHcRi(=*?Gpr8VS7IEwf=%7wd^=j1J|HM}UDklM^Q!hB- z6uOvgPwqYFR2s-}Z#I)92LB%LanL($)?2^Cyaqn9ziA^qG@3nT;D%uUQ0<2(HjLjPHb%*_l)#!S&Y4nbj7bFRBuOQS0qKYgC?fr7qkL67zNQVAZJ#G-Q4hah6elsQb}B)Hr!G_+ql~P-aD9#zY$QP`y!uNXTSv6mwE7;pA%W zjTVyaT(Z^qp?-G~9B8(Kh5=;u<*h)Yel#PkMgHlln9WGNR_H~@c@2_JSOP=r% zFa^s0i14{tzEI^*N8F`L3 z9R+!UD!c)~D={oG3wxM^JeWGOx_=J+9afVl>CEU>LZb2j{R0}pvbDFlcUnDx3h>sW zNuBDebyFwq%Rzo)5F)fc?T z@&ecXgza=qa}=oUkvt}8fS5!foK3avnEZR-HJsRg=Gm%kx{TNBENG$SL{Cl3?5`t_ 
z9ciu{KJ6a$HBvkFRlfs&T?cf+wyOhJSPq^INO^5C=m6FBZ4{7-&F5j(C99YzS*2Va zpgwJlW=T(FQJ*WKbpmRgm)UII8p}-LIk%g%?JN1u$PqL)c21aL!lz9MBH_!T#Z_B3u+MIkp#EK){EwZguQk_sd0hOx=1Q+G73P<@Jg z58^(ANU%(sHd<7n+H~KO; zg@L^V%Hh_d3&bCsA7;)N?F2Vmr4K~5Jn>V*6jZf43}b{eZj#9#P#h2=UtUNhbxeE>kAz`<06f0piF zDH1}HiqczB$S}Jg1{5t|mRV|s-Tzzn%?U9Bs7^>&)2$RZNB3W46#`3Ifh~D1u}U5! zuLHw#C}3jCZP@4AA!t~-$or+!i#RakHrf(^ZwlRBV=Q?>>skz zaS&CK*f{pS(mb;vpn(Ny^n8AFFZ5g9_oz3_+eijZTyoa96moePMlx!`n$6_Zt{JSd zG;ZZGr|_-85;cg5zKBosa}er)vAz8l3fiF&%mjEl36P$GS)1Ak% zACyqVC)IaonI!80`w^4jc!{MGAS$PM8-YxcaMSAv1#W-FI`YT<4hg&1e?($~=(x_H zKwuOz_HP_2(Z`f1cc~YSJdj#Ljx5l57B5rF>I44#|Ew{^Uh++&|5(9AVBzlo=~@Rja6DIV8&P zlMaX)2Hh9+uYN=L|Hls1?yscl^3UbPX;%eyb6O_$?#tf(6EIGrh~F-M^7cjYx7XP4 zImn=NfIpq+>p<0c?5Q5WTqJrKO{pH3hd(dY`?|o5s+sHP=IM`e(<UH2a)i^gA0~IL}|_P`0L4 z7w&B7Mv^_-D)|B@BjL|5Do)Eplx>8J_7OIbGs}0)p_ty+OvdipJzHOxy~c(sBnu?W zJnCx$lW)7u$0d?;B;O^uN1`Ca>$6=W`GiE>(T`YCXZ0CNJ0xF#>}sNx{;*$c@zB@I zZ`{5`*+zc{lFu;nVg6586PfW$sgQ+3DZ%ZOa?FQhsfxJ?K$O;HS;wCNm3B)Qagaf1 zCJV<@EEO?sOoP&c8TKv?p?~IJb~Ils77NAPp8thnwwNzY;gcOH6-V(sTpUGEd!V>b M92v=s9UGbcf2N*miU0rr literal 0 HcmV?d00001 diff --git a/gam/gdata/analytics/tlslite/utils/dateFuncs.py b/gam/gdata/analytics/tlslite/utils/dateFuncs.py new file mode 100755 index 00000000000..38812ebf853 --- /dev/null +++ b/gam/gdata/analytics/tlslite/utils/dateFuncs.py @@ -0,0 +1,75 @@ + +import os + +#Functions for manipulating datetime objects +#CCYY-MM-DDThh:mm:ssZ +def parseDateClass(s): + year, month, day = s.split("-") + day, tail = day[:2], day[2:] + hour, minute, second = tail[1:].split(":") + second = second[:2] + year, month, day = int(year), int(month), int(day) + hour, minute, second = int(hour), int(minute), int(second) + return createDateClass(year, month, day, hour, minute, second) + + +if os.name != "java": + from datetime import datetime, timedelta + + #Helper functions for working with a date/time class + def createDateClass(year, month, day, hour, minute, second): + return datetime(year, month, day, hour, minute, second) + + def printDateClass(d): + #Split off fractional seconds, append 'Z' + return d.isoformat().split(".")[0]+"Z" + + def getNow(): + return datetime.utcnow() + + def getHoursFromNow(hours): + return datetime.utcnow() + timedelta(hours=hours) + + def getMinutesFromNow(minutes): + return datetime.utcnow() + timedelta(minutes=minutes) + + def isDateClassExpired(d): + return d < datetime.utcnow() + + def isDateClassBefore(d1, d2): + return d1 < d2 + +else: + #Jython 2.1 is missing lots of python 2.3 stuff, + #which we have to emulate here: + import java + import jarray + + def createDateClass(year, month, day, hour, minute, second): + c = java.util.Calendar.getInstance() + c.setTimeZone(java.util.TimeZone.getTimeZone("UTC")) + c.set(year, month-1, day, hour, minute, second) + return c + + def printDateClass(d): + return "%04d-%02d-%02dT%02d:%02d:%02dZ" % \ + (d.get(d.YEAR), d.get(d.MONTH)+1, d.get(d.DATE), \ + d.get(d.HOUR_OF_DAY), d.get(d.MINUTE), d.get(d.SECOND)) + + def getNow(): + c = java.util.Calendar.getInstance() + c.setTimeZone(java.util.TimeZone.getTimeZone("UTC")) + c.get(c.HOUR) #force refresh? 
+ return c + + def getHoursFromNow(hours): + d = getNow() + d.add(d.HOUR, hours) + return d + + def isDateClassExpired(d): + n = getNow() + return d.before(n) + + def isDateClassBefore(d1, d2): + return d1.before(d2) diff --git a/gam/gdata/analytics/tlslite/utils/entropy.c b/gam/gdata/analytics/tlslite/utils/entropy.c new file mode 100755 index 00000000000..c627794d2da --- /dev/null +++ b/gam/gdata/analytics/tlslite/utils/entropy.c @@ -0,0 +1,173 @@ + +#include "Python.h" + + +#ifdef MS_WINDOWS + +/* The following #define is not needed on VC6 with the Platform SDK, and it +may not be needed on VC7, I'm not sure. I don't think it hurts anything.*/ +#define _WIN32_WINNT 0x0400 + +#include + + +typedef BOOL (WINAPI *CRYPTACQUIRECONTEXTA)(HCRYPTPROV *phProv,\ + LPCSTR pszContainer, LPCSTR pszProvider, DWORD dwProvType,\ + DWORD dwFlags ); +typedef BOOL (WINAPI *CRYPTGENRANDOM)(HCRYPTPROV hProv, DWORD dwLen,\ + BYTE *pbBuffer ); +typedef BOOL (WINAPI *CRYPTRELEASECONTEXT)(HCRYPTPROV hProv,\ + DWORD dwFlags); + + +static PyObject* entropy(PyObject *self, PyObject *args) +{ + int howMany = 0; + HINSTANCE hAdvAPI32 = NULL; + CRYPTACQUIRECONTEXTA pCryptAcquireContextA = NULL; + CRYPTGENRANDOM pCryptGenRandom = NULL; + CRYPTRELEASECONTEXT pCryptReleaseContext = NULL; + HCRYPTPROV hCryptProv = 0; + unsigned char* bytes = NULL; + PyObject* returnVal = NULL; + + + /* Read arguments */ + if (!PyArg_ParseTuple(args, "i", &howMany)) + return(NULL); + + /* Obtain handle to the DLL containing CryptoAPI + This should not fail */ + if( (hAdvAPI32 = GetModuleHandle("advapi32.dll")) == NULL) { + PyErr_Format(PyExc_SystemError, + "Advapi32.dll not found"); + return NULL; + } + + /* Obtain pointers to the CryptoAPI functions + This will fail on some early version of Win95 */ + pCryptAcquireContextA = (CRYPTACQUIRECONTEXTA)GetProcAddress(hAdvAPI32,\ + "CryptAcquireContextA"); + pCryptGenRandom = (CRYPTGENRANDOM)GetProcAddress(hAdvAPI32,\ + "CryptGenRandom"); + pCryptReleaseContext = (CRYPTRELEASECONTEXT) GetProcAddress(hAdvAPI32,\ + "CryptReleaseContext"); + if (pCryptAcquireContextA == NULL || pCryptGenRandom == NULL || + pCryptReleaseContext == NULL) { + PyErr_Format(PyExc_NotImplementedError, + "CryptoAPI not available on this version of Windows"); + return NULL; + } + + /* Allocate bytes */ + if ((bytes = (unsigned char*)PyMem_Malloc(howMany)) == NULL) + return PyErr_NoMemory(); + + + /* Acquire context */ + if(!pCryptAcquireContextA(&hCryptProv, NULL, NULL, PROV_RSA_FULL, + CRYPT_VERIFYCONTEXT)) { + PyErr_Format(PyExc_SystemError, + "CryptAcquireContext failed, error %d", GetLastError()); + PyMem_Free(bytes); + return NULL; + } + + /* Get random data */ + if(!pCryptGenRandom(hCryptProv, howMany, bytes)) { + PyErr_Format(PyExc_SystemError, + "CryptGenRandom failed, error %d", GetLastError()); + PyMem_Free(bytes); + CryptReleaseContext(hCryptProv, 0); + return NULL; + } + + /* Build return value */ + returnVal = Py_BuildValue("s#", bytes, howMany); + PyMem_Free(bytes); + + /* Release context */ + if (!pCryptReleaseContext(hCryptProv, 0)) { + PyErr_Format(PyExc_SystemError, + "CryptReleaseContext failed, error %d", GetLastError()); + return NULL; + } + + return returnVal; +} + +#elif defined(HAVE_UNISTD_H) && defined(HAVE_FCNTL_H) + +#include +#include + +static PyObject* entropy(PyObject *self, PyObject *args) +{ + int howMany; + int fd; + unsigned char* bytes = NULL; + PyObject* returnVal = NULL; + + + /* Read arguments */ + if (!PyArg_ParseTuple(args, "i", &howMany)) + return(NULL); + + /* Allocate bytes */ + 
if ((bytes = (unsigned char*)PyMem_Malloc(howMany)) == NULL) + return PyErr_NoMemory(); + + /* Open device */ + if ((fd = open("/dev/urandom", O_RDONLY, 0)) == -1) { + PyErr_Format(PyExc_NotImplementedError, + "No entropy source found"); + PyMem_Free(bytes); + return NULL; + } + + /* Get random data */ + if (read(fd, bytes, howMany) < howMany) { + PyErr_Format(PyExc_SystemError, + "Reading from /dev/urandom failed"); + PyMem_Free(bytes); + close(fd); + return NULL; + } + + /* Build return value */ + returnVal = Py_BuildValue("s#", bytes, howMany); + PyMem_Free(bytes); + + /* Close device */ + close(fd); + + return returnVal; +} + +#else + +static PyObject* entropy(PyObject *self, PyObject *args) +{ + PyErr_Format(PyExc_NotImplementedError, + "Function not supported"); + return NULL; +} + +#endif + + + +/* List of functions exported by this module */ + +static struct PyMethodDef entropy_functions[] = { + {"entropy", (PyCFunction)entropy, METH_VARARGS, "Return a string of random bytes produced by a platform-specific\nentropy source."}, + {NULL, NULL} /* Sentinel */ +}; + + +/* Initialize this module. */ + +PyMODINIT_FUNC initentropy(void) +{ + Py_InitModule("entropy", entropy_functions); +} \ No newline at end of file diff --git a/gam/gdata/analytics/tlslite/utils/hmac.py b/gam/gdata/analytics/tlslite/utils/hmac.py new file mode 100755 index 00000000000..fe8feec219c --- /dev/null +++ b/gam/gdata/analytics/tlslite/utils/hmac.py @@ -0,0 +1,104 @@ +"""HMAC (Keyed-Hashing for Message Authentication) Python module. + +Implements the HMAC algorithm as described by RFC 2104. + +(This file is modified from the standard library version to do faster +copying) +""" + +def _strxor(s1, s2): + """Utility method. XOR the two strings s1 and s2 (must have same length). + """ + return "".join(map(lambda x, y: chr(ord(x) ^ ord(y)), s1, s2)) + +# The size of the digests returned by HMAC depends on the underlying +# hashing module used. +digest_size = None + +class HMAC: + """RFC2104 HMAC class. + + This supports the API for Cryptographic Hash Functions (PEP 247). + """ + + def __init__(self, key, msg = None, digestmod = None): + """Create a new HMAC object. + + key: key for the keyed hash object. + msg: Initial input for the hash, if provided. + digestmod: A module supporting PEP 247. Defaults to the md5 module. + """ + if digestmod is None: + import md5 + digestmod = md5 + + if key == None: #TREVNEW - for faster copying + return #TREVNEW + + self.digestmod = digestmod + self.outer = digestmod.new() + self.inner = digestmod.new() + self.digest_size = digestmod.digest_size + + blocksize = 64 + ipad = "\x36" * blocksize + opad = "\x5C" * blocksize + + if len(key) > blocksize: + key = digestmod.new(key).digest() + + key = key + chr(0) * (blocksize - len(key)) + self.outer.update(_strxor(key, opad)) + self.inner.update(_strxor(key, ipad)) + if msg is not None: + self.update(msg) + +## def clear(self): +## raise NotImplementedError, "clear() method not available in HMAC." + + def update(self, msg): + """Update this hashing object with the string msg. + """ + self.inner.update(msg) + + def copy(self): + """Return a separate copy of this hashing object. + + An update to this copy won't affect the original object. + """ + other = HMAC(None) #TREVNEW - for faster copying + other.digest_size = self.digest_size #TREVNEW + other.digestmod = self.digestmod + other.inner = self.inner.copy() + other.outer = self.outer.copy() + return other + + def digest(self): + """Return the hash value of this hashing object. 
+ + This returns a string containing 8-bit data. The object is + not altered in any way by this function; you can continue + updating the object after calling this function. + """ + h = self.outer.copy() + h.update(self.inner.digest()) + return h.digest() + + def hexdigest(self): + """Like digest(), but returns a string of hexadecimal digits instead. + """ + return "".join([hex(ord(x))[2:].zfill(2) + for x in tuple(self.digest())]) + +def new(key, msg = None, digestmod = None): + """Create a new hashing object and return it. + + key: The starting key for the hash. + msg: if available, will immediately be hashed into the object's starting + state. + + You can now feed arbitrary strings into the object using its update() + method, and can ask for the hash value at any time by calling its digest() + method. + """ + return HMAC(key, msg, digestmod) diff --git a/gam/gdata/analytics/tlslite/utils/jython_compat.py b/gam/gdata/analytics/tlslite/utils/jython_compat.py new file mode 100755 index 00000000000..1245183a99c --- /dev/null +++ b/gam/gdata/analytics/tlslite/utils/jython_compat.py @@ -0,0 +1,195 @@ +"""Miscellaneous functions to mask Python/Jython differences.""" + +import os +import sha + +if os.name != "java": + BaseException = Exception + + from sets import Set + import array + import math + + def createByteArraySequence(seq): + return array.array('B', seq) + def createByteArrayZeros(howMany): + return array.array('B', [0] * howMany) + def concatArrays(a1, a2): + return a1+a2 + + def bytesToString(bytes): + return bytes.tostring() + + def stringToBytes(s): + bytes = createByteArrayZeros(0) + bytes.fromstring(s) + return bytes + + def numBits(n): + if n==0: + return 0 + return int(math.floor(math.log(n, 2))+1) + + class CertChainBase: pass + class SelfTestBase: pass + class ReportFuncBase: pass + + #Helper functions for working with sets (from Python 2.3) + def iterSet(set): + return iter(set) + + def getListFromSet(set): + return list(set) + + #Factory function for getting a SHA1 object + def getSHA1(s): + return sha.sha(s) + + import sys + import traceback + + def formatExceptionTrace(e): + newStr = "".join(traceback.format_exception(sys.exc_type, sys.exc_value, sys.exc_traceback)) + return newStr + +else: + #Jython 2.1 is missing lots of python 2.3 stuff, + #which we have to emulate here: + import java + import jarray + + BaseException = java.lang.Exception + + def createByteArraySequence(seq): + if isinstance(seq, type("")): #If it's a string, convert + seq = [ord(c) for c in seq] + return jarray.array(seq, 'h') #use short instead of bytes, cause bytes are signed + def createByteArrayZeros(howMany): + return jarray.zeros(howMany, 'h') #use short instead of bytes, cause bytes are signed + def concatArrays(a1, a2): + l = list(a1)+list(a2) + return createByteArraySequence(l) + + #WAY TOO SLOW - MUST BE REPLACED------------ + def bytesToString(bytes): + return "".join([chr(b) for b in bytes]) + + def stringToBytes(s): + bytes = createByteArrayZeros(len(s)) + for count, c in enumerate(s): + bytes[count] = ord(c) + return bytes + #WAY TOO SLOW - MUST BE REPLACED------------ + + def numBits(n): + if n==0: + return 0 + n= 1L * n; #convert to long, if it isn't already + return n.__tojava__(java.math.BigInteger).bitLength() + + #This properly creates static methods for Jython + class staticmethod: + def __init__(self, anycallable): self.__call__ = anycallable + + #Properties are not supported for Jython + class property: + def __init__(self, anycallable): pass + + #True and False have to be 
specially defined + False = 0 + True = 1 + + class StopIteration(Exception): pass + + def enumerate(collection): + return zip(range(len(collection)), collection) + + class Set: + def __init__(self, seq=None): + self.values = {} + if seq: + for e in seq: + self.values[e] = None + + def add(self, e): + self.values[e] = None + + def discard(self, e): + if e in self.values.keys(): + del(self.values[e]) + + def union(self, s): + ret = Set() + for e in self.values.keys(): + ret.values[e] = None + for e in s.values.keys(): + ret.values[e] = None + return ret + + def issubset(self, other): + for e in self.values.keys(): + if e not in other.values.keys(): + return False + return True + + def __nonzero__( self): + return len(self.values.keys()) + + def __contains__(self, e): + return e in self.values.keys() + + def iterSet(set): + return set.values.keys() + + def getListFromSet(set): + return set.values.keys() + + """ + class JCE_SHA1: + def __init__(self, s=None): + self.md = java.security.MessageDigest.getInstance("SHA1") + if s: + self.update(s) + + def update(self, s): + self.md.update(s) + + def copy(self): + sha1 = JCE_SHA1() + sha1.md = self.md.clone() + return sha1 + + def digest(self): + digest = self.md.digest() + bytes = jarray.zeros(20, 'h') + for count in xrange(20): + x = digest[count] + if x < 0: x += 256 + bytes[count] = x + return bytes + """ + + #Factory function for getting a SHA1 object + #The JCE_SHA1 class is way too slow... + #the sha.sha object we use instead is broken in the jython 2.1 + #release, and needs to be patched + def getSHA1(s): + #return JCE_SHA1(s) + return sha.sha(s) + + + #Adjust the string to an array of bytes + def stringToJavaByteArray(s): + bytes = jarray.zeros(len(s), 'b') + for count, c in enumerate(s): + x = ord(c) + if x >= 128: x -= 256 + bytes[count] = x + return bytes + + import sys + import traceback + + def formatExceptionTrace(e): + newStr = "".join(traceback.format_exception(sys.exc_type, sys.exc_value, sys.exc_traceback)) + return newStr diff --git a/gam/gdata/analytics/tlslite/utils/keyfactory.py b/gam/gdata/analytics/tlslite/utils/keyfactory.py new file mode 100755 index 00000000000..5005af7f5b7 --- /dev/null +++ b/gam/gdata/analytics/tlslite/utils/keyfactory.py @@ -0,0 +1,243 @@ +"""Factory functions for asymmetric cryptography. +@sort: generateRSAKey, parseXMLKey, parsePEMKey, parseAsPublicKey, +parseAsPrivateKey +""" + +from compat import * + +from RSAKey import RSAKey +from Python_RSAKey import Python_RSAKey +import cryptomath + +if cryptomath.m2cryptoLoaded: + from OpenSSL_RSAKey import OpenSSL_RSAKey + +if cryptomath.pycryptoLoaded: + from PyCrypto_RSAKey import PyCrypto_RSAKey + +# ************************************************************************** +# Factory Functions for RSA Keys +# ************************************************************************** + +def generateRSAKey(bits, implementations=["openssl", "python"]): + """Generate an RSA key with the specified bit length. + + @type bits: int + @param bits: Desired bit length of the new key's modulus. + + @rtype: L{tlslite.utils.RSAKey.RSAKey} + @return: A new RSA private key. + """ + for implementation in implementations: + if implementation == "openssl" and cryptomath.m2cryptoLoaded: + return OpenSSL_RSAKey.generate(bits) + elif implementation == "python": + return Python_RSAKey.generate(bits) + raise ValueError("No acceptable implementations") + +def parseXMLKey(s, private=False, public=False, implementations=["python"]): + """Parse an XML-format key. 
+ + The XML format used here is specific to tlslite and cryptoIDlib. The + format can store the public component of a key, or the public and + private components. For example:: + + + 4a5yzB8oGNlHo866CAspAC47M4Fvx58zwK8pou... + Aw== + + + + 4a5yzB8oGNlHo866CAspAC47M4Fvx58zwK8pou... + Aw== + JZ0TIgUxWXmL8KJ0VqyG1V0J3ern9pqIoB0xmy... +

    5PreIj6z6ldIGL1V4+1C36dQFHNCQHJvW52GXc... + /E/wDit8YXPCxx126zTq2ilQ3IcW54NJYyNjiZ... + mKc+wX8inDowEH45Qp4slRo1YveBgExKPROu6... + qDVKtBz9lk0shL5PR3ickXDgkwS576zbl2ztB... + j6E8EA7dNsTImaXexAmLA1DoeArsYeFAInr... + + + @type s: str + @param s: A string containing an XML public or private key. + + @type private: bool + @param private: If True, a L{SyntaxError} will be raised if the private + key component is not present. + + @type public: bool + @param public: If True, the private key component (if present) will be + discarded, so this function will always return a public key. + + @rtype: L{tlslite.utils.RSAKey.RSAKey} + @return: An RSA key. + + @raise SyntaxError: If the key is not properly formatted. + """ + for implementation in implementations: + if implementation == "python": + key = Python_RSAKey.parseXML(s) + break + else: + raise ValueError("No acceptable implementations") + + return _parseKeyHelper(key, private, public) + +#Parse as an OpenSSL or Python key +def parsePEMKey(s, private=False, public=False, passwordCallback=None, + implementations=["openssl", "python"]): + """Parse a PEM-format key. + + The PEM format is used by OpenSSL and other tools. The + format is typically used to store both the public and private + components of a key. For example:: + + -----BEGIN RSA PRIVATE KEY----- + MIICXQIBAAKBgQDYscuoMzsGmW0pAYsmyHltxB2TdwHS0dImfjCMfaSDkfLdZY5+ + dOWORVns9etWnr194mSGA1F0Pls/VJW8+cX9+3vtJV8zSdANPYUoQf0TP7VlJxkH + dSRkUbEoz5bAAs/+970uos7n7iXQIni+3erUTdYEk2iWnMBjTljfgbK/dQIDAQAB + AoGAJHoJZk75aKr7DSQNYIHuruOMdv5ZeDuJvKERWxTrVJqE32/xBKh42/IgqRrc + esBN9ZregRCd7YtxoL+EVUNWaJNVx2mNmezEznrc9zhcYUrgeaVdFO2yBF1889zO + gCOVwrO8uDgeyj6IKa25H6c1N13ih/o7ZzEgWbGG+ylU1yECQQDv4ZSJ4EjSh/Fl + aHdz3wbBa/HKGTjC8iRy476Cyg2Fm8MZUe9Yy3udOrb5ZnS2MTpIXt5AF3h2TfYV + VoFXIorjAkEA50FcJmzT8sNMrPaV8vn+9W2Lu4U7C+K/O2g1iXMaZms5PC5zV5aV + CKXZWUX1fq2RaOzlbQrpgiolhXpeh8FjxwJBAOFHzSQfSsTNfttp3KUpU0LbiVvv + i+spVSnA0O4rq79KpVNmK44Mq67hsW1P11QzrzTAQ6GVaUBRv0YS061td1kCQHnP + wtN2tboFR6lABkJDjxoGRvlSt4SOPr7zKGgrWjeiuTZLHXSAnCY+/hr5L9Q3ZwXG + 6x6iBdgLjVIe4BZQNtcCQQDXGv/gWinCNTN3MPWfTW/RGzuMYVmyBFais0/VrgdH + h1dLpztmpQqfyH/zrBXQ9qL/zR4ojS6XYneO/U18WpEe + -----END RSA PRIVATE KEY----- + + To generate a key like this with OpenSSL, run:: + + openssl genrsa 2048 > key.pem + + This format also supports password-encrypted private keys. TLS + Lite can only handle password-encrypted private keys when OpenSSL + and M2Crypto are installed. In this case, passwordCallback will be + invoked to query the user for the password. + + @type s: str + @param s: A string containing a PEM-encoded public or private key. + + @type private: bool + @param private: If True, a L{SyntaxError} will be raised if the + private key component is not present. + + @type public: bool + @param public: If True, the private key component (if present) will + be discarded, so this function will always return a public key. + + @type passwordCallback: callable + @param passwordCallback: This function will be called, with no + arguments, if the PEM-encoded private key is password-encrypted. + The callback should return the password string. If the password is + incorrect, SyntaxError will be raised. If no callback is passed + and the key is password-encrypted, a prompt will be displayed at + the console. + + @rtype: L{tlslite.utils.RSAKey.RSAKey} + @return: An RSA key. + + @raise SyntaxError: If the key is not properly formatted. 
+ """ + for implementation in implementations: + if implementation == "openssl" and cryptomath.m2cryptoLoaded: + key = OpenSSL_RSAKey.parse(s, passwordCallback) + break + elif implementation == "python": + key = Python_RSAKey.parsePEM(s) + break + else: + raise ValueError("No acceptable implementations") + + return _parseKeyHelper(key, private, public) + + +def _parseKeyHelper(key, private, public): + if private: + if not key.hasPrivateKey(): + raise SyntaxError("Not a private key!") + + if public: + return _createPublicKey(key) + + if private: + if hasattr(key, "d"): + return _createPrivateKey(key) + else: + return key + + return key + +def parseAsPublicKey(s): + """Parse an XML or PEM-formatted public key. + + @type s: str + @param s: A string containing an XML or PEM-encoded public or private key. + + @rtype: L{tlslite.utils.RSAKey.RSAKey} + @return: An RSA public key. + + @raise SyntaxError: If the key is not properly formatted. + """ + try: + return parsePEMKey(s, public=True) + except: + return parseXMLKey(s, public=True) + +def parsePrivateKey(s): + """Parse an XML or PEM-formatted private key. + + @type s: str + @param s: A string containing an XML or PEM-encoded private key. + + @rtype: L{tlslite.utils.RSAKey.RSAKey} + @return: An RSA private key. + + @raise SyntaxError: If the key is not properly formatted. + """ + try: + return parsePEMKey(s, private=True) + except: + return parseXMLKey(s, private=True) + +def _createPublicKey(key): + """ + Create a new public key. Discard any private component, + and return the most efficient key possible. + """ + if not isinstance(key, RSAKey): + raise AssertionError() + return _createPublicRSAKey(key.n, key.e) + +def _createPrivateKey(key): + """ + Create a new private key. Return the most efficient key possible. 
+ """ + if not isinstance(key, RSAKey): + raise AssertionError() + if not key.hasPrivateKey(): + raise AssertionError() + return _createPrivateRSAKey(key.n, key.e, key.d, key.p, key.q, key.dP, + key.dQ, key.qInv) + +def _createPublicRSAKey(n, e, implementations = ["openssl", "pycrypto", + "python"]): + for implementation in implementations: + if implementation == "openssl" and cryptomath.m2cryptoLoaded: + return OpenSSL_RSAKey(n, e) + elif implementation == "pycrypto" and cryptomath.pycryptoLoaded: + return PyCrypto_RSAKey(n, e) + elif implementation == "python": + return Python_RSAKey(n, e) + raise ValueError("No acceptable implementations") + +def _createPrivateRSAKey(n, e, d, p, q, dP, dQ, qInv, + implementations = ["pycrypto", "python"]): + for implementation in implementations: + if implementation == "pycrypto" and cryptomath.pycryptoLoaded: + return PyCrypto_RSAKey(n, e, d, p, q, dP, dQ, qInv) + elif implementation == "python": + return Python_RSAKey(n, e, d, p, q, dP, dQ, qInv) + raise ValueError("No acceptable implementations") diff --git a/gam/gdata/analytics/tlslite/utils/keyfactory.pyc b/gam/gdata/analytics/tlslite/utils/keyfactory.pyc new file mode 100755 index 0000000000000000000000000000000000000000..d42210792236745793bb9fac20fccb6231a39edb GIT binary patch literal 9043 zcmd5?-E!MVc5YI#EXlv)@oZIgE4A3!*^G82iy}o*l**$r|0GeQC=#UjSDtl&K$8Ro z0uUOMNUohq#q%0@fmG!Y@(8)jMecH+yF9?|Io$wA+uF>`j4%B7 zPQD9~&KjZX_#ZaHbPvdPFxLaW@gAA?e@MQAnI94D5%~@l{+MWwdF2Vwp76?3qCMr6 z&xrOJuRPQN3jRf?e$m8a@+%{PfhA5E|qKPBAsqYw&F0_IxEdzD*3bd zOPW>2T|-x~Wu5nh7DvAZ3edE!{}KK+KuE;J;&-CD0#!j@yg!2X%G4cL<{Qym7VVFr z{oJNztyc0ri1sJEf0i%b`{3S(HVgVPX1GY-=JeDsRF~jch9?UTdkw{;K;DK(?%OnZ z7bb6kfgy4kCQgL7gbYKx5+=hX@|KYIAyUI0Vcz3DaY12%4_Eg|(*@x!VYh$eYF67o z-DLj(#j(dpNiikZmUKy{(v9v8BzHh1W>Zz~=~R=tx+@vf?7M^Q^>qpUUb$17Vlz9C zbkh}Wz*=SGHRdSOoexGzR*zFP=?zZvHIqh`Ha5o0C+Q%~1F7@}*I~rxGm&~l(%^w8TTk7m)YM!QMM#SRjKVNU4u&c$TsMRny$i? 
zuDpT$+;u2~zKs-j;G$$lG0{-66pd;!O37V?BG5pT9Us_Gj4{nSj^%9dUMNiDkBf@% z(R`frhix0si|~S_pq4#&-M6g1L0>4g&0dTYQ|VLZbzf6l<+aD3*PK7EVaGjTMcbfH zxaeSU+&q2;E$kr_WbIMti_oLc!_a3T6_hcN@C8Bhmg#C!!*CD)R?O#>axa|(r=p?9|>2`F$%9t9O3Lg(iz(FpwUmZ@&5FWrWTNzKjG*I9yy;;NAYO7RGm~9iDb}>V^Krq}dvk zQpMAq#?|yV+SobWrH;98Ulpv(&SW(GWT*CFqTM)Xz+~jmO)oxX?lSwp|oYk*lx>4ONsLe#YeA=FthkECefm-Eo zR8%)_S}EPkSvUEUc%o{@nIT)z_BGA)^OIslJ|8DP9aufQ$~B5^X0~r!?yx~AQIU6b z^|F=gU*6Obd&ybXh|S#0Cj+kv=Jnw)nNQ`@ds>;*3nQgPC+Sfs9nD!Z?XWgIN*7FL zaUh60kgO+f;fcWwGuPpI_DZ;6Xh~z6ZuUX@!PDs`R)rb*N)Kx7>pORxf&lmJUi*R6 zwJc+iaPXnflj_cxZh<|O{!p936cg_M-a!O045>>cN72!ob>aBEft(i1wqTQB>Zaww zQYZrx_2Yq92Djf^R^hb=i{Pd_4+0C+H(-8`^l$y8@kMAlQxymN`j*7N!3lBh=_=X7>6yM@C0n(yq7Vi$OwzT^%DY1 zy{j1Z0Lb|Zga!Q_yovz!H3qLIq1Euiu!`Xw@f7}MvBwP*6Ai;w?%N81Ct0JHwy$0kqN90BTxOq;09yBEOb>dKoj_A2#q4zbz$H~0nImVuI=-H z7R10m8`)mqUnYNCD06UCkqeDk($-Cbh*;Lu&TYCdS$O+oK7xJ=7ehU+cWocSf{CPLg2ZmYwi4HOmCWJnPJ@+_WIpoM5`8Z>1sL?5K3Fe>C+SI zv~#(aP>Rl8u2wC#3nyb|e14`~Cpt7YKD{pH<>sXBG)}MbyRpb5Qyj!&kwX7UcGQ3n zWtsAR$Dw^WtL?SjiB;OnH!jLe<+R+G#75;2o#khyqwdcJYWu?JQ>CFDoyVq`qi8C% zKRXWy^|R-V8|OSV&h_bZm@E{PSmGq9M$6G%eGswsIj3=r&#&SsVI%qwYj<9jFram zi@oe-F>)U3NA=d3(iyQtC7YNv63U!Vw%F=4FIv&wRZLdSXGXW`*nQnH1}&QoQb)td z&1oimesnUcReLp7FZTdu?G`WWi=9$eZ(LsoE7CWa-Kd%Ao%6VJwYOii8|6_k9zVNE z?hRNoT8T!hGiO#$SChvL$*8MEF9BAVm4MKVTaLM1>qt%->CEM6ZaA@y z7-Rlo7wheWZ)!9`_jMgs=8wDEA zbgE@nMai^|uOt1YZf48%^6pus*{e4r^6_kZ)^3bI^bk0AA`Pdn%{6-v)k^lv9of~Z z-t;6gb26>!{#7Y5ljGK~mTa|6dLFrmrkZwM;9(RsH`jbQce@$$Mb@q0JPjcZBaWe8 zB6tR0!;gZVbJ~)evAKwTy^}1C29ztscH$}N5D9GCbR@uNoySyx>=groEF0T49O0Q{ zD~#P(j`o6@{DcKC!3!}+fqTAEjjyi+M?ncey=8*W8vss+1Kjrvmu?2s^t~#lg2#rl zm^hA03LY;^=7JZ6Fe*s}Q*2jN7{D2yb{0N&74`B1%%Gds)+H5>&o2@Ue&-xrvyTP%@`K#;gL{@-Z*sfIyfF=oDD# zPc*jx0q7v&bK+d}*uWYanvXyi)yu>7sJ$M%_O=IrkuOFE9daD1x?79TGFZRB=3=;M z&1dvhM#X6u%?4*hw{Yv45&hc^oUYw~GRVs|lqsmMA`(ML0FEHcGN^A9{||8UkH`;g zAY_&iZOXicco%YN9!mWPRfEVms+ONZhYddHGaUI0i_f8O@q;gYU(M!dT7C|n?;J|Q zs)}^l1kwx_LiRBi^enVI2h_MJ|H)#%-$McU9Oo+p!$ZKs(DoJPNC8!goX^!)1ai=j z@Nwb@to>9ZjOpN^0Nw#r1Il)4n3MEB6H>!b@WXRNWxk*?oc~ahcvcqG?N>Y*tMOE& zi>Hi%vXHcO@reuv-;%@@EKJ{Qj&v7U?n zpI}mk@+6@Z;PkWbN@yLPCs5uGt9ZJ?C&7bWW0yq>3SK}g@WLF%#u@Bd2@#~eS;SX2Vn@~_ZzyTH-O zatFH&6=CaW=wZTsfnF5cEL_uohZG)k$Q>tK078cv*Hz}gI3D7&+~0xU#9NRBqmiVX zNNqtvVH%ibkO0^cUoV(hM*#UZ(IApo@QCYJ~^a^I@t z9pDPMV7~1(6vP2|1?>cThZk<7TbP@#U>V8SiHdE z&Lx17{4aQfC2$G+5-j1@A^tx`aM=3hjb^ED-tc@oI&Q@krjEw?(Md#}ZxEG>A54xQ z+all%H|R3leaZ-ZI4YHYg|#oSK$j__VREgIaba^c> (7 - t)) & 0x01 + +B = [0, 1, 1, 0, 0, 0, 1, 1] + +# affine transform: box[i] <- B + A*box[i] +cox = [[0] * 8 for i in xrange(256)] +for i in xrange(256): + for t in xrange(8): + cox[i][t] = B[t] + for j in xrange(8): + cox[i][t] ^= A[t][j] * box[i][j] + +# S-boxes and inverse S-boxes +S = [0] * 256 +Si = [0] * 256 +for i in xrange(256): + S[i] = cox[i][0] << 7 + for t in xrange(1, 8): + S[i] ^= cox[i][t] << (7-t) + Si[S[i] & 0xFF] = i + +# T-boxes +G = [[2, 1, 1, 3], + [3, 2, 1, 1], + [1, 3, 2, 1], + [1, 1, 3, 2]] + +AA = [[0] * 8 for i in xrange(4)] + +for i in xrange(4): + for j in xrange(4): + AA[i][j] = G[i][j] + AA[i][i+4] = 1 + +for i in xrange(4): + pivot = AA[i][i] + if pivot == 0: + t = i + 1 + while AA[t][i] == 0 and t < 4: + t += 1 + assert t != 4, 'G matrix must be invertible' + for j in xrange(8): + AA[i][j], AA[t][j] = AA[t][j], AA[i][j] + pivot = AA[i][i] + for j in xrange(8): + if AA[i][j] != 0: + AA[i][j] = alog[(255 + log[AA[i][j] & 0xFF] - log[pivot & 0xFF]) % 255] + for t in xrange(4): + if i != t: + for j in xrange(i+1, 8): + AA[t][j] ^= mul(AA[i][j], AA[t][i]) + AA[t][i] = 0 + +iG = [[0] * 
4 for i in xrange(4)] + +for i in xrange(4): + for j in xrange(4): + iG[i][j] = AA[i][j + 4] + +def mul4(a, bs): + if a == 0: + return 0 + r = 0 + for b in bs: + r <<= 8 + if b != 0: + r = r | mul(a, b) + return r + +T1 = [] +T2 = [] +T3 = [] +T4 = [] +T5 = [] +T6 = [] +T7 = [] +T8 = [] +U1 = [] +U2 = [] +U3 = [] +U4 = [] + +for t in xrange(256): + s = S[t] + T1.append(mul4(s, G[0])) + T2.append(mul4(s, G[1])) + T3.append(mul4(s, G[2])) + T4.append(mul4(s, G[3])) + + s = Si[t] + T5.append(mul4(s, iG[0])) + T6.append(mul4(s, iG[1])) + T7.append(mul4(s, iG[2])) + T8.append(mul4(s, iG[3])) + + U1.append(mul4(t, iG[0])) + U2.append(mul4(t, iG[1])) + U3.append(mul4(t, iG[2])) + U4.append(mul4(t, iG[3])) + +# round constants +rcon = [1] +r = 1 +for t in xrange(1, 30): + r = mul(2, r) + rcon.append(r) + +del A +del AA +del pivot +del B +del G +del box +del log +del alog +del i +del j +del r +del s +del t +del mul +del mul4 +del cox +del iG + +class rijndael: + def __init__(self, key, block_size = 16): + if block_size != 16 and block_size != 24 and block_size != 32: + raise ValueError('Invalid block size: ' + str(block_size)) + if len(key) != 16 and len(key) != 24 and len(key) != 32: + raise ValueError('Invalid key size: ' + str(len(key))) + self.block_size = block_size + + ROUNDS = num_rounds[len(key)][block_size] + BC = block_size / 4 + # encryption round keys + Ke = [[0] * BC for i in xrange(ROUNDS + 1)] + # decryption round keys + Kd = [[0] * BC for i in xrange(ROUNDS + 1)] + ROUND_KEY_COUNT = (ROUNDS + 1) * BC + KC = len(key) / 4 + + # copy user material bytes into temporary ints + tk = [] + for i in xrange(0, KC): + tk.append((ord(key[i * 4]) << 24) | (ord(key[i * 4 + 1]) << 16) | + (ord(key[i * 4 + 2]) << 8) | ord(key[i * 4 + 3])) + + # copy values into round key arrays + t = 0 + j = 0 + while j < KC and t < ROUND_KEY_COUNT: + Ke[t / BC][t % BC] = tk[j] + Kd[ROUNDS - (t / BC)][t % BC] = tk[j] + j += 1 + t += 1 + tt = 0 + rconpointer = 0 + while t < ROUND_KEY_COUNT: + # extrapolate using phi (the round key evolution function) + tt = tk[KC - 1] + tk[0] ^= (S[(tt >> 16) & 0xFF] & 0xFF) << 24 ^ \ + (S[(tt >> 8) & 0xFF] & 0xFF) << 16 ^ \ + (S[ tt & 0xFF] & 0xFF) << 8 ^ \ + (S[(tt >> 24) & 0xFF] & 0xFF) ^ \ + (rcon[rconpointer] & 0xFF) << 24 + rconpointer += 1 + if KC != 8: + for i in xrange(1, KC): + tk[i] ^= tk[i-1] + else: + for i in xrange(1, KC / 2): + tk[i] ^= tk[i-1] + tt = tk[KC / 2 - 1] + tk[KC / 2] ^= (S[ tt & 0xFF] & 0xFF) ^ \ + (S[(tt >> 8) & 0xFF] & 0xFF) << 8 ^ \ + (S[(tt >> 16) & 0xFF] & 0xFF) << 16 ^ \ + (S[(tt >> 24) & 0xFF] & 0xFF) << 24 + for i in xrange(KC / 2 + 1, KC): + tk[i] ^= tk[i-1] + # copy values into round key arrays + j = 0 + while j < KC and t < ROUND_KEY_COUNT: + Ke[t / BC][t % BC] = tk[j] + Kd[ROUNDS - (t / BC)][t % BC] = tk[j] + j += 1 + t += 1 + # inverse MixColumn where needed + for r in xrange(1, ROUNDS): + for j in xrange(BC): + tt = Kd[r][j] + Kd[r][j] = U1[(tt >> 24) & 0xFF] ^ \ + U2[(tt >> 16) & 0xFF] ^ \ + U3[(tt >> 8) & 0xFF] ^ \ + U4[ tt & 0xFF] + self.Ke = Ke + self.Kd = Kd + + def encrypt(self, plaintext): + if len(plaintext) != self.block_size: + raise ValueError('wrong block length, expected ' + str(self.block_size) + ' got ' + str(len(plaintext))) + Ke = self.Ke + + BC = self.block_size / 4 + ROUNDS = len(Ke) - 1 + if BC == 4: + SC = 0 + elif BC == 6: + SC = 1 + else: + SC = 2 + s1 = shifts[SC][1][0] + s2 = shifts[SC][2][0] + s3 = shifts[SC][3][0] + a = [0] * BC + # temporary work array + t = [] + # plaintext to ints + key + for i in xrange(BC): + 
t.append((ord(plaintext[i * 4 ]) << 24 | + ord(plaintext[i * 4 + 1]) << 16 | + ord(plaintext[i * 4 + 2]) << 8 | + ord(plaintext[i * 4 + 3]) ) ^ Ke[0][i]) + # apply round transforms + for r in xrange(1, ROUNDS): + for i in xrange(BC): + a[i] = (T1[(t[ i ] >> 24) & 0xFF] ^ + T2[(t[(i + s1) % BC] >> 16) & 0xFF] ^ + T3[(t[(i + s2) % BC] >> 8) & 0xFF] ^ + T4[ t[(i + s3) % BC] & 0xFF] ) ^ Ke[r][i] + t = copy.copy(a) + # last round is special + result = [] + for i in xrange(BC): + tt = Ke[ROUNDS][i] + result.append((S[(t[ i ] >> 24) & 0xFF] ^ (tt >> 24)) & 0xFF) + result.append((S[(t[(i + s1) % BC] >> 16) & 0xFF] ^ (tt >> 16)) & 0xFF) + result.append((S[(t[(i + s2) % BC] >> 8) & 0xFF] ^ (tt >> 8)) & 0xFF) + result.append((S[ t[(i + s3) % BC] & 0xFF] ^ tt ) & 0xFF) + return string.join(map(chr, result), '') + + def decrypt(self, ciphertext): + if len(ciphertext) != self.block_size: + raise ValueError('wrong block length, expected ' + str(self.block_size) + ' got ' + str(len(plaintext))) + Kd = self.Kd + + BC = self.block_size / 4 + ROUNDS = len(Kd) - 1 + if BC == 4: + SC = 0 + elif BC == 6: + SC = 1 + else: + SC = 2 + s1 = shifts[SC][1][1] + s2 = shifts[SC][2][1] + s3 = shifts[SC][3][1] + a = [0] * BC + # temporary work array + t = [0] * BC + # ciphertext to ints + key + for i in xrange(BC): + t[i] = (ord(ciphertext[i * 4 ]) << 24 | + ord(ciphertext[i * 4 + 1]) << 16 | + ord(ciphertext[i * 4 + 2]) << 8 | + ord(ciphertext[i * 4 + 3]) ) ^ Kd[0][i] + # apply round transforms + for r in xrange(1, ROUNDS): + for i in xrange(BC): + a[i] = (T5[(t[ i ] >> 24) & 0xFF] ^ + T6[(t[(i + s1) % BC] >> 16) & 0xFF] ^ + T7[(t[(i + s2) % BC] >> 8) & 0xFF] ^ + T8[ t[(i + s3) % BC] & 0xFF] ) ^ Kd[r][i] + t = copy.copy(a) + # last round is special + result = [] + for i in xrange(BC): + tt = Kd[ROUNDS][i] + result.append((Si[(t[ i ] >> 24) & 0xFF] ^ (tt >> 24)) & 0xFF) + result.append((Si[(t[(i + s1) % BC] >> 16) & 0xFF] ^ (tt >> 16)) & 0xFF) + result.append((Si[(t[(i + s2) % BC] >> 8) & 0xFF] ^ (tt >> 8)) & 0xFF) + result.append((Si[ t[(i + s3) % BC] & 0xFF] ^ tt ) & 0xFF) + return string.join(map(chr, result), '') + +def encrypt(key, block): + return rijndael(key, len(block)).encrypt(block) + +def decrypt(key, block): + return rijndael(key, len(block)).decrypt(block) + +def test(): + def t(kl, bl): + b = 'b' * bl + r = rijndael('a' * kl, bl) + assert r.decrypt(r.encrypt(b)) == b + t(16, 16) + t(16, 24) + t(16, 32) + t(24, 16) + t(24, 24) + t(24, 32) + t(32, 16) + t(32, 24) + t(32, 32) + diff --git a/gam/gdata/analytics/tlslite/utils/win32prng.c b/gam/gdata/analytics/tlslite/utils/win32prng.c new file mode 100755 index 00000000000..de08b3b3b90 --- /dev/null +++ b/gam/gdata/analytics/tlslite/utils/win32prng.c @@ -0,0 +1,63 @@ + +#include "Python.h" +#define _WIN32_WINNT 0x0400 /* Needed for CryptoAPI on some systems */ +#include + + +static PyObject* getRandomBytes(PyObject *self, PyObject *args) +{ + int howMany; + HCRYPTPROV hCryptProv; + unsigned char* bytes = NULL; + PyObject* returnVal = NULL; + + + /* Read Arguments */ + if (!PyArg_ParseTuple(args, "i", &howMany)) + return(NULL); + + /* Get Context */ + if(CryptAcquireContext( + &hCryptProv, + NULL, + NULL, + PROV_RSA_FULL, + CRYPT_VERIFYCONTEXT) == 0) + return Py_BuildValue("s#", NULL, 0); + + + /* Allocate bytes */ + bytes = malloc(howMany); + + + /* Get random data */ + if(CryptGenRandom( + hCryptProv, + howMany, + bytes) == 0) + returnVal = Py_BuildValue("s#", NULL, 0); + else + returnVal = Py_BuildValue("s#", bytes, howMany); + + free(bytes); + 
CryptReleaseContext(hCryptProv, 0); + + return returnVal; +} + + + +/* List of functions exported by this module */ + +static struct PyMethodDef win32prng_functions[] = { + {"getRandomBytes", (PyCFunction)getRandomBytes, METH_VARARGS}, + {NULL, NULL} /* Sentinel */ +}; + + +/* Initialize this module. */ + +DL_EXPORT(void) initwin32prng(void) +{ + Py_InitModule("win32prng", win32prng_functions); +} diff --git a/gam/gdata/analytics/tlslite/utils/xmltools.py b/gam/gdata/analytics/tlslite/utils/xmltools.py new file mode 100755 index 00000000000..c1e8c4d950a --- /dev/null +++ b/gam/gdata/analytics/tlslite/utils/xmltools.py @@ -0,0 +1,202 @@ +"""Helper functions for XML. + +This module has misc. helper functions for working with XML DOM nodes.""" + +from compat import * +import os +import re + +if os.name == "java": + # Only for Jython + from javax.xml.parsers import * + import java + + builder = DocumentBuilderFactory.newInstance().newDocumentBuilder() + + def parseDocument(s): + stream = java.io.ByteArrayInputStream(java.lang.String(s).getBytes()) + return builder.parse(stream) +else: + from xml.dom import minidom + from xml.sax import saxutils + + def parseDocument(s): + return minidom.parseString(s) + +def parseAndStripWhitespace(s): + try: + element = parseDocument(s).documentElement + except BaseException, e: + raise SyntaxError(str(e)) + stripWhitespace(element) + return element + +#Goes through a DOM tree and removes whitespace besides child elements, +#as long as this whitespace is correctly tab-ified +def stripWhitespace(element, tab=0): + element.normalize() + + lastSpacer = "\n" + ("\t"*tab) + spacer = lastSpacer + "\t" + + #Zero children aren't allowed (i.e. ) + #This makes writing output simpler, and matches Canonical XML + if element.childNodes.length==0: #DON'T DO len(element.childNodes) - doesn't work in Jython + raise SyntaxError("Empty XML elements not allowed") + + #If there's a single child, it must be text context + if element.childNodes.length==1: + if element.firstChild.nodeType == element.firstChild.TEXT_NODE: + #If it's an empty element, remove + if element.firstChild.data == lastSpacer: + element.removeChild(element.firstChild) + return + #If not text content, give an error + elif element.firstChild.nodeType == element.firstChild.ELEMENT_NODE: + raise SyntaxError("Bad whitespace under '%s'" % element.tagName) + else: + raise SyntaxError("Unexpected node type in XML document") + + #Otherwise there's multiple child element + child = element.firstChild + while child: + if child.nodeType == child.ELEMENT_NODE: + stripWhitespace(child, tab+1) + child = child.nextSibling + elif child.nodeType == child.TEXT_NODE: + if child == element.lastChild: + if child.data != lastSpacer: + raise SyntaxError("Bad whitespace under '%s'" % element.tagName) + elif child.data != spacer: + raise SyntaxError("Bad whitespace under '%s'" % element.tagName) + next = child.nextSibling + element.removeChild(child) + child = next + else: + raise SyntaxError("Unexpected node type in XML document") + + +def checkName(element, name): + if element.nodeType != element.ELEMENT_NODE: + raise SyntaxError("Missing element: '%s'" % name) + + if name == None: + return + + if element.tagName != name: + raise SyntaxError("Wrong element name: should be '%s', is '%s'" % (name, element.tagName)) + +def getChild(element, index, name=None): + if element.nodeType != element.ELEMENT_NODE: + raise SyntaxError("Wrong node type in getChild()") + + child = element.childNodes.item(index) + if child == None: + raise 
SyntaxError("Missing child: '%s'" % name) + checkName(child, name) + return child + +def getChildIter(element, index): + class ChildIter: + def __init__(self, element, index): + self.element = element + self.index = index + + def next(self): + if self.index < len(self.element.childNodes): + retVal = self.element.childNodes.item(self.index) + self.index += 1 + else: + retVal = None + return retVal + + def checkEnd(self): + if self.index != len(self.element.childNodes): + raise SyntaxError("Too many elements under: '%s'" % self.element.tagName) + return ChildIter(element, index) + +def getChildOrNone(element, index): + if element.nodeType != element.ELEMENT_NODE: + raise SyntaxError("Wrong node type in getChild()") + child = element.childNodes.item(index) + return child + +def getLastChild(element, index, name=None): + if element.nodeType != element.ELEMENT_NODE: + raise SyntaxError("Wrong node type in getLastChild()") + + child = element.childNodes.item(index) + if child == None: + raise SyntaxError("Missing child: '%s'" % name) + if child != element.lastChild: + raise SyntaxError("Too many elements under: '%s'" % element.tagName) + checkName(child, name) + return child + +#Regular expressions for syntax-checking attribute and element content +nsRegEx = "http://trevp.net/cryptoID\Z" +cryptoIDRegEx = "([a-km-z3-9]{5}\.){3}[a-km-z3-9]{5}\Z" +urlRegEx = "http(s)?://.{1,100}\Z" +sha1Base64RegEx = "[A-Za-z0-9+/]{27}=\Z" +base64RegEx = "[A-Za-z0-9+/]+={0,4}\Z" +certsListRegEx = "(0)?(1)?(2)?(3)?(4)?(5)?(6)?(7)?(8)?(9)?\Z" +keyRegEx = "[A-Z]\Z" +keysListRegEx = "(A)?(B)?(C)?(D)?(E)?(F)?(G)?(H)?(I)?(J)?(K)?(L)?(M)?(N)?(O)?(P)?(Q)?(R)?(S)?(T)?(U)?(V)?(W)?(X)?(Y)?(Z)?\Z" +dateTimeRegEx = "\d\d\d\d-\d\d-\d\dT\d\d:\d\d:\d\dZ\Z" +shortStringRegEx = ".{1,100}\Z" +exprRegEx = "[a-zA-Z0-9 ,()]{1,200}\Z" +notAfterDeltaRegEx = "0|([1-9][0-9]{0,8})\Z" #A number from 0 to (1 billion)-1 +booleanRegEx = "(true)|(false)" + +def getReqAttribute(element, attrName, regEx=""): + if element.nodeType != element.ELEMENT_NODE: + raise SyntaxError("Wrong node type in getReqAttribute()") + + value = element.getAttribute(attrName) + if not value: + raise SyntaxError("Missing Attribute: " + attrName) + if not re.match(regEx, value): + raise SyntaxError("Bad Attribute Value for '%s': '%s' " % (attrName, value)) + element.removeAttribute(attrName) + return str(value) #de-unicode it; this is needed for bsddb, for example + +def getAttribute(element, attrName, regEx=""): + if element.nodeType != element.ELEMENT_NODE: + raise SyntaxError("Wrong node type in getAttribute()") + + value = element.getAttribute(attrName) + if value: + if not re.match(regEx, value): + raise SyntaxError("Bad Attribute Value for '%s': '%s' " % (attrName, value)) + element.removeAttribute(attrName) + return str(value) #de-unicode it; this is needed for bsddb, for example + +def checkNoMoreAttributes(element): + if element.nodeType != element.ELEMENT_NODE: + raise SyntaxError("Wrong node type in checkNoMoreAttributes()") + + if element.attributes.length!=0: + raise SyntaxError("Extra attributes on '%s'" % element.tagName) + +def getText(element, regEx=""): + textNode = element.firstChild + if textNode == None: + raise SyntaxError("Empty element '%s'" % element.tagName) + if textNode.nodeType != textNode.TEXT_NODE: + raise SyntaxError("Non-text node: '%s'" % element.tagName) + if not re.match(regEx, textNode.data): + raise SyntaxError("Bad Text Value for '%s': '%s' " % (element.tagName, textNode.data)) + return str(textNode.data) #de-unicode it; this is 
needed for bsddb, for example + +#Function for adding tabs to a string +def indent(s, steps, ch="\t"): + tabs = ch*steps + if s[-1] != "\n": + s = tabs + s.replace("\n", "\n"+tabs) + else: + s = tabs + s.replace("\n", "\n"+tabs) + s = s[ : -len(tabs)] + return s + +def escape(s): + return saxutils.escape(s) diff --git a/gam/gdata/analytics/tlslite/utils/xmltools.pyc b/gam/gdata/analytics/tlslite/utils/xmltools.pyc new file mode 100755 index 0000000000000000000000000000000000000000..06981e70653ed0af89fffd8971f5a2cb46f11ca7 GIT binary patch literal 8173 zcmb_hU3U}58J?Adv4o6`0rM5YLLkTi8wg*Cn}itSG)_RYYbOw4j=E^qwy?A-cE;ES zd=qj`drt3qdfMxr-uLt-_r2&%Z<`;`AJHGsKF>QVS$3R+7GY*I`_A|K@x1TMWdA)- zxH{sOYbyPh!SBaNy5%SpC{;o3s34;nj+$dBs~Qm(^r=dp3U{l={pzJskMqnc1r_X4-=MHZMWfQEPc@3FIiM;7%HdSIRb#Je4ywu^ zzWdX*L0$?&D!>4H)HjZ*>{CHeh{If7Fd)POD%dN;gDM!5!idxkN$nvO>{FG)Di~Ik zBP!UhDo0gtKvj;Z;Gn9!p@I=rIj(|3s`91^4y(!u6&z6&R|Q8^<)jLZsmdu8yrC*@ zso=P(oK^vvosf~wNZFP0+bTFIQ=b*$DIu0rfL^DijD4Sx2Jdifv!%Dmc^}!QVWSl$ z?!rn`Gxa#q?n0cn_wU>uFBImM>e_9_!Ac`^mwXiKx;E}E?bKm4PL}J@qPto*OYH65 zx_ifs;vm%H_5a{s2{KlO**j&E!_Q;?sbAwa)gX&Bg`_VbGpKaboZYn#mt%{^qVq&!;cm* zic+9q;fB2zL&Nsk?8e8Vm3kux6SE8DmY?Xb#9?I%S~_YOtOTNxOkYE3YGQsdju#u@ znBQvYv8EsSi(xXq82HAYHx1pWn{a-`)Ejz!t=TYf+|c8#bu)mjGRdvDw$cnEGX#Pr zpOkajvCQM5n)Rq2#7#@!lC{4kqX{e9>=agVNXtsgzBZe<3$Mt^J;-EI%K{nH%Y)dH zJ$W4d5R&GM%8?aP&}68ra0Z3-{aj=!U=i)T=#0vlj9Mx4g|A7MHsnI86pV}{%kflNjLWzseIQLc+pQ%=^|?+iL4 zPQj`3yGHhM+te1}>$g}EIQkgIg60-e!J-3)z)LU@2zzKFD=oy#00#uqDTU1kxi*9C z5VpZCY4DwN$ z2|xKxsJpkWC#N=YYVDuuMNTDub&zv0$n8J}9LN)&*en45&`YQeD~s&(2!yI}jOzILFV92D)qw;yVU{iZY9y@D6QS?9&BbTJ&fy*aGy zUH~K#2rFx9R6+8*NLarK!gRA`)&*Hp#nXUS-Sh)@wWG7{N(2mZ z&z{j|^=Z_85ru25ux7$Qz}YqHt(s ze~l2Rx#aC*V^%NJ6K$s0MzECFKZns}ptqTwXNF<6xy)f_Br}-JJ9%f=+2<6I5905L z7NN2*;OYur0a36)z?7WMpff`pL zpS@$uPB0Ty$)Qw^qtJ6v5SQwS)JW%OJ&}ayMr|pqEmM8&V(1h*A%sj3ViTn@^@}PQ8w;vN4IcBH}0v*)rX0bGVza&^xrC?;;v8^5TSLeH^*vB`u|6 zhzLQ%9!jH{O5c`)XseY%iH5O_tKMPIM492NnjU%Ry@Ra84O;$E7IlEZnm0p*zQ=a9z$UKVuz)F$-fopz z<`D!$!>sXMU@L-e5}GglMo)CgJB#t?`g8%Aa*jENo#9NMutQ#)^DZ;C$5Fu@OV7wVg&)s;usg@krh4Jzc^5Vujwdw zf>G0xz%fT4RjR91qGh!zZmwFjr?hI-yU3N0f8OOS%aMh>#u3FE$8gXVrOA#yC!fpr zZ*JDc#sqS)6KT9z`x?_4#szJ7X(K?&j{QR&V4*0dko0wARw0;M&-arcE@5$M06z`P zJGnIhU@?vDoai0U5?9-txSP<2KIM-4kqO)aa=-7?euK*YnIAvt)acQ`UWCywvsH0> zma@Cc#-OI#JYNorDeIHSclLa@JJH+^dcRjNSH^`q`NvJ<@=mdd%WSKcw94CK2bpSc zXLI13(!oE>Eh^((j}yKkuD=$LAr``4e2`4dr0q(-avDPC>-e_ z%J4U;G>NL4NK;6+kfxD7LHaq;r${qMpCNsYbQ|doQW@zk(l3yHiR2;8BF!Ovf%GNP zJ*4|cUm;aw>eHBRKCpjd{E=*qrHM{f!Mf;Ly0}8$1|~kks=*cad};I{SoW@E8AIZW zFG>$Cf#nZS{SbV4Z(|gr&?rkLSqVp9lotGk4o3|rs$+;$c>$*1qVS4_$oL2DTC7md zw1Y|q2U>ggA)y`*zQ8Rw{R}QF+9n8RmsMK=duyk5NCy`e>_KmDZIRsC*%rO<$)v#@ z#G@4xZVN^`bVUAdVE`UEJ2V$Tz)C2Wh&%&W7wX!}xot;*)G#_-aiWnhf}v{DH?<|X zg|UakP9u+&(-qee{(LNyfxg_;CE?=qnv|Y$CB5-NgO~-_+s%|eg4AWkgH9h}-C-Q% z5brWRA%i>8r_KK#(ofbH!fX+>zo`$eDC%JIX!pa=+u<){{Ted$Ca_WOXUsle_93&a z67xPH?FO@tk@ZAAGVPx+n&!Q!#Z?L<6$hg;CW>VWN#8;CisImF18%k8=d2}KKli;@ zl9Db9OzvP#_9PnLiIcFyXI&a)gsU`x2MU|`F4b7q6dLOv(TYa{Y(SEv9a<4FZMt{TDW z*|w4+$mni|gND8KXH&fJ0Q!F2hCXR(>01C02gtPbChSoo?S)!;!C~(=_JfSLYRuq9 zNbbkddk@~f5#;7b-x=R16pxp*_VC>~8Q4JN4OVPg1<(H0DyWh6KtY3$z@C%ymaR8J zq$dV&apm8lnA%4k2lEkF$B1*#IpNr=#tvu^e#J@_Pyp-TI}X_UVw|Opq{eVvVmt60 zhXT1$1{m?e_0R^6mFIq>5?a9{t9YS3E?R?--igRVk-h!2Z!KJW61Ez+zmubvg?+|% zg74adEiFz1u7awETR+_!w$bi*TN0VSp_E=86`UdGc;+bX?P_gA>6|-$^tDAQIb^77 zek<(tL(AtF`5#;+SEHN~Pw4VQQLW-m7e_=+U5lG7paz@7+Ug?XTtiLeTin_>-lN9l z^&!#r8IAE^`#tKWeGU7>ubDVmPw!$^XCiI~16H>eVmTQeFX?HV(l>pY(JWn+={#LKf|J36+Meo$7DhW((|)=T~+KKWd` 
This is the text) + extension_elements: list (optional) A list of ExtensionElement instances + which are children of this element. + extension_attributes: dict (optional) A dictionary of strings which are + the values for additional XML attributes of this element. + """ + + self.start_index = start_index + self.category = category or [] + self.id = atom_id + self.link = link or [] + self.title = title + self.updated = updated + self.entry = entry or [] + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + +def SitesFeedFromString(xml_string): + return atom.CreateClassFromXMLString(SitesFeed, xml_string) + + +class SitemapsEntry(atom.Entry, LinkFinder): + """A Google Webmaster Tools meta Sitemaps Entry flavor of an Atom Entry """ + + _tag = atom.Entry._tag + _namespace = atom.Entry._namespace + _children = atom.Entry._children.copy() + _attributes = atom.Entry._attributes.copy() + _children['{%s}sitemap-type' % GWEBMASTERTOOLS_NAMESPACE] = ( + 'sitemap_type', SitemapType) + _children['{%s}sitemap-status' % GWEBMASTERTOOLS_NAMESPACE] = ( + 'sitemap_status', SitemapStatus) + _children['{%s}sitemap-last-downloaded' % GWEBMASTERTOOLS_NAMESPACE] = ( + 'sitemap_last_downloaded', SitemapLastDownloaded) + _children['{%s}sitemap-url-count' % GWEBMASTERTOOLS_NAMESPACE] = ( + 'sitemap_url_count', SitemapUrlCount) + _children['{%s}sitemap-mobile-markup-language' % GWEBMASTERTOOLS_NAMESPACE] \ + = ('sitemap_mobile_markup_language', SitemapMobileMarkupLanguage) + _children['{%s}sitemap-news-publication-label' % GWEBMASTERTOOLS_NAMESPACE] \ + = ('sitemap_news_publication_label', SitemapNewsPublicationLabel) + + def __GetId(self): + return self.__id + + # This method was created to strip the unwanted whitespace from the id's + text node.
+ def __SetId(self, id): + self.__id = id + if id is not None and id.text is not None: + self.__id.text = id.text.strip() + + id = property(__GetId, __SetId) + + def __init__(self, category=None, content=None, + atom_id=None, link=None, title=None, updated=None, + sitemap_type=None, sitemap_status=None, sitemap_last_downloaded=None, + sitemap_url_count=None, sitemap_mobile_markup_language=None, + sitemap_news_publication_label=None, + extension_elements=None, extension_attributes=None, text=None): + atom.Entry.__init__(self, category=category, + content=content, atom_id=atom_id, link=link, + title=title, updated=updated, text=text) + + self.sitemap_type = sitemap_type + self.sitemap_status = sitemap_status + self.sitemap_last_downloaded = sitemap_last_downloaded + self.sitemap_url_count = sitemap_url_count + self.sitemap_mobile_markup_language = sitemap_mobile_markup_language + self.sitemap_news_publication_label = sitemap_news_publication_label + + +def SitemapsEntryFromString(xml_string): + return atom.CreateClassFromXMLString(SitemapsEntry, xml_string) + + +class SitemapsFeed(atom.Feed, LinkFinder): + """A Google Webmaster Tools meta Sitemaps feed flavor of an Atom Feed""" + + _tag = atom.Feed._tag + _namespace = atom.Feed._namespace + _children = atom.Feed._children.copy() + _attributes = atom.Feed._attributes.copy() + _children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry', [SitemapsEntry]) + _children['{%s}sitemap-mobile' % GWEBMASTERTOOLS_NAMESPACE] = ( + 'sitemap_mobile', SitemapMobile) + _children['{%s}sitemap-news' % GWEBMASTERTOOLS_NAMESPACE] = ( + 'sitemap_news', SitemapNews) + del _children['{%s}generator' % atom.ATOM_NAMESPACE] + del _children['{%s}author' % atom.ATOM_NAMESPACE] + del _children['{%s}contributor' % atom.ATOM_NAMESPACE] + del _children['{%s}logo' % atom.ATOM_NAMESPACE] + del _children['{%s}icon' % atom.ATOM_NAMESPACE] + del _children['{%s}rights' % atom.ATOM_NAMESPACE] + del _children['{%s}subtitle' % atom.ATOM_NAMESPACE] + + def __GetId(self): + return self.__id + + def __SetId(self, id): + self.__id = id + if id is not None and id.text is not None: + self.__id.text = id.text.strip() + + id = property(__GetId, __SetId) + + def __init__(self, category=None, content=None, + atom_id=None, link=None, title=None, updated=None, + entry=None, sitemap_mobile=None, sitemap_news=None, + extension_elements=None, extension_attributes=None, text=None): + + self.category = category or [] + self.id = atom_id + self.link = link or [] + self.title = title + self.updated = updated + self.entry = entry or [] + self.text = text + self.sitemap_mobile = sitemap_mobile + self.sitemap_news = sitemap_news + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + +def SitemapsFeedFromString(xml_string): + return atom.CreateClassFromXMLString(SitemapsFeed, xml_string) diff --git a/gam/gdata/analytics/webmastertools/data.py b/gam/gdata/analytics/webmastertools/data.py new file mode 100755 index 00000000000..8b50a47a89e --- /dev/null +++ b/gam/gdata/analytics/webmastertools/data.py @@ -0,0 +1,217 @@ +#!/usr/bin/python +# +# Copyright (C) 2009 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +"""Contains the data classes of the Google Webmaster Tools Data API""" + + +__author__ = 'j.s@google.com (Jeff Scudder)' + + +import atom.core +import atom.data +import gdata.data +import gdata.opensearch.data + + +WT_TEMPLATE = '{http://schemas.google.com/webmaster/tools/2007/}%s' + + +class CrawlIssueCrawlType(atom.core.XmlElement): + """Type of crawl of the crawl issue""" + _qname = WT_TEMPLATE % 'crawl-type' + + +class CrawlIssueDateDetected(atom.core.XmlElement): + """Detection date for the issue""" + _qname = WT_TEMPLATE % 'date-detected' + + +class CrawlIssueDetail(atom.core.XmlElement): + """Detail of the crawl issue""" + _qname = WT_TEMPLATE % 'detail' + + +class CrawlIssueIssueType(atom.core.XmlElement): + """Type of crawl issue""" + _qname = WT_TEMPLATE % 'issue-type' + + +class CrawlIssueLinkedFromUrl(atom.core.XmlElement): + """Source URL that links to the issue URL""" + _qname = WT_TEMPLATE % 'linked-from' + + +class CrawlIssueUrl(atom.core.XmlElement): + """URL affected by the crawl issue""" + _qname = WT_TEMPLATE % 'url' + + +class CrawlIssueEntry(gdata.data.GDEntry): + """Describes a crawl issue entry""" + date_detected = CrawlIssueDateDetected + url = CrawlIssueUrl + detail = CrawlIssueDetail + issue_type = CrawlIssueIssueType + crawl_type = CrawlIssueCrawlType + linked_from = [CrawlIssueLinkedFromUrl] + + +class CrawlIssuesFeed(gdata.data.GDFeed): + """Feed of crawl issues for a particular site""" + entry = [CrawlIssueEntry] + + +class Indexed(atom.core.XmlElement): + """Describes the indexing status of a site""" + _qname = WT_TEMPLATE % 'indexed' + + +class Keyword(atom.core.XmlElement): + """A keyword in a site or in a link to a site""" + _qname = WT_TEMPLATE % 'keyword' + source = 'source' + + +class KeywordEntry(gdata.data.GDEntry): + """Describes a keyword entry""" + + +class KeywordsFeed(gdata.data.GDFeed): + """Feed of keywords for a particular site""" + entry = [KeywordEntry] + keyword = [Keyword] + + +class LastCrawled(atom.core.XmlElement): + """Describes the last crawled date of a site""" + _qname = WT_TEMPLATE % 'last-crawled' + + +class MessageBody(atom.core.XmlElement): + """Message body""" + _qname = WT_TEMPLATE % 'body' + + +class MessageDate(atom.core.XmlElement): + """Message date""" + _qname = WT_TEMPLATE % 'date' + + +class MessageLanguage(atom.core.XmlElement): + """Message language""" + _qname = WT_TEMPLATE % 'language' + + +class MessageRead(atom.core.XmlElement): + """Indicates if the message has already been read""" + _qname = WT_TEMPLATE % 'read' + + +class MessageSubject(atom.core.XmlElement): + """Message subject""" + _qname = WT_TEMPLATE % 'subject' + + +class SiteId(atom.core.XmlElement): + """Site URL""" + _qname = WT_TEMPLATE % 'id' + + +class MessageEntry(gdata.data.GDEntry): + """Describes a message entry""" + wt_id = SiteId + subject = MessageSubject + date = MessageDate + body = MessageBody + language = MessageLanguage + read = MessageRead + + +class MessagesFeed(gdata.data.GDFeed): + """Describes a messages feed""" + entry = [MessageEntry] + + +class SitemapEntry(gdata.data.GDEntry): + """Describes a sitemap entry""" + 
indexed = Indexed + wt_id = SiteId + + +class SitemapMobileMarkupLanguage(atom.core.XmlElement): + """Describes a markup language for URLs in this sitemap""" + _qname = WT_TEMPLATE % 'sitemap-mobile-markup-language' + + +class SitemapMobile(atom.core.XmlElement): + """Lists acceptable mobile markup languages for URLs in this sitemap""" + _qname = WT_TEMPLATE % 'sitemap-mobile' + sitemap_mobile_markup_language = [SitemapMobileMarkupLanguage] + + +class SitemapNewsPublicationLabel(atom.core.XmlElement): + """Specifies the publication label for this sitemap""" + _qname = WT_TEMPLATE % 'sitemap-news-publication-label' + + +class SitemapNews(atom.core.XmlElement): + """Lists publication labels for this sitemap""" + _qname = WT_TEMPLATE % 'sitemap-news' + sitemap_news_publication_label = [SitemapNewsPublicationLabel] + + +class SitemapType(atom.core.XmlElement): + """Indicates the type of sitemap. Not used for News or Mobile Sitemaps""" + _qname = WT_TEMPLATE % 'sitemap-type' + + +class SitemapUrlCount(atom.core.XmlElement): + """Indicates the number of URLs contained in the sitemap""" + _qname = WT_TEMPLATE % 'sitemap-url-count' + + +class SitemapsFeed(gdata.data.GDFeed): + """Describes a sitemaps feed""" + entry = [SitemapEntry] + + +class VerificationMethod(atom.core.XmlElement): + """Describes a verification method that may be used for a site""" + _qname = WT_TEMPLATE % 'verification-method' + in_use = 'in-use' + type = 'type' + + +class Verified(atom.core.XmlElement): + """Describes the verification status of a site""" + _qname = WT_TEMPLATE % 'verified' + + +class SiteEntry(gdata.data.GDEntry): + """Describes a site entry""" + indexed = Indexed + wt_id = SiteId + verified = Verified + last_crawled = LastCrawled + verification_method = [VerificationMethod] + + +class SitesFeed(gdata.data.GDFeed): + """Describes a sites feed""" + entry = [SiteEntry] + + diff --git a/gam/gdata/analytics/webmastertools/service.py b/gam/gdata/analytics/webmastertools/service.py new file mode 100755 index 00000000000..8c3286db40b --- /dev/null +++ b/gam/gdata/analytics/webmastertools/service.py @@ -0,0 +1,516 @@ +#!/usr/bin/python +# +# Copyright (C) 2008 Yu-Jie Lin +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""GWebmasterToolsService extends the GDataService to streamline +Google Webmaster Tools operations. + + GWebmasterToolsService: Provides methods to query feeds and manipulate items. + Extends GDataService. 
+""" + +__author__ = 'livibetter (Yu-Jie Lin)' + +import urllib +import gdata +import atom.service +import gdata.service +import gdata.webmastertools as webmastertools +import atom + + +FEED_BASE = 'https://www.google.com/webmasters/tools/feeds/' +SITES_FEED = FEED_BASE + 'sites/' +SITE_TEMPLATE = SITES_FEED + '%s' +SITEMAPS_FEED_TEMPLATE = FEED_BASE + '%(site_id)s/sitemaps/' +SITEMAP_TEMPLATE = SITEMAPS_FEED_TEMPLATE + '%(sitemap_id)s' + + +class Error(Exception): + pass + + +class RequestError(Error): + pass + + +class GWebmasterToolsService(gdata.service.GDataService): + """Client for the Google Webmaster Tools service.""" + + def __init__(self, email=None, password=None, source=None, + server='www.google.com', **kwargs): + """Creates a client for the Google Webmaster Tools service. + + Args: + email: string (optional) The user's email address, used for + authentication. + password: string (optional) The user's password. + source: string (optional) The name of the user's application. + server: string (optional) The name of the server to which a connection + will be opened. Default value: 'www.google.com'. + **kwargs: The other parameters to pass to gdata.service.GDataService + constructor. + """ + gdata.service.GDataService.__init__( + self, email=email, password=password, service='sitemaps', source=source, + server=server, **kwargs) + + def GetSitesFeed(self, uri=SITES_FEED, + converter=webmastertools.SitesFeedFromString): + """Gets sites feed. + + Args: + uri: str (optional) URI to retrieve sites feed. + converter: func (optional) Function which is executed on the server's + response before it is returned. Usually this is a function like + SitesFeedFromString which will parse the response and turn it into + an object. + + Returns: + If converter is defined, the results of running converter on the server's + response. Otherwise, it will be a SitesFeed object. + """ + return self.Get(uri, converter=converter) + + def AddSite(self, site_uri, uri=SITES_FEED, + url_params=None, escape_params=True, converter=None): + """Adds a site to Google Webmaster Tools. + + Args: + site_uri: str URI of which site to add. + uri: str (optional) URI to add a site. + url_params: dict (optional) Additional URL parameters to be included + in the insertion request. + escape_params: boolean (optional) If true, the url_parameters will be + escaped before they are included in the request. + converter: func (optional) Function which is executed on the server's + response before it is returned. Usually this is a function like + SitesEntryFromString which will parse the response and turn it into + an object. + + Returns: + If converter is defined, the results of running converter on the server's + response. Otherwise, it will be a SitesEntry object. + """ + + site_entry = webmastertools.SitesEntry() + site_entry.content = atom.Content(src=site_uri) + response = self.Post(site_entry, uri, + url_params=url_params, + escape_params=escape_params, converter=converter) + if not converter and isinstance(response, atom.Entry): + return webmastertools.SitesEntryFromString(response.ToString()) + return response + + def DeleteSite(self, site_uri, uri=SITE_TEMPLATE, + url_params=None, escape_params=True): + """Removes a site from Google Webmaster Tools. + + Args: + site_uri: str URI of which site to remove. + uri: str (optional) A URI template to send DELETE request. + Default SITE_TEMPLATE. + url_params: dict (optional) Additional URL parameters to be included + in the insertion request. 
+ escape_params: boolean (optional) If true, the url_parameters will be + escaped before they are included in the request. + + Returns: + True if the delete succeeded. + """ + + return self.Delete( + uri % urllib.quote_plus(site_uri), + url_params=url_params, escape_params=escape_params) + + def VerifySite(self, site_uri, verification_method, uri=SITE_TEMPLATE, + url_params=None, escape_params=True, converter=None): + """Requests a verification of a site. + + Args: + site_uri: str URI of which site to add sitemap for. + verification_method: str The method to verify a site. Valid values are + 'htmlpage', and 'metatag'. + uri: str (optional) URI template to update a site. + Default SITE_TEMPLATE. + url_params: dict (optional) Additional URL parameters to be included + in the insertion request. + escape_params: boolean (optional) If true, the url_parameters will be + escaped before they are included in the request. + converter: func (optional) Function which is executed on the server's + response before it is returned. Usually this is a function like + SitemapsEntryFromString which will parse the response and turn it into + an object. + + Returns: + If converter is defined, the results of running converter on the server's + response. Otherwise, it will be a SitesEntry object. + """ + + site_entry = webmastertools.SitesEntry( + atom_id=atom.Id(text=site_uri), + category=atom.Category( + scheme='http://schemas.google.com/g/2005#kind', + term='http://schemas.google.com/webmasters/tools/2007#sites-info'), + verification_method=webmastertools.VerificationMethod( + type=verification_method, in_use='true') + ) + response = self.Put( + site_entry, + uri % urllib.quote_plus(site_uri), + url_params=url_params, + escape_params=escape_params, converter=converter) + if not converter and isinstance(response, atom.Entry): + return webmastertools.SitesEntryFromString(response.ToString()) + return response + + + def UpdateGeoLocation(self, site_uri, geolocation, uri=SITE_TEMPLATE, + url_params=None, escape_params=True, converter=None): + """Updates geolocation setting of a site. + + Args: + site_uri: str URI of which site to add sitemap for. + geolocation: str The geographic location. Valid values are listed in + http://en.wikipedia.org/wiki/ISO_3166-1_alpha-2 + uri: str (optional) URI template to update a site. + Default SITE_TEMPLATE. + url_params: dict (optional) Additional URL parameters to be included + in the insertion request. + escape_params: boolean (optional) If true, the url_parameters will be + escaped before they are included in the request. + converter: func (optional) Function which is executed on the server's + response before it is returned. Usually this is a function like + SitemapsEntryFromString which will parse the response and turn it into + an object. + + Returns: + If converter is defined, the results of running converter on the server's + response. Otherwise, it will be a SitesEntry object. 
+ """ + + site_entry = webmastertools.SitesEntry( + atom_id=atom.Id(text=site_uri), + category=atom.Category( + scheme='http://schemas.google.com/g/2005#kind', + term='http://schemas.google.com/webmasters/tools/2007#sites-info'), + geolocation=webmastertools.GeoLocation(text=geolocation) + ) + response = self.Put( + site_entry, + uri % urllib.quote_plus(site_uri), + url_params=url_params, + escape_params=escape_params, converter=converter) + if not converter and isinstance(response, atom.Entry): + return webmastertools.SitesEntryFromString(response.ToString()) + return response + + def UpdateCrawlRate(self, site_uri, crawl_rate, uri=SITE_TEMPLATE, + url_params=None, escape_params=True, converter=None): + """Updates crawl rate setting of a site. + + Args: + site_uri: str URI of which site to add sitemap for. + crawl_rate: str The crawl rate for a site. Valid values are 'slower', + 'normal', and 'faster'. + uri: str (optional) URI template to update a site. + Default SITE_TEMPLATE. + url_params: dict (optional) Additional URL parameters to be included + in the insertion request. + escape_params: boolean (optional) If true, the url_parameters will be + escaped before they are included in the request. + converter: func (optional) Function which is executed on the server's + response before it is returned. Usually this is a function like + SitemapsEntryFromString which will parse the response and turn it into + an object. + + Returns: + If converter is defined, the results of running converter on the server's + response. Otherwise, it will be a SitesEntry object. + """ + + site_entry = webmastertools.SitesEntry( + atom_id=atom.Id(text=site_uri), + category=atom.Category( + scheme='http://schemas.google.com/g/2005#kind', + term='http://schemas.google.com/webmasters/tools/2007#sites-info'), + crawl_rate=webmastertools.CrawlRate(text=crawl_rate) + ) + response = self.Put( + site_entry, + uri % urllib.quote_plus(site_uri), + url_params=url_params, + escape_params=escape_params, converter=converter) + if not converter and isinstance(response, atom.Entry): + return webmastertools.SitesEntryFromString(response.ToString()) + return response + + def UpdatePreferredDomain(self, site_uri, preferred_domain, uri=SITE_TEMPLATE, + url_params=None, escape_params=True, converter=None): + """Updates preferred domain setting of a site. + + Note that if using 'preferwww', will also need www.example.com in account to + take effect. + + Args: + site_uri: str URI of which site to add sitemap for. + preferred_domain: str The preferred domain for a site. Valid values are 'none', + 'preferwww', and 'prefernowww'. + uri: str (optional) URI template to update a site. + Default SITE_TEMPLATE. + url_params: dict (optional) Additional URL parameters to be included + in the insertion request. + escape_params: boolean (optional) If true, the url_parameters will be + escaped before they are included in the request. + converter: func (optional) Function which is executed on the server's + response before it is returned. Usually this is a function like + SitemapsEntryFromString which will parse the response and turn it into + an object. + + Returns: + If converter is defined, the results of running converter on the server's + response. Otherwise, it will be a SitesEntry object. 
+ """ + + site_entry = webmastertools.SitesEntry( + atom_id=atom.Id(text=site_uri), + category=atom.Category( + scheme='http://schemas.google.com/g/2005#kind', + term='http://schemas.google.com/webmasters/tools/2007#sites-info'), + preferred_domain=webmastertools.PreferredDomain(text=preferred_domain) + ) + response = self.Put( + site_entry, + uri % urllib.quote_plus(site_uri), + url_params=url_params, + escape_params=escape_params, converter=converter) + if not converter and isinstance(response, atom.Entry): + return webmastertools.SitesEntryFromString(response.ToString()) + return response + + def UpdateEnhancedImageSearch(self, site_uri, enhanced_image_search, + uri=SITE_TEMPLATE, url_params=None, escape_params=True, converter=None): + """Updates enhanced image search setting of a site. + + Args: + site_uri: str URI of which site to add sitemap for. + enhanced_image_search: str The enhanced image search setting for a site. + Valid values are 'true', and 'false'. + uri: str (optional) URI template to update a site. + Default SITE_TEMPLATE. + url_params: dict (optional) Additional URL parameters to be included + in the insertion request. + escape_params: boolean (optional) If true, the url_parameters will be + escaped before they are included in the request. + converter: func (optional) Function which is executed on the server's + response before it is returned. Usually this is a function like + SitemapsEntryFromString which will parse the response and turn it into + an object. + + Returns: + If converter is defined, the results of running converter on the server's + response. Otherwise, it will be a SitesEntry object. + """ + + site_entry = webmastertools.SitesEntry( + atom_id=atom.Id(text=site_uri), + category=atom.Category( + scheme='http://schemas.google.com/g/2005#kind', + term='http://schemas.google.com/webmasters/tools/2007#sites-info'), + enhanced_image_search=webmastertools.EnhancedImageSearch( + text=enhanced_image_search) + ) + response = self.Put( + site_entry, + uri % urllib.quote_plus(site_uri), + url_params=url_params, + escape_params=escape_params, converter=converter) + if not converter and isinstance(response, atom.Entry): + return webmastertools.SitesEntryFromString(response.ToString()) + return response + + def GetSitemapsFeed(self, site_uri, uri=SITEMAPS_FEED_TEMPLATE, + converter=webmastertools.SitemapsFeedFromString): + """Gets sitemaps feed of a site. + + Args: + site_uri: str (optional) URI of which site to retrieve its sitemaps feed. + uri: str (optional) URI to retrieve sites feed. + converter: func (optional) Function which is executed on the server's + response before it is returned. Usually this is a function like + SitemapsFeedFromString which will parse the response and turn it into + an object. + + Returns: + If converter is defined, the results of running converter on the server's + response. Otherwise, it will be a SitemapsFeed object. + """ + return self.Get(uri % {'site_id': urllib.quote_plus(site_uri)}, + converter=converter) + + def AddSitemap(self, site_uri, sitemap_uri, sitemap_type='WEB', + uri=SITEMAPS_FEED_TEMPLATE, + url_params=None, escape_params=True, converter=None): + """Adds a regular sitemap to a site. + + Args: + site_uri: str URI of which site to add sitemap for. + sitemap_uri: str URI of sitemap to add to a site. + sitemap_type: str Type of added sitemap. Valid types: WEB, VIDEO, or CODE. + uri: str (optional) URI template to add a sitemap. + Default SITEMAP_FEED_TEMPLATE. 
+ url_params: dict (optional) Additional URL parameters to be included + in the insertion request. + escape_params: boolean (optional) If true, the url_parameters will be + escaped before they are included in the request. + converter: func (optional) Function which is executed on the server's + response before it is returned. Usually this is a function like + SitemapsEntryFromString which will parse the response and turn it into + an object. + + Returns: + If converter is defined, the results of running converter on the server's + response. Otherwise, it will be a SitemapsEntry object. + """ + + sitemap_entry = webmastertools.SitemapsEntry( + atom_id=atom.Id(text=sitemap_uri), + category=atom.Category( + scheme='http://schemas.google.com/g/2005#kind', + term='http://schemas.google.com/webmasters/tools/2007#sitemap-regular'), + sitemap_type=webmastertools.SitemapType(text=sitemap_type)) + response = self.Post( + sitemap_entry, + uri % {'site_id': urllib.quote_plus(site_uri)}, + url_params=url_params, + escape_params=escape_params, converter=converter) + if not converter and isinstance(response, atom.Entry): + return webmastertools.SitemapsEntryFromString(response.ToString()) + return response + + def AddMobileSitemap(self, site_uri, sitemap_uri, + sitemap_mobile_markup_language='XHTML', uri=SITEMAPS_FEED_TEMPLATE, + url_params=None, escape_params=True, converter=None): + """Adds a mobile sitemap to a site. + + Args: + site_uri: str URI of which site to add sitemap for. + sitemap_uri: str URI of sitemap to add to a site. + sitemap_mobile_markup_language: str Format of added sitemap. Valid types: + XHTML, WML, or cHTML. + uri: str (optional) URI template to add a sitemap. + Default SITEMAP_FEED_TEMPLATE. + url_params: dict (optional) Additional URL parameters to be included + in the insertion request. + escape_params: boolean (optional) If true, the url_parameters will be + escaped before they are included in the request. + converter: func (optional) Function which is executed on the server's + response before it is returned. Usually this is a function like + SitemapsEntryFromString which will parse the response and turn it into + an object. + + Returns: + If converter is defined, the results of running converter on the server's + response. Otherwise, it will be a SitemapsEntry object. + """ + # FIXME + sitemap_entry = webmastertools.SitemapsEntry( + atom_id=atom.Id(text=sitemap_uri), + category=atom.Category( + scheme='http://schemas.google.com/g/2005#kind', + term='http://schemas.google.com/webmasters/tools/2007#sitemap-mobile'), + sitemap_mobile_markup_language=\ + webmastertools.SitemapMobileMarkupLanguage( + text=sitemap_mobile_markup_language)) + print sitemap_entry + response = self.Post( + sitemap_entry, + uri % {'site_id': urllib.quote_plus(site_uri)}, + url_params=url_params, + escape_params=escape_params, converter=converter) + if not converter and isinstance(response, atom.Entry): + return webmastertools.SitemapsEntryFromString(response.ToString()) + return response + + def AddNewsSitemap(self, site_uri, sitemap_uri, + sitemap_news_publication_label, uri=SITEMAPS_FEED_TEMPLATE, + url_params=None, escape_params=True, converter=None): + """Adds a news sitemap to a site. + + Args: + site_uri: str URI of which site to add sitemap for. + sitemap_uri: str URI of sitemap to add to a site. + sitemap_news_publication_label: str, list of str Publication Labels for + sitemap. + uri: str (optional) URI template to add a sitemap. + Default SITEMAP_FEED_TEMPLATE. 
+ url_params: dict (optional) Additional URL parameters to be included + in the insertion request. + escape_params: boolean (optional) If true, the url_parameters will be + escaped before they are included in the request. + converter: func (optional) Function which is executed on the server's + response before it is returned. Usually this is a function like + SitemapsEntryFromString which will parse the response and turn it into + an object. + + Returns: + If converter is defined, the results of running converter on the server's + response. Otherwise, it will be a SitemapsEntry object. + """ + + sitemap_entry = webmastertools.SitemapsEntry( + atom_id=atom.Id(text=sitemap_uri), + category=atom.Category( + scheme='http://schemas.google.com/g/2005#kind', + term='http://schemas.google.com/webmasters/tools/2007#sitemap-news'), + sitemap_news_publication_label=[], + ) + if isinstance(sitemap_news_publication_label, str): + sitemap_news_publication_label = [sitemap_news_publication_label] + for label in sitemap_news_publication_label: + sitemap_entry.sitemap_news_publication_label.append( + webmastertools.SitemapNewsPublicationLabel(text=label)) + print sitemap_entry + response = self.Post( + sitemap_entry, + uri % {'site_id': urllib.quote_plus(site_uri)}, + url_params=url_params, + escape_params=escape_params, converter=converter) + if not converter and isinstance(response, atom.Entry): + return webmastertools.SitemapsEntryFromString(response.ToString()) + return response + + def DeleteSitemap(self, site_uri, sitemap_uri, uri=SITEMAP_TEMPLATE, + url_params=None, escape_params=True): + """Removes a sitemap from a site. + + Args: + site_uri: str URI of which site to remove a sitemap from. + sitemap_uri: str URI of sitemap to remove from a site. + uri: str (optional) A URI template to send DELETE request. + Default SITEMAP_TEMPLATE. + url_params: dict (optional) Additional URL parameters to be included + in the insertion request. + escape_params: boolean (optional) If true, the url_parameters will be + escaped before they are included in the request. + + Returns: + True if the delete succeeded. + """ + + return self.Delete( + uri % {'site_id': urllib.quote_plus(site_uri), + 'sitemap_id': urllib.quote_plus(sitemap_uri)}, + url_params=url_params, escape_params=escape_params) diff --git a/gam/gdata/analytics/youtube/__init__.py b/gam/gdata/analytics/youtube/__init__.py new file mode 100755 index 00000000000..c41aaea528e --- /dev/null +++ b/gam/gdata/analytics/youtube/__init__.py @@ -0,0 +1,684 @@ +#!/usr/bin/python +# +# Copyright (C) 2008 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
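Stepping back to the webmaster tools client added just above: GWebmasterToolsService wraps the sites and sitemaps feeds behind plain method calls on top of gdata.service.GDataService. The sketch below is illustrative only and not part of the patch; the credentials, site URL and sitemap URL are placeholders, and it assumes the module is importable under its canonical gdata.webmastertools.service path.

import gdata.webmastertools.service

client = gdata.webmastertools.service.GWebmasterToolsService(
    email='user@example.com', password='secret', source='example-app')
client.ProgrammaticLogin()   # inherited from gdata.service.GDataService

# Enumerate the sites already registered for this account.
sites = client.GetSitesFeed()
for entry in sites.entry:
    print entry.title.text

# Register a new site, request HTML-page verification, then add a sitemap.
client.AddSite('http://www.example.com/')
client.VerifySite('http://www.example.com/', 'htmlpage')
client.UpdateCrawlRate('http://www.example.com/', 'normal')
client.AddSitemap('http://www.example.com/',
                  'http://www.example.com/sitemap.xml')

# Sitemaps registered for the site come back as a SitemapsFeed.
sitemaps = client.GetSitemapsFeed('http://www.example.com/')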
+ +__author__ = ('api.stephaniel@gmail.com (Stephanie Liu)' + ', api.jhartmann@gmail.com (Jochen Hartmann)') + +import atom +import gdata +import gdata.media as Media +import gdata.geo as Geo + +YOUTUBE_NAMESPACE = 'http://gdata.youtube.com/schemas/2007' +YOUTUBE_FORMAT = '{http://gdata.youtube.com/schemas/2007}format' +YOUTUBE_DEVELOPER_TAG_SCHEME = '%s/%s' % (YOUTUBE_NAMESPACE, + 'developertags.cat') +YOUTUBE_SUBSCRIPTION_TYPE_SCHEME = '%s/%s' % (YOUTUBE_NAMESPACE, + 'subscriptiontypes.cat') + +class Username(atom.AtomBase): + """The YouTube Username element""" + _tag = 'username' + _namespace = YOUTUBE_NAMESPACE + +class QueryString(atom.AtomBase): + """The YouTube QueryString element""" + _tag = 'queryString' + _namespace = YOUTUBE_NAMESPACE + + +class FirstName(atom.AtomBase): + """The YouTube FirstName element""" + _tag = 'firstName' + _namespace = YOUTUBE_NAMESPACE + + +class LastName(atom.AtomBase): + """The YouTube LastName element""" + _tag = 'lastName' + _namespace = YOUTUBE_NAMESPACE + + +class Age(atom.AtomBase): + """The YouTube Age element""" + _tag = 'age' + _namespace = YOUTUBE_NAMESPACE + + +class Books(atom.AtomBase): + """The YouTube Books element""" + _tag = 'books' + _namespace = YOUTUBE_NAMESPACE + + +class Gender(atom.AtomBase): + """The YouTube Gender element""" + _tag = 'gender' + _namespace = YOUTUBE_NAMESPACE + + +class Company(atom.AtomBase): + """The YouTube Company element""" + _tag = 'company' + _namespace = YOUTUBE_NAMESPACE + + +class Hobbies(atom.AtomBase): + """The YouTube Hobbies element""" + _tag = 'hobbies' + _namespace = YOUTUBE_NAMESPACE + + +class Hometown(atom.AtomBase): + """The YouTube Hometown element""" + _tag = 'hometown' + _namespace = YOUTUBE_NAMESPACE + + +class Location(atom.AtomBase): + """The YouTube Location element""" + _tag = 'location' + _namespace = YOUTUBE_NAMESPACE + + +class Movies(atom.AtomBase): + """The YouTube Movies element""" + _tag = 'movies' + _namespace = YOUTUBE_NAMESPACE + + +class Music(atom.AtomBase): + """The YouTube Music element""" + _tag = 'music' + _namespace = YOUTUBE_NAMESPACE + + +class Occupation(atom.AtomBase): + """The YouTube Occupation element""" + _tag = 'occupation' + _namespace = YOUTUBE_NAMESPACE + + +class School(atom.AtomBase): + """The YouTube School element""" + _tag = 'school' + _namespace = YOUTUBE_NAMESPACE + + +class Relationship(atom.AtomBase): + """The YouTube Relationship element""" + _tag = 'relationship' + _namespace = YOUTUBE_NAMESPACE + + +class Recorded(atom.AtomBase): + """The YouTube Recorded element""" + _tag = 'recorded' + _namespace = YOUTUBE_NAMESPACE + + +class Statistics(atom.AtomBase): + """The YouTube Statistics element.""" + _tag = 'statistics' + _namespace = YOUTUBE_NAMESPACE + _attributes = atom.AtomBase._attributes.copy() + _attributes['viewCount'] = 'view_count' + _attributes['videoWatchCount'] = 'video_watch_count' + _attributes['subscriberCount'] = 'subscriber_count' + _attributes['lastWebAccess'] = 'last_web_access' + _attributes['favoriteCount'] = 'favorite_count' + + def __init__(self, view_count=None, video_watch_count=None, + favorite_count=None, subscriber_count=None, last_web_access=None, + extension_elements=None, extension_attributes=None, text=None): + + self.view_count = view_count + self.video_watch_count = video_watch_count + self.subscriber_count = subscriber_count + self.last_web_access = last_web_access + self.favorite_count = favorite_count + + atom.AtomBase.__init__(self, extension_elements=extension_elements, + 
extension_attributes=extension_attributes, text=text) + + +class Status(atom.AtomBase): + """The YouTube Status element""" + _tag = 'status' + _namespace = YOUTUBE_NAMESPACE + + +class Position(atom.AtomBase): + """The YouTube Position element. The position in a playlist feed.""" + _tag = 'position' + _namespace = YOUTUBE_NAMESPACE + + +class Racy(atom.AtomBase): + """The YouTube Racy element.""" + _tag = 'racy' + _namespace = YOUTUBE_NAMESPACE + +class Description(atom.AtomBase): + """The YouTube Description element.""" + _tag = 'description' + _namespace = YOUTUBE_NAMESPACE + + +class Private(atom.AtomBase): + """The YouTube Private element.""" + _tag = 'private' + _namespace = YOUTUBE_NAMESPACE + + +class NoEmbed(atom.AtomBase): + """The YouTube VideoShare element. Whether a video can be embedded or not.""" + _tag = 'noembed' + _namespace = YOUTUBE_NAMESPACE + + +class Comments(atom.AtomBase): + """The GData Comments element""" + _tag = 'comments' + _namespace = gdata.GDATA_NAMESPACE + _children = atom.AtomBase._children.copy() + _attributes = atom.AtomBase._attributes.copy() + _children['{%s}feedLink' % gdata.GDATA_NAMESPACE] = ('feed_link', + [gdata.FeedLink]) + + def __init__(self, feed_link=None, extension_elements=None, + extension_attributes=None, text=None): + + self.feed_link = feed_link + atom.AtomBase.__init__(self, extension_elements=extension_elements, + extension_attributes=extension_attributes, text=text) + + +class Rating(atom.AtomBase): + """The GData Rating element""" + _tag = 'rating' + _namespace = gdata.GDATA_NAMESPACE + _attributes = atom.AtomBase._attributes.copy() + _attributes['min'] = 'min' + _attributes['max'] = 'max' + _attributes['numRaters'] = 'num_raters' + _attributes['average'] = 'average' + + def __init__(self, min=None, max=None, + num_raters=None, average=None, extension_elements=None, + extension_attributes=None, text=None): + + self.min = min + self.max = max + self.num_raters = num_raters + self.average = average + + atom.AtomBase.__init__(self, extension_elements=extension_elements, + extension_attributes=extension_attributes, text=text) + + +class YouTubePlaylistVideoEntry(gdata.GDataEntry): + """Represents a YouTubeVideoEntry on a YouTubePlaylist.""" + _tag = gdata.GDataEntry._tag + _namespace = gdata.GDataEntry._namespace + _children = gdata.GDataEntry._children.copy() + _attributes = gdata.GDataEntry._attributes.copy() + _children['{%s}feedLink' % gdata.GDATA_NAMESPACE] = ('feed_link', + [gdata.FeedLink]) + _children['{%s}description' % YOUTUBE_NAMESPACE] = ('description', + Description) + _children['{%s}rating' % gdata.GDATA_NAMESPACE] = ('rating', Rating) + _children['{%s}comments' % gdata.GDATA_NAMESPACE] = ('comments', Comments) + _children['{%s}statistics' % YOUTUBE_NAMESPACE] = ('statistics', Statistics) + _children['{%s}location' % YOUTUBE_NAMESPACE] = ('location', Location) + _children['{%s}position' % YOUTUBE_NAMESPACE] = ('position', Position) + _children['{%s}group' % gdata.media.MEDIA_NAMESPACE] = ('media', Media.Group) + + def __init__(self, author=None, category=None, content=None, + atom_id=None, link=None, published=None, title=None, + updated=None, feed_link=None, description=None, + rating=None, comments=None, statistics=None, + location=None, position=None, media=None, + extension_elements=None, extension_attributes=None): + + self.feed_link = feed_link + self.description = description + self.rating = rating + self.comments = comments + self.statistics = statistics + self.location = location + self.position = position + 
self.media = media + + gdata.GDataEntry.__init__(self, author=author, category=category, + content=content, atom_id=atom_id, + link=link, published=published, title=title, + updated=updated, + extension_elements=extension_elements, + extension_attributes=extension_attributes) + + +class YouTubeVideoCommentEntry(gdata.GDataEntry): + """Represents a comment on YouTube.""" + _tag = gdata.GDataEntry._tag + _namespace = gdata.GDataEntry._namespace + _children = gdata.GDataEntry._children.copy() + _attributes = gdata.GDataEntry._attributes.copy() + + +class YouTubeSubscriptionEntry(gdata.GDataEntry): + """Represents a subscription entry on YouTube.""" + _tag = gdata.GDataEntry._tag + _namespace = gdata.GDataEntry._namespace + _children = gdata.GDataEntry._children.copy() + _attributes = gdata.GDataEntry._attributes.copy() + _children['{%s}username' % YOUTUBE_NAMESPACE] = ('username', Username) + _children['{%s}queryString' % YOUTUBE_NAMESPACE] = ( + 'query_string', QueryString) + _children['{%s}feedLink' % gdata.GDATA_NAMESPACE] = ('feed_link', + [gdata.FeedLink]) + + def __init__(self, author=None, category=None, content=None, + atom_id=None, link=None, published=None, title=None, + updated=None, username=None, query_string=None, feed_link=None, + extension_elements=None, extension_attributes=None): + + gdata.GDataEntry.__init__(self, author=author, category=category, + content=content, atom_id=atom_id, link=link, + published=published, title=title, updated=updated) + + self.username = username + self.query_string = query_string + self.feed_link = feed_link + + + def GetSubscriptionType(self): + """Retrieve the type of this subscription. + + Returns: + A string that is either 'channel, 'query' or 'favorites' + """ + for category in self.category: + if category.scheme == YOUTUBE_SUBSCRIPTION_TYPE_SCHEME: + return category.term + + +class YouTubeVideoResponseEntry(gdata.GDataEntry): + """Represents a video response. 
""" + _tag = gdata.GDataEntry._tag + _namespace = gdata.GDataEntry._namespace + _children = gdata.GDataEntry._children.copy() + _attributes = gdata.GDataEntry._attributes.copy() + _children['{%s}rating' % gdata.GDATA_NAMESPACE] = ('rating', Rating) + _children['{%s}noembed' % YOUTUBE_NAMESPACE] = ('noembed', NoEmbed) + _children['{%s}statistics' % YOUTUBE_NAMESPACE] = ('statistics', Statistics) + _children['{%s}racy' % YOUTUBE_NAMESPACE] = ('racy', Racy) + _children['{%s}group' % gdata.media.MEDIA_NAMESPACE] = ('media', Media.Group) + + def __init__(self, author=None, category=None, content=None, atom_id=None, + link=None, published=None, title=None, updated=None, rating=None, + noembed=None, statistics=None, racy=None, media=None, + extension_elements=None, extension_attributes=None): + + gdata.GDataEntry.__init__(self, author=author, category=category, + content=content, atom_id=atom_id, link=link, + published=published, title=title, updated=updated) + + self.rating = rating + self.noembed = noembed + self.statistics = statistics + self.racy = racy + self.media = media or Media.Group() + + +class YouTubeContactEntry(gdata.GDataEntry): + """Represents a contact entry.""" + _tag = gdata.GDataEntry._tag + _namespace = gdata.GDataEntry._namespace + _children = gdata.GDataEntry._children.copy() + _attributes = gdata.GDataEntry._attributes.copy() + _children['{%s}username' % YOUTUBE_NAMESPACE] = ('username', Username) + _children['{%s}status' % YOUTUBE_NAMESPACE] = ('status', Status) + + + def __init__(self, author=None, category=None, content=None, atom_id=None, + link=None, published=None, title=None, updated=None, + username=None, status=None, extension_elements=None, + extension_attributes=None, text=None): + + gdata.GDataEntry.__init__(self, author=author, category=category, + content=content, atom_id=atom_id, link=link, + published=published, title=title, updated=updated) + + self.username = username + self.status = status + + +class YouTubeVideoEntry(gdata.GDataEntry): + """Represents a video on YouTube.""" + _tag = gdata.GDataEntry._tag + _namespace = gdata.GDataEntry._namespace + _children = gdata.GDataEntry._children.copy() + _attributes = gdata.GDataEntry._attributes.copy() + _children['{%s}rating' % gdata.GDATA_NAMESPACE] = ('rating', Rating) + _children['{%s}comments' % gdata.GDATA_NAMESPACE] = ('comments', Comments) + _children['{%s}noembed' % YOUTUBE_NAMESPACE] = ('noembed', NoEmbed) + _children['{%s}statistics' % YOUTUBE_NAMESPACE] = ('statistics', Statistics) + _children['{%s}recorded' % YOUTUBE_NAMESPACE] = ('recorded', Recorded) + _children['{%s}racy' % YOUTUBE_NAMESPACE] = ('racy', Racy) + _children['{%s}group' % gdata.media.MEDIA_NAMESPACE] = ('media', Media.Group) + _children['{%s}where' % gdata.geo.GEORSS_NAMESPACE] = ('geo', Geo.Where) + + def __init__(self, author=None, category=None, content=None, atom_id=None, + link=None, published=None, title=None, updated=None, rating=None, + noembed=None, statistics=None, racy=None, media=None, geo=None, + recorded=None, comments=None, extension_elements=None, + extension_attributes=None): + + self.rating = rating + self.noembed = noembed + self.statistics = statistics + self.racy = racy + self.comments = comments + self.media = media or Media.Group() + self.geo = geo + self.recorded = recorded + + gdata.GDataEntry.__init__(self, author=author, category=category, + content=content, atom_id=atom_id, link=link, + published=published, title=title, updated=updated, + extension_elements=extension_elements, + 
extension_attributes=extension_attributes) + + def GetSwfUrl(self): + """Return the URL for the embeddable Video + + Returns: + URL of the embeddable video + """ + if self.media.content: + for content in self.media.content: + if content.extension_attributes[YOUTUBE_FORMAT] == '5': + return content.url + else: + return None + + def AddDeveloperTags(self, developer_tags): + """Add a developer tag for this entry. + + Developer tags can only be set during the initial upload. + + Arguments: + developer_tags: A list of developer tags as strings. + + Returns: + A list of all developer tags for this video entry. + """ + for tag_text in developer_tags: + self.media.category.append(gdata.media.Category( + text=tag_text, label=tag_text, scheme=YOUTUBE_DEVELOPER_TAG_SCHEME)) + + return self.GetDeveloperTags() + + def GetDeveloperTags(self): + """Retrieve developer tags for this video entry.""" + developer_tags = [] + for category in self.media.category: + if category.scheme == YOUTUBE_DEVELOPER_TAG_SCHEME: + developer_tags.append(category) + if len(developer_tags) > 0: + return developer_tags + + def GetYouTubeCategoryAsString(self): + """Convenience method to return the YouTube category as string. + + YouTubeVideoEntries can contain multiple Category objects with differing + schemes. This method returns only the category with the correct + scheme, ignoring developer tags. + """ + for category in self.media.category: + if category.scheme != YOUTUBE_DEVELOPER_TAG_SCHEME: + return category.text + +class YouTubeUserEntry(gdata.GDataEntry): + """Represents a user on YouTube.""" + _tag = gdata.GDataEntry._tag + _namespace = gdata.GDataEntry._namespace + _children = gdata.GDataEntry._children.copy() + _attributes = gdata.GDataEntry._attributes.copy() + _children['{%s}username' % YOUTUBE_NAMESPACE] = ('username', Username) + _children['{%s}firstName' % YOUTUBE_NAMESPACE] = ('first_name', FirstName) + _children['{%s}lastName' % YOUTUBE_NAMESPACE] = ('last_name', LastName) + _children['{%s}age' % YOUTUBE_NAMESPACE] = ('age', Age) + _children['{%s}books' % YOUTUBE_NAMESPACE] = ('books', Books) + _children['{%s}gender' % YOUTUBE_NAMESPACE] = ('gender', Gender) + _children['{%s}company' % YOUTUBE_NAMESPACE] = ('company', Company) + _children['{%s}description' % YOUTUBE_NAMESPACE] = ('description', + Description) + _children['{%s}hobbies' % YOUTUBE_NAMESPACE] = ('hobbies', Hobbies) + _children['{%s}hometown' % YOUTUBE_NAMESPACE] = ('hometown', Hometown) + _children['{%s}location' % YOUTUBE_NAMESPACE] = ('location', Location) + _children['{%s}movies' % YOUTUBE_NAMESPACE] = ('movies', Movies) + _children['{%s}music' % YOUTUBE_NAMESPACE] = ('music', Music) + _children['{%s}occupation' % YOUTUBE_NAMESPACE] = ('occupation', Occupation) + _children['{%s}school' % YOUTUBE_NAMESPACE] = ('school', School) + _children['{%s}relationship' % YOUTUBE_NAMESPACE] = ('relationship', + Relationship) + _children['{%s}statistics' % YOUTUBE_NAMESPACE] = ('statistics', Statistics) + _children['{%s}feedLink' % gdata.GDATA_NAMESPACE] = ('feed_link', + [gdata.FeedLink]) + _children['{%s}thumbnail' % gdata.media.MEDIA_NAMESPACE] = ('thumbnail', + Media.Thumbnail) + + def __init__(self, author=None, category=None, content=None, atom_id=None, + link=None, published=None, title=None, updated=None, + username=None, first_name=None, last_name=None, age=None, + books=None, gender=None, company=None, description=None, + hobbies=None, hometown=None, location=None, movies=None, + music=None, occupation=None, school=None, relationship=None, + 
statistics=None, feed_link=None, extension_elements=None, + extension_attributes=None, text=None): + + self.username = username + self.first_name = first_name + self.last_name = last_name + self.age = age + self.books = books + self.gender = gender + self.company = company + self.description = description + self.hobbies = hobbies + self.hometown = hometown + self.location = location + self.movies = movies + self.music = music + self.occupation = occupation + self.school = school + self.relationship = relationship + self.statistics = statistics + self.feed_link = feed_link + + gdata.GDataEntry.__init__(self, author=author, category=category, + content=content, atom_id=atom_id, + link=link, published=published, + title=title, updated=updated, + extension_elements=extension_elements, + extension_attributes=extension_attributes, + text=text) + + +class YouTubeVideoFeed(gdata.GDataFeed, gdata.LinkFinder): + """Represents a video feed on YouTube.""" + _tag = gdata.GDataFeed._tag + _namespace = gdata.GDataFeed._namespace + _children = gdata.GDataFeed._children.copy() + _attributes = gdata.GDataFeed._attributes.copy() + _children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry', [YouTubeVideoEntry]) + +class YouTubePlaylistEntry(gdata.GDataEntry): + """Represents a playlist in YouTube.""" + _tag = gdata.GDataEntry._tag + _namespace = gdata.GDataEntry._namespace + _children = gdata.GDataEntry._children.copy() + _attributes = gdata.GDataEntry._attributes.copy() + _children['{%s}description' % YOUTUBE_NAMESPACE] = ('description', + Description) + _children['{%s}private' % YOUTUBE_NAMESPACE] = ('private', + Private) + _children['{%s}feedLink' % gdata.GDATA_NAMESPACE] = ('feed_link', + [gdata.FeedLink]) + + def __init__(self, author=None, category=None, content=None, + atom_id=None, link=None, published=None, title=None, + updated=None, private=None, feed_link=None, + description=None, extension_elements=None, + extension_attributes=None): + + self.description = description + self.private = private + self.feed_link = feed_link + + gdata.GDataEntry.__init__(self, author=author, category=category, + content=content, atom_id=atom_id, + link=link, published=published, title=title, + updated=updated, + extension_elements=extension_elements, + extension_attributes=extension_attributes) + + + +class YouTubePlaylistFeed(gdata.GDataFeed, gdata.LinkFinder): + """Represents a feed of a user's playlists """ + _tag = gdata.GDataFeed._tag + _namespace = gdata.GDataFeed._namespace + _children = gdata.GDataFeed._children.copy() + _attributes = gdata.GDataFeed._attributes.copy() + _children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry', + [YouTubePlaylistEntry]) + + +class YouTubePlaylistVideoFeed(gdata.GDataFeed, gdata.LinkFinder): + """Represents a feed of video entry on a playlist.""" + _tag = gdata.GDataFeed._tag + _namespace = gdata.GDataFeed._namespace + _children = gdata.GDataFeed._children.copy() + _attributes = gdata.GDataFeed._attributes.copy() + _children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry', + [YouTubePlaylistVideoEntry]) + + +class YouTubeContactFeed(gdata.GDataFeed, gdata.LinkFinder): + """Represents a feed of a users contacts.""" + _tag = gdata.GDataFeed._tag + _namespace = gdata.GDataFeed._namespace + _children = gdata.GDataFeed._children.copy() + _attributes = gdata.GDataFeed._attributes.copy() + _children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry', + [YouTubeContactEntry]) + + +class YouTubeSubscriptionFeed(gdata.GDataFeed, gdata.LinkFinder): + """Represents a feed of a users 
subscriptions.""" + _tag = gdata.GDataFeed._tag + _namespace = gdata.GDataFeed._namespace + _children = gdata.GDataFeed._children.copy() + _attributes = gdata.GDataFeed._attributes.copy() + _children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry', + [YouTubeSubscriptionEntry]) + + +class YouTubeVideoCommentFeed(gdata.GDataFeed, gdata.LinkFinder): + """Represents a feed of comments for a video.""" + _tag = gdata.GDataFeed._tag + _namespace = gdata.GDataFeed._namespace + _children = gdata.GDataFeed._children.copy() + _attributes = gdata.GDataFeed._attributes.copy() + _children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry', + [YouTubeVideoCommentEntry]) + + +class YouTubeVideoResponseFeed(gdata.GDataFeed, gdata.LinkFinder): + """Represents a feed of video responses.""" + _tag = gdata.GDataFeed._tag + _namespace = gdata.GDataFeed._namespace + _children = gdata.GDataFeed._children.copy() + _attributes = gdata.GDataFeed._attributes.copy() + _children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry', + [YouTubeVideoResponseEntry]) + + +def YouTubeVideoFeedFromString(xml_string): + return atom.CreateClassFromXMLString(YouTubeVideoFeed, xml_string) + + +def YouTubeVideoEntryFromString(xml_string): + return atom.CreateClassFromXMLString(YouTubeVideoEntry, xml_string) + + +def YouTubeContactFeedFromString(xml_string): + return atom.CreateClassFromXMLString(YouTubeContactFeed, xml_string) + + +def YouTubeContactEntryFromString(xml_string): + return atom.CreateClassFromXMLString(YouTubeContactEntry, xml_string) + + +def YouTubeVideoCommentFeedFromString(xml_string): + return atom.CreateClassFromXMLString(YouTubeVideoCommentFeed, xml_string) + + +def YouTubeVideoCommentEntryFromString(xml_string): + return atom.CreateClassFromXMLString(YouTubeVideoCommentEntry, xml_string) + + +def YouTubeUserFeedFromString(xml_string): + return atom.CreateClassFromXMLString(YouTubeVideoFeed, xml_string) + + +def YouTubeUserEntryFromString(xml_string): + return atom.CreateClassFromXMLString(YouTubeUserEntry, xml_string) + + +def YouTubePlaylistFeedFromString(xml_string): + return atom.CreateClassFromXMLString(YouTubePlaylistFeed, xml_string) + + +def YouTubePlaylistVideoFeedFromString(xml_string): + return atom.CreateClassFromXMLString(YouTubePlaylistVideoFeed, xml_string) + + +def YouTubePlaylistEntryFromString(xml_string): + return atom.CreateClassFromXMLString(YouTubePlaylistEntry, xml_string) + + +def YouTubePlaylistVideoEntryFromString(xml_string): + return atom.CreateClassFromXMLString(YouTubePlaylistVideoEntry, xml_string) + + +def YouTubeSubscriptionFeedFromString(xml_string): + return atom.CreateClassFromXMLString(YouTubeSubscriptionFeed, xml_string) + + +def YouTubeSubscriptionEntryFromString(xml_string): + return atom.CreateClassFromXMLString(YouTubeSubscriptionEntry, xml_string) + + +def YouTubeVideoResponseFeedFromString(xml_string): + return atom.CreateClassFromXMLString(YouTubeVideoResponseFeed, xml_string) + + +def YouTubeVideoResponseEntryFromString(xml_string): + return atom.CreateClassFromXMLString(YouTubeVideoResponseEntry, xml_string) diff --git a/gam/gdata/analytics/youtube/client.py b/gam/gdata/analytics/youtube/client.py new file mode 100755 index 00000000000..2e34d6af1a5 --- /dev/null +++ b/gam/gdata/analytics/youtube/client.py @@ -0,0 +1,264 @@ +#!/usr/bin/env python +# +# Copyright (C) 2009 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Contains a client to communicate with the YouTube servers. + + A quick and dirty port of the YouTube GDATA 1.0 Python client + libraries to version 2.0 of the GDATA library. + +""" + +# __author__ = 's.@google.com (John Skidgel)' + +import logging + +import gdata.client +import gdata.youtube.data +import atom.data +import atom.http_core + +# Constants +# ----------------------------------------------------------------------------- +YOUTUBE_CLIENTLOGIN_AUTHENTICATION_URL = 'https://www.google.com/youtube/accounts/ClientLogin' +YOUTUBE_SUPPORTED_UPLOAD_TYPES = ('mov', 'avi', 'wmv', 'mpg', 'quicktime', + 'flv') +YOUTUBE_QUERY_VALID_TIME_PARAMETERS = ('today', 'this_week', 'this_month', + 'all_time') +YOUTUBE_QUERY_VALID_ORDERBY_PARAMETERS = ('published', 'viewCount', 'rating', + 'relevance') +YOUTUBE_QUERY_VALID_RACY_PARAMETERS = ('include', 'exclude') +YOUTUBE_QUERY_VALID_FORMAT_PARAMETERS = ('1', '5', '6') +YOUTUBE_STANDARDFEEDS = ('most_recent', 'recently_featured', + 'top_rated', 'most_viewed','watch_on_mobile') + +YOUTUBE_UPLOAD_TOKEN_URI = 'http://gdata.youtube.com/action/GetUploadToken' +YOUTUBE_SERVER = 'gdata.youtube.com/feeds/api' +YOUTUBE_SERVICE = 'youtube' +YOUTUBE_VIDEO_FEED_URI = 'http://%s/videos' % YOUTUBE_SERVER +YOUTUBE_USER_FEED_URI = 'http://%s/users/' % YOUTUBE_SERVER + +# Takes a youtube video ID. +YOUTUBE_CAPTION_FEED_URI = 'http://gdata.youtube.com/feeds/api/videos/%s/captions' + +# Takes a youtube video ID and a caption track ID. +YOUTUBE_CAPTION_URI = 'http://gdata.youtube.com/feeds/api/videos/%s/captiondata/%s' + +YOUTUBE_CAPTION_MIME_TYPE = 'application/vnd.youtube.timedtext; charset=UTF-8' + + +# Classes +# ----------------------------------------------------------------------------- +class Error(Exception): + """Base class for errors within the YouTube service.""" + pass + + +class RequestError(Error): + """Error class that is thrown in response to an invalid HTTP Request.""" + pass + + +class YouTubeError(Error): + """YouTube service specific error class.""" + pass + + +class YouTubeClient(gdata.client.GDClient): + """Client for the YouTube service. + + Performs a partial list of Google Data YouTube API functions, such as + retrieving the videos feed for a user and the feed for a video. + YouTube Service requires authentication for any write, update or delete + actions. + """ + api_version = '2' + auth_service = YOUTUBE_SERVICE + auth_scopes = ['http://%s' % YOUTUBE_SERVER, 'https://%s' % YOUTUBE_SERVER] + + def get_videos(self, uri=YOUTUBE_VIDEO_FEED_URI, auth_token=None, + desired_class=gdata.youtube.data.VideoFeed, + **kwargs): + """Retrieves a YouTube video feed. + Args: + uri: A string representing the URI of the feed that is to be retrieved. + + Returns: + A YouTubeVideoFeed if successfully retrieved. + """ + return self.get_feed(uri, auth_token=auth_token, + desired_class=desired_class, + **kwargs) + + GetVideos = get_videos + + + def get_user_feed(self, uri=None, username=None): + """Retrieve a YouTubeVideoFeed of user uploaded videos. + + Either a uri or a username must be provided. 
This will retrieve list + of videos uploaded by specified user. The uri will be of format + "http://gdata.youtube.com/feeds/api/users/{username}/uploads". + + Args: + uri: An optional string representing the URI of the user feed that is + to be retrieved. + username: An optional string representing the username. + + Returns: + A YouTubeUserFeed if successfully retrieved. + + Raises: + YouTubeError: You must provide at least a uri or a username to the + GetYouTubeUserFeed() method. + """ + if uri is None and username is None: + raise YouTubeError('You must provide at least a uri or a username ' + 'to the GetYouTubeUserFeed() method') + elif username and not uri: + uri = '%s%s/%s' % (YOUTUBE_USER_FEED_URI, username, 'uploads') + return self.get_feed(uri, desired_class=gdata.youtube.data.VideoFeed) + + GetUserFeed = get_user_feed + + + def get_video_entry(self, uri=None, video_id=None, + auth_token=None, **kwargs): + """Retrieve a YouTubeVideoEntry. + + Either a uri or a video_id must be provided. + + Args: + uri: An optional string representing the URI of the entry that is to + be retrieved. + video_id: An optional string representing the ID of the video. + + Returns: + A YouTubeVideoFeed if successfully retrieved. + + Raises: + YouTubeError: You must provide at least a uri or a video_id to the + GetYouTubeVideoEntry() method. + """ + if uri is None and video_id is None: + raise YouTubeError('You must provide at least a uri or a video_id ' + 'to the get_youtube_video_entry() method') + elif video_id and uri is None: + uri = '%s/%s' % (YOUTUBE_VIDEO_FEED_URI, video_id) + return self.get_feed(uri, + desired_class=gdata.youtube.data.VideoEntry, + auth_token=auth_token, + **kwargs) + + GetVideoEntry = get_video_entry + + + def get_caption_feed(self, uri): + """Retrieve a Caption feed of tracks. + + Args: + uri: A string representing the caption feed's URI to be retrieved. + + Returns: + A YouTube CaptionFeed if successfully retrieved. + """ + return self.get_feed(uri, desired_class=gdata.youtube.data.CaptionFeed) + + GetCaptionFeed = get_caption_feed + + def get_caption_track(self, track_url, client_id, + developer_key, auth_token=None, **kwargs): + http_request = atom.http_core.HttpRequest(uri = track_url, method = 'GET') + dev_key = 'key=' + developer_key + authsub = 'AuthSub token="' + str(auth_token) + '"' + http_request.headers = { + 'Authorization': authsub, + 'X-GData-Client': client_id, + 'X-GData-Key': dev_key + } + return self.request(http_request=http_request, **kwargs) + + GetCaptionTrack = get_caption_track + + def create_track(self, video_id, title, language, body, client_id, + developer_key, auth_token=None, title_type='text', **kwargs): + """Creates a closed-caption track and adds to an existing YouTube video. 
+ """ + new_entry = gdata.youtube.data.TrackEntry( + content = gdata.youtube.data.TrackContent(text = body, lang = language)) + uri = YOUTUBE_CAPTION_FEED_URI % video_id + http_request = atom.http_core.HttpRequest(uri = uri, method = 'POST') + dev_key = 'key=' + developer_key + authsub = 'AuthSub token="' + str(auth_token) + '"' + http_request.headers = { + 'Content-Type': YOUTUBE_CAPTION_MIME_TYPE, + 'Content-Language': language, + 'Slug': title, + 'Authorization': authsub, + 'GData-Version': self.api_version, + 'X-GData-Client': client_id, + 'X-GData-Key': dev_key + } + http_request.add_body_part(body, http_request.headers['Content-Type']) + return self.request(http_request = http_request, + desired_class = new_entry.__class__, **kwargs) + + + CreateTrack = create_track + + def delete_track(self, video_id, track, client_id, developer_key, + auth_token=None, **kwargs): + """Deletes a track.""" + if isinstance(track, gdata.youtube.data.TrackEntry): + track_id_text_node = track.get_id().split(':') + track_id = track_id_text_node[3] + else: + track_id = track + uri = YOUTUBE_CAPTION_URI % (video_id, track_id) + http_request = atom.http_core.HttpRequest(uri = uri, method = 'DELETE') + dev_key = 'key=' + developer_key + authsub = 'AuthSub token="' + str(auth_token) + '"' + http_request.headers = { + 'Authorization': authsub, + 'GData-Version': self.api_version, + 'X-GData-Client': client_id, + 'X-GData-Key': dev_key + } + return self.request(http_request=http_request, **kwargs) + + DeleteTrack = delete_track + + def update_track(self, video_id, track, body, client_id, developer_key, + auth_token=None, **kwargs): + """Updates a closed-caption track for an existing YouTube video. + """ + track_id_text_node = track.get_id().split(':') + track_id = track_id_text_node[3] + uri = YOUTUBE_CAPTION_URI % (video_id, track_id) + http_request = atom.http_core.HttpRequest(uri = uri, method = 'PUT') + dev_key = 'key=' + developer_key + authsub = 'AuthSub token="' + str(auth_token) + '"' + http_request.headers = { + 'Content-Type': YOUTUBE_CAPTION_MIME_TYPE, + 'Authorization': authsub, + 'GData-Version': self.api_version, + 'X-GData-Client': client_id, + 'X-GData-Key': dev_key + } + http_request.add_body_part(body, http_request.headers['Content-Type']) + return self.request(http_request = http_request, + desired_class = track.__class__, **kwargs) + + UpdateTrack = update_track diff --git a/gam/gdata/analytics/youtube/data.py b/gam/gdata/analytics/youtube/data.py new file mode 100755 index 00000000000..4ef2d621356 --- /dev/null +++ b/gam/gdata/analytics/youtube/data.py @@ -0,0 +1,502 @@ +#!/usr/bin/python +# +# Copyright (C) 2009 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ + +"""Contains the data classes of the YouTube Data API""" + + +__author__ = 'j.s@google.com (Jeff Scudder)' + + +import atom.core +import atom.data +import gdata.data +import gdata.geo.data +import gdata.media.data +import gdata.opensearch.data +import gdata.youtube.data + + +YT_TEMPLATE = '{http://gdata.youtube.com/schemas/2007/}%s' + + +class ComplaintEntry(gdata.data.GDEntry): + """Describes a complaint about a video""" + + +class ComplaintFeed(gdata.data.GDFeed): + """Describes complaints about a video""" + entry = [ComplaintEntry] + + +class RatingEntry(gdata.data.GDEntry): + """A rating about a video""" + rating = gdata.data.Rating + + +class RatingFeed(gdata.data.GDFeed): + """Describes ratings for a video""" + entry = [RatingEntry] + + +class YouTubeMediaContent(gdata.media.data.MediaContent): + """Describes a you tube media content""" + _qname = gdata.media.data.MEDIA_TEMPLATE % 'content' + format = 'format' + + +class YtAge(atom.core.XmlElement): + """User's age""" + _qname = YT_TEMPLATE % 'age' + + +class YtBooks(atom.core.XmlElement): + """User's favorite books""" + _qname = YT_TEMPLATE % 'books' + + +class YtCompany(atom.core.XmlElement): + """User's company""" + _qname = YT_TEMPLATE % 'company' + + +class YtDescription(atom.core.XmlElement): + """Description""" + _qname = YT_TEMPLATE % 'description' + + +class YtDuration(atom.core.XmlElement): + """Video duration""" + _qname = YT_TEMPLATE % 'duration' + seconds = 'seconds' + + +class YtFirstName(atom.core.XmlElement): + """User's first name""" + _qname = YT_TEMPLATE % 'firstName' + + +class YtGender(atom.core.XmlElement): + """User's gender""" + _qname = YT_TEMPLATE % 'gender' + + +class YtHobbies(atom.core.XmlElement): + """User's hobbies""" + _qname = YT_TEMPLATE % 'hobbies' + + +class YtHometown(atom.core.XmlElement): + """User's hometown""" + _qname = YT_TEMPLATE % 'hometown' + + +class YtLastName(atom.core.XmlElement): + """User's last name""" + _qname = YT_TEMPLATE % 'lastName' + + +class YtLocation(atom.core.XmlElement): + """Location""" + _qname = YT_TEMPLATE % 'location' + + +class YtMovies(atom.core.XmlElement): + """User's favorite movies""" + _qname = YT_TEMPLATE % 'movies' + + +class YtMusic(atom.core.XmlElement): + """User's favorite music""" + _qname = YT_TEMPLATE % 'music' + + +class YtNoEmbed(atom.core.XmlElement): + """Disables embedding for the video""" + _qname = YT_TEMPLATE % 'noembed' + + +class YtOccupation(atom.core.XmlElement): + """User's occupation""" + _qname = YT_TEMPLATE % 'occupation' + + +class YtPlaylistId(atom.core.XmlElement): + """Playlist id""" + _qname = YT_TEMPLATE % 'playlistId' + + +class YtPosition(atom.core.XmlElement): + """Video position on the playlist""" + _qname = YT_TEMPLATE % 'position' + + +class YtPrivate(atom.core.XmlElement): + """Flags the entry as private""" + _qname = YT_TEMPLATE % 'private' + + +class YtQueryString(atom.core.XmlElement): + """Keywords or query string associated with a subscription""" + _qname = YT_TEMPLATE % 'queryString' + + +class YtRacy(atom.core.XmlElement): + """Mature content""" + _qname = YT_TEMPLATE % 'racy' + + +class YtRecorded(atom.core.XmlElement): + """Date when the video was recorded""" + _qname = YT_TEMPLATE % 'recorded' + + +class YtRelationship(atom.core.XmlElement): + """User's relationship status""" + _qname = YT_TEMPLATE % 'relationship' + + +class YtSchool(atom.core.XmlElement): + """User's school""" + _qname = YT_TEMPLATE % 'school' + + +class YtStatistics(atom.core.XmlElement): + """Video and user statistics""" + _qname = 
YT_TEMPLATE % 'statistics' + favorite_count = 'favoriteCount' + video_watch_count = 'videoWatchCount' + view_count = 'viewCount' + last_web_access = 'lastWebAccess' + subscriber_count = 'subscriberCount' + + +class YtStatus(atom.core.XmlElement): + """Status of a contact""" + _qname = YT_TEMPLATE % 'status' + + +class YtUserProfileStatistics(YtStatistics): + """User statistics""" + _qname = YT_TEMPLATE % 'statistics' + + +class YtUsername(atom.core.XmlElement): + """Youtube username""" + _qname = YT_TEMPLATE % 'username' + + +class FriendEntry(gdata.data.BatchEntry): + """Describes a contact in friend list""" + username = YtUsername + status = YtStatus + email = gdata.data.Email + + +class FriendFeed(gdata.data.BatchFeed): + """Describes user's friends""" + entry = [FriendEntry] + + +class YtVideoStatistics(YtStatistics): + """Video statistics""" + _qname = YT_TEMPLATE % 'statistics' + + +class ChannelEntry(gdata.data.GDEntry): + """Describes a video channel""" + + +class ChannelFeed(gdata.data.GDFeed): + """Describes channels""" + entry = [ChannelEntry] + + +class FavoriteEntry(gdata.data.BatchEntry): + """Describes a favorite video""" + + +class FavoriteFeed(gdata.data.BatchFeed): + """Describes favorite videos""" + entry = [FavoriteEntry] + + +class YouTubeMediaCredit(gdata.media.data.MediaCredit): + """Describes a you tube media credit""" + _qname = gdata.media.data.MEDIA_TEMPLATE % 'credit' + type = 'type' + + +class YouTubeMediaRating(gdata.media.data.MediaRating): + """Describes a you tube media rating""" + _qname = gdata.media.data.MEDIA_TEMPLATE % 'rating' + country = 'country' + + +class YtAboutMe(atom.core.XmlElement): + """User's self description""" + _qname = YT_TEMPLATE % 'aboutMe' + + +class UserProfileEntry(gdata.data.BatchEntry): + """Describes an user's profile""" + relationship = YtRelationship + description = YtDescription + location = YtLocation + statistics = YtUserProfileStatistics + school = YtSchool + music = YtMusic + first_name = YtFirstName + gender = YtGender + occupation = YtOccupation + hometown = YtHometown + company = YtCompany + movies = YtMovies + books = YtBooks + username = YtUsername + about_me = YtAboutMe + last_name = YtLastName + age = YtAge + thumbnail = gdata.media.data.MediaThumbnail + hobbies = YtHobbies + + +class UserProfileFeed(gdata.data.BatchFeed): + """Describes a feed of user's profile""" + entry = [UserProfileEntry] + + +class YtAspectRatio(atom.core.XmlElement): + """The aspect ratio of a media file""" + _qname = YT_TEMPLATE % 'aspectRatio' + + +class YtBasePublicationState(atom.core.XmlElement): + """Status of an unpublished entry""" + _qname = YT_TEMPLATE % 'state' + help_url = 'helpUrl' + + +class YtPublicationState(YtBasePublicationState): + """Status of an unpublished video""" + _qname = YT_TEMPLATE % 'state' + name = 'name' + reason_code = 'reasonCode' + + +class YouTubeAppControl(atom.data.Control): + """Describes a you tube app control""" + _qname = (atom.data.APP_TEMPLATE_V1 % 'control', + atom.data.APP_TEMPLATE_V2 % 'control') + state = YtPublicationState + + +class YtCaptionPublicationState(YtBasePublicationState): + """Status of an unpublished caption track""" + _qname = YT_TEMPLATE % 'state' + reason_code = 'reasonCode' + name = 'name' + + +class YouTubeCaptionAppControl(atom.data.Control): + """Describes a you tube caption app control""" + _qname = atom.data.APP_TEMPLATE_V2 % 'control' + state = YtCaptionPublicationState + + +class CaptionTrackEntry(gdata.data.GDEntry): + """Describes a caption track""" + + +class 
CaptionTrackFeed(gdata.data.GDFeed): + """Describes caption tracks""" + entry = [CaptionTrackEntry] + + +class YtCountHint(atom.core.XmlElement): + """Hint as to how many entries the linked feed contains""" + _qname = YT_TEMPLATE % 'countHint' + + +class PlaylistLinkEntry(gdata.data.BatchEntry): + """Describes a playlist""" + description = YtDescription + playlist_id = YtPlaylistId + count_hint = YtCountHint + private = YtPrivate + + +class PlaylistLinkFeed(gdata.data.BatchFeed): + """Describes list of playlists""" + entry = [PlaylistLinkEntry] + + +class YtModerationStatus(atom.core.XmlElement): + """Moderation status""" + _qname = YT_TEMPLATE % 'moderationStatus' + + +class YtPlaylistTitle(atom.core.XmlElement): + """Playlist title""" + _qname = YT_TEMPLATE % 'playlistTitle' + + +class SubscriptionEntry(gdata.data.BatchEntry): + """Describes user's channel subscritpions""" + count_hint = YtCountHint + playlist_title = YtPlaylistTitle + thumbnail = gdata.media.data.MediaThumbnail + username = YtUsername + query_string = YtQueryString + playlist_id = YtPlaylistId + + +class SubscriptionFeed(gdata.data.BatchFeed): + """Describes list of user's video subscriptions""" + entry = [SubscriptionEntry] + + +class YtSpam(atom.core.XmlElement): + """Indicates that the entry probably contains spam""" + _qname = YT_TEMPLATE % 'spam' + + +class CommentEntry(gdata.data.BatchEntry): + """Describes a comment for a video""" + spam = YtSpam + + +class CommentFeed(gdata.data.BatchFeed): + """Describes comments for a video""" + entry = [CommentEntry] + + +class YtUploaded(atom.core.XmlElement): + """Date/Time at which the video was uploaded""" + _qname = YT_TEMPLATE % 'uploaded' + + +class YtVideoId(atom.core.XmlElement): + """Video id""" + _qname = YT_TEMPLATE % 'videoid' + + +class YouTubeMediaGroup(gdata.media.data.MediaGroup): + """Describes a you tube media group""" + _qname = gdata.media.data.MEDIA_TEMPLATE % 'group' + videoid = YtVideoId + private = YtPrivate + duration = YtDuration + aspect_ratio = YtAspectRatio + uploaded = YtUploaded + + +class VideoEntryBase(gdata.data.GDEntry): + """Elements that describe or contain videos""" + group = YouTubeMediaGroup + statistics = YtVideoStatistics + racy = YtRacy + recorded = YtRecorded + where = gdata.geo.data.GeoRssWhere + rating = gdata.data.Rating + noembed = YtNoEmbed + location = YtLocation + comments = gdata.data.Comments + + +class PlaylistEntry(gdata.data.BatchEntry): + """Describes a video in a playlist""" + description = YtDescription + position = YtPosition + + +class PlaylistFeed(gdata.data.BatchFeed): + """Describes videos in a playlist""" + private = YtPrivate + group = YouTubeMediaGroup + playlist_id = YtPlaylistId + entry = [PlaylistEntry] + + +class VideoEntry(gdata.data.BatchEntry): + """Describes a video""" + + +class VideoFeed(gdata.data.BatchFeed): + """Describes a video feed""" + entry = [VideoEntry] + + +class VideoMessageEntry(gdata.data.BatchEntry): + """Describes a video message""" + description = YtDescription + + +class VideoMessageFeed(gdata.data.BatchFeed): + """Describes videos in a videoMessage""" + entry = [VideoMessageEntry] + + +class UserEventEntry(gdata.data.GDEntry): + """Describes a user event""" + playlist_id = YtPlaylistId + videoid = YtVideoId + username = YtUsername + query_string = YtQueryString + rating = gdata.data.Rating + + +class UserEventFeed(gdata.data.GDFeed): + """Describes list of events""" + entry = [UserEventEntry] + + +class VideoModerationEntry(gdata.data.GDEntry): + """Describes video moderation""" + 
moderation_status = YtModerationStatus + videoid = YtVideoId + + +class VideoModerationFeed(gdata.data.GDFeed): + """Describes a video moderation feed""" + entry = [VideoModerationEntry] + + +class TrackContent(atom.data.Content): + lang = atom.data.XML_TEMPLATE % 'lang' + + +class TrackEntry(gdata.data.GDEntry): + """Represents the URL for a caption track""" + content = TrackContent + + def get_caption_track_id(self): + """Extracts the ID of this caption track. + Returns: + The caption track's id as a string. + """ + if self.id.text: + match = CAPTION_TRACK_ID_PATTERN.match(self.id.text) + if match: + return match.group(2) + return None + + GetCaptionTrackId = get_caption_track_id + + +class CaptionFeed(gdata.data.GDFeed): + """Represents a caption feed for a video on YouTube.""" + entry = [TrackEntry] diff --git a/gam/gdata/analytics/youtube/service.py b/gam/gdata/analytics/youtube/service.py new file mode 100755 index 00000000000..9e0346f6249 --- /dev/null +++ b/gam/gdata/analytics/youtube/service.py @@ -0,0 +1,1563 @@ +#!/usr/bin/python +# +# Copyright (C) 2008 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""YouTubeService extends GDataService to streamline YouTube operations. + + YouTubeService: Provides methods to perform CRUD operations on YouTube feeds. + Extends GDataService. 
+""" + +__author__ = ('api.stephaniel@gmail.com (Stephanie Liu), ' + 'api.jhartmann@gmail.com (Jochen Hartmann)') + +try: + from xml.etree import cElementTree as ElementTree +except ImportError: + try: + import cElementTree as ElementTree + except ImportError: + try: + from xml.etree import ElementTree + except ImportError: + from elementtree import ElementTree +import os +import atom +import gdata +import gdata.service +import gdata.youtube + +YOUTUBE_SERVER = 'gdata.youtube.com' +YOUTUBE_SERVICE = 'youtube' +YOUTUBE_CLIENTLOGIN_AUTHENTICATION_URL = 'https://www.google.com/youtube/accounts/ClientLogin' +YOUTUBE_SUPPORTED_UPLOAD_TYPES = ('mov', 'avi', 'wmv', 'mpg', 'quicktime', + 'flv', 'mp4', 'x-flv') +YOUTUBE_QUERY_VALID_TIME_PARAMETERS = ('today', 'this_week', 'this_month', + 'all_time') +YOUTUBE_QUERY_VALID_ORDERBY_PARAMETERS = ('published', 'viewCount', 'rating', + 'relevance') +YOUTUBE_QUERY_VALID_RACY_PARAMETERS = ('include', 'exclude') +YOUTUBE_QUERY_VALID_FORMAT_PARAMETERS = ('1', '5', '6') +YOUTUBE_STANDARDFEEDS = ('most_recent', 'recently_featured', + 'top_rated', 'most_viewed','watch_on_mobile') +YOUTUBE_UPLOAD_URI = 'http://uploads.gdata.youtube.com/feeds/api/users' +YOUTUBE_UPLOAD_TOKEN_URI = 'http://gdata.youtube.com/action/GetUploadToken' +YOUTUBE_VIDEO_URI = 'http://gdata.youtube.com/feeds/api/videos' +YOUTUBE_USER_FEED_URI = 'http://gdata.youtube.com/feeds/api/users' +YOUTUBE_PLAYLIST_FEED_URI = 'http://gdata.youtube.com/feeds/api/playlists' + +YOUTUBE_STANDARD_FEEDS = 'http://gdata.youtube.com/feeds/api/standardfeeds' +YOUTUBE_STANDARD_TOP_RATED_URI = '%s/%s' % (YOUTUBE_STANDARD_FEEDS, 'top_rated') +YOUTUBE_STANDARD_MOST_VIEWED_URI = '%s/%s' % (YOUTUBE_STANDARD_FEEDS, + 'most_viewed') +YOUTUBE_STANDARD_RECENTLY_FEATURED_URI = '%s/%s' % (YOUTUBE_STANDARD_FEEDS, + 'recently_featured') +YOUTUBE_STANDARD_WATCH_ON_MOBILE_URI = '%s/%s' % (YOUTUBE_STANDARD_FEEDS, + 'watch_on_mobile') +YOUTUBE_STANDARD_TOP_FAVORITES_URI = '%s/%s' % (YOUTUBE_STANDARD_FEEDS, + 'top_favorites') +YOUTUBE_STANDARD_MOST_RECENT_URI = '%s/%s' % (YOUTUBE_STANDARD_FEEDS, + 'most_recent') +YOUTUBE_STANDARD_MOST_DISCUSSED_URI = '%s/%s' % (YOUTUBE_STANDARD_FEEDS, + 'most_discussed') +YOUTUBE_STANDARD_MOST_LINKED_URI = '%s/%s' % (YOUTUBE_STANDARD_FEEDS, + 'most_linked') +YOUTUBE_STANDARD_MOST_RESPONDED_URI = '%s/%s' % (YOUTUBE_STANDARD_FEEDS, + 'most_responded') +YOUTUBE_SCHEMA = 'http://gdata.youtube.com/schemas' + +YOUTUBE_RATING_LINK_REL = '%s#video.ratings' % YOUTUBE_SCHEMA + +YOUTUBE_COMPLAINT_CATEGORY_SCHEME = '%s/%s' % (YOUTUBE_SCHEMA, + 'complaint-reasons.cat') +YOUTUBE_SUBSCRIPTION_CATEGORY_SCHEME = '%s/%s' % (YOUTUBE_SCHEMA, + 'subscriptiontypes.cat') + +YOUTUBE_COMPLAINT_CATEGORY_TERMS = ('PORN', 'VIOLENCE', 'HATE', 'DANGEROUS', + 'RIGHTS', 'SPAM') +YOUTUBE_CONTACT_STATUS = ('accepted', 'rejected') +YOUTUBE_CONTACT_CATEGORY = ('Friends', 'Family') + +UNKOWN_ERROR = 1000 +YOUTUBE_BAD_REQUEST = 400 +YOUTUBE_CONFLICT = 409 +YOUTUBE_INTERNAL_SERVER_ERROR = 500 +YOUTUBE_INVALID_ARGUMENT = 601 +YOUTUBE_INVALID_CONTENT_TYPE = 602 +YOUTUBE_NOT_A_VIDEO = 603 +YOUTUBE_INVALID_KIND = 604 + + +class Error(Exception): + """Base class for errors within the YouTube service.""" + pass + +class RequestError(Error): + """Error class that is thrown in response to an invalid HTTP Request.""" + pass + +class YouTubeError(Error): + """YouTube service specific error class.""" + pass + +class YouTubeService(gdata.service.GDataService): + + """Client for the YouTube service. 
+ + Performs all documented Google Data YouTube API functions, such as inserting, + updating and deleting videos, comments, playlist, subscriptions etc. + YouTube Service requires authentication for any write, update or delete + actions. + + Attributes: + email: An optional string identifying the user. Required only for + authenticated actions. + password: An optional string identifying the user's password. + source: An optional string identifying the name of your application. + server: An optional address of the YouTube API server. gdata.youtube.com + is provided as the default value. + additional_headers: An optional dictionary containing additional headers + to be passed along with each request. Use to store developer key. + client_id: An optional string identifying your application, required for + authenticated requests, along with a developer key. + developer_key: An optional string value. Register your application at + http://code.google.com/apis/youtube/dashboard to obtain a (free) key. + """ + + def __init__(self, email=None, password=None, source=None, + server=YOUTUBE_SERVER, additional_headers=None, client_id=None, + developer_key=None, **kwargs): + """Creates a client for the YouTube service. + + Args: + email: string (optional) The user's email address, used for + authentication. + password: string (optional) The user's password. + source: string (optional) The name of the user's application. + server: string (optional) The name of the server to which a connection + will be opened. Default value: 'gdata.youtube.com'. + client_id: string (optional) Identifies your application, required for + authenticated requests, along with a developer key. + developer_key: string (optional) Register your application at + http://code.google.com/apis/youtube/dashboard to obtain a (free) key. + **kwargs: The other parameters to pass to gdata.service.GDataService + constructor. + """ + + gdata.service.GDataService.__init__( + self, email=email, password=password, service=YOUTUBE_SERVICE, + source=source, server=server, additional_headers=additional_headers, + **kwargs) + + if client_id is not None: + self.additional_headers['X-Gdata-Client'] = client_id + + if developer_key is not None: + self.additional_headers['X-GData-Key'] = 'key=%s' % developer_key + + self.auth_service_url = YOUTUBE_CLIENTLOGIN_AUTHENTICATION_URL + + def GetYouTubeVideoFeed(self, uri): + """Retrieve a YouTubeVideoFeed. + + Args: + uri: A string representing the URI of the feed that is to be retrieved. + + Returns: + A YouTubeVideoFeed if successfully retrieved. + """ + return self.Get(uri, converter=gdata.youtube.YouTubeVideoFeedFromString) + + def GetYouTubeVideoEntry(self, uri=None, video_id=None): + """Retrieve a YouTubeVideoEntry. + + Either a uri or a video_id must be provided. + + Args: + uri: An optional string representing the URI of the entry that is to + be retrieved. + video_id: An optional string representing the ID of the video. + + Returns: + A YouTubeVideoFeed if successfully retrieved. + + Raises: + YouTubeError: You must provide at least a uri or a video_id to the + GetYouTubeVideoEntry() method. + """ + if uri is None and video_id is None: + raise YouTubeError('You must provide at least a uri or a video_id ' + 'to the GetYouTubeVideoEntry() method') + elif video_id and not uri: + uri = '%s/%s' % (YOUTUBE_VIDEO_URI, video_id) + return self.Get(uri, converter=gdata.youtube.YouTubeVideoEntryFromString) + + def GetYouTubeContactFeed(self, uri=None, username='default'): + """Retrieve a YouTubeContactFeed. 
+ + Either a uri or a username must be provided. + + Args: + uri: An optional string representing the URI of the contact feed that + is to be retrieved. + username: An optional string representing the username. Defaults to the + currently authenticated user. + + Returns: + A YouTubeContactFeed if successfully retrieved. + + Raises: + YouTubeError: You must provide at least a uri or a username to the + GetYouTubeContactFeed() method. + """ + if uri is None: + uri = '%s/%s/%s' % (YOUTUBE_USER_FEED_URI, username, 'contacts') + return self.Get(uri, converter=gdata.youtube.YouTubeContactFeedFromString) + + def GetYouTubeContactEntry(self, uri): + """Retrieve a YouTubeContactEntry. + + Args: + uri: A string representing the URI of the contact entry that is to + be retrieved. + + Returns: + A YouTubeContactEntry if successfully retrieved. + """ + return self.Get(uri, converter=gdata.youtube.YouTubeContactEntryFromString) + + def GetYouTubeVideoCommentFeed(self, uri=None, video_id=None): + """Retrieve a YouTubeVideoCommentFeed. + + Either a uri or a video_id must be provided. + + Args: + uri: An optional string representing the URI of the comment feed that + is to be retrieved. + video_id: An optional string representing the ID of the video for which + to retrieve the comment feed. + + Returns: + A YouTubeVideoCommentFeed if successfully retrieved. + + Raises: + YouTubeError: You must provide at least a uri or a video_id to the + GetYouTubeVideoCommentFeed() method. + """ + if uri is None and video_id is None: + raise YouTubeError('You must provide at least a uri or a video_id ' + 'to the GetYouTubeVideoCommentFeed() method') + elif video_id and not uri: + uri = '%s/%s/%s' % (YOUTUBE_VIDEO_URI, video_id, 'comments') + return self.Get( + uri, converter=gdata.youtube.YouTubeVideoCommentFeedFromString) + + def GetYouTubeVideoCommentEntry(self, uri): + """Retrieve a YouTubeVideoCommentEntry. + + Args: + uri: A string representing the URI of the comment entry that is to + be retrieved. + + Returns: + A YouTubeCommentEntry if successfully retrieved. + """ + return self.Get( + uri, converter=gdata.youtube.YouTubeVideoCommentEntryFromString) + + def GetYouTubeUserFeed(self, uri=None, username=None): + """Retrieve a YouTubeVideoFeed of user uploaded videos + + Either a uri or a username must be provided. This will retrieve list + of videos uploaded by specified user. The uri will be of format + "http://gdata.youtube.com/feeds/api/users/{username}/uploads". + + Args: + uri: An optional string representing the URI of the user feed that is + to be retrieved. + username: An optional string representing the username. + + Returns: + A YouTubeUserFeed if successfully retrieved. + + Raises: + YouTubeError: You must provide at least a uri or a username to the + GetYouTubeUserFeed() method. + """ + if uri is None and username is None: + raise YouTubeError('You must provide at least a uri or a username ' + 'to the GetYouTubeUserFeed() method') + elif username and not uri: + uri = '%s/%s/%s' % (YOUTUBE_USER_FEED_URI, username, 'uploads') + return self.Get(uri, converter=gdata.youtube.YouTubeUserFeedFromString) + + def GetYouTubeUserEntry(self, uri=None, username=None): + """Retrieve a YouTubeUserEntry. + + Either a uri or a username must be provided. + + Args: + uri: An optional string representing the URI of the user entry that is + to be retrieved. + username: An optional string representing the username. + + Returns: + A YouTubeUserEntry if successfully retrieved. 
+ + Raises: + YouTubeError: You must provide at least a uri or a username to the + GetYouTubeUserEntry() method. + """ + if uri is None and username is None: + raise YouTubeError('You must provide at least a uri or a username ' + 'to the GetYouTubeUserEntry() method') + elif username and not uri: + uri = '%s/%s' % (YOUTUBE_USER_FEED_URI, username) + return self.Get(uri, converter=gdata.youtube.YouTubeUserEntryFromString) + + def GetYouTubePlaylistFeed(self, uri=None, username='default'): + """Retrieve a YouTubePlaylistFeed (a feed of playlists for a user). + + Either a uri or a username must be provided. + + Args: + uri: An optional string representing the URI of the playlist feed that + is to be retrieved. + username: An optional string representing the username. Defaults to the + currently authenticated user. + + Returns: + A YouTubePlaylistFeed if successfully retrieved. + + Raises: + YouTubeError: You must provide at least a uri or a username to the + GetYouTubePlaylistFeed() method. + """ + if uri is None: + uri = '%s/%s/%s' % (YOUTUBE_USER_FEED_URI, username, 'playlists') + return self.Get(uri, converter=gdata.youtube.YouTubePlaylistFeedFromString) + + def GetYouTubePlaylistEntry(self, uri): + """Retrieve a YouTubePlaylistEntry. + + Args: + uri: A string representing the URI of the playlist feed that is to + be retrieved. + + Returns: + A YouTubePlaylistEntry if successfully retrieved. + """ + return self.Get(uri, converter=gdata.youtube.YouTubePlaylistEntryFromString) + + def GetYouTubePlaylistVideoFeed(self, uri=None, playlist_id=None): + """Retrieve a YouTubePlaylistVideoFeed (a feed of videos on a playlist). + + Either a uri or a playlist_id must be provided. + + Args: + uri: An optional string representing the URI of the playlist video feed + that is to be retrieved. + playlist_id: An optional string representing the Id of the playlist whose + playlist video feed is to be retrieved. + + Returns: + A YouTubePlaylistVideoFeed if successfully retrieved. + + Raises: + YouTubeError: You must provide at least a uri or a playlist_id to the + GetYouTubePlaylistVideoFeed() method. + """ + if uri is None and playlist_id is None: + raise YouTubeError('You must provide at least a uri or a playlist_id ' + 'to the GetYouTubePlaylistVideoFeed() method') + elif playlist_id and not uri: + uri = '%s/%s' % (YOUTUBE_PLAYLIST_FEED_URI, playlist_id) + return self.Get( + uri, converter=gdata.youtube.YouTubePlaylistVideoFeedFromString) + + def GetYouTubeVideoResponseFeed(self, uri=None, video_id=None): + """Retrieve a YouTubeVideoResponseFeed. + + Either a uri or a playlist_id must be provided. + + Args: + uri: An optional string representing the URI of the video response feed + that is to be retrieved. + video_id: An optional string representing the ID of the video whose + response feed is to be retrieved. + + Returns: + A YouTubeVideoResponseFeed if successfully retrieved. + + Raises: + YouTubeError: You must provide at least a uri or a video_id to the + GetYouTubeVideoResponseFeed() method. + """ + if uri is None and video_id is None: + raise YouTubeError('You must provide at least a uri or a video_id ' + 'to the GetYouTubeVideoResponseFeed() method') + elif video_id and not uri: + uri = '%s/%s/%s' % (YOUTUBE_VIDEO_URI, video_id, 'responses') + return self.Get( + uri, converter=gdata.youtube.YouTubeVideoResponseFeedFromString) + + def GetYouTubeVideoResponseEntry(self, uri): + """Retrieve a YouTubeVideoResponseEntry. 
+ + Args: + uri: A string representing the URI of the video response entry that + is to be retrieved. + + Returns: + A YouTubeVideoResponseEntry if successfully retrieved. + """ + return self.Get( + uri, converter=gdata.youtube.YouTubeVideoResponseEntryFromString) + + def GetYouTubeSubscriptionFeed(self, uri=None, username='default'): + """Retrieve a YouTubeSubscriptionFeed. + + Either the uri of the feed or a username must be provided. + + Args: + uri: An optional string representing the URI of the feed that is to + be retrieved. + username: An optional string representing the username whose subscription + feed is to be retrieved. Defaults to the currently authenticted user. + + Returns: + A YouTubeVideoSubscriptionFeed if successfully retrieved. + """ + if uri is None: + uri = '%s/%s/%s' % (YOUTUBE_USER_FEED_URI, username, 'subscriptions') + return self.Get( + uri, converter=gdata.youtube.YouTubeSubscriptionFeedFromString) + + def GetYouTubeSubscriptionEntry(self, uri): + """Retrieve a YouTubeSubscriptionEntry. + + Args: + uri: A string representing the URI of the entry that is to be retrieved. + + Returns: + A YouTubeVideoSubscriptionEntry if successfully retrieved. + """ + return self.Get( + uri, converter=gdata.youtube.YouTubeSubscriptionEntryFromString) + + def GetYouTubeRelatedVideoFeed(self, uri=None, video_id=None): + """Retrieve a YouTubeRelatedVideoFeed. + + Either a uri for the feed or a video_id is required. + + Args: + uri: An optional string representing the URI of the feed that is to + be retrieved. + video_id: An optional string representing the ID of the video for which + to retrieve the related video feed. + + Returns: + A YouTubeRelatedVideoFeed if successfully retrieved. + + Raises: + YouTubeError: You must provide at least a uri or a video_id to the + GetYouTubeRelatedVideoFeed() method. + """ + if uri is None and video_id is None: + raise YouTubeError('You must provide at least a uri or a video_id ' + 'to the GetYouTubeRelatedVideoFeed() method') + elif video_id and not uri: + uri = '%s/%s/%s' % (YOUTUBE_VIDEO_URI, video_id, 'related') + return self.Get( + uri, converter=gdata.youtube.YouTubeVideoFeedFromString) + + def GetTopRatedVideoFeed(self): + """Retrieve the 'top_rated' standard video feed. + + Returns: + A YouTubeVideoFeed if successfully retrieved. + """ + return self.GetYouTubeVideoFeed(YOUTUBE_STANDARD_TOP_RATED_URI) + + def GetMostViewedVideoFeed(self): + """Retrieve the 'most_viewed' standard video feed. + + Returns: + A YouTubeVideoFeed if successfully retrieved. + """ + return self.GetYouTubeVideoFeed(YOUTUBE_STANDARD_MOST_VIEWED_URI) + + def GetRecentlyFeaturedVideoFeed(self): + """Retrieve the 'recently_featured' standard video feed. + + Returns: + A YouTubeVideoFeed if successfully retrieved. + """ + return self.GetYouTubeVideoFeed(YOUTUBE_STANDARD_RECENTLY_FEATURED_URI) + + def GetWatchOnMobileVideoFeed(self): + """Retrieve the 'watch_on_mobile' standard video feed. + + Returns: + A YouTubeVideoFeed if successfully retrieved. + """ + return self.GetYouTubeVideoFeed(YOUTUBE_STANDARD_WATCH_ON_MOBILE_URI) + + def GetTopFavoritesVideoFeed(self): + """Retrieve the 'top_favorites' standard video feed. + + Returns: + A YouTubeVideoFeed if successfully retrieved. + """ + return self.GetYouTubeVideoFeed(YOUTUBE_STANDARD_TOP_FAVORITES_URI) + + def GetMostRecentVideoFeed(self): + """Retrieve the 'most_recent' standard video feed. + + Returns: + A YouTubeVideoFeed if successfully retrieved. 
+ """ + return self.GetYouTubeVideoFeed(YOUTUBE_STANDARD_MOST_RECENT_URI) + + def GetMostDiscussedVideoFeed(self): + """Retrieve the 'most_discussed' standard video feed. + + Returns: + A YouTubeVideoFeed if successfully retrieved. + """ + return self.GetYouTubeVideoFeed(YOUTUBE_STANDARD_MOST_DISCUSSED_URI) + + def GetMostLinkedVideoFeed(self): + """Retrieve the 'most_linked' standard video feed. + + Returns: + A YouTubeVideoFeed if successfully retrieved. + """ + return self.GetYouTubeVideoFeed(YOUTUBE_STANDARD_MOST_LINKED_URI) + + def GetMostRespondedVideoFeed(self): + """Retrieve the 'most_responded' standard video feed. + + Returns: + A YouTubeVideoFeed if successfully retrieved. + """ + return self.GetYouTubeVideoFeed(YOUTUBE_STANDARD_MOST_RESPONDED_URI) + + def GetUserFavoritesFeed(self, username='default'): + """Retrieve the favorites feed for a given user. + + Args: + username: An optional string representing the username whose favorites + feed is to be retrieved. Defaults to the currently authenticated user. + + Returns: + A YouTubeVideoFeed if successfully retrieved. + """ + favorites_feed_uri = '%s/%s/%s' % (YOUTUBE_USER_FEED_URI, username, + 'favorites') + return self.GetYouTubeVideoFeed(favorites_feed_uri) + + def InsertVideoEntry(self, video_entry, filename_or_handle, + youtube_username='default', + content_type='video/quicktime'): + """Upload a new video to YouTube using the direct upload mechanism. + + Needs authentication. + + Args: + video_entry: The YouTubeVideoEntry to upload. + filename_or_handle: A file-like object or file name where the video + will be read from. + youtube_username: An optional string representing the username into whose + account this video is to be uploaded to. Defaults to the currently + authenticated user. + content_type: An optional string representing internet media type + (a.k.a. mime type) of the media object. Currently the YouTube API + supports these types: + o video/mpeg + o video/quicktime + o video/x-msvideo + o video/mp4 + o video/x-flv + + Returns: + The newly created YouTubeVideoEntry if successful. + + Raises: + AssertionError: video_entry must be a gdata.youtube.VideoEntry instance. + YouTubeError: An error occurred trying to read the video file provided. + gdata.service.RequestError: An error occurred trying to upload the video + to the API server. + """ + + # We need to perform a series of checks on the video_entry and on the + # file that we plan to upload, such as checking whether we have a valid + # video_entry and that the file is the correct type and readable, prior + # to performing the actual POST request. 
+ + try: + assert(isinstance(video_entry, gdata.youtube.YouTubeVideoEntry)) + except AssertionError: + raise YouTubeError({'status':YOUTUBE_INVALID_ARGUMENT, + 'body':'`video_entry` must be a gdata.youtube.VideoEntry instance', + 'reason':'Found %s, not VideoEntry' % type(video_entry) + }) + #majtype, mintype = content_type.split('/') + # + #try: + # assert(mintype in YOUTUBE_SUPPORTED_UPLOAD_TYPES) + #except (ValueError, AssertionError): + # raise YouTubeError({'status':YOUTUBE_INVALID_CONTENT_TYPE, + # 'body':'This is not a valid content type: %s' % content_type, + # 'reason':'Accepted content types: %s' % + # ['video/%s' % (t) for t in YOUTUBE_SUPPORTED_UPLOAD_TYPES]}) + + if (isinstance(filename_or_handle, (str, unicode)) + and os.path.exists(filename_or_handle)): + mediasource = gdata.MediaSource() + mediasource.setFile(filename_or_handle, content_type) + elif hasattr(filename_or_handle, 'read'): + import StringIO + if hasattr(filename_or_handle, 'seek'): + filename_or_handle.seek(0) + file_handle = filename_or_handle + name = 'video' + if hasattr(filename_or_handle, 'name'): + name = filename_or_handle.name + mediasource = gdata.MediaSource(file_handle, content_type, + content_length=file_handle.len, file_name=name) + else: + raise YouTubeError({'status':YOUTUBE_INVALID_ARGUMENT, 'body': + '`filename_or_handle` must be a path name or a file-like object', + 'reason': ('Found %s, not path name or object ' + 'with a .read() method' % type(filename_or_handle))}) + upload_uri = '%s/%s/%s' % (YOUTUBE_UPLOAD_URI, youtube_username, + 'uploads') + self.additional_headers['Slug'] = mediasource.file_name + + # Using a nested try statement to retain Python 2.4 compatibility + try: + try: + return self.Post(video_entry, uri=upload_uri, media_source=mediasource, + converter=gdata.youtube.YouTubeVideoEntryFromString) + except gdata.service.RequestError, e: + raise YouTubeError(e.args[0]) + finally: + del(self.additional_headers['Slug']) + + def CheckUploadStatus(self, video_entry=None, video_id=None): + """Check upload status on a recently uploaded video entry. + + Needs authentication. Either video_entry or video_id must be provided. + + Args: + video_entry: An optional YouTubeVideoEntry whose upload status to check + video_id: An optional string representing the ID of the uploaded video + whose status is to be checked. + + Returns: + A tuple containing (video_upload_state, detailed_message) or None if + no status information is found. + + Raises: + YouTubeError: You must provide at least a video_entry or a video_id to the + CheckUploadStatus() method. + """ + if video_entry is None and video_id is None: + raise YouTubeError('You must provide at least a uri or a video_id ' + 'to the CheckUploadStatus() method') + elif video_id and not video_entry: + video_entry = self.GetYouTubeVideoEntry(video_id=video_id) + + control = video_entry.control + if control is not None: + draft = control.draft + if draft is not None: + if draft.text == 'yes': + yt_state = control.extension_elements[0] + if yt_state is not None: + state_value = yt_state.attributes['name'] + message = '' + if yt_state.text is not None: + message = yt_state.text + + return (state_value, message) + + def GetFormUploadToken(self, video_entry, uri=YOUTUBE_UPLOAD_TOKEN_URI): + """Receives a YouTube Token and a YouTube PostUrl from a YouTubeVideoEntry. + + Needs authentication. + + Args: + video_entry: The YouTubeVideoEntry to upload (meta-data only). + uri: An optional string representing the URI from where to fetch the + token information. 
Defaults to the YOUTUBE_UPLOADTOKEN_URI. + + Returns: + A tuple containing the URL to which to post your video file, along + with the youtube token that must be included with your upload in the + form of: (post_url, youtube_token). + """ + try: + response = self.Post(video_entry, uri) + except gdata.service.RequestError, e: + raise YouTubeError(e.args[0]) + + tree = ElementTree.fromstring(response) + + for child in tree: + if child.tag == 'url': + post_url = child.text + elif child.tag == 'token': + youtube_token = child.text + return (post_url, youtube_token) + + def UpdateVideoEntry(self, video_entry): + """Updates a video entry's meta-data. + + Needs authentication. + + Args: + video_entry: The YouTubeVideoEntry to update, containing updated + meta-data. + + Returns: + An updated YouTubeVideoEntry on success or None. + """ + for link in video_entry.link: + if link.rel == 'edit': + edit_uri = link.href + return self.Put(video_entry, uri=edit_uri, + converter=gdata.youtube.YouTubeVideoEntryFromString) + + def DeleteVideoEntry(self, video_entry): + """Deletes a video entry. + + Needs authentication. + + Args: + video_entry: The YouTubeVideoEntry to be deleted. + + Returns: + True if entry was deleted successfully. + """ + for link in video_entry.link: + if link.rel == 'edit': + edit_uri = link.href + return self.Delete(edit_uri) + + def AddRating(self, rating_value, video_entry): + """Add a rating to a video entry. + + Needs authentication. + + Args: + rating_value: The integer value for the rating (between 1 and 5). + video_entry: The YouTubeVideoEntry to be rated. + + Returns: + True if the rating was added successfully. + + Raises: + YouTubeError: rating_value must be between 1 and 5 in AddRating(). + """ + if rating_value < 1 or rating_value > 5: + raise YouTubeError('rating_value must be between 1 and 5 in AddRating()') + + entry = gdata.GDataEntry() + rating = gdata.youtube.Rating(min='1', max='5') + rating.extension_attributes['name'] = 'value' + rating.extension_attributes['value'] = str(rating_value) + entry.extension_elements.append(rating) + + for link in video_entry.link: + if link.rel == YOUTUBE_RATING_LINK_REL: + rating_uri = link.href + + return self.Post(entry, uri=rating_uri) + + def AddComment(self, comment_text, video_entry): + """Add a comment to a video entry. + + Needs authentication. Note that each comment that is posted must contain + the video entry that it is to be posted to. + + Args: + comment_text: A string representing the text of the comment. + video_entry: The YouTubeVideoEntry to be commented on. + + Returns: + True if the comment was added successfully. + """ + content = atom.Content(text=comment_text) + comment_entry = gdata.youtube.YouTubeVideoCommentEntry(content=content) + comment_post_uri = video_entry.comments.feed_link[0].href + + return self.Post(comment_entry, uri=comment_post_uri) + + def AddVideoResponse(self, video_id_to_respond_to, video_response): + """Add a video response. + + Needs authentication. + + Args: + video_id_to_respond_to: A string representing the ID of the video to be + responded to. + video_response: YouTubeVideoEntry to be posted as a response. + + Returns: + True if video response was posted successfully. + """ + post_uri = '%s/%s/%s' % (YOUTUBE_VIDEO_URI, video_id_to_respond_to, + 'responses') + return self.Post(video_response, uri=post_uri) + + def DeleteVideoResponse(self, video_id, response_video_id): + """Delete a video response. + + Needs authentication. 
+ + Args: + video_id: A string representing the ID of video that contains the + response. + response_video_id: A string representing the ID of the video that was + posted as a response. + + Returns: + True if video response was deleted succcessfully. + """ + delete_uri = '%s/%s/%s/%s' % (YOUTUBE_VIDEO_URI, video_id, 'responses', + response_video_id) + return self.Delete(delete_uri) + + def AddComplaint(self, complaint_text, complaint_term, video_id): + """Add a complaint for a particular video entry. + + Needs authentication. + + Args: + complaint_text: A string representing the complaint text. + complaint_term: A string representing the complaint category term. + video_id: A string representing the ID of YouTubeVideoEntry to + complain about. + + Returns: + True if posted successfully. + + Raises: + YouTubeError: Your complaint_term is not valid. + """ + if complaint_term not in YOUTUBE_COMPLAINT_CATEGORY_TERMS: + raise YouTubeError('Your complaint_term is not valid') + + content = atom.Content(text=complaint_text) + category = atom.Category(term=complaint_term, + scheme=YOUTUBE_COMPLAINT_CATEGORY_SCHEME) + + complaint_entry = gdata.GDataEntry(content=content, category=[category]) + post_uri = '%s/%s/%s' % (YOUTUBE_VIDEO_URI, video_id, 'complaints') + + return self.Post(complaint_entry, post_uri) + + def AddVideoEntryToFavorites(self, video_entry, username='default'): + """Add a video entry to a users favorite feed. + + Needs authentication. + + Args: + video_entry: The YouTubeVideoEntry to add. + username: An optional string representing the username to whose favorite + feed you wish to add the entry. Defaults to the currently + authenticated user. + Returns: + The posted YouTubeVideoEntry if successfully posted. + """ + post_uri = '%s/%s/%s' % (YOUTUBE_USER_FEED_URI, username, 'favorites') + + return self.Post(video_entry, post_uri, + converter=gdata.youtube.YouTubeVideoEntryFromString) + + def DeleteVideoEntryFromFavorites(self, video_id, username='default'): + """Delete a video entry from the users favorite feed. + + Needs authentication. + + Args: + video_id: A string representing the ID of the video that is to be removed + username: An optional string representing the username of the user's + favorite feed. Defaults to the currently authenticated user. + + Returns: + True if entry was successfully deleted. + """ + edit_link = '%s/%s/%s/%s' % (YOUTUBE_USER_FEED_URI, username, 'favorites', + video_id) + return self.Delete(edit_link) + + def AddPlaylist(self, playlist_title, playlist_description, + playlist_private=None): + """Add a new playlist to the currently authenticated users account. + + Needs authentication. + + Args: + playlist_title: A string representing the title for the new playlist. + playlist_description: A string representing the description of the + playlist. + playlist_private: An optional boolean, set to True if the playlist is + to be private. + + Returns: + The YouTubePlaylistEntry if successfully posted. 
+ """ + playlist_entry = gdata.youtube.YouTubePlaylistEntry( + title=atom.Title(text=playlist_title), + description=gdata.youtube.Description(text=playlist_description)) + if playlist_private: + playlist_entry.private = gdata.youtube.Private() + + playlist_post_uri = '%s/%s/%s' % (YOUTUBE_USER_FEED_URI, 'default', + 'playlists') + return self.Post(playlist_entry, playlist_post_uri, + converter=gdata.youtube.YouTubePlaylistEntryFromString) + + def UpdatePlaylist(self, playlist_id, new_playlist_title, + new_playlist_description, playlist_private=None, + username='default'): + """Update a playlist with new meta-data. + + Needs authentication. + + Args: + playlist_id: A string representing the ID of the playlist to be updated. + new_playlist_title: A string representing a new title for the playlist. + new_playlist_description: A string representing a new description for the + playlist. + playlist_private: An optional boolean, set to True if the playlist is + to be private. + username: An optional string representing the username whose playlist is + to be updated. Defaults to the currently authenticated user. + + Returns: + A YouTubePlaylistEntry if the update was successful. + """ + updated_playlist = gdata.youtube.YouTubePlaylistEntry( + title=atom.Title(text=new_playlist_title), + description=gdata.youtube.Description(text=new_playlist_description)) + if playlist_private: + updated_playlist.private = gdata.youtube.Private() + + playlist_put_uri = '%s/%s/playlists/%s' % (YOUTUBE_USER_FEED_URI, username, + playlist_id) + + return self.Put(updated_playlist, playlist_put_uri, + converter=gdata.youtube.YouTubePlaylistEntryFromString) + + def DeletePlaylist(self, playlist_uri): + """Delete a playlist from the currently authenticated users playlists. + + Needs authentication. + + Args: + playlist_uri: A string representing the URI of the playlist that is + to be deleted. + + Returns: + True if successfully deleted. + """ + return self.Delete(playlist_uri) + + def AddPlaylistVideoEntryToPlaylist( + self, playlist_uri, video_id, custom_video_title=None, + custom_video_description=None): + """Add a video entry to a playlist, optionally providing a custom title + and description. + + Needs authentication. + + Args: + playlist_uri: A string representing the URI of the playlist to which this + video entry is to be added. + video_id: A string representing the ID of the video entry to add. + custom_video_title: An optional string representing a custom title for + the video (only shown on the playlist). + custom_video_description: An optional string representing a custom + description for the video (only shown on the playlist). + + Returns: + A YouTubePlaylistVideoEntry if successfully posted. + """ + playlist_video_entry = gdata.youtube.YouTubePlaylistVideoEntry( + atom_id=atom.Id(text=video_id)) + if custom_video_title: + playlist_video_entry.title = atom.Title(text=custom_video_title) + if custom_video_description: + playlist_video_entry.description = gdata.youtube.Description( + text=custom_video_description) + + return self.Post(playlist_video_entry, playlist_uri, + converter=gdata.youtube.YouTubePlaylistVideoEntryFromString) + + def UpdatePlaylistVideoEntryMetaData( + self, playlist_uri, playlist_entry_id, new_video_title, + new_video_description, new_video_position): + """Update the meta data for a YouTubePlaylistVideoEntry. + + Needs authentication. + + Args: + playlist_uri: A string representing the URI of the playlist that contains + the entry to be updated. 
+ playlist_entry_id: A string representing the ID of the entry to be + updated. + new_video_title: A string representing the new title for the video entry. + new_video_description: A string representing the new description for + the video entry. + new_video_position: An integer representing the new position on the + playlist for the video. + + Returns: + A YouTubePlaylistVideoEntry if the update was successful. + """ + playlist_video_entry = gdata.youtube.YouTubePlaylistVideoEntry( + title=atom.Title(text=new_video_title), + description=gdata.youtube.Description(text=new_video_description), + position=gdata.youtube.Position(text=str(new_video_position))) + + playlist_put_uri = playlist_uri + '/' + playlist_entry_id + + return self.Put(playlist_video_entry, playlist_put_uri, + converter=gdata.youtube.YouTubePlaylistVideoEntryFromString) + + def DeletePlaylistVideoEntry(self, playlist_uri, playlist_video_entry_id): + """Delete a playlist video entry from a playlist. + + Needs authentication. + + Args: + playlist_uri: A URI representing the playlist from which the playlist + video entry is to be removed from. + playlist_video_entry_id: A string representing id of the playlist video + entry that is to be removed. + + Returns: + True if entry was successfully deleted. + """ + delete_uri = '%s/%s' % (playlist_uri, playlist_video_entry_id) + return self.Delete(delete_uri) + + def AddSubscriptionToChannel(self, username_to_subscribe_to, + my_username = 'default'): + """Add a new channel subscription to the currently authenticated users + account. + + Needs authentication. + + Args: + username_to_subscribe_to: A string representing the username of the + channel to which we want to subscribe to. + my_username: An optional string representing the name of the user which + we want to subscribe. Defaults to currently authenticated user. + + Returns: + A new YouTubeSubscriptionEntry if successfully posted. + """ + subscription_category = atom.Category( + scheme=YOUTUBE_SUBSCRIPTION_CATEGORY_SCHEME, + term='channel') + subscription_username = gdata.youtube.Username( + text=username_to_subscribe_to) + + subscription_entry = gdata.youtube.YouTubeSubscriptionEntry( + category=subscription_category, + username=subscription_username) + + post_uri = '%s/%s/%s' % (YOUTUBE_USER_FEED_URI, my_username, + 'subscriptions') + + return self.Post(subscription_entry, post_uri, + converter=gdata.youtube.YouTubeSubscriptionEntryFromString) + + def AddSubscriptionToFavorites(self, username, my_username = 'default'): + """Add a new subscription to a users favorites to the currently + authenticated user's account. + + Needs authentication + + Args: + username: A string representing the username of the user's favorite feed + to subscribe to. + my_username: An optional string representing the username of the user + that is to be subscribed. Defaults to currently authenticated user. + + Returns: + A new YouTubeSubscriptionEntry if successful. 
+ """ + subscription_category = atom.Category( + scheme=YOUTUBE_SUBSCRIPTION_CATEGORY_SCHEME, + term='favorites') + subscription_username = gdata.youtube.Username(text=username) + + subscription_entry = gdata.youtube.YouTubeSubscriptionEntry( + category=subscription_category, + username=subscription_username) + + post_uri = '%s/%s/%s' % (YOUTUBE_USER_FEED_URI, my_username, + 'subscriptions') + + return self.Post(subscription_entry, post_uri, + converter=gdata.youtube.YouTubeSubscriptionEntryFromString) + + def AddSubscriptionToQuery(self, query, my_username = 'default'): + """Add a new subscription to a specific keyword query to the currently + authenticated user's account. + + Needs authentication + + Args: + query: A string representing the keyword query to subscribe to. + my_username: An optional string representing the username of the user + that is to be subscribed. Defaults to currently authenticated user. + + Returns: + A new YouTubeSubscriptionEntry if successful. + """ + subscription_category = atom.Category( + scheme=YOUTUBE_SUBSCRIPTION_CATEGORY_SCHEME, + term='query') + subscription_query_string = gdata.youtube.QueryString(text=query) + + subscription_entry = gdata.youtube.YouTubeSubscriptionEntry( + category=subscription_category, + query_string=subscription_query_string) + + post_uri = '%s/%s/%s' % (YOUTUBE_USER_FEED_URI, my_username, + 'subscriptions') + + return self.Post(subscription_entry, post_uri, + converter=gdata.youtube.YouTubeSubscriptionEntryFromString) + + + + def DeleteSubscription(self, subscription_uri): + """Delete a subscription from the currently authenticated user's account. + + Needs authentication. + + Args: + subscription_uri: A string representing the URI of the subscription that + is to be deleted. + + Returns: + True if deleted successfully. + """ + return self.Delete(subscription_uri) + + def AddContact(self, contact_username, my_username='default'): + """Add a new contact to the currently authenticated user's contact feed. + + Needs authentication. + + Args: + contact_username: A string representing the username of the contact + that you wish to add. + my_username: An optional string representing the username to whose + contact the new contact is to be added. + + Returns: + A YouTubeContactEntry if added successfully. + """ + contact_category = atom.Category( + scheme = 'http://gdata.youtube.com/schemas/2007/contact.cat', + term = 'Friends') + contact_username = gdata.youtube.Username(text=contact_username) + contact_entry = gdata.youtube.YouTubeContactEntry( + category=contact_category, + username=contact_username) + + contact_post_uri = '%s/%s/%s' % (YOUTUBE_USER_FEED_URI, my_username, + 'contacts') + + return self.Post(contact_entry, contact_post_uri, + converter=gdata.youtube.YouTubeContactEntryFromString) + + def UpdateContact(self, contact_username, new_contact_status, + new_contact_category, my_username='default'): + """Update a contact, providing a new status and a new category. + + Needs authentication. + + Args: + contact_username: A string representing the username of the contact + that is to be updated. + new_contact_status: A string representing the new status of the contact. + This can either be set to 'accepted' or 'rejected'. + new_contact_category: A string representing the new category for the + contact, either 'Friends' or 'Family'. + my_username: An optional string representing the username of the user + whose contact feed we are modifying. Defaults to the currently + authenticated user. 
+ + Returns: + A YouTubeContactEntry if updated succesfully. + + Raises: + YouTubeError: New contact status must be within the accepted values. Or + new contact category must be within the accepted categories. + """ + if new_contact_status not in YOUTUBE_CONTACT_STATUS: + raise YouTubeError('New contact status must be one of %s' % + (' '.join(YOUTUBE_CONTACT_STATUS))) + if new_contact_category not in YOUTUBE_CONTACT_CATEGORY: + raise YouTubeError('New contact category must be one of %s' % + (' '.join(YOUTUBE_CONTACT_CATEGORY))) + + contact_category = atom.Category( + scheme='http://gdata.youtube.com/schemas/2007/contact.cat', + term=new_contact_category) + + contact_status = gdata.youtube.Status(text=new_contact_status) + contact_entry = gdata.youtube.YouTubeContactEntry( + category=contact_category, + status=contact_status) + + contact_put_uri = '%s/%s/%s/%s' % (YOUTUBE_USER_FEED_URI, my_username, + 'contacts', contact_username) + + return self.Put(contact_entry, contact_put_uri, + converter=gdata.youtube.YouTubeContactEntryFromString) + + def DeleteContact(self, contact_username, my_username='default'): + """Delete a contact from a users contact feed. + + Needs authentication. + + Args: + contact_username: A string representing the username of the contact + that is to be deleted. + my_username: An optional string representing the username of the user's + contact feed from which to delete the contact. Defaults to the + currently authenticated user. + + Returns: + True if the contact was deleted successfully + """ + contact_edit_uri = '%s/%s/%s/%s' % (YOUTUBE_USER_FEED_URI, my_username, + 'contacts', contact_username) + return self.Delete(contact_edit_uri) + + def _GetDeveloperKey(self): + """Getter for Developer Key property. + + Returns: + If the developer key has been set, a string representing the developer key + is returned or None. + """ + if 'X-GData-Key' in self.additional_headers: + return self.additional_headers['X-GData-Key'][4:] + else: + return None + + def _SetDeveloperKey(self, developer_key): + """Setter for Developer Key property. + + Sets the developer key in the 'X-GData-Key' header. The actual value that + is set is 'key=' plus the developer_key that was passed. + """ + self.additional_headers['X-GData-Key'] = 'key=' + developer_key + + developer_key = property(_GetDeveloperKey, _SetDeveloperKey, + doc="""The Developer Key property""") + + def _GetClientId(self): + """Getter for Client Id property. + + Returns: + If the client_id has been set, a string representing it is returned + or None. + """ + if 'X-Gdata-Client' in self.additional_headers: + return self.additional_headers['X-Gdata-Client'] + else: + return None + + def _SetClientId(self, client_id): + """Setter for Client Id property. + + Sets the 'X-Gdata-Client' header. + """ + self.additional_headers['X-Gdata-Client'] = client_id + + client_id = property(_GetClientId, _SetClientId, + doc="""The ClientId property""") + + def Query(self, uri): + """Performs a query and returns a resulting feed or entry. + + Args: + uri: A string representing the URI of the feed that is to be queried. 
+
+    Returns:
+      On success, a tuple in the form:
+      (boolean succeeded=True, ElementTree._Element result)
+      On failure, a tuple in the form:
+      (boolean succeeded=False, {'status': HTTP status code from server,
+                                 'reason': HTTP reason from the server,
+                                 'body': HTTP body of the server's response})
+    """
+    result = self.Get(uri)
+    return result
+
+  def YouTubeQuery(self, query):
+    """Performs a YouTube-specific query and returns a resulting feed or entry.
+
+    Args:
+      query: A Query object or one of its sub-classes (YouTubeVideoQuery,
+          YouTubeUserQuery or YouTubePlaylistQuery).
+
+    Returns:
+      Depending on the type of Query object submitted, returns either a
+      YouTubeVideoFeed, a YouTubeUserFeed or a YouTubePlaylistFeed. If the
+      Query object provided was not YouTube-related, a tuple is returned.
+      On success the tuple will be in this form:
+      (boolean succeeded=True, ElementTree._Element result)
+      On failure, the tuple will be in this form:
+      (boolean succeeded=False, {'status': HTTP status code from server,
+                                 'reason': HTTP reason from the server,
+                                 'body': HTTP body of the server response})
+    """
+    result = self.Query(query.ToUri())
+    if isinstance(query, YouTubeUserQuery):
+      return gdata.youtube.YouTubeUserFeedFromString(result.ToString())
+    elif isinstance(query, YouTubePlaylistQuery):
+      return gdata.youtube.YouTubePlaylistFeedFromString(result.ToString())
+    elif isinstance(query, YouTubeVideoQuery):
+      return gdata.youtube.YouTubeVideoFeedFromString(result.ToString())
+    else:
+      return result
+
+class YouTubeVideoQuery(gdata.service.Query):
+
+  """Subclasses gdata.service.Query to represent a YouTube Data API query.
+
+  Attributes are set dynamically via properties. Properties correspond to
+  the standard Google Data API query parameters with YouTube Data API
+  extensions. Please refer to the API documentation for details.
+
+  Attributes:
+    vq: The vq parameter, which is only supported for video feeds, specifies a
+        search query term. Refer to API documentation for further details.
+    orderby: The orderby parameter, which is only supported for video feeds,
+        specifies the value that will be used to sort videos in the search
+        result set. Valid values for this parameter are relevance, published,
+        viewCount and rating.
+    time: The time parameter, which is only available for the top_rated,
+        top_favorites, most_viewed, most_discussed, most_linked and
+        most_responded standard feeds, restricts the search to videos uploaded
+        within the specified time. Valid values for this parameter are today
+        (1 day), this_week (7 days), this_month (1 month) and all_time.
+        The default value for this parameter is all_time.
+    format: The format parameter specifies that videos must be available in a
+        particular video format. Refer to the API documentation for details.
+    racy: The racy parameter allows a search result set to include restricted
+        content as well as standard content. Valid values for this parameter
+        are include and exclude. By default, restricted content is excluded.
+    lr: The lr parameter restricts the search to videos that have a title,
+        description or keywords in a specific language. Valid values for the lr
+        parameter are ISO 639-1 two-letter language codes.
+    restriction: The restriction parameter identifies the IP address that
+        should be used to filter videos that can only be played in specific
+        countries.
+    location: A string of geo coordinates. Note that this is not used when the
+        search is performed but rather to filter the returned videos for ones
+        that match the location entered.
+ feed: str (optional) The base URL which is the beginning of the query URL. + defaults to 'http://%s/feeds/videos' % (YOUTUBE_SERVER) + """ + + def __init__(self, video_id=None, feed_type=None, text_query=None, + params=None, categories=None, feed=None): + + if feed_type in YOUTUBE_STANDARDFEEDS and feed is None: + feed = 'http://%s/feeds/standardfeeds/%s' % (YOUTUBE_SERVER, feed_type) + elif (feed_type is 'responses' or feed_type is 'comments' and video_id + and feed is None): + feed = 'http://%s/feeds/videos/%s/%s' % (YOUTUBE_SERVER, video_id, + feed_type) + elif feed is None: + feed = 'http://%s/feeds/videos' % (YOUTUBE_SERVER) + + gdata.service.Query.__init__(self, feed, text_query=text_query, + params=params, categories=categories) + + def _GetVideoQuery(self): + if 'vq' in self: + return self['vq'] + else: + return None + + def _SetVideoQuery(self, val): + self['vq'] = val + + vq = property(_GetVideoQuery, _SetVideoQuery, + doc="""The video query (vq) query parameter""") + + def _GetOrderBy(self): + if 'orderby' in self: + return self['orderby'] + else: + return None + + def _SetOrderBy(self, val): + if val not in YOUTUBE_QUERY_VALID_ORDERBY_PARAMETERS: + if val.startswith('relevance_lang_') is False: + raise YouTubeError('OrderBy must be one of: %s ' % + ' '.join(YOUTUBE_QUERY_VALID_ORDERBY_PARAMETERS)) + self['orderby'] = val + + orderby = property(_GetOrderBy, _SetOrderBy, + doc="""The orderby query parameter""") + + def _GetTime(self): + if 'time' in self: + return self['time'] + else: + return None + + def _SetTime(self, val): + if val not in YOUTUBE_QUERY_VALID_TIME_PARAMETERS: + raise YouTubeError('Time must be one of: %s ' % + ' '.join(YOUTUBE_QUERY_VALID_TIME_PARAMETERS)) + self['time'] = val + + time = property(_GetTime, _SetTime, + doc="""The time query parameter""") + + def _GetFormat(self): + if 'format' in self: + return self['format'] + else: + return None + + def _SetFormat(self, val): + if val not in YOUTUBE_QUERY_VALID_FORMAT_PARAMETERS: + raise YouTubeError('Format must be one of: %s ' % + ' '.join(YOUTUBE_QUERY_VALID_FORMAT_PARAMETERS)) + self['format'] = val + + format = property(_GetFormat, _SetFormat, + doc="""The format query parameter""") + + def _GetRacy(self): + if 'racy' in self: + return self['racy'] + else: + return None + + def _SetRacy(self, val): + if val not in YOUTUBE_QUERY_VALID_RACY_PARAMETERS: + raise YouTubeError('Racy must be one of: %s ' % + ' '.join(YOUTUBE_QUERY_VALID_RACY_PARAMETERS)) + self['racy'] = val + + racy = property(_GetRacy, _SetRacy, + doc="""The racy query parameter""") + + def _GetLanguageRestriction(self): + if 'lr' in self: + return self['lr'] + else: + return None + + def _SetLanguageRestriction(self, val): + self['lr'] = val + + lr = property(_GetLanguageRestriction, _SetLanguageRestriction, + doc="""The lr (language restriction) query parameter""") + + def _GetIPRestriction(self): + if 'restriction' in self: + return self['restriction'] + else: + return None + + def _SetIPRestriction(self, val): + self['restriction'] = val + + restriction = property(_GetIPRestriction, _SetIPRestriction, + doc="""The restriction query parameter""") + + def _GetLocation(self): + if 'location' in self: + return self['location'] + else: + return None + + def _SetLocation(self, val): + self['location'] = val + + location = property(_GetLocation, _SetLocation, + doc="""The location query parameter""") + + + +class YouTubeUserQuery(YouTubeVideoQuery): + + """Subclasses YouTubeVideoQuery to perform user-specific queries. 
+ + Attributes are set dynamically via properties. Properties correspond to + the standard Google Data API query parameters with YouTube Data API + extensions. + """ + + def __init__(self, username=None, feed_type=None, subscription_id=None, + text_query=None, params=None, categories=None): + + uploads_favorites_playlists = ('uploads', 'favorites', 'playlists') + + if feed_type is 'subscriptions' and subscription_id and username: + feed = "http://%s/feeds/users/%s/%s/%s" % (YOUTUBE_SERVER, username, + feed_type, subscription_id) + elif feed_type is 'subscriptions' and not subscription_id and username: + feed = "http://%s/feeds/users/%s/%s" % (YOUTUBE_SERVER, username, + feed_type) + elif feed_type in uploads_favorites_playlists: + feed = "http://%s/feeds/users/%s/%s" % (YOUTUBE_SERVER, username, + feed_type) + else: + feed = "http://%s/feeds/users" % (YOUTUBE_SERVER) + + YouTubeVideoQuery.__init__(self, feed=feed, text_query=text_query, + params=params, categories=categories) + + +class YouTubePlaylistQuery(YouTubeVideoQuery): + + """Subclasses YouTubeVideoQuery to perform playlist-specific queries. + + Attributes are set dynamically via properties. Properties correspond to + the standard Google Data API query parameters with YouTube Data API + extensions. + """ + + def __init__(self, playlist_id, text_query=None, params=None, + categories=None): + if playlist_id: + feed = "http://%s/feeds/playlists/%s" % (YOUTUBE_SERVER, playlist_id) + else: + feed = "http://%s/feeds/playlists" % (YOUTUBE_SERVER) + + YouTubeVideoQuery.__init__(self, feed=feed, text_query=text_query, + params=params, categories=categories) diff --git a/gam/gdata/apps/__init__.py b/gam/gdata/apps/__init__.py new file mode 100755 index 00000000000..ebdf98ec9af --- /dev/null +++ b/gam/gdata/apps/__init__.py @@ -0,0 +1,526 @@ +#!/usr/bin/python +# +# Copyright (C) 2007 SIOS Technology, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Contains objects used with Google Apps.""" + +__author__ = 'tmatsuo@sios.com (Takashi MATSUO)' + + +import atom +import gdata + + +# XML namespaces which are often used in Google Apps entity. 
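+# For example, APPS_TEMPLATE % 'login' (defined just below) evaluates to the
+# fully qualified tag name '{http://schemas.google.com/apps/2006}login'.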
+APPS_NAMESPACE = 'http://schemas.google.com/apps/2006' +APPS_TEMPLATE = '{http://schemas.google.com/apps/2006}%s' + + +class EmailList(atom.AtomBase): + """The Google Apps EmailList element""" + + _tag = 'emailList' + _namespace = APPS_NAMESPACE + _children = atom.AtomBase._children.copy() + _attributes = atom.AtomBase._attributes.copy() + _attributes['name'] = 'name' + + def __init__(self, name=None, extension_elements=None, + extension_attributes=None, text=None): + self.name = name + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + +def EmailListFromString(xml_string): + return atom.CreateClassFromXMLString(EmailList, xml_string) + + +class Who(atom.AtomBase): + """The Google Apps Who element""" + + _tag = 'who' + _namespace = gdata.GDATA_NAMESPACE + _children = atom.AtomBase._children.copy() + _attributes = atom.AtomBase._attributes.copy() + _attributes['rel'] = 'rel' + _attributes['email'] = 'email' + + def __init__(self, rel=None, email=None, extension_elements=None, + extension_attributes=None, text=None): + self.rel = rel + self.email = email + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + +def WhoFromString(xml_string): + return atom.CreateClassFromXMLString(Who, xml_string) + + +class Login(atom.AtomBase): + """The Google Apps Login element""" + + _tag = 'login' + _namespace = APPS_NAMESPACE + _children = atom.AtomBase._children.copy() + _attributes = atom.AtomBase._attributes.copy() + _attributes['userName'] = 'user_name' + _attributes['password'] = 'password' + _attributes['suspended'] = 'suspended' + _attributes['admin'] = 'admin' + _attributes['changePasswordAtNextLogin'] = 'change_password' + _attributes['agreedToTerms'] = 'agreed_to_terms' + _attributes['ipWhitelisted'] = 'ip_whitelisted' + _attributes['hashFunctionName'] = 'hash_function_name' + + def __init__(self, user_name=None, password=None, suspended=None, + ip_whitelisted=None, hash_function_name=None, + admin=None, change_password=None, agreed_to_terms=None, + extension_elements=None, extension_attributes=None, + text=None): + self.user_name = user_name + self.password = password + self.suspended = suspended + self.admin = admin + self.change_password = change_password + self.agreed_to_terms = agreed_to_terms + self.ip_whitelisted = ip_whitelisted + self.hash_function_name = hash_function_name + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + +def LoginFromString(xml_string): + return atom.CreateClassFromXMLString(Login, xml_string) + + +class Quota(atom.AtomBase): + """The Google Apps Quota element""" + + _tag = 'quota' + _namespace = APPS_NAMESPACE + _children = atom.AtomBase._children.copy() + _attributes = atom.AtomBase._attributes.copy() + _attributes['limit'] = 'limit' + + def __init__(self, limit=None, extension_elements=None, + extension_attributes=None, text=None): + self.limit = limit + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + +def QuotaFromString(xml_string): + return atom.CreateClassFromXMLString(Quota, xml_string) + + +class Name(atom.AtomBase): + """The Google Apps Name element""" + + _tag = 'name' + _namespace = APPS_NAMESPACE + _children = atom.AtomBase._children.copy() + _attributes = atom.AtomBase._attributes.copy() + _attributes['familyName'] = 'family_name' + 
_attributes['givenName'] = 'given_name' + + def __init__(self, family_name=None, given_name=None, + extension_elements=None, extension_attributes=None, text=None): + self.family_name = family_name + self.given_name = given_name + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + +def NameFromString(xml_string): + return atom.CreateClassFromXMLString(Name, xml_string) + + +class Nickname(atom.AtomBase): + """The Google Apps Nickname element""" + + _tag = 'nickname' + _namespace = APPS_NAMESPACE + _children = atom.AtomBase._children.copy() + _attributes = atom.AtomBase._attributes.copy() + _attributes['name'] = 'name' + + def __init__(self, name=None, + extension_elements=None, extension_attributes=None, text=None): + self.name = name + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + +def NicknameFromString(xml_string): + return atom.CreateClassFromXMLString(Nickname, xml_string) + + +class NicknameEntry(gdata.GDataEntry): + """A Google Apps flavor of an Atom Entry for Nickname""" + + _tag = 'entry' + _namespace = atom.ATOM_NAMESPACE + _children = gdata.GDataEntry._children.copy() + _attributes = gdata.GDataEntry._attributes.copy() + _children['{%s}login' % APPS_NAMESPACE] = ('login', Login) + _children['{%s}nickname' % APPS_NAMESPACE] = ('nickname', Nickname) + + def __init__(self, author=None, category=None, content=None, + atom_id=None, link=None, published=None, + title=None, updated=None, + login=None, nickname=None, + extended_property=None, + extension_elements=None, extension_attributes=None, text=None): + + gdata.GDataEntry.__init__(self, author=author, category=category, + content=content, + atom_id=atom_id, link=link, published=published, + title=title, updated=updated) + self.login = login + self.nickname = nickname + self.extended_property = extended_property or [] + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + +def NicknameEntryFromString(xml_string): + return atom.CreateClassFromXMLString(NicknameEntry, xml_string) + + +class NicknameFeed(gdata.GDataFeed, gdata.LinkFinder): + """A Google Apps Nickname feed flavor of an Atom Feed""" + + _tag = 'feed' + _namespace = atom.ATOM_NAMESPACE + _children = gdata.GDataFeed._children.copy() + _attributes = gdata.GDataFeed._attributes.copy() + _children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry', [NicknameEntry]) + + def __init__(self, author=None, category=None, contributor=None, + generator=None, icon=None, atom_id=None, link=None, logo=None, + rights=None, subtitle=None, title=None, updated=None, + entry=None, total_results=None, start_index=None, + items_per_page=None, extension_elements=None, + extension_attributes=None, text=None): + gdata.GDataFeed.__init__(self, author=author, category=category, + contributor=contributor, generator=generator, + icon=icon, atom_id=atom_id, link=link, + logo=logo, rights=rights, subtitle=subtitle, + title=title, updated=updated, entry=entry, + total_results=total_results, + start_index=start_index, + items_per_page=items_per_page, + extension_elements=extension_elements, + extension_attributes=extension_attributes, + text=text) + + +def NicknameFeedFromString(xml_string): + return atom.CreateClassFromXMLString(NicknameFeed, xml_string) + + +class UserEntry(gdata.GDataEntry): + """A Google Apps flavor of an Atom Entry""" + + _tag = 'entry' + _namespace 
= atom.ATOM_NAMESPACE + _children = gdata.GDataEntry._children.copy() + _attributes = gdata.GDataEntry._attributes.copy() + _children['{%s}login' % APPS_NAMESPACE] = ('login', Login) + _children['{%s}name' % APPS_NAMESPACE] = ('name', Name) + _children['{%s}quota' % APPS_NAMESPACE] = ('quota', Quota) + # This child may already be defined in GDataEntry, confirm before removing. + _children['{%s}feedLink' % gdata.GDATA_NAMESPACE] = ('feed_link', + [gdata.FeedLink]) + _children['{%s}who' % gdata.GDATA_NAMESPACE] = ('who', Who) + + def __init__(self, author=None, category=None, content=None, + atom_id=None, link=None, published=None, + title=None, updated=None, + login=None, name=None, quota=None, who=None, feed_link=None, + extended_property=None, + extension_elements=None, extension_attributes=None, text=None): + + gdata.GDataEntry.__init__(self, author=author, category=category, + content=content, + atom_id=atom_id, link=link, published=published, + title=title, updated=updated) + self.login = login + self.name = name + self.quota = quota + self.who = who + self.feed_link = feed_link or [] + self.extended_property = extended_property or [] + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + +def UserEntryFromString(xml_string): + return atom.CreateClassFromXMLString(UserEntry, xml_string) + + +class UserFeed(gdata.GDataFeed, gdata.LinkFinder): + """A Google Apps User feed flavor of an Atom Feed""" + + _tag = 'feed' + _namespace = atom.ATOM_NAMESPACE + _children = gdata.GDataFeed._children.copy() + _attributes = gdata.GDataFeed._attributes.copy() + _children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry', [UserEntry]) + + def __init__(self, author=None, category=None, contributor=None, + generator=None, icon=None, atom_id=None, link=None, logo=None, + rights=None, subtitle=None, title=None, updated=None, + entry=None, total_results=None, start_index=None, + items_per_page=None, extension_elements=None, + extension_attributes=None, text=None): + gdata.GDataFeed.__init__(self, author=author, category=category, + contributor=contributor, generator=generator, + icon=icon, atom_id=atom_id, link=link, + logo=logo, rights=rights, subtitle=subtitle, + title=title, updated=updated, entry=entry, + total_results=total_results, + start_index=start_index, + items_per_page=items_per_page, + extension_elements=extension_elements, + extension_attributes=extension_attributes, + text=text) + + +def UserFeedFromString(xml_string): + return atom.CreateClassFromXMLString(UserFeed, xml_string) + + +class EmailListEntry(gdata.GDataEntry): + """A Google Apps EmailList flavor of an Atom Entry""" + + _tag = 'entry' + _namespace = atom.ATOM_NAMESPACE + _children = gdata.GDataEntry._children.copy() + _attributes = gdata.GDataEntry._attributes.copy() + _children['{%s}emailList' % APPS_NAMESPACE] = ('email_list', EmailList) + # Might be able to remove this _children entry. 
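+  # (Kept for now: the gd:feedLink child appears to be how an email list entry
+  # points at its recipients feed, so it still needs to be parsed here.)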
+ _children['{%s}feedLink' % gdata.GDATA_NAMESPACE] = ('feed_link', + [gdata.FeedLink]) + + def __init__(self, author=None, category=None, content=None, + atom_id=None, link=None, published=None, + title=None, updated=None, + email_list=None, feed_link=None, + extended_property=None, + extension_elements=None, extension_attributes=None, text=None): + + gdata.GDataEntry.__init__(self, author=author, category=category, + content=content, + atom_id=atom_id, link=link, published=published, + title=title, updated=updated) + self.email_list = email_list + self.feed_link = feed_link or [] + self.extended_property = extended_property or [] + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + +def EmailListEntryFromString(xml_string): + return atom.CreateClassFromXMLString(EmailListEntry, xml_string) + + +class EmailListFeed(gdata.GDataFeed, gdata.LinkFinder): + """A Google Apps EmailList feed flavor of an Atom Feed""" + + _tag = 'feed' + _namespace = atom.ATOM_NAMESPACE + _children = gdata.GDataFeed._children.copy() + _attributes = gdata.GDataFeed._attributes.copy() + _children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry', [EmailListEntry]) + + def __init__(self, author=None, category=None, contributor=None, + generator=None, icon=None, atom_id=None, link=None, logo=None, + rights=None, subtitle=None, title=None, updated=None, + entry=None, total_results=None, start_index=None, + items_per_page=None, extension_elements=None, + extension_attributes=None, text=None): + gdata.GDataFeed.__init__(self, author=author, category=category, + contributor=contributor, generator=generator, + icon=icon, atom_id=atom_id, link=link, + logo=logo, rights=rights, subtitle=subtitle, + title=title, updated=updated, entry=entry, + total_results=total_results, + start_index=start_index, + items_per_page=items_per_page, + extension_elements=extension_elements, + extension_attributes=extension_attributes, + text=text) + + +def EmailListFeedFromString(xml_string): + return atom.CreateClassFromXMLString(EmailListFeed, xml_string) + + +class EmailListRecipientEntry(gdata.GDataEntry): + """A Google Apps EmailListRecipient flavor of an Atom Entry""" + + _tag = 'entry' + _namespace = atom.ATOM_NAMESPACE + _children = gdata.GDataEntry._children.copy() + _attributes = gdata.GDataEntry._attributes.copy() + _children['{%s}who' % gdata.GDATA_NAMESPACE] = ('who', Who) + + def __init__(self, author=None, category=None, content=None, + atom_id=None, link=None, published=None, + title=None, updated=None, + who=None, + extended_property=None, + extension_elements=None, extension_attributes=None, text=None): + + gdata.GDataEntry.__init__(self, author=author, category=category, + content=content, + atom_id=atom_id, link=link, published=published, + title=title, updated=updated) + self.who = who + self.extended_property = extended_property or [] + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + +def EmailListRecipientEntryFromString(xml_string): + return atom.CreateClassFromXMLString(EmailListRecipientEntry, xml_string) + + +class EmailListRecipientFeed(gdata.GDataFeed, gdata.LinkFinder): + """A Google Apps EmailListRecipient feed flavor of an Atom Feed""" + + _tag = 'feed' + _namespace = atom.ATOM_NAMESPACE + _children = gdata.GDataFeed._children.copy() + _attributes = gdata.GDataFeed._attributes.copy() + _children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry', + 
[EmailListRecipientEntry]) + + def __init__(self, author=None, category=None, contributor=None, + generator=None, icon=None, atom_id=None, link=None, logo=None, + rights=None, subtitle=None, title=None, updated=None, + entry=None, total_results=None, start_index=None, + items_per_page=None, extension_elements=None, + extension_attributes=None, text=None): + gdata.GDataFeed.__init__(self, author=author, category=category, + contributor=contributor, generator=generator, + icon=icon, atom_id=atom_id, link=link, + logo=logo, rights=rights, subtitle=subtitle, + title=title, updated=updated, entry=entry, + total_results=total_results, + start_index=start_index, + items_per_page=items_per_page, + extension_elements=extension_elements, + extension_attributes=extension_attributes, + text=text) + + +def EmailListRecipientFeedFromString(xml_string): + return atom.CreateClassFromXMLString(EmailListRecipientFeed, xml_string) + + +class Property(atom.AtomBase): + """The Google Apps Property element""" + + _tag = 'property' + _namespace = APPS_NAMESPACE + _children = atom.AtomBase._children.copy() + _attributes = atom.AtomBase._attributes.copy() + _attributes['name'] = 'name' + _attributes['value'] = 'value' + + def __init__(self, name=None, value=None, extension_elements=None, + extension_attributes=None, text=None): + self.name = name + self.value = value + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + +def PropertyFromString(xml_string): + return atom.CreateClassFromXMLString(Property, xml_string) + + +class PropertyEntry(gdata.GDataEntry): + """A Google Apps Property flavor of an Atom Entry""" + + _tag = 'entry' + _namespace = atom.ATOM_NAMESPACE + _children = gdata.GDataEntry._children.copy() + _attributes = gdata.GDataEntry._attributes.copy() + _children['{%s}property' % APPS_NAMESPACE] = ('property', [Property]) + + def __init__(self, author=None, category=None, content=None, + atom_id=None, link=None, published=None, + title=None, updated=None, + property=None, + extended_property=None, + extension_elements=None, extension_attributes=None, text=None): + + gdata.GDataEntry.__init__(self, author=author, category=category, + content=content, + atom_id=atom_id, link=link, published=published, + title=title, updated=updated) + self.property = property + self.extended_property = extended_property or [] + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + +def PropertyEntryFromString(xml_string): + return atom.CreateClassFromXMLString(PropertyEntry, xml_string) + +class PropertyFeed(gdata.GDataFeed, gdata.LinkFinder): + """A Google Apps Property feed flavor of an Atom Feed""" + + _tag = 'feed' + _namespace = atom.ATOM_NAMESPACE + _children = gdata.GDataFeed._children.copy() + _attributes = gdata.GDataFeed._attributes.copy() + _children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry', [PropertyEntry]) + + def __init__(self, author=None, category=None, contributor=None, + generator=None, icon=None, atom_id=None, link=None, logo=None, + rights=None, subtitle=None, title=None, updated=None, + entry=None, total_results=None, start_index=None, + items_per_page=None, extension_elements=None, + extension_attributes=None, text=None): + gdata.GDataFeed.__init__(self, author=author, category=category, + contributor=contributor, generator=generator, + icon=icon, atom_id=atom_id, link=link, + logo=logo, rights=rights, subtitle=subtitle, + title=title, 
updated=updated, entry=entry, + total_results=total_results, + start_index=start_index, + items_per_page=items_per_page, + extension_elements=extension_elements, + extension_attributes=extension_attributes, + text=text) + +def PropertyFeedFromString(xml_string): + return atom.CreateClassFromXMLString(PropertyFeed, xml_string) diff --git a/gam/gdata/apps/__init__.pyc b/gam/gdata/apps/__init__.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e7085213156108aded4bbc82f2320f0bb33770df GIT binary patch literal 19987 zcmeHP+ix6K89(cF9N({LZcdubt=+VZou>B^AWq_hLgKm;_vTWD$f|+sj~KbK6B=q zbDQ)1&hPuqth4_f8GK~z*NX+!_+;_-JP!SFk5aZ$%a9qhoKbdfU<`~V^G<{ z35}6RV@TPfqA{%Ov4qBWq%orG3DFo;_GCh1D$*EJ_8!p~SN1~*jlGe^gtGUE#-y_M zD<`W~r=%|jB8@%DJ}4RwDSKKp_KMu6Rxl`sBE9{}J}i0%l>Km`q$82WL1pJeV_Mlq zHE)DtO7xB@`>{kxk4GBEl>LNg z99Q<~O(o4ldMA`UD|#oDeI_xIPevM#DEq8vJgV%c5_LTtY2=mtjA)!v_OqMndM?s? zOxe$i-s7Br*YhuMc0LMm(X0BFTh%%5#;Q~Bb#6^NcJ7wzmvUD;uUK|+bG4eD(MO@{ zS1ezzdFQq3>6wC8$>o=1+N7W8rGmVCeV;_R$0l$?sCXNpo5 zZ<@7G-t5^kXP%Y%-`j@b`zN(OfHLPRmRr8+YCkfxUC6zFL%$8-z&KzskeEa;x2$qt zR=bqbr&f2X)qb@)KvRMNHF5^S92iWGazvi9VTaLgR^3Js!^*gxXH_x^OC={cG`Xl% zIj8JYoT@KvMX!Ul`n|ZZYE_&9OW?tbi#YVt5E~h_p_GY4uOx8VqiWNth#chisEw?u zJ*M2?6iA`)^fI_{A;%_mQKmt z+9|I{S%#F#W*S+DJdQxMM^UO1+YwLc= ztDe^NLN5R0soA1!`Bw12P1CKqzG==tr5`1Q=d3_8L-h<~1~Oxrh5QIk8af;|)AR?C zW11DuUMur-x18Eu0ckeZ^sS;lip1RF;&pRj?%MqI#kq^~!EG!;*Q|mgs+R9nr0h8) zE?C-;hh-K@ZrQHm*|E5Shc*te+<2tq!h%GzJP+M{wwIh`vl;501zD-9 zqEsmJNk;44O4-zMA?{-5dgo7}C6T$X9YG^zk*ztARmNwtUoUwDp7GO%LoB?4gwxK# zSFvhh$qX%gbx@2POJtBUq#8L`{9xoF&jACO%Bpi%bDMs9)oqkFLW^hBdK#kD;?cgy z*4f@$B~RL2cgj93M{K{q+j($GUBsck2oamRJTDXGzAItqCqlc=5)W!mg?0}>itS#E z^8gAn4pOiMvDHee4c2SKDr|xww^2EbM=NH*4r|fxr@Biv#y)iw2R{ zV9{c8#zRcu=s3i3_%Kal8=FT)Xk(lSSG}TJEg&JEVMwvE-yq?%v$DHeM`>8SvZD5i ziE(Y+g%s=*sXnB3OR66!SSs=cks6ZJFj6Cu8bt~$bL-zRb$h6>M00-V!w{_oE1iu@ zmfbJ2`-1)8J9U$bge(g@TL@2TK+9WR-4;E)rfW{sc5IormR-U9oSKD_RV_MZq)V#{ z@)jGn&-n{jlcf!EhgGaQj&1s$={xm`_W7)Wyd`f*a%eMd&Ae4|eWwh*gG#C7)?QEK za9)Nm%S;ankB!Sc2k^nd9mju8n z;3-fDfC~ykatR6ps=*nrv?R5dq5Fg)TI!D#PvM*y%2y-ud7JF!o+*1^BP_TJ zHX>6lwvlhMD-9drGne%Pf8|47DJHUtgwsi$Kz4u-SMs+{sJJ-a`TMqIO+>-;bxVr z4Qcswdtu6<)=h@iXjEuy!j`LzOtHLlRwj;1tG&c4lU(hRsr(czxxZGX=h5OHvDKSb zrtFR!SQebJnb5XFLx@e~7wk>LREScc#(dSUuSfPm=68XbpFp%-rS2|fG|X)Y2iq%% z8jYyE;@Xaed5AAdUUKEyI*oO0R4tD)#3bt)L1(Thq`XqL-tp==ZzX3{bBt8xq=UH? 
zWQJW9N{EPMech0NAy=~ZPU`o?vo&5|_vu$KD>Tyr2Zjd+_aYAcU5FrlxGM1|gw5d< zB2I=NGCeFHqtgiO!O&M6Qnj~2KM;40 zN}w1Cq?(m+>WvFH@YSn4{oKG&tJEB?>+)sld3*RF}Z(cc@D< z_@s7EOGJ4@9MmyLx~39YYq#QawF*M(OD|t*4bO29jd=o-?=Bz%M?` zeuYCbB$|xY?aAzsjRoXob_RN^B4yBHU@)|AnUgW{{gIt(ARp~IRJ)8&QUrW_zyf?f zMxrCYSImSsC%b9DO~7AE$b9>!2(C}5<V!%Fe0FkJto4L?8X7>#2Im_P93;bmtMNK979Qf zb6(LCfULX4lCK3I_1X==xdhB`Xe4OpBYaRc>yBP4Lz~`__N}^)?Km6gM*BvPv7$}D z8-kccrxlU1&`^QHeT{inDXvkRrw|}qVCrRxMT!^_*;eBf3WMT0 zMVD~#uc#{s+VxFmh9$l~38V;r7V-k{cS1(v3Y#kKl3?KyPsb?)P`S(+gpF<>;7h3I z&p7mch$I3Y1pbYoZ#xayv4WKGkI~Z*0XWHG`1?D1)__0GL+(QhLPprf!4~YJYDchd z8>A2W8Dk+pVn;)WAYL!P&f~a>p)=TvoFH@#DrYjh0oxr=lxzUW-Tn@%+Y@Se53!U+ ztWQF0i$w}*w&RQ(UU6`QGr|EiW|_m`W=;@@d{nY< zSFVel-ffh%fG;Kp`)X#G!Al(>&*Xk*+fXj2a4VW_%`DIIju{A90f`ne0S_^oJgOU* z`x@%`Ck}lKB8j%)!jQ2KyY_A^T)iDWmTcs)%qkhTgq=u$;WuG~q%>6%KcV zyd(h^aOnGlb#l@)BPF}E72`C%id*h;wrQlo%LKb(?nAW8-}j{OIA^@<tO{%zaeMi6oTWo_`?C%3+g>h>e@%mgcD}1FX zWoAX=?gq0mQP11#OY}JDqV+T_Qyva?`Z=v3)yNa=z8e5mKs}$ZH?1#ZWF;iti493skurqk zP(^!0imB3X69~rjWwG(95Su3-vJl}$9eQcH2d_%wZ4A7QaL4a2%1hJ2esZY1^8ZEf zxsv@pxhf1v0!S^d0CXX60?Ms^&vujk1c8(Io#>r?TaiVlY-Vh9I4NVp`cuNNIKZJs z+MY+eaUZ_#d5@mQ3>w|M_=jEC+TU2geNl)uw95jg7mv@O)4>jR$mxiS>JnP~Ews8+ zoDLda5Qpsoh`6ZiFYHchOrujy2P;S!O6aA9R@UNb{3C%UMD0-RzIHIqq9b?0m9y%kG9-aTE3Ys~tpj>MciBkTOK^K_sy!W;`R{x{Y8+{t1pKS*UF3 znr85Q2-6JyDTYMt2x9V%ul(PerTJ@%SLc@IgMXNTOUhtP!OKvGnUdG?PcubkVmwJf z{4k!QV4T!=kwT(@^4{uKc)A23jo=Gbm-*gs-FS%?UZr@Ag5P~N_%49)7KH@izrhrv z*v2<0EQ%Wxbc_s}LRPmDQ!;<(9~pcAMj0ZHBOyBYZ)oVHoeTb((3d8V)(a5$S3B9^ x%y2KrbN0aaK<|M`9LI1>;FxOsJv`8RphMM~6DTyVC`iTVv#rlaUxtV#1 lr74*u`tk9Zd6^~g@p?ej96&>Da`RJ4b5iXMuwz0;P2;VmC!mm8z7QrbHAJs!b6zH>>IH*j{J7Yi7oA zEx9+ecZfIP5{U~Qga_dP;5##RW3{Q^rncvJcIM3aKi_Qm?^BJ-<3Aq8H2;+Ge;30< zAPLbvXh8b`B_&GAl$B_|yqH&(=GCQnjrMDlR7jO+utI|qG^kS-5~}oq=sD5j{RYe0 zYt~rNZy=pynC6{vlG;6`57Sr)lMY8&+D(-aZd15j(Rui_h-H?UR-^IyA@$U%JTan| zNGn@%G%_vY<+UE_Vx+X4F67%nC~1^PbE~wBZCd1FlG=X5_LbNvie9Ef2LfKz-fEZ+ zVDJMu72hf)ZV%-2dZyIf-cY7lD=vn2(tq&TweKG1*2^%jVE0Q5vjd_KW`qY6F|I8A zu!zSJy+A~l*ZsUqFA()ADbN~)d(A2f+yYrR>EdD7wQixsvS%FcrI0S7WZuK+$z;;% zxd_NnYS^ck%{_r}8si%b^BBYu4FVdJXi%n^E0dWklbNd&w!BI+&>GFEl&sJUlNI_k zs0Fsd2O_MVa1(4e5zz`Ib<{Sy!meT)6 zhUXOMlu190`<#R#&z0+yrSMGBEE7+ld8Bfcw8XaR%5i4Jq0Gjx^$NG)x~n8#R9sM3 z79L%@b}*5gF_)48f!1OqH9SFE8rIDL-nZ!g&b1Ci`;Yzyr*Sndf| z2A1o0kqyfQJ(q=IBw{12gCiGWKXs9^+!82{)YnebS2BTV>7N17Zj*&N9nQS4%#W)e z%l0|VQIzJXjUuP*ksd3{yvAg1VN_`Abo!)wv22%7SvQ0x3egP|zQcrF3fb0>y>Q2T z5Sw@;N6dGawZDglrhSm=C)4(JG0C$+CZ^q!!}S}j&CT`E)b@*f-RM{}H?Fpw2W<{S zTQWRIhU4}k;G=2CUHLJFVF&5VYVcujCaB}<;DLA9vU?cj8cfEu0X#VW3_V?;WQj6fRC z_`72S7J;#P3?uF>&-j;P1Q&j_myED(dVwJez{ft+$(HDO5bXVsZ&C#*9aLygrGp0P zwIFy_r-0XLZW&7Jd>JMUy7WAtp99JR`Vs2(njBDb5hPTADs{++mjic@01s-0v+8rB z6&DNetnCwYxY1rS?Hku`Zhp48wYjmiwPCqlzA%i{YX(t01A2njTvhKDxi9k_ zWqr9@9dgGU^+~9H8Y^S(YhCD&3*NpD(m@e^Rp`g)#chX|_n*effwyl~0EW9GLN>{s z1c}GS7DJ_@G;!_dD%dwayJ_8x!rn!AHBhc2;hBHn^YALd1;B1OOUF8OOu*47?&BQC zRK48>vz!+_uYky9moIadd#W^WZ>|Ps(Qe1l3qd)!5S$Bkn+_8=$Wg=>0uYCNkD_6b zj5B^@hA2vkIEuovn7h6XKf%nN;>WoG9j?A3hu!S0zzTHIM;M0ftOd2wYH<4e#Y;_2 zOL!S%?f_rIg`WF!^oHjO%7OIJ+TyLxIpmj@!G2(h+!dRzLoT9MUJpYoc_clGz}s-% V)!1#Hnmgy9(_w3^#6fW1^1mz~?X3U+ literal 0 HcmV?d00001 diff --git a/gam/gdata/apps/adminaudit/service.py~ b/gam/gdata/apps/adminaudit/service.py~ new file mode 100755 index 00000000000..04131598872 --- /dev/null +++ b/gam/gdata/apps/adminaudit/service.py~ @@ -0,0 +1,83 @@ +#!/usr/bin/python2.4 +# +# Copyright 2010 Google Inc. All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""AdminAuditService simplifies Admin Audit API calls. + +AdminAuditService extends gdata.apps.service.PropertyService to ease interaction with +the Google Apps Admin Audit API. +""" + +__author__ = 'Jay Lee ' + +import gdata.apps +import gdata.apps.service +import gdata.service +import json + + +class AdminAuditService(gdata.apps.service.PropertyService): + """Service extension for the Google Admin Audit API service.""" + + def __init__(self, email=None, password=None, domain=None, source=None, + server='www.googleapis.com', additional_headers=None, + **kwargs): + """Creates a client for the Admin Audit service. + + Args: + email: string (optional) The user's email address, used for + authentication. + password: string (optional) The user's password. + domain: string (optional) The Google Apps domain name. + source: string (optional) The name of the user's application. + server: string (optional) The name of the server to which a connection + will be opened. Default value: 'apps-apis.google.com'. + **kwargs: The other parameters to pass to gdata.service.GDataService + constructor. + """ + gdata.service.GDataService.__init__( + self, email=email, password=password, service='apps', source=source, + server=server, additional_headers=additional_headers, **kwargs) + self.ssl = True + self.port = 443 + self.domain = domain + + def retrieve_audit(self, customer_id, admin=None, event=None, start_date=None, end_date=None, max_results=None): + """Retrieves an audit + + """ + uri = '/apps/reporting/audit/v1/%s/207535951991' % customer_id + use_char = '?' + if admin != None: + uri += '%sactorEmail=%s' % (use_char, admin) + use_char = '&' + if event != None: + uri += '%seventName=%s' % (use_char, event) + use_char = '&' + if start_date != None: + uri += '%sstartTime=%s' % (use_char, start_date) + use_char = '&' + if end_date != None: + uri += '%sendTime=%s' % (use_char, end_date) + use_char = '&' + if max_results != None: + uri += '%smaxResults=%s' % (use_char, max_results) + print uri + try: + return self.Get(uri) + except gdata.service.RequestError, e: + raise gdata.apps.service.AppsForYourDomainException(e.args[0]) + + RetrieveAudit = retrieve_audit diff --git a/gam/gdata/apps/adminsettings/__init__.py b/gam/gdata/apps/adminsettings/__init__.py new file mode 100755 index 00000000000..d284c7cee33 --- /dev/null +++ b/gam/gdata/apps/adminsettings/__init__.py @@ -0,0 +1,16 @@ +#!/usr/bin/python +# +# Copyright (C) 2008 Google +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + diff --git a/gam/gdata/apps/adminsettings/__init__.pyc b/gam/gdata/apps/adminsettings/__init__.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fe0fde92e770b0758d9bc512c449e1d102e0d357 GIT binary patch literal 170 zcmZSn%*&-#x;-G70SXv_v;zphMM~6DTyVC`iTVv#rlaUxtV#z osU;k0VdPH$o0)n+Jd)e$MIRv?8cXLZ}SOmy12y$E?e?WrVatjb3w;=hxSKU25qoKo< zL{id}s_S>X-|tn`_TE zqLM!%d6Z15;D|Eg>fTXxZ%W;pR-5QMsh%kHwNhW*o#7~rg`=DzMY0@4$+o_pB<;xP zKoB??WP>CGh7*_Ig~3_%JpDo6)H1;11CZ-Jv{ z9DXf{O*V;)Ymt!@me*IBx378qM%F}|*$irO`fP@Q$4M6ObBXnktKY&7Z2f+iZtT>r zCfjk8_?WxxcP@Om^wCEbx;xoc5?`=sOD}xzUcDXonP2z8A$4Jmd~jPY;DGkWFl;mb zzLiD-RIXOlX7RVhMQ|5K@Do5akRfo8*3{PmqMZ{txBw!QG`@@%)dV1R4;($A?$4-n zP1PQct6FwMJwh4ix~9&c<&SDAuBkhq`-XtR{semj3MK&2@bpy!d5`9hj61 z3zkjW_LAhZj*^z2g-Lu#rzS$~fziD*B-^8#Ywnq>m&Rp}WgUdAOt+FagS>zo5Q0gK z?M0bRHs!00DHF^mLobC`ky;bk;}I;`OGug+e?4WREHoCoQc6HAxXodiQDk`Lk9%lv zElra&dmROoR-Y#6S4l6uDl+TZ!TnNd7KOv3N$T-pfen}cN(g?77CP(joqX7o9+U;Fm zHJd&m|Aim7dw$zIjbbv-NK6iQ)xOofhUf=Ump~>_q`Qf=;RaO113v;5w4WLsK|7d0 z`&mIlT5n}p_foyyN&>Ug7M-%xN;=@05L`pnG0-g$Sj5SYdSD)yh`6`Sb}tM}vzsPa z(n_MGt*jHhbA7q#Iz;)Vnwj6Wmaq|U`VmHuZ5O-F=216Pwm{%|$B~GvapXYybF{ku z=zke=0aZDei=|#fsU>NunfbnKAY}>xEFv>N0AngFXy76QfD@_|q~q%AlIa(**IQkP zD&T0(gfRrKOGVFOrMdFYC50YI3-7!-b=m+9RXwKfq5qs8n~vWzwc_{0y;P>iXy8=Re1w1wU6O6c9cw z;)kJh$$=W$xLWgqScfLtZaDKOqx-z$(WtL*3u}d2JW}ueqwExBabT8fQExkb@DOea?BoWS9=bV78uCexM^=}%B12;jjC{%Azz6pgvt7<@qoC9 zBmjWr;2KzVgeu2LQ{be+H2QThvgIbQ`bsZN>C+5hiei3VvL+C;Bwy|Q|UWolm>j4Mlb<3p=EDxBUwQ-0j{NU z;q(JID4XFP8RL^AiHsjF>adJc6#Xq3)o-POZbI7Y~oE7lor#nOL7*i9i(;E3b3iLsI^N_3E)Mx!H_P-|!) z5~|VbcHu(l^_8`~@j&hhI9XKby&K4_n*ifGH@y5jM5R6FcAu{vOZzDbo)$8D2j`y@ z;H<{Za+CX75Yi2O2IxAbc-Lyv`b5UcmiI&t^#nD)!mvxK-(H-y99cq75mTv7O5IA;Obh2H;uj zAB+1dlspeiH{6J&%XlR<&QQM@cJ>rV(8~idqNL(hF;Iqq>|2;-1hp28IGIFxss)D# zXND38=X?)b-rqbYp#RMc*#|VWRl1|EAYk%5(n7Qz#*x5_ULOwDlo6UdKC`{Oy%gGH z2~Gj@V}f{%EThTpmL<*K_~7D2`D`-EgSbuHMrI2|u4N3<**5(+aE(l4Ixw*;!#2Az zlk3W@I%evBA0mdIAY*%5D^cDn?ZxOrcT4Z zIE~{axF7fAYzb~TBcwPUL2OhNzDEtb43a8w{6*4E94>Bd!JpXd#VuNGa7++9jgw4w z{QEcsv8Fi9s9Jimf&O6_4xtt$J{t0jlmYU%f9OKQ<1{7Gwrg5&c3KRm4xg?$)?Lyo z8Jrt8RNRVs0V3=?j6)c`8<`A;2-a$gg~_4o;2F+{NWw-KQ|5#nzisqp7@7RAm8!U+ z463pEdaLWlCaPOHB6aC$>k(kJ)DAZ#h9f6%*iRI0$ibS}nfOM`(;dBld4fu`=K~Cs zAGt(w4g<;8O=9>~sMy~Vx!ml(D(6E)_`hOC=Sj`Z;^-xh)X_IBCCDfEDCw*tr*btC znjFA8(qbfD=nbDBzKf41gcOD59!@2Gjq8*(zehncCN&0Phs#(_<%G{>GpHIK%e2)$xS zF4`pO__wjSVV!)LWe3PL44XLs?}Fl^Aht60G+Ue5I}2@L=qd}X-g$2RDS-Gi&(K?| zqO{(5csLBhKpb|xdgr#p9UTWdZh3{Eblmd78Rcgs9vtj!ki@sra1j<_ zC!zxHW%AkD!UYPm?pwPJ^&>?l_Hm!1(YS%eHu-WZ%nV)qMH;+Le>QS!b;a7ANgrEA zz|gdwpEw;3cTq9|c35|D5$*j7Q}I3+O`Fn|YoL;fMJzhnZbvKHXG%T7B|u!M!EHYt zSbR@h3MslAqaSh;W#z4+azYiAN6N~ys60|uPL`E_T2xNDN|=n8Z(OCnq*aVtUH6Xg z9v3tr7RX7Dx^}QPUAte2vDsf2^rfr#59&_0Oj#4IJ-Ujq8mv%O&&^<;BX+wCxGZROWY1_-+ET(cJl8y8g9eOz+lH8wp+?N7A@6AU-5aKd2oqC#I0 zjv44z;gWtQkBmhJab>-T>A?Hj-U?e=&e*U9oZYyQuXGpYs(@w1d3^&nFC9PPOjtW! 
z9CQToq4yF;=*iWf%9T9nJIvV`P2vIgXDCo*aVc0b-jI1pwnz-ODpSOZC`llrg4_iT zYYS7TbS}lm%z?s`$1-=N4_C-M^0$oGq-pmduBkaa!E)z)e9GK16B_v5RVLS%TxW8C zwiUtS7z7kh$g7XAC%c3ib$ot&7N>c0)gMaE)n-vP_&euL{0NRR9~;pi`FKWZnqM7h;jWQYRit&c5%OC4d|EceaM;iw4` zgHsdO=LR8OF}wE7+y6_r0FVr+U4O2(cl+jPaojgr2&*l3W~(7hcKp zuzNoOAl~^1bh#5-hdr#$MR9%$``X>v|DvotK-QWqK@HZzLL=gsfuX{>?ddX4j;_)TMWhkJFV&rLFee!UORHEy`t`C-0`sn( zD<~HrzINr_g7-H1dgqZG_;m#a>R0T*>T>A(ppTOL(gIQO0)q8_hhcIy8^X(O5t$Jn ztf<_P_^1b=rG@M&Qz}|{ zPc&d`*Hr&GgIKqC&hYt&=L};?o-^Q;1ME@Qo6zZM(^<{h(#DRNeco6e|#KFU$ycVxuco7Z# z;A8z%B8=s@2ruRpzYwRY9FQx+nq(BYPA^pT@7O{vX$olUfv`$C=(`DFw>+oCVa^t4 ziVXv7tgl*L!^Yzj7}d*0=q z^(<=K^e77TXzI)B)FzgfJQ^*k{c=;F(#xr0oOz3O)k1cf zbHFLddy*eoW-ZBhRw~Q3l>w}QbayO+Ls~WoT^sjIB@Xbs&4mT>%MYL7no5Mx`>f>L za~RpOw?%#4kD_IFb=026Cr$$%gcfJPCa4zo;`=e)36@1KjswMI7Et7nkT(yqx(f2- zy_^)5=#Y+5ltWr9#t{INhfzZS5zXnin)L2{#zd6W3Ubn;iMvGA2QSi2^wAwQ{))+6 zCV#->51CLk9MFj$+#KlM=Qyr81Z;-(BsJ7x0t-kP)`Fg!dP`OHAb4=XaQsF!%2=_c4>-WAZtZRVFu>tTPd*dz-mGLb8BgCE2IvtQ6mh z7X|E9B$KttvDqWj^V4(ko0~p{zb{G3+^qagpmt_9|Di{1{{870v|_K5^KUGOt>a^~ z9Qb|@2cBt;3widr#Akvo;`1`v^~;vv|L{$@M4?=9UgA+u_lZ~NcHKR|+G)5>vXz5@ an$Xx~M^)b^;mCXU$+1Z&82lZd9Q!}`!)#gr literal 0 HcmV?d00001 diff --git a/gam/gdata/apps/adminsettings/service.py~ b/gam/gdata/apps/adminsettings/service.py~ new file mode 100755 index 00000000000..42bd965ef39 --- /dev/null +++ b/gam/gdata/apps/adminsettings/service.py~ @@ -0,0 +1,474 @@ +#!/usr/bin/python +# +# Copyright (C) 2008 Google, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Allow Google Apps domain administrators to set domain admin settings. + + AdminSettingsService: Set admin settings.""" + +__author__ = 'jlee@pbu.edu' + + +import gdata.apps +import gdata.apps.service +import gdata.service + + +API_VER='2.0' + +class AdminSettingsService(gdata.apps.service.PropertyService): + """Client for the Google Apps Admin Settings service.""" + + def _serviceUrl(self, setting_id, domain=None): + if domain is None: + domain = self.domain + return '/a/feeds/domain/%s/%s/%s' % (API_VER, domain, setting_id) + + def genericGet(self, location): + """Generic HTTP Get Wrapper + + Args: + location: relative uri to Get + + Returns: + A dict containing the result of the get operation.""" + + uri = self._serviceUrl(location) + try: + return self._GetProperties(uri) + except gdata.service.RequestError, e: + raise AppsForYourDomainException(e.args[0]) + + def GetDefaultLanguage(self): + """Gets Domain Default Language + + Args: + None + + Returns: + Default Language as a string. 
All possible values are listed at: + http://code.google.com/apis/apps/email_settings/developers_guide_protocol.html#GA_email_language_tags""" + + result = self.genericGet('general/defaultLanguage') + return result['defaultLanguage'] + + def UpdateDefaultLanguage(self, defaultLanguage): + """Updates Domain Default Language + + Args: + defaultLanguage: Domain Language to set + possible values are at: + http://code.google.com/apis/apps/email_settings/developers_guide_protocol.html#GA_email_language_tags + + Returns: + A dict containing the result of the put operation""" + + uri = self._serviceUrl('general/defaultLanguage') + properties = {'defaultLanguage': defaultLanguage} + return self._PutProperties(uri, properties) + + def GetOrganizationName(self): + """Gets Domain Default Language + + Args: + None + + Returns: + Organization Name as a string.""" + + result = self.genericGet('general/organizationName') + return result['organizationName'] + + + def UpdateOrganizationName(self, organizationName): + """Updates Organization Name + + Args: + organizationName: Name of organization + + Returns: + A dict containing the result of the put operation""" + + uri = self._serviceUrl('general/organizationName') + properties = {'organizationName': organizationName} + return self._PutProperties(uri, properties) + + def GetMaximumNumberOfUsers(self): + """Gets Maximum Number of Users Allowed + + Args: + None + + Returns: An integer, the maximum number of users""" + + result = self.genericGet('general/maximumNumberOfUsers') + return int(result['maximumNumberOfUsers']) + + def GetCurrentNumberOfUsers(self): + """Gets Current Number of Users + + Args: + None + + Returns: An integer, the current number of users""" + + result = self.genericGet('general/currentNumberOfUsers') + return int(result['currentNumberOfUsers']) + + def IsDomainVerified(self): + """Is the domain verified + + Args: + None + + Returns: Boolean, is domain verified""" + + result = self.genericGet('accountInformation/isVerified') + if result['isVerified'] == 'true': + return True + else: + return False + + def GetSupportPIN(self): + """Gets Support PIN + + Args: + None + + Returns: A string, the Support PIN""" + + result = self.genericGet('accountInformation/supportPIN') + return result['supportPIN'] + + def GetEdition(self): + """Gets Google Apps Domain Edition + + Args: + None + + Returns: A string, the domain's edition (premier, education, partner)""" + + result = self.genericGet('accountInformation/edition') + return result['edition'] + + def GetCustomerPIN(self): + """Gets Customer PIN + + Args: + None + + Returns: A string, the customer PIN""" + + result = self.genericGet('accountInformation/customerPIN') + return result['customerPIN'] + + def GetCreationTime(self): + """Gets Domain Creation Time + + Args: + None + + Returns: A string, the domain's creation time""" + + result = self.genericGet('accountInformation/creationTime') + return result['creationTime'] + + def GetCountryCode(self): + """Gets Domain Country Code + + Args: + None + + Returns: A string, the domain's country code. 
Possible values at: + http://www.iso.org/iso/country_codes/iso_3166_code_lists/english_country_names_and_code_elements.htm""" + + result = self.genericGet('accountInformation/countryCode') + return result['countryCode'] + + def GetAdminSecondaryEmail(self): + """Gets Domain Admin Secondary Email Address + + Args: + None + + Returns: A string, the secondary email address for domain admin""" + + result = self.genericGet('accountInformation/adminSecondaryEmail') + return result['adminSecondaryEmail'] + + def UpdateAdminSecondaryEmail(self, adminSecondaryEmail): + """Gets Domain Creation Time + + Args: + adminSecondaryEmail: string, secondary email address of admin + + Returns: A dict containing the result of the put operation""" + + uri = self._serviceUrl('accountInformation/adminSecondaryEmail') + properties = {'adminSecondaryEmail': adminSecondaryEmail} + return self._PutProperties(uri, properties) + + def GetDomainLogo(self): + """Gets Domain Logo + + This function does not make use of the Google Apps Admin Settings API, + it does an HTTP Get of a url specific to the Google Apps domain. It is + included for completeness sake. + + Args: + None + + Returns: binary image file""" + + import urllib + url = 'http://www.google.com/a/cpanel/'+self.domain+'/images/logo.gif' + response = urllib.urlopen(url) + return response.read() + + def UpdateDomainLogo(self, logoImage): + """Update Domain's Custom Logo + + Args: + logoImage: binary image data + + Returns: A dict containing the result of the put operation""" + + from base64 import b64encode + uri = self._serviceUrl('appearance/customLogo') + properties = {'logoImage': b64encode(logoImage)} + return self._PutProperties(uri, properties) + + def GetCNAMEVerificationStatus(self): + """Gets Domain CNAME Verification Status + + Args: + None + + Returns: A dict {recordName, verified, verifiedMethod}""" + + return self.genericGet('verification/cname') + + def UpdateCNAMEVerificationStatus(self, verified): + """Updates CNAME Verification Status + + Args: + verified: boolean, True will retry verification process + + Returns: A dict containing the result of the put operation""" + + uri = self._serviceUrl('verification/cname') + properties = self.GetCNAMEVerificationStatus() + properties['verified'] = verified + return self._PutProperties(uri, properties) + + def GetMXVerificationStatus(self): + """Gets Domain MX Verification Status + + Args: + None + + Returns: A dict {verified, verifiedMethod}""" + + return self.genericGet('verification/mx') + + def UpdateMXVerificationStatus(self, verified): + """Updates MX Verification Status + + Args: + verified: boolean, True will retry verification process + + Returns: A dict containing the result of the put operation""" + + uri = self._serviceUrl('verification/mx') + properties = self.GetMXVerificationStatus() + properties['verified'] = verified + return self._PutProperties(uri, properties) + + def GetSSOSettings(self): + """Gets Domain Single Sign-On Settings + + Args: + None + + Returns: A dict {samlSignonUri, samlLogoutUri, changePasswordUri, enableSSO, ssoWhitelist, useDomainSpecificIssuer}""" + + return self.genericGet('sso/general') + + def UpdateSSOSettings(self, enableSSO=None, samlSignonUri=None, + samlLogoutUri=None, changePasswordUri=None, + ssoWhitelist=None, useDomainSpecificIssuer=None): + """Update SSO Settings. 
+ + Args: + enableSSO: boolean, SSO Master on/off switch + samlSignonUri: string, SSO Login Page + samlLogoutUri: string, SSO Logout Page + samlPasswordUri: string, SSO Password Change Page + ssoWhitelist: string, Range of IP Addresses which will see SSO + useDomainSpecificIssuer: boolean, Include Google Apps Domain in Issuer + + Returns: + A dict containing the result of the update operation. + """ + uri = self._serviceUrl('sso/general') + + #Get current settings, replace Nones with '' + properties = self.GetSSOSettings() + if properties['samlSignonUri'] == None: + properties['samlSignonUri'] = '' + if properties['samlLogoutUri'] == None: + properties['samlLogoutUri'] = '' + if properties['changePasswordUri'] == None: + properties['changePasswordUri'] = '' + if properties['ssoWhitelist'] == None: + properties['ssoWhitelist'] = '' + + #update only the values we were passed + if enableSSO != None: + properties['enableSSO'] = gdata.apps.service._bool2str(enableSSO) + if samlSignonUri != None: + properties['samlSignonUri'] = samlSignonUri + if samlLogoutUri != None: + properties['samlLogoutUri'] = samlLogoutUri + if changePasswordUri != None: + properties['changePasswordUri'] = changePasswordUri + if ssoWhitelist != None: + properties['ssoWhitelist'] = ssoWhitelist + if useDomainSpecificIssuer != None: + properties['useDomainSpecificIssuer'] = gdata.apps.service._bool2str(useDomainSpecificIssuer) + + return self._PutProperties(uri, properties) + + def GetSSOKey(self): + """Gets Domain Single Sign-On Signing Key + + Args: + None + + Returns: A dict {modulus, exponent, algorithm, format}""" + + return self.genericGet('sso/signingkey') + + def UpdateSSOKey(self, signingKey): + """Update SSO Settings. + + Args: + signingKey: binary, public key to be uploaded + + Returns: + A dict containing the result of the update operation.""" + + from base64 import b64encode + uri = self._serviceUrl('sso/signingkey') + properties = {'signingKey': b64encode(signingKey)} + return self._PutProperties(uri, properties) + + def IsUserMigrationEnabled(self): + """Is User Migration Enabled + + Args: + None + + Returns: + boolean, is user migration enabled""" + + result = self.genericGet('email/migration') + if result['enableUserMigration'] == 'true': + return True + else: + return False + + def UpdateUserMigrationStatus(self, enableUserMigration): + """Update User Migration Status + + Args: + enableUserMigration: boolean, user migration enable/disable + + Returns: + A dict containing the result of the update operation.""" + + uri = self._serviceUrl('email/migration') + properties = {'enableUserMigration': enableUserMigration} + return self._PutProperties(uri, properties) + + def GetOutboundGatewaySettings(self): + """Get Outbound Gateway Settings + + Args: + None + + Returns: + A dict {smartHost, smtpMode}""" + + uri = self._serviceUrl('email/gateway') + try: + return self._GetProperties(uri) + except gdata.service.RequestError, e: + raise AppsForYourDomainException(e.args[0]) + except TypeError: + #if no outbound gateway is set, we get a TypeError, + #catch it and return nothing... 
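+      #(both keys are returned as None here so callers such as
+      #UpdateOutboundGatewaySettings below still find a 'smartHost' key
+      #even when no gateway has ever been configured)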
+ return {'smartHost': None, 'smtpMode': None} + + def UpdateOutboundGatewaySettings(self, smartHost=None, smtpMode=None): + """Update Outbound Gateway Settings + + Args: + smartHost: string, ip address or hostname of outbound gateway + smtpMode: string, SMTP or SMTP_TLS + + Returns: + A dict containing the result of the update operation.""" + + uri = self._serviceUrl('email/gateway') + + #Get current settings, replace Nones with '' + properties = self.GetOutboundGatewaySettings() + if properties['smartHost'] == None: + properties['smartHost'] = '' + try: + if properties['smtpMode'] == None: + properties['smtpMode'] = '' + except KeyError: + properties['smtpMode'] = '' + + #If we were passed new values for smartHost or smtpMode, update them + if smartHost != None: + properties['smartHost'] = smartHost + if smtpMode != None: + properties['smtpMode'] = smtpMode + return self._PutProperties(uri, properties) + + def AddEmailRoute(self, routeDestination, routeRewriteTo, routeEnabled, bounceNotifications, accountHandling): + """Adds Domain Email Route + + Args: + routeDestination: string, destination ip address or hostname + routeRewriteTo: boolean, rewrite smtp envelop To: + routeEnabled: boolean, enable disable email routing + bounceNotifications: boolean, send bound notificiations to sender + accountHandling: string, which to route, "allAccounts", "provisionedAccounts", "unknownAccounts" + + Returns: + A dict containing the result of the update operation.""" + + uri = self._serviceUrl('emailrouting') + properties = {} + properties['routeDestination'] = routeDestination + properties['routeRewriteTo'] = gdata.apps.service._bool2str(routeRewriteTo) + properties['routeEnabled'] = gdata.apps.service._bool2str(routeEnabled) + properties['bounceNotifications'] = gdata.apps.service._bool2str(bounceNotifications) + properties['accountHandling'] = accountHandling + return self._PostProperties(uri, properties) diff --git a/gam/gdata/apps/audit/__init__.py b/gam/gdata/apps/audit/__init__.py new file mode 100755 index 00000000000..8b137891791 --- /dev/null +++ b/gam/gdata/apps/audit/__init__.py @@ -0,0 +1 @@ + diff --git a/gam/gdata/apps/audit/__init__.pyc b/gam/gdata/apps/audit/__init__.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6a2bb13195bd6298c5c44e1aa56ad2cbb5bf183b GIT binary patch literal 162 zcmZSn%*&-#x;-G70SXv_v;zphMM~6DTyVC`iTVv#rlb*DVZhu g@$s2?nI-Y@dO(F7K=W*J^HWN5Qtd#t76UN@0N08n!T3ID%}D`kzZ)E3Hu+A1i&sQi)&i>g&pTP0QC^OOpw)Yi1L z;eMv?epc?Mm7Z36b84@w_9|)zqs*xLO5IcHovnGkzgeB-WSB**hGBf*+=}C7sGZez zJ9YfHTlPEyCjj@R)6<8)G;IKF4Rl}g2NR#~^HllOv#zUFLdV|xWr zb0uBC@Oz=wueP^4E86b_|HV(WfLc>VvHkK@9W`QKuN(I4AE_&sIW)Y6EByls4Y^=1 zC?SM1@>@{%i)w3%qQC|zlQT-s4F5uc_aT{CZk^rcls_#uH$&9 zo*f93;#+A|;N7S#Ji6`{VA*Iyx4j`3SmJ4N-SO$ft2 zq03H*6$%4i`3gKpI#x*0BaJ&v@Q z!Hq>um37jP>Tt(Y@RVv}0Ev3wyIh7e5=T`$C@In(;I4KTb!lxsNVbn^H{*jSj6E!~ z>9sCjS$XND%k3kx8%LMZq~TOwe4*AP3TqxPn=L;VB>K%`xO(<{HwnLr$CN8kg$4O{ zrr6;6r*UybBJJ|`=ZE?z*E}{Z#|hw1j@sFQkrTxOg(H-mz%u%tFr5I(r5$l`DMP&1H{#T6Bpkp5I;~Q?uE2rt zu$wWIcP9yQ8<5pfIqlw?c$y3_Xlgp5AE*V}xQJ(qjPeqYF;l1%8U)WGF0NdV@ps&G zkztS5y+YyrdBMbjU;y@_q^=Vh3u?ctl2-_eW7JNm>-MzrVH#bgRq|8X9X`*Z5w>S8 ze>k2|$zk4D?r+4y@vKTdRL8IczstK+)b)2i%-_cYXTszU)Nw^6ALYH~(d$9p3lHpN z!8^~11-#sE`X}u8Mxq70o}=LhLZ=l+0X)Dm4cLK~_&IRWxRW$=oyRz}U13mmJ(G^2 zxUXvu%7hzVb`EyqRA-YN1Yt-Ji#Y<0c^(|c*D1`0KvL!#GYGJs=!Op7(}L>keLHG4 zijMp`&*k9Ttm(bt$O6#Q38L&Zy9AaOj_`B&&YjD?@4WHGwN~o_WzzD@GUk4_6C~XQ zZ|i0dopQ3&c!|Mu&d7WZoiWrN_PQN<(LN78BS05daYT|JpNf)QVbkUaL!fvmPgx7n+4$28-~ 
zuObyUo5U^I!00mk#M?3H!*(w{oR&^guL&a!6HAnyG}5GmYDFrGby8^Acebrgcw91( z%1L|JR=lLK3!V6m^gMYh(mbJ^eDWG(H=o^43Du<7YMHfi>2HKq(sta1+}6F8wl0Tf z^CnKcq3^;`mwTH0H`Z2fuDSKM)^ERKp22f|c-A=b9cu<}>-Tgh1HXRv_U-zewav}d zTWiuiNBL;?a~MpZH899o)KdEWmFs<8mHfn|TddMHTQ|yO_E}TIF@~+9A!*KyozSnI zIWkY8E#KFG)}es+a zm^1w*3U&TT@$L(SMfmuc!eX&k@@Wxu_J3i9WGP+$Pewe(|AYDoAqNKpMYI$<{UiEb zxTOs^9&E>e4^6v4KodtZvL;fFKtJc>DIfGvtf6I1G-x0IIA+wbj4wp^hL=e2y2fmf z;n~;5A0Hw3LhwYXb?m7}?QUaei&Y9qj+^Lsa_S}2o7(j2Hr+>E$_1#(lZ6IVMj-MM z+UDVANNpf#PeENzk-nAG(YaiMCDGuIqh&Q^ifXT<_NLT6IMmNn0i?fRrb*}a5CTrA zXQ1m}6jW4DKLMZHtnzrIe?xIohnlp1MDM3kd*87hqkWJhCrCk!NWU^8*QFc77GU6g z0S{jXKO%VS`np3{EM=r^$P%x|W*74bLdRnS@e#r{L`H}LC8uEVLW5Vg;^;vJGK2=f zw>+`Dz%-*?v868P6YTU_lEjJmGP*Gl@p_!R1CDu9(h+Ni4c%r;BC(@~5UCejxHQ%- zUlRz*n)Kv=NIL_<16B89@es8(j+lS8P%4}&0FZfv*#+b_It>AmKrBie0XRO9!x5GR zh(u61ipZZ2BeFDrNG5kUU-}V>9+xLUg@gr(UZW9rz!QZe6HmTIL>P`C?hi9XcD#J7 zAhJ{#8sZ#d&C%*90?;-B$o>G7K02F2jLb09#6h}Lw)+ALT6*_eES6F9VT7l&=Y`zM zY{fO1Ad^{^hPa|tE9*CV;>oc7OLUPlrONP2#Zuu3BIa?LuzzDkUB88>#B{`Ict`~f z113Y$Xry-nU~GstGE9a$=K!IDgit_e*QjN4AZIjS&hmb!|I{FZ8|lNCTf@e!2fK*7 zVT2j7mOg~hAZ?ecEQ^EF5gx#j-^|$C{ja8*!UmWy<_vx<3Gq) z`UuM>oND(Hc6{1%>|SAK31KFCdg(VTwKx$ve?gaFol%xc^e}$Fl(HKVJ+wq~J58Ggh zxi1(sAP>di&>&q}ok6bafqG$c4}y{|L|#aH2|nw9t4M%n^ZBx-Wq20yR4-fxuiEh> z_CoBIznB|Ep$F)f<#YNnYCLpEDw6JB8qPRg%DU&_{pi9%CT~x_tUPDP<&7-m6bnHv zA?*0NZcz(YJubDFb%H#|n*Cg6$az{lE<3G`>3AVhkNObfrA*@<1ZEeX#+XB@gKH*7 z+4B0UdfRVOsrXECgs+&kha)dxXwNiudz0oFW;?U?Po8^dYh(S=G>V zV8rAyvJkqFO>Hf*FCVk)rhY_0xXi*xzqZ%dN2c-y!Xo58T_$(dt8*NRCc&kH78_@S zCd5=ik|hmRr=qUlyOOlMopiL#HPMbpuF0c{bI_eIF#bLW|##iBs!#v#mdxqqU=Bcjy5J^>J*)U+F;h?qQ~9#-Gu;?`fYY5=z$ zr-_VK|A(ruS;5CltCr=(ME}>R(ny-6ds@<>S z(S4r93X5;E_zsJ!EchA1{XUCVS*)^n4Moo_%H1RWGZ+`!RrKESH$C{iQCylUpXGNB zc)}~?#qv4)J6k?eKKJx<{J$zO&Kh3GMBsJs7dHt+!1UR+m+F_Vy128=lFMD;-``l0 zK{@v7R`LWdzh2qutj7H=YAh3>46_XJ(WXrZ%bF)E7WqPU-KzH$GA~KQ%@k*fXN&UZ HLB;<8G3`7! literal 0 HcmV?d00001 diff --git a/gam/gdata/apps/emailsettings/__init__.py b/gam/gdata/apps/emailsettings/__init__.py new file mode 100755 index 00000000000..275c6a0b219 --- /dev/null +++ b/gam/gdata/apps/emailsettings/__init__.py @@ -0,0 +1,15 @@ +#!/usr/bin/python +# +# Copyright (C) 2008 Google +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
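
The admin-settings hunk earlier in this commit adds outbound-gateway and email-routing methods without showing a call site. Below is a minimal usage sketch, assuming these methods belong to the stock gdata AdminSettingsService (gdata.apps.adminsettings.service) with the usual email/password/domain constructor and ProgrammaticLogin() — the class header is not part of this diff — and using placeholder credentials, hostnames and domain.

    # Hypothetical usage of the methods added above (Python 2, matching the
    # vendored gdata client). The class name and constructor are assumptions;
    # only the method bodies appear in this patch.
    import gdata.apps.adminsettings.service

    service = gdata.apps.adminsettings.service.AdminSettingsService(
        email='admin@example.com',   # placeholder credentials
        password='secret',
        domain='example.com')
    service.ProgrammaticLogin()

    # Relay outbound mail through a smart host over TLS.
    service.UpdateOutboundGatewaySettings(smartHost='smtp.example.com',
                                          smtpMode='SMTP_TLS')

    # Route mail for unknown accounts to a legacy server, keeping bounce
    # notifications for the sender.
    service.AddEmailRoute(routeDestination='legacy-mail.example.com',
                          routeRewriteTo=True,
                          routeEnabled=True,
                          bounceNotifications=True,
                          accountHandling='unknownAccounts')
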
diff --git a/gam/gdata/apps/emailsettings/__init__.pyc b/gam/gdata/apps/emailsettings/__init__.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5513a8cb391fecdf3cf6c61e290b28b24bf26ef6 GIT binary patch literal 170 zcmZSn%*&-#x;-G70SXv_v;zphMM~6DTyVC`iTVv#rmnaiJ3XY osU;cCR6+p-AyVk|p^gM$&N*n~PXv?5w>2;cP^Uw5?rzA6E}LVp*uw%ra>yabF~}jC|6mt61W1r$fNXxh_o{k&h7?Uv z61^NwvAgQk*ZY32ilhHCJ^5zuCu=R^en#>42EO)76p1mLC>v(8VUiJ(jGFd{*&OxH zW3}gVwdbbUY?=hE<0e72GEbrMvo$Rk;{vDb5 zdW@M-WcG$u>=b#gYv)_}UiJ(8tXg95-TB`x$ME;ND=TZ|I2Rw@{N&cBD`gYU%PY56 zHde~>tiF9ay0diac6kvme26{@@7{@4?|v5DT`jMm@~FkOP%l3-C_lv4E;fuwK`bB; zB`Ea@srDFz8zZrR(#>;3Cm)0~d;oO-c9P$7=JB-9QGuK0G1x!|%5j9Id;nP$U_t;u zUwxgF*J(%G%^8zS$x3ETGA$1v^^81RGRdqwT=sqI%e!bEgOOe`$t8igVv@`9aMdI) z$-_01T#<*DO>$Kp(EXY`yn1NBm(61^=W8ZhpL>-!d>6&dc9wR^U^_2@ayK1dHX#JL zL>LJiC44jA8bddJ&#^nc_H7ggkP%}ZoHxZQ=D?Wy4fDmQ>4+$mBWyiD*)ZE<=Gyw) z2s*6VH&8Cb3)^X$*ahKQ&$J7_X{+Bka+k2yYTikOom2{x%hqm1pRR;+T#o?Rw7p$U z;?0c}Wr^?|g`*Sir6GB94qq*7h^K{nl$?bJS+VtKVL9LLwDTBC+KKnBzdQfld)K>< z%H6zk-4?B2?wxNe>?Cm+FHkCmR_!qa_zM^|a_{$w_HW_UlKo9%NX(-yGRF;f8Ie##bR)q1NGtDkYQT(icU4^Qjlb=GHB(U5^4$RMtG=5ZLeJh z`L?{ddGc-wZI$I6ou}m9Wh!BYkUFoNMG>v#wp=R+US_GCBV0Jaf+!8AQIy14ugHYV z-99L4Up!G2(}JCMFrTG*Gn1Da(+wdh`LH7Cdw3~D7Lb84U33>_5k#3{!vJvB=zsM6pQAB1<2pweC-nw>GRkB}*q#vQZ{#Qgj)w z5f;Y!FVt#Zm;@_6}YwiEkz@H(Fc(`J+PiKf|IABYW0l#(Z&6bl|Lc zFlvf-#E`&zj+hT%UTAi-;X7hv#D$UuxB=SGqPjdl!KbNpbh_EV9Ow;_s7hAbRDO#a^!J^gWIQYKQ#yErQy6bz;V!q{~H`&!VfT^ zaPTV>?#;%)M3WwFQsLlsDBS8G+yxJJLE+$!DBMotU`iG=r48^*wBgSO2blH)Oe-85 z7lr%DAl!_Hn^8C@FJNqzQ;1=rb7%h-qEQ*9?W|pNse7Lhr#6~F^e zZPdxjXg7YC9@8W5mGzMob)WlwDZM>5P7>@R{59fP}q}S{* z{Eqe`-)ML9UOS2Q;^IMs4Zx&h@dLx7h2#2ZVjB=>sDE6vcC&{F3F=+7vetR1lP%+7 zum@{M4nLdFI^Z_<<06Sj9i@p2H{v9Lub_BV7~d0)hwJQih)~Dw4$dZ3hi7vgNFv*f z_XhiPpk)hqvYn*XQQ{|nDsK*1!Dn!BXdIP)#4{zoLaN~sI#XV2@~7_rI&07;+BL}G%P7$)gczJb~$i-Jx z@C{;FS_M(-K+5VI9cD4-AvYwBn|Q#oDSE-W8;uU6bF>8pe;1?<8LrJy%l9ph3?Pym z{t6pj=b&`|>JnI9L&edeR&q?j_C-Ze@e=AC@$2#+@UGtRd2PoLc~;QV=TcdX>mf`g zj;X8gd?g-BNc6vWw}seIMXIXrbMABzg?a6AqltK8`iSrAu8x2GmvH*rA}tw^ROUCr z%dq)viuoNElM5pVlgr=(PosE(QNP1Pk9{K`sx!!9rx3tkak2@O{31 z3x&km9(^K6bp)NI&`|3ro2>&4OLmE@@)xMI+<=*(+}0pb);ySIU2lkY^JXkOBw` zYJ?#g`>_ERHIN)xLdCFhLUIJL!I&9PKO>@kBn9ZzPd{D4VeP-7xYtDrCk;MJw~+qo zRw=HNIHx`B>Z}F5h?@3EJMmV#=0;U2AP=;OQwE&VQRL~x-OWFoPN9+BPq$haT2f>> z-SLtvmN8oE)gr~=bPFTovlQB^6>%JGeD?9jXwU>2V#j6!B1`_@t}6qU;ANOc&Iyk+ zkKz$Td$eaG^k|s2+>rPYwa&Il(pVx%4Q|Aci526__6Dq&W1|``_yp#O;+$t>C9x~D zxr<&q^;XGMA8o>1W6g-gEoSBxgC+S(gIvNK1o1XPHt)WJ)t{7<`06C!6S$Un+Wrng z;kMsEov^Sjs^)$kVT{DKZ4|vUyn${COXpUTg)-hde7(tny6ucSHu{Ci9Up061MK}H z>eTj3DGN`*`XFuiH8b*RpOM$@uKf;7NA2L~tK2Y0*MW{trx4lIbFC_^2poY3d^ci=?xS|v9p9KeLS5xV^g@4vDfnMzv1O5jy`oBbv68m`(}SlAk$G8%w>K( z4hy|$rd0Fpf8a?Sl?IZSr{Ae)u52&v_IDe82V;Ld`=cSn{%&DC|2eQhvhsi@_H9%Q zTUj}#I_!qQzWP%u7|$U$>|ZdBGE#8s&eGbc)XqT1r&BxZ`n=6qcpG~Xp+Da5QsE|6 zYmWC>ds;NTihmOQzwuU`guMPte9g%4Vce3lzF!L7=D8tw`%|~3UvR5(3h`_L9Day_ zeknFcQeuP7(-?(4@9&TLnh2mA(+RRaLoqDB6XGRL9_1zDL}I{8#>oZ*8MV$!#uG9? 
z$ok-V3y)_W4GyB&AOQ7`SQPHB2jQ9$wgIkx;-YYW$J`O;>mRXbgC4P<0Y@y~`zdc4 za94nxAh`JH#EZppy`J%0c`99VsPoh5o2ok!-rc-oQ>3|gE*}KdxrAp>@AUW<59m_( zi@_nltvsnt1SA{KDs7Z^+K>7Psahl|$pwENKq&hAzT5od1kX5l%V=nX&wM&752TX} z!<<&!!zCoQD-K0p3kk1|B)lv$aoqxju6X3eWnO~)#3w_X!5EtF9!0VQNy|oz(s55o z&T)b_KF81XJ9n-xFTW9Npu=`nApMcVj{@vYr}JaL*CUz}czyerP8}xX_}ncQPCXyo zVjm>ta3o+6{`dnoy^WheneC;ay0fMs}~`N%D@4`6NB) z8SgF_QJh<4K6$0`5=P)Iq^H}_>YU^snHLX_hziw(rq8CPR&I`Puv%Ei#e}^4Bc~;P z6~gm!JW!JXJv|{a2tg&q>8X2`K_#7GHQ8PRj8oVDDyq$;en>f4CHeJW;G*748Svs3 z&}G;PoanOA-&bo0Y_`*haUBRr2Vt4x4|RK*H>*0;)n`=+S@(lhr?=0S9VHyA=Bt_S z8`#)z_UjK(ca{WZf54+G!$a|7v^yXO}y*010^ENbckDhtGgVxzncg9S-Z_Md6>6W%big> zu$&2Y)aFpzG87D^4$r*BJ!Tp(f|Z_x(XqDtNEwqCTQG+8B?ZB0*?c$=OsB^U4Vxxs zGBu)8h4i^xG?`zANJh1hvt)ZNO6j!haKg*I&xPyl4>5LiJma@&k7a`=P!U#GS-}&~ zr<0P3l(lg&=aViKAnlV5h@5C#J$*Sj!r&3G3+OmeDdBF<&)tP z|K7yczJ;Q1OGG{)mbln>?PXkGWs8WWT$W=edk*7qa5z>T1;-KuE}%%2lQ~KkMAMUl zE~gvjc*z_*$~{6vJ9*H~J3ElyGZI!Ix!j&#z=g^_>3Iqw_bW9&@F#c;&yfCv(}mM?|QT_+gKc z(kESKc7DvT+ND40=c-Zi2CDn|R^87Ob!V+!70Dh2G09nTDwzGFdt8b1&+*b1X}ya+ zE=dAhYsB5#4flwvf1}}J*Bs|1ucUMDlQU|^9@`u+V`5&t;mZN(0IvK@?$23smMd02UD7#Sgc_&vT{B!sfzlHc* z2RhZG_xRG2L&1VT`Rz$}x5|3-NGXv2C*Wwc`9n^U@9L#>%*P&n0isTL6G?XT52UcD0pMG>zCMUe!#QM8vQ zI7Y@RbwU&+c`J(K-k;tFx`f)0ck{wmSbUSk2P}S<#S)7Z7PnY@m&F|xt1Q-7{6336 zVDU#R{+PuUixvwW;fLESauj_}N1k5Lzv_exj5&ej(MGe;9GPlP%-}OM0qjlSGyBca z#?)23&&*z#y)rR|&kW0E<7yyJgq_AyL$5j(7{{R#xiN62!>-(#oac>6;WWAF*{_>N z;K2XOlYE3$yPA+U8$;?T<||AH`kGL@1uE>2&RCyo<*uT;`u}KMhpe_(L+J~`rts7U d%0=JifAd)~N^_(+D*yL1GBtu{;8i}O{}1NpR&oFU literal 0 HcmV?d00001 diff --git a/gam/gdata/apps/emailsettings/service.py~ b/gam/gdata/apps/emailsettings/service.py~ new file mode 100755 index 00000000000..38197d03475 --- /dev/null +++ b/gam/gdata/apps/emailsettings/service.py~ @@ -0,0 +1,394 @@ +#!/usr/bin/python +# +# Copyright (C) 2008 Google, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Allow Google Apps domain administrators to set users' email settings. + + EmailSettingsService: Set various email settings. +""" + +__author__ = 'google-apps-apis@googlegroups.com' + + +import gdata.apps +import gdata.apps.service +import gdata.service + +import urllib + +API_VER='2.0' +# Forwarding and POP3 options +KEEP='KEEP' +ARCHIVE='ARCHIVE' +DELETE='DELETE' +ALL_MAIL='ALL_MAIL' +MAIL_FROM_NOW_ON='MAIL_FROM_NOW_ON' + + +class EmailSettingsService(gdata.apps.service.PropertyService): + """Client for the Google Apps Email Settings service.""" + + def _serviceUrl(self, setting_id, username, domain=None): + if domain is None: + domain = self.domain + return '/a/feeds/emailsettings/%s/%s/%s/%s' % (API_VER, domain, username, + setting_id) + + def CreateLabel(self, username, label): + """Create a label. + + Args: + username: User to create label for. + label: Label to create. + + Returns: + A dict containing the result of the create operation. + """ + uri = self._serviceUrl('label', username) + properties = {'label': label} + return self._PostProperties(uri, properties) + + def DeleteLabel(self, username, label): + """Delete a label. 
+ + Args: + username: User to delete label for. + label: Label to delete. + + Returns: + Nothing + """ + label = urllib.quote_plus(label, '') + uri = self._serviceUrl('label/%s' % label, username) + return self.Delete(uri) + + def GetLabels(self, username): + """Get labels for a user. + + Args: + username: User to retrieve labels for. + + Returns: + A list of labels + """ + uri = self._serviceUrl('label', username) + return self._GetPropertiesList(uri) + + def CreateFilter(self, username, from_=None, to=None, subject=None, + has_the_word=None, does_not_have_the_word=None, + has_attachment=None, label=None, should_mark_as_read=None, + should_archive=None, should_star=None, forward_to=None, + should_trash=None, should_not_spam=None): + """Create a filter. + + Args: + username: User to create filter for. + from_: Filter from string. + to: Filter to string. + subject: Filter subject. + has_the_word: Words to filter in. + does_not_have_the_word: Words to filter out. + has_attachment: Boolean for message having attachment. + label: Label to apply. + should_mark_as_read: Boolean for marking message as read. + should_archive: Boolean for archiving message. + should_star: Boolean for starring message + forward_to: string email address to forward message to + should_trash: Boolean for trashing message + should_not_spam: Boolean for never sending message to spam + + Returns: + A dict containing the result of the create operation. + """ + uri = self._serviceUrl('filter', username) + properties = {} + if from_ != None: + properties['from'] = from_ + if to != None: + properties['to'] = to + if subject != None: + properties['subject'] = subject + if has_the_word != None: + properties['hasTheWord'] = has_the_word + if does_not_have_the_word != None: + properties['doesNotHaveTheWord'] = does_not_have_the_word + if has_attachment != None: + properties['hasAttachment'] = gdata.apps.service._bool2str(has_attachment) + if label != None: + properties['label'] = label + if should_mark_as_read != None: + properties['shouldMarkAsRead'] = gdata.apps.service._bool2str(should_mark_as_read) + if should_archive != None: + properties['shouldArchive'] = gdata.apps.service._bool2str(should_archive) + if should_star != None: + properties['shouldStar'] = gdata.apps.service._bool2str(should_star) + if forward_to != None: + properties['forwardTo'] = forward_to + if should_trash != None: + properties['shouldTrash'] = gdata.apps.service._bool2str(should_trash) + if should_not_spam != None: + properties['neverSpam'] = gdata.apps.service._bool2str(should_not_spam) + return self._PostProperties(uri, properties) + + def CreateSendAsAlias(self, username, name, address, reply_to=None, + make_default=None): + """Create alias to send mail as. + + Args: + username: User to create alias for. + name: Name of alias. + address: Email address to send from. + reply_to: Email address to reply to. + make_default: Boolean for whether this is the new default sending alias. + + Returns: + A dict containing the result of the create operation. + """ + uri = self._serviceUrl('sendas', username) + properties = {} + properties['name'] = name + properties['address'] = address + properties['replyTo'] = reply_to + properties['makeDefault'] = gdata.apps.service._bool2str(make_default) + return self._PostProperties(uri, properties) + + def GetSendAsAlias(self, username): + """Retrieve send as aliases for a user. + + Args: + username: User to retrieve send as aliases for + + Return: + Alist containg the user's send as aliases. 
+ """ + uri = self._serviceUrl('sendas', username) + return self._GetPropertiesList(uri) + + def UpdateWebClipSettings(self, username, enable): + """Update WebClip Settings + + Args: + username: User to update forwarding for. + enable: Boolean whether to enable Web Clip. + Returns: + A dict containing the result of the update operation. + """ + uri = self._serviceUrl('webclip', username) + properties = {} + properties['enable'] = gdata.apps.service._bool2str(enable) + return self._PutProperties(uri, properties) + + def UpdateForwarding(self, username, enable, forward_to=None, action=None): + """Update forwarding settings. + + Args: + username: User to update forwarding for. + enable: Boolean whether to enable this forwarding rule. + forward_to: Email address to forward to. + action: Action to take after forwarding. + + Returns: + A dict containing the result of the update operation. + """ + uri = self._serviceUrl('forwarding', username) + properties = {} + properties['enable'] = gdata.apps.service._bool2str(enable) + if enable is True: + properties['forwardTo'] = forward_to + properties['action'] = action + return self._PutProperties(uri, properties) + + def UpdatePop(self, username, enable, enable_for=None, action=None): + """Update POP3 settings. + + Args: + username: User to update POP3 settings for. + enable: Boolean whether to enable POP3. + enable_for: Which messages to make available via POP3. + action: Action to take after user retrieves email via POP3. + + Returns: + A dict containing the result of the update operation. + """ + uri = self._serviceUrl('pop', username) + properties = {} + properties['enable'] = gdata.apps.service._bool2str(enable) + if enable is True: + properties['enableFor'] = enable_for + properties['action'] = action + return self._PutProperties(uri, properties) + + def GetPop(self, username): + uri = self._serviceUrl('pop', username) + return self._GetProperties(uri) + + def GetImap(self, username): + uri = self._serviceUrl('imap', username) + return self._GetProperties(uri) + + def UpdateImap(self, username, enable): + """Update IMAP settings. + + Args: + username: User to update IMAP settings for. + enable: Boolean whether to enable IMAP. + + Returns: + A dict containing the result of the update operation. + """ + uri = self._serviceUrl('imap', username) + properties = {'enable': gdata.apps.service._bool2str(enable)} + return self._PutProperties(uri, properties) + + def GetVacation(self, username): + uri = self._serviceUrl('vacation', username) + return self._GetProperties(uri) + + def GetForward(self, username): + uri = self._serviceUrl('forwarding', username) + return self._GetProperties(uri) + + def UpdateVacation(self, username, enable, subject=None, message=None, + contacts_only=None, domain_only=None, start_date=None, end_date=None): + """Update vacation settings. + + Args: + username: User to update vacation settings for. + enable: Boolean whether to enable vacation responses. + subject: Vacation message subject. + message: Vacation message body. + contacts_only: Boolean whether to send message only to contacts. + domain_only: Boolean Whether to only send the autoresponse to users in the same primary domain as the user taking the vacation. + start_date: String "YYYY-MM-DD" The first day when the vacation responder was enabled for the user. In this version of the API, the startDate is in the UTC timezone, not the user's timezone. + end_date: String "YYYY-MM-DD" The last day until which vacation responder is enabled for the user. 
In this version of the API, the endDate is the UTC timezone, not the user's timezone. + + Returns: + A dict containing the result of the update operation. + """ + uri = self._serviceUrl('vacation', username) + properties = {} + properties['enable'] = gdata.apps.service._bool2str(enable) + if enable is True: + properties['subject'] = subject + properties['message'] = message + if contacts_only != None: + properties['contactsOnly'] = gdata.apps.service._bool2str(contacts_only) + if domain_only != None: + properties['domainOnly'] = gdata.apps.service._bool2str(domain_only) + if start_date != None: + properties['startDate'] = start_date + if end_date != None: + properties['endDate'] = end_date + return self._PutProperties(uri, properties) + + def UpdateSignature(self, username, signature): + """Update signature. + + Args: + username: User to update signature for. + signature: Signature string. + + Returns: + A dict containing the result of the update operation. + """ + uri = self._serviceUrl('signature', username) + properties = {'signature': signature} + return self._PutProperties(uri, properties) + + def GetSignature(self, username): + uri = self._serviceUrl('signature', username) + return self._GetProperties(uri) + + def CreateDelegate(self, delegate, delegator): + """Create delegate + + Args: + delegate: User who will have access to delegator's account + delegator: User whose account will be accessible by delegate + + Returns: + A dict containing the result of the operation. + """ + uri = self._serviceUrl('delegation', delegator) + properties = {'address': delegate} + return self._PostProperties(uri, properties) + + def GetDelegates(self, delegator): + """Retrieve delegates + + Args: + delegator: User whose account is accessible by retrieved delegates + + Returns: + A dict contaning the delegates + """ + uri = self._serviceUrl('delegation', delegator) + return self._GetPropertiesList(uri) + + def DeleteDelegate(self, delegate, delegator): + """Delete delegate + + Args: + delegate: User account who has access to delegator's account + delegator: Email address whose account will no longer be accessible by delegate + + Returns: + A dict containing the result of the operation. + """ + uri = self._serviceUrl('delegation', delegator)+"/%s" % delegate + return self._DeleteProperties(uri) + + def UpdateLanguage(self, username, language): + """Update user interface language. + + Args: + username: User to update language for. + language: Language code. + + Returns: + A dict containing the result of the update operation. + """ + uri = self._serviceUrl('language', username) + properties = {'language': language} + return self._PutProperties(uri, properties) + + def UpdateGeneral(self, username, page_size=None, shortcuts=None, arrows=None, + snippets=None, unicode=None): + """Update general settings. + + Args: + username: User to update general settings for. + page_size: Number of messages to show. + shortcuts: Boolean whether shortcuts are enabled. + arrows: Boolean whether arrows are enabled. + snippets: Boolean whether snippets are enabled. + unicode: Wheter unicode is enabled. + + Returns: + A dict containing the result of the update operation. 
+ """ + uri = self._serviceUrl('general', username) + properties = {} + if page_size != None: + properties['pageSize'] = str(page_size) + if shortcuts != None: + properties['shortcuts'] = gdata.apps.service._bool2str(shortcuts) + if arrows != None: + properties['arrows'] = gdata.apps.service._bool2str(arrows) + if snippets != None: + properties['snippets'] = gdata.apps.service._bool2str(snippets) + if unicode != None: + properties['unicode'] = gdata.apps.service._bool2str(unicode) + return self._PutProperties(uri, properties) diff --git a/gam/gdata/apps/groups/__init__.py b/gam/gdata/apps/groups/__init__.py new file mode 100755 index 00000000000..e69de29bb2d diff --git a/gam/gdata/apps/groups/__init__.pyc b/gam/gdata/apps/groups/__init__.pyc new file mode 100644 index 0000000000000000000000000000000000000000..dee609d10c92e513b30ccd9afbc52241c3119585 GIT binary patch literal 163 zcmZSn%*&-#x;-G70SXv_v;zphMM~6DTyVC`iTVvKqW=_r9dh^ fJ~J<~BtBjbsFDL{piORmN@-529mv{ZAZ7pn2VEsh literal 0 HcmV?d00001 diff --git a/gam/gdata/apps/groups/service.py b/gam/gdata/apps/groups/service.py new file mode 100755 index 00000000000..987a045e60b --- /dev/null +++ b/gam/gdata/apps/groups/service.py @@ -0,0 +1,392 @@ +#!/usr/bin/python +# +# Copyright (C) 2008 Google, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Allow Google Apps domain administrators to manage groups, group members and group owners. + + GroupsService: Provides methods to manage groups, members and owners. 
+""" + +__author__ = 'google-apps-apis@googlegroups.com' + + +import urllib +import gdata.apps +import gdata.apps.service +import gdata.service + + +API_VER = '2.0' +BASE_URL = '/a/feeds/group/' + API_VER + '/%s' +GROUP_MEMBER_URL = BASE_URL + '?member=%s' +GROUP_MEMBER_DIRECT_URL = GROUP_MEMBER_URL + '&directOnly=%s' +GROUP_ID_URL = BASE_URL + '/%s' +MEMBER_URL = BASE_URL + '/%s/member' +MEMBER_WITH_SUSPENDED_URL = MEMBER_URL + '?includeSuspendedUsers=%s' +MEMBER_ID_URL = MEMBER_URL + '/%s' +OWNER_URL = BASE_URL + '/%s/owner' +OWNER_WITH_SUSPENDED_URL = OWNER_URL + '?includeSuspendedUsers=%s' +OWNER_ID_URL = OWNER_URL + '/%s' + +PERMISSION_OWNER = 'Owner' +PERMISSION_MEMBER = 'Member' +PERMISSION_DOMAIN = 'Domain' +PERMISSION_ANYONE = 'Anyone' + + +class GroupsService(gdata.apps.service.PropertyService): + """Client for the Google Apps Groups service.""" + + def _ServiceUrl(self, service_type, is_existed, group_id, member_id, owner_email, + direct_only=False, domain=None, suspended_users=False): + if domain is None: + domain = self.domain + + if service_type == 'group': + if group_id != '' and is_existed: + return GROUP_ID_URL % (domain, group_id) + elif member_id != '': + #if direct_only: + return GROUP_MEMBER_DIRECT_URL % (domain, urllib.quote_plus(member_id), + self._Bool2Str(direct_only)) + #else: + # return GROUP_MEMBER_URL % (domain, urllib.quote_plus(member_id)) + else: + return BASE_URL % (domain) + + if service_type == 'member': + if member_id != '' and is_existed: + return MEMBER_ID_URL % (domain, group_id, urllib.quote_plus(member_id)) + elif suspended_users: + return MEMBER_WITH_SUSPENDED_URL % (domain, group_id, + self._Bool2Str(suspended_users)) + else: + return MEMBER_URL % (domain, group_id) + + if service_type == 'owner': + if owner_email != '' and is_existed: + return OWNER_ID_URL % (domain, group_id, urllib.quote_plus(owner_email)) + elif suspended_users: + return OWNER_WITH_SUSPENDED_URL % (domain, group_id, + self._Bool2Str(suspended_users)) + else: + return OWNER_URL % (domain, group_id) + + def _Bool2Str(self, b): + if b is None: + return None + return str(b is True).lower() + + def _IsExisted(self, uri): + try: + self._GetProperties(uri) + return True + except gdata.apps.service.AppsForYourDomainException, e: + if e.error_code == gdata.apps.service.ENTITY_DOES_NOT_EXIST: + return False + else: + raise e + + def CreateGroup(self, group_id, group_name, description, email_permission): + """Create a group. + + Args: + group_id: The ID of the group (e.g. us-sales). + group_name: The name of the group. + description: A description of the group + email_permission: The subscription permission of the group. + + Returns: + A dict containing the result of the create operation. + """ + uri = self._ServiceUrl('group', False, group_id, '', '') + properties = {} + properties['groupId'] = group_id + properties['groupName'] = group_name + properties['description'] = description + properties['emailPermission'] = email_permission + return self._PostProperties(uri, properties) + + def UpdateGroup(self, group_id, group_name, description, email_permission): + """Update a group's name, description and/or permission. + + Args: + group_id: The ID of the group (e.g. us-sales). + group_name: The name of the group. + description: A description of the group + email_permission: The subscription permission of the group. + + Returns: + A dict containing the result of the update operation. 
+ """ + uri = self._ServiceUrl('group', True, group_id, '', '') + properties = {} + properties['groupId'] = group_id + properties['groupName'] = group_name + properties['description'] = description + properties['emailPermission'] = email_permission + return self._PutProperties(uri, properties) + + def RetrieveGroup(self, group_id): + """Retrieve a group based on its ID. + + Args: + group_id: The ID of the group (e.g. us-sales). + + Returns: + A dict containing the result of the retrieve operation. + """ + uri = self._ServiceUrl('group', True, group_id, '', '') + return self._GetProperties(uri) + + def RetrieveAllGroups(self, noUserManagedGroups=False): + """Retrieve all groups in the domain. + + Args: + None + + Returns: + A list containing the result of the retrieve operation. + """ + uri = self._ServiceUrl('group', True, '', '', '') + if noUserManagedGroups: + uri = uri + '?skipUserCreatedGroups=True' + return self._GetPropertiesList(uri) + + def RetrievePageOfGroups(self, start_group=None): + """Retrieve one page of groups in the domain. + + Args: + start_group: The key to continue for pagination through all groups. + + Returns: + A feed object containing the result of the retrieve operation. + """ + uri = self._ServiceUrl('group', True, '', '', '') + if start_group is not None: + uri += "?start="+start_group + property_feed = self._GetPropertyFeed(uri) + return property_feed + + def RetrieveGroups(self, member_id, direct_only=False): + """Retrieve all groups that belong to the given member_id. + + Args: + member_id: The member's email address (e.g. member@example.com). + direct_only: Boolean whether only return groups that this member directly belongs to. + + Returns: + A list containing the result of the retrieve operation. + """ + uri = self._ServiceUrl('group', True, '', member_id, '', direct_only=direct_only) + return self._GetPropertiesList(uri) + + def DeleteGroup(self, group_id): + """Delete a group based on its ID. + + Args: + group_id: The ID of the group (e.g. us-sales). + + Returns: + A dict containing the result of the delete operation. + """ + uri = self._ServiceUrl('group', True, group_id, '', '') + return self._DeleteProperties(uri) + + def AddMemberToGroup(self, member_id, group_id): + """Add a member to a group. + + Args: + member_id: The member's email address (e.g. member@example.com). + group_id: The ID of the group (e.g. us-sales). + + Returns: + A dict containing the result of the add operation. + """ + uri = self._ServiceUrl('member', False, group_id, member_id, '') + properties = {} + properties['memberId'] = member_id + properties['membershipType'] = 'manager' + properties['membership_type'] = 'manager' + properties['delivery'] = '0' + return self._PostProperties(uri, properties) + + def IsMember(self, member_id, group_id): + """Check whether the given member already exists in the given group. + + Args: + member_id: The member's email address (e.g. member@example.com). + group_id: The ID of the group (e.g. us-sales). + + Returns: + True if the member exists in the group. False otherwise. + """ + uri = self._ServiceUrl('member', True, group_id, member_id, '') + return self._IsExisted(uri) + + def RetrieveMember(self, member_id, group_id): + """Retrieve the given member in the given group. + + Args: + member_id: The member's email address (e.g. member@example.com). + group_id: The ID of the group (e.g. us-sales). + + Returns: + A dict containing the result of the retrieve operation. 
+ """ + uri = self._ServiceUrl('member', True, group_id, member_id, '') + return self._GetProperties(uri) + + def RetrieveAllMembers(self, group_id, suspended_users=False): + """Retrieve all members in the given group. + + Args: + group_id: The ID of the group (e.g. us-sales). + suspended_users: A boolean; should we include any suspended users in + the membership list returned? + + Returns: + A list containing the result of the retrieve operation. + """ + uri = self._ServiceUrl('member', True, group_id, '', '', + suspended_users=suspended_users) + return self._GetPropertiesList(uri) + + def RetrievePageOfMembers(self, group_id, suspended_users=False, start=None): + """Retrieve one page of members of a given group. + + Args: + group_id: The ID of the group (e.g. us-sales). + suspended_users: A boolean; should we include any suspended users in + the membership list returned? + start: The key to continue for pagination through all members. + + Returns: + A feed object containing the result of the retrieve operation. + """ + + uri = self._ServiceUrl('member', True, group_id, '', '', + suspended_users=suspended_users) + if start is not None: + if suspended_users: + uri += "&start="+start + else: + uri += "?start="+start + property_feed = self._GetPropertyFeed(uri) + return property_feed + + def RemoveMemberFromGroup(self, member_id, group_id): + """Remove the given member from the given group. + + Args: + member_id: The member's email address (e.g. member@example.com). + group_id: The ID of the group (e.g. us-sales). + + Returns: + A dict containing the result of the remove operation. + """ + uri = self._ServiceUrl('member', True, group_id, member_id, '') + return self._DeleteProperties(uri) + + def AddOwnerToGroup(self, owner_email, group_id): + """Add an owner to a group. + + Args: + owner_email: The email address of a group owner. + group_id: The ID of the group (e.g. us-sales). + + Returns: + A dict containing the result of the add operation. + """ + uri = self._ServiceUrl('owner', False, group_id, '', owner_email) + properties = {} + properties['email'] = owner_email + return self._PostProperties(uri, properties) + + def IsOwner(self, owner_email, group_id): + """Check whether the given member an owner of the given group. + + Args: + owner_email: The email address of a group owner. + group_id: The ID of the group (e.g. us-sales). + + Returns: + True if the member is an owner of the given group. False otherwise. + """ + uri = self._ServiceUrl('owner', True, group_id, '', owner_email) + return self._IsExisted(uri) + + def RetrieveOwner(self, owner_email, group_id): + """Retrieve the given owner in the given group. + + Args: + owner_email: The email address of a group owner. + group_id: The ID of the group (e.g. us-sales). + + Returns: + A dict containing the result of the retrieve operation. + """ + uri = self._ServiceUrl('owner', True, group_id, '', owner_email) + return self._GetProperties(uri) + + def RetrieveAllOwners(self, group_id, suspended_users=False): + """Retrieve all owners of the given group. + + Args: + group_id: The ID of the group (e.g. us-sales). + suspended_users: A boolean; should we include any suspended users in + the ownership list returned? + + Returns: + A list containing the result of the retrieve operation. 
+ """ + uri = self._ServiceUrl('owner', True, group_id, '', '', + suspended_users=suspended_users) + return self._GetPropertiesList(uri) + + def RetrievePageOfOwners(self, group_id, suspended_users=False, start=None): + """Retrieve one page of owners of the given group. + + Args: + group_id: The ID of the group (e.g. us-sales). + suspended_users: A boolean; should we include any suspended users in + the ownership list returned? + start: The key to continue for pagination through all owners. + + Returns: + A feed object containing the result of the retrieve operation. + """ + uri = self._ServiceUrl('owner', True, group_id, '', '', + suspended_users=suspended_users) + if start is not None: + if suspended_users: + uri += "&start="+start + else: + uri += "?start="+start + property_feed = self._GetPropertyFeed(uri) + return property_feed + + def RemoveOwnerFromGroup(self, owner_email, group_id): + """Remove the given owner from the given group. + + Args: + owner_email: The email address of a group owner. + group_id: The ID of the group (e.g. us-sales). + + Returns: + A dict containing the result of the remove operation. + """ + uri = self._ServiceUrl('owner', True, group_id, '', owner_email) + return self._DeleteProperties(uri) diff --git a/gam/gdata/apps/groups/service.pyc b/gam/gdata/apps/groups/service.pyc new file mode 100644 index 0000000000000000000000000000000000000000..41d04fa2885b41b8f86703a5f41137e8524315b2 GIT binary patch literal 14110 zcmeHOO>-Pa8Sc?aYpt}hEXB5*7|0}*ILe2m#E=9Wf610?RIxsom10{!O|5oD(pak< zWqQ_*C>to^3m1L>S1z15a7g|DKY$Y#PEbX0;6N2{;6N2T&)c)Jv#XWmueF^jwBFvH z{+jOhecrF`cJ7};11IWVE>=|Xm&5M`eDzqp0 zdr)czksDV2u&F&HIo6ISuc%f=l{chTUQpgawQ^W_!)oP-^3bSP-8`ypj;Wi+)Ed?? zqV6g6xl%V)UgU~OeuaHbttKn_?N*JzJeS2zqTiaf^RSs%)+1mhieR^U%@=rWrioqm?4rtUoZt`G82_>EBG(|<~4bFYT{izjNi%f!<)CaIK?#7$Z-Hi<^evG z7bHJKoN=6k${UmnkXMw%l`h8kA)ASXcLA!!3J3h0Q(Q7Jj9ycZ;6 z=6_haA8T|!BHe+`qtfOj<&?&ckpgcaxl|4OT5PX{ksWXNEoe3Lv%w2uY@$MD@Y~OB z`06D~sXHKwQk(rMI;QR@b<JCWIqc(FYnpSsEGOgksc0~DF zk2+eqBVx~bt>PSOVhCBELiy*GjG9+>^31R0&^$)Z1Ncqel9RXmm93Mvd?9c7LdE^b zTe8E+5O*-&Kd88n)N|r^MSt@Z14)g{FL_a~ylBz7Bgo)%U$(fMm#uiP^+kQ@Kzz}8 zD=zN&qM^1IftaOn0)1ZKF5(sAQ3<+@&`6Dl#5e^Et3z|5o?>Bi4BiL{pMf)nGNIcz#+BW8dAb)-@=klzTA@tEWnF1bRYt zrb;u?i6WK^mArzooRnOfUHYzDS}rZl%umml32@}3mh<5R7p~1?B1vyT(iC*)%z2nf z^d0AutXlikH5n-ZsT=QX`?7{WyZ&d;vA!p>7CO3tCkrxEv+jkZWlNDM|Fl$W4mEn%oSc)t$*{3|I}z)Q#n>lcy%$fB)q64wQ23 zq>d`~_+;UX?79RBk64g_*iujIN2k|>H)$Y;7As4RUs|i+R6|G)N7Rkv`USyGQFVxNv zQ5mvYEXu;nO4n-VR4>+v72XupvN@olWo6y%Q50baf3xy{i1}4&j}q#BW~rK`u9I8| zVT2|WJokzp!|-myAO*g5XiJUIlD|SN@)+t0vo;KkUqYU)z~wNy5!NGPxMn`9_}g(1 z)&#kJ6orvn2|YhPg4Q$hOS4Ni-06jxk~_b!TaJ3 zl)4KY3`B~(Jpo+G_I@QWs@U_^Dxt5NN zhVa2EQ6StrZBN;m$Cl>Fu%dNb0B9@F8UQeZ>-uUsh~2Dg86a7h)?_HEwjT7y=QtWrEJk+#$alGE49(#F#beQu6ayuu+fA1&U^rB`l^Z zFnD~KgT2kcD7Nlms562hyb{Pd6bHeRVG610yvMRbNZN_xywhmCoeEtSVNOMCz;uPa z_8J}y&Wp#PatD&nGr{=}f%D~UXqgn}Z)kz!J1u}i;4lflu8E-kO9Mo=RT}DsjC)Fh zGYP&s?=m4Uol{6M7#E!{wqUWoH^d!Uant{O<_Kix1CE4m7ySr>5rOZI@S;xeZTJYO zn&y=DRMSwoM)5*x!>kG+^l#AvdMFC~TPYsx)w1?IJFMA3tf6EdA9=e#!KyTwQ4M*wy{IOP|ySt9T>+@gIf@`y^9@Hc-;55KKrvn zAIH4T>&;-BUbWG{o)AT!rT5v!u9*8Rt)AFb%%aJ__yshmg&b^7wpo(Fy3Ny(40*8? zWLEG?6zYB?N6K3U6!+&XoTANwioTt27~CHFu*qe3OTy*5$Z$W3hF$*& zNy=;R&)(*K6Fi@F*+**UIhiXQ&N@yyE=RE|9gLr{>F;p!irz+0tNXHvfN_JG_!R&V z#$MmZK&#;-bp{VNlr36sX8nK403Uw7BL|lohJ%LOc zga8sM2?LHki{LPt95Ej?xoCtB;8!$WnqeC_S&r>hzZ%k^3`IMv2e8Y^{Bq5{y@72`KZ1R! 
z?!cha2W}u)<{EDV+zRFXDLi6sKcbyk0Jlby*wvC8CVYH z5K1RGgwoixW?6q4R6{jj;UDAo*x*Z|V1cI@1q9&I@f!psH-i|Fhx2Yp)ng}AgXJ(!N;OLy%f!RzlP-tHW zeXR-GmW?|*qy@VjXg}G2NpY6}iTEqD=IzEK1qu1kr3jMnpVbB)e?~qr8;D1q02=zT zuEs7PBq7LGh4T-x?FKK!M96#Un|8jxM$S^`Iw^-P@;&P`81DbQBln4qOROP4J30;f zxSMZfF~9)SNX-KZ{zFPELmJy|JICP`?W*x9KeF{kSg(5aZ3wYBkH*RSPO}F#g?Snu z)Wq0uFv2#OGR%N2-lK6weee8}kC0OZ;d}!GZ%0d;izy0%?Yiw^GW>41mD1H4W*!F1~ncG zj-qis&<8(;l+!=WqAeQ&H&BOY7=De!HHonylXr!|&pYloX#kzU`a_552`To9j~+il zZ^f|Y;|UX{Oa6W#yK@eW$LaYt&kjTqccZw!^neW|4<-4ZxeoJi+`R0H*0w zsUc#%i2WTL%Vq=g)NSYVTF}3*_mmU#ZiU!l-DNG;qHrrynt*o~a1C=)r`kL|d_ew? z_?KybUagV#{;qlC%VHncTGA3d3u|fMz_}V2Tn00)(9wI9aG>5(aHYj%YKh#auiGGi zunnOak3)AyZfuI`W^Bfh^&gQ)Y*w@78UuG{_KwXmHyw;-Xx)l2`j1mLgEe;Ro)W`5 z#bPuUCKe;@WV#u<*C~Yh8hfPPakoP#Mtz|F0xzW{5!58v&UusVJSTi0Z#m2Ql-126 zmO~hHzu2h(fyQxm){yPe1<-ND*PP~@GtaeSe=ZnH$;1&aOv5x{ISBLJ-PfE z`ADi7ci^dUIeS(oy#BZ&uQ_kw^`k~dyls?noY9dmP&ck|v^c7}~@ccMWH{>+Lb$QXxb$pP(7$=s|K p%66H10^TzQuIIQ?`5!d-o_r2h^ZCi;aXG(nGc^6}0iXHF{SUE(q<8=T literal 0 HcmV?d00001 diff --git a/gam/gdata/apps/groups/service.py~ b/gam/gdata/apps/groups/service.py~ new file mode 100755 index 00000000000..c204a2b6344 --- /dev/null +++ b/gam/gdata/apps/groups/service.py~ @@ -0,0 +1,393 @@ +#!/usr/bin/python +# +# Copyright (C) 2008 Google, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Allow Google Apps domain administrators to manage groups, group members and group owners. + + GroupsService: Provides methods to manage groups, members and owners. 
+""" + +__author__ = 'google-apps-apis@googlegroups.com' + + +import urllib +import gdata.apps +import gdata.apps.service +import gdata.service + + +API_VER = '2.0' +BASE_URL = '/a/feeds/group/' + API_VER + '/%s' +GROUP_MEMBER_URL = BASE_URL + '?member=%s' +GROUP_MEMBER_DIRECT_URL = GROUP_MEMBER_URL + '&directOnly=%s' +GROUP_ID_URL = BASE_URL + '/%s' +MEMBER_URL = BASE_URL + '/%s/member' +MEMBER_WITH_SUSPENDED_URL = MEMBER_URL + '?includeSuspendedUsers=%s' +MEMBER_ID_URL = MEMBER_URL + '/%s' +OWNER_URL = BASE_URL + '/%s/owner' +OWNER_WITH_SUSPENDED_URL = OWNER_URL + '?includeSuspendedUsers=%s' +OWNER_ID_URL = OWNER_URL + '/%s' + +PERMISSION_OWNER = 'Owner' +PERMISSION_MEMBER = 'Member' +PERMISSION_DOMAIN = 'Domain' +PERMISSION_ANYONE = 'Anyone' + + +class GroupsService(gdata.apps.service.PropertyService): + """Client for the Google Apps Groups service.""" + + def _ServiceUrl(self, service_type, is_existed, group_id, member_id, owner_email, + direct_only=False, domain=None, suspended_users=False): + if domain is None: + domain = self.domain + + if service_type == 'group': + if group_id != '' and is_existed: + return GROUP_ID_URL % (domain, group_id) + elif member_id != '': + #if direct_only: + return GROUP_MEMBER_DIRECT_URL % (domain, urllib.quote_plus(member_id), + self._Bool2Str(direct_only)) + #else: + # return GROUP_MEMBER_URL % (domain, urllib.quote_plus(member_id)) + else: + return BASE_URL % (domain) + + if service_type == 'member': + if member_id != '' and is_existed: + return MEMBER_ID_URL % (domain, group_id, urllib.quote_plus(member_id)) + elif suspended_users: + return MEMBER_WITH_SUSPENDED_URL % (domain, group_id, + self._Bool2Str(suspended_users)) + else: + return MEMBER_URL % (domain, group_id) + + if service_type == 'owner': + if owner_email != '' and is_existed: + return OWNER_ID_URL % (domain, group_id, urllib.quote_plus(owner_email)) + elif suspended_users: + return OWNER_WITH_SUSPENDED_URL % (domain, group_id, + self._Bool2Str(suspended_users)) + else: + return OWNER_URL % (domain, group_id) + + def _Bool2Str(self, b): + if b is None: + return None + return str(b is True).lower() + + def _IsExisted(self, uri): + try: + self._GetProperties(uri) + return True + except gdata.apps.service.AppsForYourDomainException, e: + if e.error_code == gdata.apps.service.ENTITY_DOES_NOT_EXIST: + return False + else: + raise e + + def CreateGroup(self, group_id, group_name, description, email_permission): + """Create a group. + + Args: + group_id: The ID of the group (e.g. us-sales). + group_name: The name of the group. + description: A description of the group + email_permission: The subscription permission of the group. + + Returns: + A dict containing the result of the create operation. + """ + uri = self._ServiceUrl('group', False, group_id, '', '') + properties = {} + properties['groupId'] = group_id + properties['groupName'] = group_name + properties['description'] = description + properties['emailPermission'] = email_permission + return self._PostProperties(uri, properties) + + def UpdateGroup(self, group_id, group_name, description, email_permission): + """Update a group's name, description and/or permission. + + Args: + group_id: The ID of the group (e.g. us-sales). + group_name: The name of the group. + description: A description of the group + email_permission: The subscription permission of the group. + + Returns: + A dict containing the result of the update operation. 
+ """ + uri = self._ServiceUrl('group', True, group_id, '', '') + properties = {} + properties['groupId'] = group_id + properties['groupName'] = group_name + properties['description'] = description + properties['emailPermission'] = email_permission + return self._PutProperties(uri, properties) + + def RetrieveGroup(self, group_id): + """Retrieve a group based on its ID. + + Args: + group_id: The ID of the group (e.g. us-sales). + + Returns: + A dict containing the result of the retrieve operation. + """ + uri = self._ServiceUrl('group', True, group_id, '', '') + return self._GetProperties(uri) + + def RetrieveAllGroups(self, noUserManagedGroups=False): + """Retrieve all groups in the domain. + + Args: + None + + Returns: + A list containing the result of the retrieve operation. + """ + uri = self._ServiceUrl('group', True, '', '', '') + if noUserManagedGroups: + uri = uri + '?skipUserCreatedGroups=True' + return self._GetPropertiesList(uri) + + def RetrievePageOfGroups(self, start_group=None): + """Retrieve one page of groups in the domain. + + Args: + start_group: The key to continue for pagination through all groups. + + Returns: + A feed object containing the result of the retrieve operation. + """ + uri = self._ServiceUrl('group', True, '', '', '') + if start_group is not None: + uri += "?start="+start_group + property_feed = self._GetPropertyFeed(uri) + return property_feed + + def RetrieveGroups(self, member_id, direct_only=False): + """Retrieve all groups that belong to the given member_id. + + Args: + member_id: The member's email address (e.g. member@example.com). + direct_only: Boolean whether only return groups that this member directly belongs to. + + Returns: + A list containing the result of the retrieve operation. + """ + uri = self._ServiceUrl('group', True, '', member_id, '', direct_only=direct_only) + return self._GetPropertiesList(uri) + + def DeleteGroup(self, group_id): + """Delete a group based on its ID. + + Args: + group_id: The ID of the group (e.g. us-sales). + + Returns: + A dict containing the result of the delete operation. + """ + uri = self._ServiceUrl('group', True, group_id, '', '') + return self._DeleteProperties(uri) + + def AddMemberToGroup(self, member_id, group_id): + """Add a member to a group. + + Args: + member_id: The member's email address (e.g. member@example.com). + group_id: The ID of the group (e.g. us-sales). + + Returns: + A dict containing the result of the add operation. + """ + uri = self._ServiceUrl('member', False, group_id, member_id, '') + properties = {} + properties['memberId'] = member_id + properties['membershipType'] = 'manager' + properties['membership_type'] = 'manager' + properties['delivery'] = '0' + return self._PostProperties(uri, properties) + + def IsMember(self, member_id, group_id): + """Check whether the given member already exists in the given group. + + Args: + member_id: The member's email address (e.g. member@example.com). + group_id: The ID of the group (e.g. us-sales). + + Returns: + True if the member exists in the group. False otherwise. + """ + uri = self._ServiceUrl('member', True, group_id, member_id, '') + return self._IsExisted(uri) + + def RetrieveMember(self, member_id, group_id): + """Retrieve the given member in the given group. + + Args: + member_id: The member's email address (e.g. member@example.com). + group_id: The ID of the group (e.g. us-sales). + + Returns: + A dict containing the result of the retrieve operation. 
+ """ + uri = self._ServiceUrl('member', True, group_id, member_id, '') + return self._GetProperties(uri) + + def RetrieveAllMembers(self, group_id, suspended_users=False): + """Retrieve all members in the given group. + + Args: + group_id: The ID of the group (e.g. us-sales). + suspended_users: A boolean; should we include any suspended users in + the membership list returned? + + Returns: + A list containing the result of the retrieve operation. + """ + uri = self._ServiceUrl('member', True, group_id, '', '', + suspended_users=suspended_users) + print self.Get(uri) + return self._GetPropertiesList(uri) + + def RetrievePageOfMembers(self, group_id, suspended_users=False, start=None): + """Retrieve one page of members of a given group. + + Args: + group_id: The ID of the group (e.g. us-sales). + suspended_users: A boolean; should we include any suspended users in + the membership list returned? + start: The key to continue for pagination through all members. + + Returns: + A feed object containing the result of the retrieve operation. + """ + + uri = self._ServiceUrl('member', True, group_id, '', '', + suspended_users=suspended_users) + if start is not None: + if suspended_users: + uri += "&start="+start + else: + uri += "?start="+start + property_feed = self._GetPropertyFeed(uri) + return property_feed + + def RemoveMemberFromGroup(self, member_id, group_id): + """Remove the given member from the given group. + + Args: + member_id: The member's email address (e.g. member@example.com). + group_id: The ID of the group (e.g. us-sales). + + Returns: + A dict containing the result of the remove operation. + """ + uri = self._ServiceUrl('member', True, group_id, member_id, '') + return self._DeleteProperties(uri) + + def AddOwnerToGroup(self, owner_email, group_id): + """Add an owner to a group. + + Args: + owner_email: The email address of a group owner. + group_id: The ID of the group (e.g. us-sales). + + Returns: + A dict containing the result of the add operation. + """ + uri = self._ServiceUrl('owner', False, group_id, '', owner_email) + properties = {} + properties['email'] = owner_email + return self._PostProperties(uri, properties) + + def IsOwner(self, owner_email, group_id): + """Check whether the given member an owner of the given group. + + Args: + owner_email: The email address of a group owner. + group_id: The ID of the group (e.g. us-sales). + + Returns: + True if the member is an owner of the given group. False otherwise. + """ + uri = self._ServiceUrl('owner', True, group_id, '', owner_email) + return self._IsExisted(uri) + + def RetrieveOwner(self, owner_email, group_id): + """Retrieve the given owner in the given group. + + Args: + owner_email: The email address of a group owner. + group_id: The ID of the group (e.g. us-sales). + + Returns: + A dict containing the result of the retrieve operation. + """ + uri = self._ServiceUrl('owner', True, group_id, '', owner_email) + return self._GetProperties(uri) + + def RetrieveAllOwners(self, group_id, suspended_users=False): + """Retrieve all owners of the given group. + + Args: + group_id: The ID of the group (e.g. us-sales). + suspended_users: A boolean; should we include any suspended users in + the ownership list returned? + + Returns: + A list containing the result of the retrieve operation. 
+ """ + uri = self._ServiceUrl('owner', True, group_id, '', '', + suspended_users=suspended_users) + return self._GetPropertiesList(uri) + + def RetrievePageOfOwners(self, group_id, suspended_users=False, start=None): + """Retrieve one page of owners of the given group. + + Args: + group_id: The ID of the group (e.g. us-sales). + suspended_users: A boolean; should we include any suspended users in + the ownership list returned? + start: The key to continue for pagination through all owners. + + Returns: + A feed object containing the result of the retrieve operation. + """ + uri = self._ServiceUrl('owner', True, group_id, '', '', + suspended_users=suspended_users) + if start is not None: + if suspended_users: + uri += "&start="+start + else: + uri += "?start="+start + property_feed = self._GetPropertyFeed(uri) + return property_feed + + def RemoveOwnerFromGroup(self, owner_email, group_id): + """Remove the given owner from the given group. + + Args: + owner_email: The email address of a group owner. + group_id: The ID of the group (e.g. us-sales). + + Returns: + A dict containing the result of the remove operation. + """ + uri = self._ServiceUrl('owner', True, group_id, '', owner_email) + return self._DeleteProperties(uri) diff --git a/gam/gdata/apps/groupsettings/__init__.py b/gam/gdata/apps/groupsettings/__init__.py new file mode 100755 index 00000000000..e69de29bb2d diff --git a/gam/gdata/apps/groupsettings/__init__.pyc b/gam/gdata/apps/groupsettings/__init__.pyc new file mode 100644 index 0000000000000000000000000000000000000000..be93d8b7aa207c9be464d15d8500d5a1929f68a7 GIT binary patch literal 170 zcmZSn%*&-#x;-G70SXv_v;zphMM~6DTyVC`iTVvKqW=_r3J;Q nB_)}8>Bai-@tJvtMq4OR literal 0 HcmV?d00001 diff --git a/gam/gdata/apps/groupsettings/service.py b/gam/gdata/apps/groupsettings/service.py new file mode 100755 index 00000000000..6890d944ab7 --- /dev/null +++ b/gam/gdata/apps/groupsettings/service.py @@ -0,0 +1,172 @@ +#!/usr/bin/python2.4 +# +# Copyright 2010 Google Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""GroupSettingsService simplifies Group Settings API calls. + +GroupSettingsService extends gdata.apps.service.PropertyService to ease interaction with +the Google Apps Group Settings API. +""" + +__author__ = 'Jay Lee ' + +import gdata.apps +import gdata.apps.service +import gdata.service + +# Group Settings URI template +GROUP_SETTINGS_URI_TEMPLATE = '/groups/v1/groups/%s?alt=atom' + +class GroupSettingsService(gdata.apps.service.PropertyService): + """Service extension for the Google Group Settings API service.""" + + def __init__(self, email=None, password=None, domain=None, source=None, + server='www.googleapis.com', additional_headers=None, + **kwargs): + """Creates a client for the Group Settings service. + + Args: + email: string (optional) The user's email address, used for + authentication. + password: string (optional) The user's password. + domain: string (optional) The Google Apps domain name. 
+ source: string (optional) The name of the user's application. + server: string (optional) The name of the server to which a connection + will be opened. Default value: 'apps-apis.google.com'. + **kwargs: The other parameters to pass to gdata.service.GDataService + constructor. + """ + gdata.service.GDataService.__init__( + self, email=email, password=password, service='apps', source=source, + server=server, additional_headers=additional_headers, **kwargs) + self.ssl = True + self.port = 443 + self.domain = domain + + def make_group_settings_uri(self, group_email): + """Creates the URI for the Group Settings API call. + + Create the URI to access the group settings API. If params are provided, + append them as GET params. + + Args: + group: email address of the group + + Returns: + A string giving the URI for Group Settings API calls + + """ + uri = GROUP_SETTINGS_URI_TEMPLATE % (group_email) + return uri + + MakeGroupSettingsUri = make_group_settings_uri + + def retrieve_group_settings(self, group_email): + """Retrieves group settings + + Args: + group_email: string, the group email address + + Returns: + A dict. The group settings + """ + uri = self.MakeGroupSettingsUri(group_email) + group_settings_entry = self.Get(uri) + group_settings_values = [] + for group_settings_value in group_settings_entry.extension_elements: + group_settings_values.append({group_settings_value.tag: group_settings_value.text}) + return group_settings_values + + RetrieveGroupSettings = retrieve_group_settings + + def update_group_settings(self, group_email, allow_external_members=None, + allow_google_communication=None, allow_web_posting=None, archive_only=None, custom_reply_to=None, + default_message_deny_notification_text=None, description=None, is_archived=None, max_message_bytes=None, + members_can_post_as_the_group=None, message_display_font=None, message_moderation_level=None, name=None, + primary_language=None, reply_to=None, send_message_deny_notification=None, show_in_group_directory=None, + who_can_invite=None, who_can_join=None, who_can_post_message=None, who_can_view_group=None, + who_can_view_membership=None, include_in_global_address_list=None, spam_moderation_level=None): + + uri = self.MakeGroupSettingsUri(group_email) + + xml = ''' + + tag:googleapis.com,2010:apps:groupssettings:GROUP:NNN + Groups Resource Entry + + Google + + %s + %s + +''' % (group_email, group_email) + + template = "%s\n" + if name != None: + xml += template % ('name', name, 'name') + if allow_external_members != None: + xml += template % ('allowExternalMembers', allow_external_members, 'allowExternalMembers') + if allow_google_communication != None: + xml += template % ('allowGoogleCommunication', allow_google_communication, 'allowGoogleCommunication') + if allow_web_posting != None: + xml += template % ('allowWebPosting', allow_web_posting, 'allowWebPosting') + if archive_only != None: + xml += template % ('archiveOnly', archive_only, 'archiveOnly') + if custom_reply_to != None: + xml += template % ('customReplyTo', custom_reply_to, 'customReplyTo') + if default_message_deny_notification_text != None: + xml += template % ('defaultMessageDenyNotificationText', default_message_deny_notification_text, 'defaultMessageDenyNotificationText') + if description != None: + xml += template % ('description', description, 'description') + if is_archived != None: + xml += template % ('isArchived', is_archived, 'isArchived') + if max_message_bytes != None: + xml += template % ('maxMessageBytes', max_message_bytes, 'maxMessageBytes') + if 
members_can_post_as_the_group != None: + xml += template % ('membersCanPostAsTheGroup', members_can_post_as_the_group, 'membersCanPostAsTheGroup') + if message_display_font != None: + xml += template % ('messageDisplayFont', message_display_font, 'messageDisplayFont') + if message_moderation_level != None: + xml += template % ('messageModerationLevel', message_moderation_level, 'messageModerationLevel') + if primary_language != None: + xml += template % ('primaryLanguage', primary_language, 'primaryLanguage') + if reply_to != None: + xml += template % ('replyTo', reply_to, 'replyTo') + if send_message_deny_notification != None: + xml += template % ('sendMessageDenyNotification', send_message_deny_notification, 'sendMessageDenyNotification') + if show_in_group_directory != None: + xml += template % ('showInGroupDirectory', show_in_group_directory, 'showInGroupDirectory') + if who_can_invite != None: + xml += template % ('whoCanInvite', who_can_invite, 'whoCanInvite') + if who_can_join != None: + xml += template % ('whoCanJoin', who_can_join, 'whoCanJoin') + if who_can_post_message != None: + xml += template % ('whoCanPostMessage', who_can_post_message, 'whoCanPostMessage') + if who_can_view_group != None: + xml += template % ('whoCanViewGroup', who_can_view_group, 'whoCanViewGroup') + if who_can_view_membership != None: + xml += template % ('whoCanViewMembership', who_can_view_membership, 'whoCanViewMembership') + if include_in_global_address_list != None: + xml += template % ('includeInGlobalAddressList', include_in_global_address_list, 'includeInGlobalAddressList') + if spam_moderation_level != None: + xml += template % ('spamModerationLevel', spam_moderation_level, 'spamModerationLevel') + xml += '' + group_settings_entry = self.Put(uri=uri, data=xml) + group_settings_values = [] + for group_settings_value in group_settings_entry.extension_elements: + group_settings_values.append({group_settings_value.tag: group_settings_value.text}) + return group_settings_values + + UpdateGroupSettings = update_group_settings diff --git a/gam/gdata/apps/groupsettings/service.pyc b/gam/gdata/apps/groupsettings/service.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c1d77f8ca1c31939ba8f9c296f8511d5f232516f GIT binary patch literal 6359 zcmcgw&u<&Y6&{ijCDF1a|B`>(3l&b3)F$l|PE$cvqF9dV#8!lrlpq4y#cFp%t-Rc2 zc7~#fu6wb2DNyv-TY;W?4qBk-x##{3J@nL54?VWud$U7v6}v48xRSV9zWHY6&3oUx z_hxE;KiPPz_q&Zi6hAfm{|F!T6&|4wJ9t*aPDO-O5!OUp6+1QiJ~r|`&hL=aMK~^G zO&m;!gGq5PCHBy_E?x@pLWr++8Z^r0!URq6dpy>%wAbB~T1QD+ZOZI03Z$c=PB)JB zBB>m1cPzkJ*?8mxejKZ2qw&hF@|l)NsGN4_YrpAtyQ-?}HUq_3nC zC0b^FprbT#`jOsm=zZy|rD;2s&I$&2g$~Vzx&q}t^#{&lDV?PQe{d(3@}qXgkK$&K zc9x@m;%8l51FF@gHLBL(UHf)Zedx#fzOU1cKKB|U3_WrQfRFJ}zs5ttBd{l)a0g61 z{9_}-M=$GQXN+tNgYUo*@Cz_w{B=Ue39&Od(reiLe|s4hFE5InDIuo>9P1WtU4ow0 z#_SG7mfcG;XT-3t@T>KcI)efF{eH8}KJdGdB9jKhnMr(R@c9fMrl=O;pdt>c;-DsS z=4#Gd&6%!PIM|ruSr_@Z2q#32mkIHUO1+}TsDlUTB)_16hwvsuI0cF}Xo}UU5Dzlx zYf#a5f;f_iK3WR-^@y8R|3<^X&q~%-i~Qy|l4!B$D4l`j&O+KHuKDph&K8)~L%+9` z!8m>xW>TqJBn@ezVgwq|@4G~N~g4m#F zZZaVKH-W|adTkY9X43?i5nZ4loZSm_nwc5&IM~EA&_rSD4b5@TbO#@WASHF3n8zkI zuWTtzy8`rx40pVJ>4#7)`a?yC^#vM~=V0~R8hxF{V^lw$+4G_#(w@h&ZDl>F39D4h zf^M2=p3^wp3`=?`8SlB6Nx0Y$!aYR~T9Qk;y0i-JW{kLiuYw=S#an9|)>5sfQMNm1 zt)~4XPW@1|+J5Kud(C_IZg&TIKTU3{EN~X?zSH6rw8#)exGa)G%i6py_Ec6n+Czk5)wgAK;@%RRjq{g}LWdfv~W-KSJbuR_((Mm zw+d`A*=QKyu@}pc!frHfGy1tw_p)Rdd{%7SX-9_?enuDjiYUz4)eOXj(v;}d+|Rc+ zyv>JOTaVV)Ha#rc+j{t9iAEkXJ)t7 z_b~Y)niMfuoNO3&5MznD%#JVcf-G>r1bI;rL=ny?APRl0iM9~W$HcSmQg%R^S^beC 
zo^wh;!ig%BRSu9y@EjNKqetcIruY&Gr(oI7F+Ti}Me-0Cgw-380y*4k+4wVg#A3D&?|iZsK-?a5enkakR1GIZrw zp#kA8+Hdo~OahlWyJzh@W<^7XV4Q5A&!hEN0|#lvs=cb^|3jgyaMUpxJ%+j_&`ix# zW>7iIBHRR*A$9N>QfUS(D^ck(RnCYz_}i-@`dp#9`T13mpP~ZpX-#CGiTsMluTr^2 z!kXBtiMj6sm|!RrFb4ItMS*7!j+Ia-#trIM76o!bI9@`b05YgQS`?}su8KxyL@8uY zf3YakOI$ULqEIRt)KsO=9`zH1lcgC^L>W}eqNb?uLS;2N7HUz0+P0{M$RTW$P*WE5 z6N@?_atKe9P&iqbv3_Gw(;|m(x`aAmQGc?ilOl)kWC=BGQGc_jQzD1(R0(y`qRv(e zOU#HI!kH3k#-e^;QL`e4aJGb+wJ6V`PKzAE(NXG*9u7WG?;IxBJr z&z4YUE$Yt}bx!0Eo-3iwS=2u)>b%GyJYPbcx2S8i!V+^Lhj6ZhnzN_}7Ii`75MC&u zE?87#Q5QuH;l&c_qDB4EqArOX!b>I8C5!sTqArUZ!pkKTHs%#U+m5y`b{MuO_U<*> z1TRGO`8Cb~*Et7VqjLu5fEyyePNAQ2zzvH2^sE;-;IFkW(Ym>C9RXrp{S>9f(udDF zv4e`24nz0n?>6tuJ2DB<5Gnco{Pxx-w?CNwaJjL>5z+xRQTONfweBvqT67lbf3KNl z?bdsD?%Zpw;M_QGFpE?t4S^~^8R4tq;6w$ksiV;AecPcO{yVstVSX94$x;+9BeXA; zPhq#-yL0yr&$`HgTG`mX$Th;^`uh4(3q5H79qCvuukq>Eaiu9~orjbImRhEfV05lY zGwOjaba+@c$B(5Je<4Wg2(v^hUc?MH6~x6m4@2*qB9EHP*G5Cl!4anCO?A|a_Xg)O znuwDRt`^e%L);-`IG;U{on1IgUqokaFnT>mJDpxq9A`9DKHT`F+}%hO-D7YepF>c;-ylQhwqtW$`e zq&QXZ0rN3RmYApLW>Lq_29N!u-GiolXvvC&P<5zK!MJn32y0CF+?gfY5h#*`Qv6-1F9#vPwFt6|wt7@9ecJr`zM@UA-FwK3r>vap7kqHZ$ zc`5OXrZPP9*gu|5^;?h?Ov7wUH7^9$ykvxI9u;#gL78%q@``ao_5(OJ?7Pb>qkFAF zcQ5c0R>|{~hr@-bp4cnH`3%>*Jt7;~$ToC_Z1ZBGn|p&)n8l;uoEKZtkynO`qPR+| z8*25f74eyXh==oTQO|`#?$MbLjA6|q!@h!!EIJ_m(Y>6-kiqR#yoYSVAxK!Pm;7H6 z7v>=%lDY*ESskXLSKRe@F_Do-MP&58WfHEX=_rMyz(;s3aXX3|DVO%1dzBvKjyGQ; zla9SEHp20P){oJkGCaiO%=B!vUYV|3exsj8vjN@v9qNGQL?8Wqz zZYMl1OasqzKfpWR9=POR-pVd{hwX0CR~Q^Cwt4BL#v_~a+nc)XfChZl zP^4s9*HU}^h%=7($!cbjxJW-ytvu*%V`xJ&zqxqKwEH|3nKjJUXS saVY?~6b8-J5(S!L*Xd-Y5e~jdd$6~cjDMGTphMM~6DTyVC`iTVv#rnCWIVG7X m`MHUidHV73nR%Hd@$q^<Hf|+-#I;% ze=gTw8UA&vtBSu0em8I#4G}A~1L>(9PsP59D=PKXPNi(mO|<7H+EukvRWW84R6MVA zMfDa{ucmr+wTI_a)nla|DfP>pB|dSxwZPZ?9OBkTR`+8aH}4Kpn>24`hf&gRZVv~8 zEVpa*decp~@ok-dm~{15cs;D8N3BbE^xOpBIe{bM2zkq-@9DB+tbqs(#wO3JB9?q#_U*)Un7;yM( zKl>w$^`TQy`CS>VDC?_Ug(Hu=@%$X@&&PMn(~#|`y{C0-+AGC>>v2@%+oEUbESp>J zhVS1BY!&TToJ5+#JWZ2bTf-=ty5G%W-QxN!-(YmQXY10iHtFw&No+Z(;bf`YKZx?^ z&{+D%Fi%?eQ6kJ1TJ3vA=cau>$#);LZ)QjRG>fp%{pfIYeeK${)xiUMko8wh-fgzt zc(uJBM>c9lz_xuzXLieM;~8|?HE7en!lHjKPrr?>X+S9NidXfX#`V0{sRh+ay$enc@#6IEOzYK4y^Br{;pFtRcf2GE zf67e&xkYu{;LsxSVP@>r(! zlHAx%bf_(-(x~XruM@KwA0~ZEs~<$>;GH5~?F%w_;~?tq>#gEpo3^7rvUjq5+^~%K zXg^2jyqCSFb2u@*D>2(cGZ01(}AqGL-a77i>s3wN_DT{kwjB;Rl=Gj=P_27n~dM(8NuE+q(wR4Abr zJXMYfGGU+4a5;p=^$1jA!brg3UI|WEvYEWCE7*i1q`3lzNNBUr3MPE086m>3N(SN4 z1SAAaAPJLxxW5~)FRLAlw}GLvWCaq`ja)}olW)F}87{mY<*OCW`M@vq^%=rz1$ zT>PE9mdCU~7Zuw0^OMsCNCL$2Ncm&(0He5=*(KLA*g+p;0aONV;@50*0&Kzw5RjYr zm~BpgTsSe}m%mMi{Y-xO$1HyNZ2AE+`DJCA4`%Yq-D$m<{4$)@o5?R9dkA3C5 z83c+?uW*HM=RYvMR6-|A=`3|yPg;sA7Rq&&BJO7mIg>`9ZJy{41@v9?4f$JnWU#AV zmla(Jr*ISfjW|e2gL;N_N?FN}d58>m(vdtXA<*IjT!Nzahf~|WgZ$9Q@2GO8Gh##QwWfB>(PWg2QoR)yx@I`1r{g9`%lf~s1 zXQV(NWabrCJ2lqC1IIW@^?x3*#$<^KkIAKQ!9QVS0?L=XPKy~w(q|~&48l-|ISdc8 zc$jK*333?5SvL&bu}4Z(p0I;9#Tyjgr?^J(6N;ZwyhZUg#RkPDL<`r1IFk>A0pHQY zWq2y9dR6~ot>ImI?y`63g<7>XhpS#&a;=u^phMM~6DTyVC`iTVv#rpY0>Bai- f@tJvtuF)iA literal 0 HcmV?d00001 diff --git a/gam/gdata/apps/orgs/service.py b/gam/gdata/apps/orgs/service.py new file mode 100755 index 00000000000..b9002a08341 --- /dev/null +++ b/gam/gdata/apps/orgs/service.py @@ -0,0 +1,170 @@ +#!/usr/bin/python +# +# Copyright (C) 2008 Google, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Organization Support. + + OrganizationService: Organization Support.""" + +__author__ = 'jlee@pbu.edu' + + +import urllib +import gdata.apps +import gdata.apps.service +import gdata.service + + +API_VER='2.0' + +class OrganizationService(gdata.apps.service.PropertyService): + """Extended functions for Google Apps Organization Support.""" + + def _serviceUrl(self, setting_id, domain=None): + if domain is None: + domain = self.domain + return '/a/feeds/%s/%s/%s' % (setting_id, API_VER, domain) + + def RetrieveCustomerId(self): + + uri = '/a/feeds/customer/2.0/customerId' + return self._GetProperties(uri) + + def CreateOrganizationUnit(self, name, description, parent_org_unit_path='/', block_inheritance=False): + + customer_id = self.RetrieveCustomerId()['customerId'] + uri = '/a/feeds/orgunit/2.0/%s' % customer_id + properties = {} + properties['name'] = name + properties['description'] = description + properties['parentOrgUnitPath'] = urllib.quote_plus(parent_org_unit_path, safe='/') + properties['blockInheritance'] = gdata.apps.service._bool2str(block_inheritance) + return self._PostProperties(uri, properties) + + def UpdateOrganizationUnit(self, old_name, new_name=None, description=None, parent_org_unit_path=None, block_inheritance=None, users_to_move=None): + + customer_id = self.RetrieveCustomerId()['customerId'] + old_name = urllib.quote_plus(old_name, safe='/') + uri = '/a/feeds/orgunit/2.0/%s/%s' % (customer_id, old_name) + properties = {} + if new_name != None: + properties['name'] = new_name + if description != None: + properties['description'] = description + if parent_org_unit_path != None: + properties['parentOrgUnitPath'] = urllib.quote_plus(parent_org_unit_path, safe='/') + if block_inheritance != None: + properties['blockInheritance'] = gdata.apps.service._bool2str(block_inheritance) + if users_to_move != None: + properties['usersToMove'] = '' + for user in users_to_move: + if user.find('@') < 0: + user = user+'@'+self.domain + properties['usersToMove'] += user+', ' + return self._PutProperties(uri, properties) + + def UpdateUserOrganization(self, user, new_name, old_name=None, customer_id=None): + + if customer_id == None: + customer_id = self.RetrieveCustomerId()['customerId'] + uri = '/a/feeds/orguser/2.0/%s/%s' % (customer_id, urllib.quote_plus(user)) + properties = {} + properties['orgUnitPath'] = new_name + if old_name != None: + properties['oldOrgUnitPath'] = old_name + return self._PutProperties(uri, properties) + + def RetrieveOrganizationUnit(self, name): + + customer_id = self.RetrieveCustomerId()['customerId'] + name = urllib.quote_plus(name, safe='/') + uri = '/a/feeds/orgunit/2.0/%s/%s' % (customer_id, name) + org = self._GetProperties(uri) + try: + org['orgUnitPath'] = urllib.unquote_plus(org['orgUnitPath']) + org['parentOrgUnitPath'] = urllib.unquote_plus(org['parentOrgUnitPath']) + except AttributeError: + pass + return org + + def RetrieveAllOrganizationUnits(self): + + customer_id = self.RetrieveCustomerId()['customerId'] + uri = '/a/feeds/orgunit/2.0/%s?get=all' % customer_id + all_orgs = self._GetPropertiesList(uri) + for org in all_orgs: + 
try: + org['orgUnitPath'] = urllib.unquote_plus(org['orgUnitPath']) + org['parentOrgUnitPath'] = urllib.unquote_plus(org['parentOrgUnitPath']) + except AttributeError: + pass + return all_orgs + + def RetrieveSubOrganizationUnits(self, name): + + customer_id = self.RetrieveCustomerId()['customerId'] + uri = '/a/feeds/orgunit/2.0/%s?get=children&orgUnitPath=%s' % (customer_id, urllib.quote_plus(name, safe='/')) + sub_orgs = self._GetPropertiesList(uri) + for org in sub_orgs: + try: + org['orgUnitPath'] = urllib.unquote_plus(org['orgUnitPath']) + org['parentOrgUnitPath'] = urllib.unquote_plus(org['parentOrgUnitPath']) + except AttributeError: + pass + return sub_orgs + + def DeleteOrganizationUnit(self, name): + + customer_id = self.RetrieveCustomerId()['customerId'] + name = urllib.quote_plus(name, safe='/') + uri = '/a/feeds/orgunit/2.0/%s/%s' % (customer_id, name) + return self._DeleteProperties(uri) + + def RetrieveUserOrganization(self, user): + + customer_id = self.RetrieveCustomerId()['customerId'] + if user.find('@') < 0: + user = user+'@'+self.domain + uri = '/a/feeds/orguser/2.0/%s/%s' % (customer_id, urllib.quote_plus(user)) + org = self._GetProperties(uri) + try: + org['orgUnitPath'] = urllib.unquote_plus(org['orgUnitPath']) + except AttributeError: + pass + return org + + def RetrieveAllOrganizationUsers(self): + + customer_id = self.RetrieveCustomerId()['customerId'] + uri = '/a/feeds/orguser/2.0/%s?get=all' % customer_id + all_users = self._GetPropertiesList(uri) + for user in all_users: + try: + user['orgUnitPath'] = urllib.unquote_plus(user['orgUnitPath']) + except AttributeError: + pass + return all_users + + def RetrieveAllOrganizationUnitUsers(self, name): + + customer_id = self.RetrieveCustomerId()['customerId'] + uri = '/a/feeds/orguser/2.0/%s?get=children&orgUnitPath=%s' % (customer_id, urllib.quote_plus(name)) + all_users = self._GetPropertiesList(uri) + for user in all_users: + try: + user['orgUnitPath'] = urllib.unquote_plus(user['orgUnitPath']) + except AttributeError: + pass + return all_users diff --git a/gam/gdata/apps/orgs/service.pyc b/gam/gdata/apps/orgs/service.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f4ec6b3ad249d556450253fc4ce14db303b01623 GIT binary patch literal 6395 zcmcgwU2hyo6|J5x+haS6uRZVr(J?EZ#yUYKX zuix(dbE~7WzcRibp%{(IS85lvqjnwTmy};tVM*Tp^e&Zxt>+Q&LI^+u`JN`1OJ%N4hqQ@q_es$V4s?I`$d+Xive*zR?^abj2M^+uyO zu&t99K}Y{|s575!=CICTsP&`nUT;PFz2FP{Z5keHb>*JDGIE=2wGO)FuZH>q6!Ujf zFaZya8t1)4JG_A@J-OR>S$Ue*VHM@=%}-jii7~CjN)^gp=IMS8|_2c7q5Lx2kW4>$>?z3)-htVwLx^? 
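A usage sketch for the OrganizationService defined above (editor's addition, not part of the patch); the credentials, organization names and user names are placeholders.

import gdata.apps.orgs.service

orgs = gdata.apps.orgs.service.OrganizationService(
    email='admin@example.com', password='secret', domain='example.com')
orgs.ProgrammaticLogin()

# Every call first resolves the customer id, then works against the
# orgunit/orguser feeds shown above.
print orgs.RetrieveCustomerId()['customerId']

orgs.CreateOrganizationUnit('Engineering', 'All engineering staff')

# users_to_move entries without an '@' get the service's domain appended.
orgs.UpdateOrganizationUnit('Engineering',
                            users_to_move=['alice', 'bob@example.com'])

for org_unit in orgs.RetrieveAllOrganizationUnits():
    print org_unit['orgUnitPath']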
z1-{GGn%ec@cB_r^x8mf$wD4n+W(Ct`v6`1%KTE=QaA~+n<;>yJanR!ZS;R1843Ix_#3V41 zqh32;ZEAuBMspK%dd9}bI%$Dvt^UPRzsWW2B~;#r+HNIrS0^^mMwW1CDNCF6l3-$o z{sz`}wM_#3La%4L#KzETcz;!@IhMO@b*iYRQP?f#e}M+cAU+F2ET4h*ph8tGp4C*K z)Y%kmativF)fyPWRz#dQDDX3w~VkZY9DI>+1I9%1YLTC44C%5cF8nL-geuG zjx2=b8GgUjw$JTl^zMam=jdtlTql8TM;+ayDO~PDh}=uUFxZnFo%CX>y>19&(quw7 zxeZAtRL?kO`D<@4j>A=B6HC|iwqjG{f!u@!cd1_PJSzJ(s&tPY4(aifUYFbC=4G2+ zFxaOYxNju75SNsFb_aAr*F-#O5$moex@|SgnF~1WdKV*xc2%XzPTi@Y)SYE~I@AJs zB0W!N_8#arOtUGKQK%n#Kp}V`xCMTIar-4Oa9S8R)0eKYNA>ImH|hs7a#dtn765(IdHL1cG$lcO2+B7pSYX zmzx}T@|$&aR#&HWMnXtCI1gPvQYx8o(21TUo_$6wVHVetA9qS>nWOV+>HYHB$-mVO zhcFK-O>>So$t75D)1@t4HihBln`m9&BoOKW&0NyaB|;WO35|MG+K=O3#xJyJ!Xx-N zV5Hu?+i1#-NNJZubVXRG94>XzC2QTQtVjy+hy5T*6+KnJtzMD&vRZ{xN!U2_J#k-N zMEWzi5O;8|p~qe3M0$s8(X+94j7_>ctPtM4&Iw$O6Poy*7~h|D5n;yo&X8gc)ZyzN|fL~Q8^+_HGV2^!Eu5~-w;n}RmtjYOd3vx)>`u)D%w zd1#yHQkfU5P z$vB`^kkKX=bA=HjEcq4&B-fLW=icJHcUXO!)i8)(19nU(ih8w)U1VA{=UjnCQLTs` z+)yc=ibYWR6B@%>Rq9K~xK;s=OsjwwERVWO0RWn>R^?~F0+3hLGDD%XBzUIXwY(d$ zwG+LiDu_2nb(Q>4IgS%K>J$7V&7uES75FZWAp+2(3QLqLI5l}`8IO8VA=VM_#}dC|_vJz!W7 z41Y%qas3I(4%XZjV`i>+6MMQq!yg`K`=A|$?f^m_Crwl+$-{}}M}e^^vftyZ@1rW9 zHUNyWi&dF$824SyLYat!-oU#3eDrY`j-qGwF`mO$cNPF!AHM0-Aqia$-{-g(hEJYR zf!H90fEqH{bVM!3T#=YoQcSA_zeMcM{8Czdzm$wt+*Mk@bR+l6zX+xqz;pwc4*RD0 zAvXF|n0B5Ap^wD$HX!JW>;nXBx}8M!0^&UVsMD^nZve7PInOnTwCU}A%}8(e_QsGl z+)3Y2Z=Q!V_s=5I`vDrGNDnZU*Ajy?@gnIhy4k?A8C_xeT7(BGO4a}C;2_u26;3FcO0@f`~&cT|j02X7{XeROp>?3yxW{S&ji}HoB z>lo9A@%yUS^&pL1_a$}#l=EX(F-ZAiL5kM8C`b+97z$Epyb`S^hG5@(i9hixE7=g3+*>6f(k>fQMlS z@E{BU9{8c8>iyIL@dl~I&dI|(-WY-BA>z%$EZ!908H+a~@LUvcm{**e7528sl; zno}#yRcERTv)2|EW@qN*GlhDtDS_!{n3x^9-NXCS1P2wX@GY6V(OTi#EP1rIr2n9A z1?J@&Re2mwyR#m*jERF60rJ8ti;b_g(;vw$Cu#@5buC5m=U1>Ha|uphMM~6DTyVC`iTVv#rj351^GoK knR)5@@$s2?nI-Y@dO*b-Kr?M}^HWN5Qtd!C7XvW^0E=iQi2wiq literal 0 HcmV?d00001 diff --git a/gam/gdata/apps/reporting/service.py b/gam/gdata/apps/reporting/service.py new file mode 100755 index 00000000000..fb9bdd1db95 --- /dev/null +++ b/gam/gdata/apps/reporting/service.py @@ -0,0 +1,96 @@ +#!/usr/bin/python2.4 +# +# Copyright 2010 Google Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""ReportService simplifies Reporting API calls. + +ReportService extends gdata.apps.service.PropertyService to ease interaction with +the Google Apps Reporting API. +""" + +__author__ = 'Jay Lee ' + +import gdata.apps +import gdata.apps.service +import gdata.service +import time + +class ReportService(gdata.apps.service.PropertyService): + """Service extension for the Google Reporting API service.""" + + def __init__(self, email=None, password=None, domain=None, source=None, + server='www.google.com', additional_headers=None, + **kwargs): + """Creates a client for the Reporting service. + + Args: + email: string (optional) The user's email address, used for + authentication. + password: string (optional) The user's password. + domain: string (optional) The Google Apps domain name. + source: string (optional) The name of the user's application. 
+ server: string (optional) The name of the server to which a connection + will be opened. Default value: 'apps-apis.google.com'. + **kwargs: The other parameters to pass to gdata.service.GDataService + constructor. + """ + gdata.service.GDataService.__init__( + self, email=email, password=password, service='apps', source=source, + server=server, additional_headers=additional_headers, **kwargs) + self.ssl = True + self.port = 443 + self.domain = domain + + def retrieve_report(self, report, date=None): + """Retrieves a report + + Args: + report: string, accounts, activity, disk_space, email_clients or summary + date: string: YYYY-MM-DD. If not specified, most recent day that has past 12pm PST will be used (in other words, today if it's after 12pm PST or yesterday if not) + + Returns: + String, the report data + """ + uri = '/hosted/services/v1.0/reports/ReportingData' + if date == None: + now = time.time() + report_time = time.gmtime(now) + if report_time.tm_hour < 20: + report_time = time.gmtime(now - 60*60*24) + date = '%s-%s-0%s' % (report_time.tm_year, report_time.tm_mon, report_time.tm_mday) + page = 1 + report_data = '' + while True: + xml = ''' + + Report + %s + %s + %s + daily + %s +''' % (self.domain, date, page, report) + try: + report_page = self.Post(xml, uri, converter=str) + except gdata.service.RequestError, e: + raise gdata.apps.service.AppsForYourDomainException(e.args[0]) + if report_page == 'End-Of-Report': + return report_data + else: + report_data += report_page + page = page + 1 + + RetrieveReport = retrieve_report diff --git a/gam/gdata/apps/reporting/service.pyc b/gam/gdata/apps/reporting/service.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7d5e700353f0ebb4d1e5ead047cbb5717ea24af5 GIT binary patch literal 3581 zcmcIn-EJGl6+TN!l(m+Bnl{c)gJdeeF`bB{S%@H;c{gkX&lH zyPg?}B0vE_?b|*{)hh>kIPbnH=5qoht*jgITpdSz+7y0mW4af6Z-l64xb(dY_|)~OE(tMroS z1<|u(pJnx$4Zi4am;zZA%JifHu?kJjVXHtGS7)NQ=j*IbL`fq#=%(d4|xp3}+X_>P4HmKaUovD~C_#K+r@}E1joL-Joi22>`812teUGo-%Loc9Ntncfz5J5~!B`!y`IqvQT@6RWGRX;|8W> z{p`(QnC7VoLwnninn=UET4$EP$Y$?!L3d(VFDl&~X_Ly^G z6OUvkeSlTxJ6NbYC#m{z*4Zznc~(S;?hK-F>t6f*{Z=_M!y<2K6^rK1-Hx@O!;a`E z3$>`3PUY`%7H~=a21B!bbaTV|(7Wla;~TRjS20+zuQ2pS&=?)z93>4T*r1adspn(> z6&_>z`HEfF7#m)I4Ir_~5CT*?AyUERZgQR8bsRY)IzAu=xZS!z{W{$`uMuxI=o~F& zjn3ET?5~6ntM@$zFK91Ngwe;N{t6CUq1gwFJ7_v{?BVKlGCsYidFSi&eSV)-%vBm~ z(C8YS_@w?x-nma6)2>_c4VwEjx=D#o??Jymdz5?h!+f2RtJc`M2b}9+fb==^!PQXj z9q9M`C;A>9=`|Th^%FQMkboz7YBA0^{=b3D(W|1}5>Xr%liUEWxIvw!X0|1gRG);p zjAFUvKrM8LsRb}!PsZa&%@(DCCU}7&yW$x>t*1|0`}=M2ZC~Vt5xSHyH`ioKj0bFUnW!`=}(q(cFVpUpHEu9g`gp`|JKXxbO40pvz~6!+y( zs5g@uqy-NYxVuo){=h*Km0y5*&!RbG0>hnOpqGg(tNs*T?F^x^Osd?~oztE6_9CZs zC#Z6p5hVQ;_|q;=?{YlWG2PZJ{B7UXDc2oZ9Oye(b|0RNGXd=6M*3hAGB<_H<03(w zJlOp1=cu~rIri*mgFZ0^~^T{A1?o-?W2u@8>!aAfbc?sgW- z1s#!hR9^V&LUhY$;7E4i*qmRFSa^aKI_oanXDQ1p2S92o&z-Q3K67icXV{xSWVj5$ zO|2vO?dNfxwEoy{Ijs-Un*g=+r$2er`~@~G93B+8v5iryZ#&&4|ZyXK*(iXza zIGo8ySr)9vMP8A|NTz^6$zrAzc8IiasB2WfzMP%_E*kr~1;DdI2;eu=g1F2P-_4AS8O6J3s6+G|&D zqPgKt`Vd32p0KJ08*gniU3j-ZTNuFaf`wYxbNSDNO(*-Na7nBF!LV=qj=bom0i%vh oGnawv@z?$f2|#kK{L=V0V7GlW>AI-wy^Skvb8pnxe*3Nc2S4|&IsgCw literal 0 HcmV?d00001 diff --git a/gam/gdata/apps/reporting/service.py~ b/gam/gdata/apps/reporting/service.py~ new file mode 100755 index 00000000000..2262bc2ead0 --- /dev/null +++ b/gam/gdata/apps/reporting/service.py~ @@ -0,0 +1,96 @@ +#!/usr/bin/python2.4 +# +# Copyright 2010 Google Inc. All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""ReportService simplifies Reporting API calls. + +ReportService extends gdata.apps.service.PropertyService to ease interaction with +the Google Apps Reporting API. +""" + +__author__ = 'Jay Lee ' + +import gdata.apps +import gdata.apps.service +import gdata.service +import time + +class ReportService(gdata.apps.service.PropertyService): + """Service extension for the Google Reporting API service.""" + + def __init__(self, email=None, password=None, domain=None, source=None, + server='www.google.com', additional_headers=None, + **kwargs): + """Creates a client for the Reporting service. + + Args: + email: string (optional) The user's email address, used for + authentication. + password: string (optional) The user's password. + domain: string (optional) The Google Apps domain name. + source: string (optional) The name of the user's application. + server: string (optional) The name of the server to which a connection + will be opened. Default value: 'apps-apis.google.com'. + **kwargs: The other parameters to pass to gdata.service.GDataService + constructor. + """ + gdata.service.GDataService.__init__( + self, email=email, password=password, service='apps', source=source, + server=server, additional_headers=additional_headers, **kwargs) + self.ssl = True + self.port = 443 + self.domain = domain + + def retrieve_report(self, report, date=None): + """Retrieves a report + + Args: + report: string, accounts, activity, disk_space, email_clients or summary + date: string: YYYY-MM-DD. 
If not specified, most recent day that has past 12pm PST will be used (in other words, today if it's after 12pm PST or yesterday if not) + + Returns: + String, the report data + """ + uri = '/hosted/services/v1.0/reports/ReportingData' + if date == None: + now = time.time() + report_time = time.gmtime(now) + if report_time.tm_hour < 20: + report_time = time.gmtime(now - 60*60*24) + date = '%s-%s-0%s' % (report_time.tm_year, report_time.tm_mon, report_time.tm_mday) + page = 1 + report_data = '' + while True: + xml = ''' + + Report + %s + %s + %s + daily + %s +''' % (self.domain, date, page, report) + try: + report_page = self._PostProperties(xml, uri, converter=str) + except gdata.service.RequestError, e: + raise gdata.apps.service.AppsForYourDomainException(e.args[0]) + if report_page == 'End-Of-Report': + return report_data + else: + report_data += report_page + page = page + 1 + + RetrieveReport = retrieve_report diff --git a/gam/gdata/apps/res_cal/__init__.py b/gam/gdata/apps/res_cal/__init__.py new file mode 100755 index 00000000000..8b137891791 --- /dev/null +++ b/gam/gdata/apps/res_cal/__init__.py @@ -0,0 +1 @@ + diff --git a/gam/gdata/apps/res_cal/__init__.pyc b/gam/gdata/apps/res_cal/__init__.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cb36af20d7e8004b52396823bba80558472da55e GIT binary patch literal 164 zcmZSn%*&-#x;-G70SXv_v;zphMM~6DTyVC`iTVv#rj35#qr6B iIr{POnR%Hd@$q^3a}3?)CY@=0i!_Swn=3m2oMMcRR@h;2!>qSv?)^9 zUBxgUqYpOFPwI#D1KKk~QkIpxwXWqI&F<`N&dixrMB4N_9nTAt8ARDg>Gx@@e(+x9R_Q3V-g#_8nKuv$5W!7I;Iqb9T?4MB=;*q14a8dPI&c`bDQSuV)8Zcx zHE6s}Nn1=C6pVW7y#Fr5AWKzly`w^V_C(!+>jP&zQ+nNxU97CzfVc7>=5o)ob<$60 z4xb&mbSRhj!t{95;}t{m6{H{eM@l8eud(%Yg`B_B|IYu~$Xv@v42xV@4r&>_8-r2V zQNUaF&`M!mL-P-DP2-=X`rU=UU!3Pz5#glcXtKT2-`(AwUf7c&-!?k-de6S~kCVto zeuVtsJA}B9U%QE!pJ2nV>L2RtDY}L)Lrxc;xCLzujk8*CIDx{xOHN+4C?&dVGct38 z#-Y8rE1C-pX-;(3ApJ8zXM2P80M{JBEe*f1&W2`bz`j`C;-M=Ssm`YlZ3nVO?^R+S zEOiIPWK!hAXrk=HrQv;LVx3NHTI80qvtIq{#Z>j!m4JZ{90+?4B0MOJJb+WpVH-BX0V4O26Z75Ki-qG~qI1h}aJq8ZS#mn~ z4D*(JW?XOk{2CRvoFLBMwaZU&f!bxf1gl<~9oQ?k?|+2Hzr%dXZHe~^pP9v|$|YHU zq>9@Wix>2MQ`5U*@q*q(P4Cvn>CJU!D8L5H^nm+;aF46^Dp4iN4ra?)w%}=*Wz5Qe z`5wsI!wUkY_g!Wl)9-MK68k~keGiLy1i^X7^sC9o-~Wn$tGURj8ScxYzEs5iX9_Y9 zF=dGqLg^n&Iiyl}qyQ_W9wpoZixv z*Xzd6uU=mHu~ZJ%BV4g$EyGa{ayC~Xz8ICiRpfqm4T8jLuZ+-7oNk7r*meO1(z@&Z E1A*{>^8f$< literal 0 HcmV?d00001 diff --git a/gam/gdata/apps/service.py b/gam/gdata/apps/service.py new file mode 100755 index 00000000000..86863c637cc --- /dev/null +++ b/gam/gdata/apps/service.py @@ -0,0 +1,553 @@ +#!/usr/bin/python +# +# Copyright (C) 2007 SIOS Technology, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
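A short sketch of the ReportService defined in gam/gdata/apps/reporting/service.py above (editor's addition, not part of the patch); the credentials are placeholders. Note that the default-date branch of retrieve_report builds the date with a hard-coded leading zero on the day and no zero padding on the month ('%s-%s-0%s'), so passing an explicit YYYY-MM-DD string is the safer path.

import gdata.apps.reporting.service

reports = gdata.apps.reporting.service.ReportService(
    email='admin@example.com', password='secret', domain='example.com')
reports.ProgrammaticLogin()

# retrieve_report keeps requesting pages until the server answers
# 'End-Of-Report', then returns the concatenated pages as one string.
accounts_csv = reports.RetrieveReport('accounts', date='2013-03-01')
open('accounts_report.csv', 'wb').write(accounts_csv)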
+ +__author__ = 'tmatsuo@sios.com (Takashi MATSUO)' + +try: + from xml.etree import cElementTree as ElementTree +except ImportError: + try: + import cElementTree as ElementTree + except ImportError: + try: + from xml.etree import ElementTree + except ImportError: + from elementtree import ElementTree +import urllib +import gdata +import atom.service +import gdata.service +import gdata.apps +import atom + +API_VER="2.0" +HTTP_OK=200 + +UNKOWN_ERROR=1000 +USER_DELETED_RECENTLY=1100 +USER_SUSPENDED=1101 +DOMAIN_USER_LIMIT_EXCEEDED=1200 +DOMAIN_ALIAS_LIMIT_EXCEEDED=1201 +DOMAIN_SUSPENDED=1202 +DOMAIN_FEATURE_UNAVAILABLE=1203 +ENTITY_EXISTS=1300 +ENTITY_DOES_NOT_EXIST=1301 +ENTITY_NAME_IS_RESERVED=1302 +ENTITY_NAME_NOT_VALID=1303 +INVALID_GIVEN_NAME=1400 +INVALID_FAMILY_NAME=1401 +INVALID_PASSWORD=1402 +INVALID_USERNAME=1403 +INVALID_HASH_FUNCTION_NAME=1404 +INVALID_HASH_DIGGEST_LENGTH=1405 +INVALID_EMAIL_ADDRESS=1406 +INVALID_QUERY_PARAMETER_VALUE=1407 +TOO_MANY_RECIPIENTS_ON_EMAIL_LIST=1500 + +DEFAULT_QUOTA_LIMIT='2048' + + +class Error(Exception): + pass + + +class AppsForYourDomainException(Error): + + def __init__(self, response): + + Error.__init__(self, response) + try: + self.element_tree = ElementTree.fromstring(response['body']) + self.error_code = int(self.element_tree[0].attrib['errorCode']) + self.reason = self.element_tree[0].attrib['reason'] + self.invalidInput = self.element_tree[0].attrib['invalidInput'] + except: + self.error_code = 600 + +class AppsService(gdata.service.GDataService): + """Client for the Google Apps Provisioning service.""" + + def __init__(self, email=None, password=None, domain=None, source=None, + server='apps-apis.google.com', additional_headers=None, + **kwargs): + """Creates a client for the Google Apps Provisioning service. + + Args: + email: string (optional) The user's email address, used for + authentication. + password: string (optional) The user's password. + domain: string (optional) The Google Apps domain name. + source: string (optional) The name of the user's application. + server: string (optional) The name of the server to which a connection + will be opened. Default value: 'apps-apis.google.com'. + **kwargs: The other parameters to pass to gdata.service.GDataService + constructor. 
+ """ + gdata.service.GDataService.__init__( + self, email=email, password=password, service='apps', source=source, + server=server, additional_headers=additional_headers, **kwargs) + self.ssl = True + self.port = 443 + self.domain = domain + + def _baseURL(self): + return "/a/feeds/%s" % self.domain + + def AddAllElementsFromAllPages(self, link_finder, func): + """retrieve all pages and add all elements""" + next = link_finder.GetNextLink() + while next is not None: + next_feed = self.Get(next.href, converter=func) + for a_entry in next_feed.entry: + link_finder.entry.append(a_entry) + next = next_feed.GetNextLink() + return link_finder + + def RetrievePageOfEmailLists(self, start_email_list_name=None, + num_retries=gdata.service.DEFAULT_NUM_RETRIES, + delay=gdata.service.DEFAULT_DELAY, + backoff=gdata.service.DEFAULT_BACKOFF): + """Retrieve one page of email list""" + uri = "%s/emailList/%s" % (self._baseURL(), API_VER) + if start_email_list_name is not None: + uri += "?startEmailListName=%s" % start_email_list_name + try: + return gdata.apps.EmailListFeedFromString(str(self.GetWithRetries( + uri, num_retries=num_retries, delay=delay, backoff=backoff))) + except gdata.service.RequestError, e: + raise AppsForYourDomainException(e.args[0]) + + def GetGeneratorForAllEmailLists( + self, num_retries=gdata.service.DEFAULT_NUM_RETRIES, + delay=gdata.service.DEFAULT_DELAY, backoff=gdata.service.DEFAULT_BACKOFF): + """Retrieve a generator for all emaillists in this domain.""" + first_page = self.RetrievePageOfEmailLists(num_retries=num_retries, + delay=delay, + backoff=backoff) + return self.GetGeneratorFromLinkFinder( + first_page, gdata.apps.EmailListRecipientFeedFromString, + num_retries=num_retries, delay=delay, backoff=backoff) + + def RetrieveAllEmailLists(self): + """Retrieve all email list of a domain.""" + + ret = self.RetrievePageOfEmailLists() + # pagination + return self.AddAllElementsFromAllPages( + ret, gdata.apps.EmailListFeedFromString) + + def RetrieveEmailList(self, list_name): + """Retreive a single email list by the list's name.""" + + uri = "%s/emailList/%s/%s" % ( + self._baseURL(), API_VER, list_name) + try: + return self.Get(uri, converter=gdata.apps.EmailListEntryFromString) + except gdata.service.RequestError, e: + raise AppsForYourDomainException(e.args[0]) + + def RetrieveEmailLists(self, recipient): + """Retrieve All Email List Subscriptions for an Email Address.""" + + uri = "%s/emailList/%s?recipient=%s" % ( + self._baseURL(), API_VER, recipient) + try: + ret = gdata.apps.EmailListFeedFromString(str(self.Get(uri))) + except gdata.service.RequestError, e: + raise AppsForYourDomainException(e.args[0]) + + # pagination + return self.AddAllElementsFromAllPages( + ret, gdata.apps.EmailListFeedFromString) + + def RemoveRecipientFromEmailList(self, recipient, list_name): + """Remove recipient from email list.""" + + uri = "%s/emailList/%s/%s/recipient/%s" % ( + self._baseURL(), API_VER, list_name, recipient) + try: + self.Delete(uri) + except gdata.service.RequestError, e: + raise AppsForYourDomainException(e.args[0]) + + def RetrievePageOfRecipients(self, list_name, start_recipient=None, + num_retries=gdata.service.DEFAULT_NUM_RETRIES, + delay=gdata.service.DEFAULT_DELAY, + backoff=gdata.service.DEFAULT_BACKOFF): + """Retrieve one page of recipient of an email list. 
""" + + uri = "%s/emailList/%s/%s/recipient" % ( + self._baseURL(), API_VER, list_name) + + if start_recipient is not None: + uri += "?startRecipient=%s" % start_recipient + try: + return gdata.apps.EmailListRecipientFeedFromString(str( + self.GetWithRetries( + uri, num_retries=num_retries, delay=delay, backoff=backoff))) + except gdata.service.RequestError, e: + raise AppsForYourDomainException(e.args[0]) + + def GetGeneratorForAllRecipients( + self, list_name, num_retries=gdata.service.DEFAULT_NUM_RETRIES, + delay=gdata.service.DEFAULT_DELAY, backoff=gdata.service.DEFAULT_BACKOFF): + """Retrieve a generator for all recipients of a particular emaillist.""" + first_page = self.RetrievePageOfRecipients(list_name, + num_retries=num_retries, + delay=delay, + backoff=backoff) + return self.GetGeneratorFromLinkFinder( + first_page, gdata.apps.EmailListRecipientFeedFromString, + num_retries=num_retries, delay=delay, backoff=backoff) + + def RetrieveAllRecipients(self, list_name): + """Retrieve all recipient of an email list.""" + + ret = self.RetrievePageOfRecipients(list_name) + # pagination + return self.AddAllElementsFromAllPages( + ret, gdata.apps.EmailListRecipientFeedFromString) + + def AddRecipientToEmailList(self, recipient, list_name): + """Add a recipient to a email list.""" + + uri = "%s/emailList/%s/%s/recipient" % ( + self._baseURL(), API_VER, list_name) + recipient_entry = gdata.apps.EmailListRecipientEntry() + recipient_entry.who = gdata.apps.Who(email=recipient) + + try: + return gdata.apps.EmailListRecipientEntryFromString( + str(self.Post(recipient_entry, uri))) + except gdata.service.RequestError, e: + raise AppsForYourDomainException(e.args[0]) + + def DeleteEmailList(self, list_name): + """Delete a email list""" + + uri = "%s/emailList/%s/%s" % (self._baseURL(), API_VER, list_name) + try: + self.Delete(uri) + except gdata.service.RequestError, e: + raise AppsForYourDomainException(e.args[0]) + + def CreateEmailList(self, list_name): + """Create a email list. 
""" + + uri = "%s/emailList/%s" % (self._baseURL(), API_VER) + email_list_entry = gdata.apps.EmailListEntry() + email_list_entry.email_list = gdata.apps.EmailList(name=list_name) + try: + return gdata.apps.EmailListEntryFromString( + str(self.Post(email_list_entry, uri))) + except gdata.service.RequestError, e: + raise AppsForYourDomainException(e.args[0]) + + def DeleteNickname(self, nickname): + """Delete a nickname""" + + uri = "%s/nickname/%s/%s" % (self._baseURL(), API_VER, nickname) + try: + self.Delete(uri) + except gdata.service.RequestError, e: + raise AppsForYourDomainException(e.args[0]) + + def RetrievePageOfNicknames(self, start_nickname=None, + num_retries=gdata.service.DEFAULT_NUM_RETRIES, + delay=gdata.service.DEFAULT_DELAY, + backoff=gdata.service.DEFAULT_BACKOFF): + """Retrieve one page of nicknames in the domain""" + + uri = "%s/nickname/%s" % (self._baseURL(), API_VER) + if start_nickname is not None: + uri += "?startNickname=%s" % start_nickname + try: + return gdata.apps.NicknameFeedFromString(str(self.GetWithRetries( + uri, num_retries=num_retries, delay=delay, backoff=backoff))) + except gdata.service.RequestError, e: + raise AppsForYourDomainException(e.args[0]) + + def GetGeneratorForAllNicknames( + self, num_retries=gdata.service.DEFAULT_NUM_RETRIES, + delay=gdata.service.DEFAULT_DELAY, backoff=gdata.service.DEFAULT_BACKOFF): + """Retrieve a generator for all nicknames in this domain.""" + first_page = self.RetrievePageOfNicknames(num_retries=num_retries, + delay=delay, + backoff=backoff) + return self.GetGeneratorFromLinkFinder( + first_page, gdata.apps.NicknameFeedFromString, num_retries=num_retries, + delay=delay, backoff=backoff) + + def RetrieveAllNicknames(self): + """Retrieve all nicknames in the domain""" + + ret = self.RetrievePageOfNicknames() + # pagination + return self.AddAllElementsFromAllPages( + ret, gdata.apps.NicknameFeedFromString) + + def GetGeneratorForAllNicknamesOfAUser( + self, user_name, num_retries=gdata.service.DEFAULT_NUM_RETRIES, + delay=gdata.service.DEFAULT_DELAY, backoff=gdata.service.DEFAULT_BACKOFF): + """Retrieve a generator for all nicknames of a particular user.""" + uri = "%s/nickname/%s?username=%s" % (self._baseURL(), API_VER, user_name) + try: + first_page = gdata.apps.NicknameFeedFromString(str(self.GetWithRetries( + uri, num_retries=num_retries, delay=delay, backoff=backoff))) + except gdata.service.RequestError, e: + raise AppsForYourDomainException(e.args[0]) + return self.GetGeneratorFromLinkFinder( + first_page, gdata.apps.NicknameFeedFromString, num_retries=num_retries, + delay=delay, backoff=backoff) + + def RetrieveNicknames(self, user_name): + """Retrieve nicknames of the user""" + + uri = "%s/nickname/%s?username=%s" % (self._baseURL(), API_VER, user_name) + try: + ret = gdata.apps.NicknameFeedFromString(str(self.Get(uri))) + except gdata.service.RequestError, e: + raise AppsForYourDomainException(e.args[0]) + + # pagination + return self.AddAllElementsFromAllPages( + ret, gdata.apps.NicknameFeedFromString) + + def RetrieveNickname(self, nickname): + """Retrieve a nickname. 
+ + Args: + nickname: string The nickname to retrieve + + Returns: + gdata.apps.NicknameEntry + """ + + uri = "%s/nickname/%s/%s" % (self._baseURL(), API_VER, nickname) + try: + return gdata.apps.NicknameEntryFromString(str(self.Get(uri))) + except gdata.service.RequestError, e: + raise AppsForYourDomainException(e.args[0]) + + def CreateNickname(self, user_name, nickname): + """Create a nickname""" + + uri = "%s/nickname/%s" % (self._baseURL(), API_VER) + nickname_entry = gdata.apps.NicknameEntry() + nickname_entry.login = gdata.apps.Login(user_name=user_name) + nickname_entry.nickname = gdata.apps.Nickname(name=nickname) + + try: + return gdata.apps.NicknameEntryFromString( + str(self.Post(nickname_entry, uri))) + except gdata.service.RequestError, e: + raise AppsForYourDomainException(e.args[0]) + + def DeleteUser(self, user_name): + """Delete a user account""" + + uri = "%s/user/%s/%s" % (self._baseURL(), API_VER, user_name) + try: + return self.Delete(uri) + except gdata.service.RequestError, e: + raise AppsForYourDomainException(e.args[0]) + + def UpdateUser(self, user_name, user_entry): + """Update a user account.""" + + uri = "%s/user/%s/%s" % (self._baseURL(), API_VER, user_name) + try: + return gdata.apps.UserEntryFromString(str(self.Put(user_entry, uri))) + except gdata.service.RequestError, e: + raise AppsForYourDomainException(e.args[0]) + + def CreateUser(self, user_name, family_name, given_name, password, + suspended='false', quota_limit=None, + password_hash_function=None, + change_password=None): + """Create a user account. """ + + uri = "%s/user/%s" % (self._baseURL(), API_VER) + user_entry = gdata.apps.UserEntry() + user_entry.login = gdata.apps.Login( + user_name=user_name, password=password, suspended=suspended, + hash_function_name=password_hash_function, + change_password=change_password) + user_entry.name = gdata.apps.Name(family_name=family_name, + given_name=given_name) + if quota_limit is not None: + user_entry.quota = gdata.apps.Quota(limit=str(quota_limit)) + + try: + return gdata.apps.UserEntryFromString(str(self.Post(user_entry, uri))) + except gdata.service.RequestError, e: + raise AppsForYourDomainException(e.args[0]) + + def SuspendUser(self, user_name): + user_entry = self.RetrieveUser(user_name) + if user_entry.login.suspended != 'true': + user_entry.login.suspended = 'true' + user_entry = self.UpdateUser(user_name, user_entry) + return user_entry + + def RestoreUser(self, user_name): + user_entry = self.RetrieveUser(user_name) + if user_entry.login.suspended != 'false': + user_entry.login.suspended = 'false' + user_entry = self.UpdateUser(user_name, user_entry) + return user_entry + + def RetrieveUser(self, user_name): + """Retrieve an user account. 
+ + Args: + user_name: string The user name to retrieve + + Returns: + gdata.apps.UserEntry + """ + + uri = "%s/user/%s/%s" % (self._baseURL(), API_VER, user_name) + try: + return gdata.apps.UserEntryFromString(str(self.Get(uri))) + except gdata.service.RequestError, e: + raise AppsForYourDomainException(e.args[0]) + + def RetrievePageOfUsers(self, start_username=None, + num_retries=gdata.service.DEFAULT_NUM_RETRIES, + delay=gdata.service.DEFAULT_DELAY, + backoff=gdata.service.DEFAULT_BACKOFF): + """Retrieve one page of users in this domain.""" + + uri = "%s/user/%s" % (self._baseURL(), API_VER) + if start_username is not None: + uri += "?startUsername=%s" % start_username + try: + return gdata.apps.UserFeedFromString(str(self.GetWithRetries( + uri, num_retries=num_retries, delay=delay, backoff=backoff))) + except gdata.service.RequestError, e: + raise AppsForYourDomainException(e.args[0]) + + def GetGeneratorForAllUsers(self, + num_retries=gdata.service.DEFAULT_NUM_RETRIES, + delay=gdata.service.DEFAULT_DELAY, + backoff=gdata.service.DEFAULT_BACKOFF): + """Retrieve a generator for all users in this domain.""" + first_page = self.RetrievePageOfUsers(num_retries=num_retries, delay=delay, + backoff=backoff) + return self.GetGeneratorFromLinkFinder( + first_page, gdata.apps.UserFeedFromString, num_retries=num_retries, + delay=delay, backoff=backoff) + + def RetrieveAllUsers(self): + """Retrieve all users in this domain. OBSOLETE""" + + ret = self.RetrievePageOfUsers() + # pagination + return self.AddAllElementsFromAllPages( + ret, gdata.apps.UserFeedFromString) + + +class PropertyService(gdata.service.GDataService): + """Client for the Google Apps Property service.""" + + def __init__(self, email=None, password=None, domain=None, source=None, + server='apps-apis.google.com', additional_headers=None): + gdata.service.GDataService.__init__(self, email=email, password=password, + service='apps', source=source, + server=server, + additional_headers=additional_headers) + self.ssl = True + self.port = 443 + self.domain = domain + + def AddAllElementsFromAllPages(self, link_finder, func): + """retrieve all pages and add all elements""" + next = link_finder.GetNextLink() + count = 0 + while next is not None: + next_feed = self.Get(next.href, converter=func) + count = count + len(next_feed.entry) + for a_entry in next_feed.entry: + link_finder.entry.append(a_entry) + next = next_feed.GetNextLink() + return link_finder + + def _GetPropertyEntry(self, properties): + property_entry = gdata.apps.PropertyEntry() + property = [] + for name, value in properties.iteritems(): + if name is not None and value is not None: + property.append(gdata.apps.Property(name=name, value=value)) + property_entry.property = property + return property_entry + + def _PropertyEntry2Dict(self, property_entry): + properties = {} + for i, property in enumerate(property_entry.property): + properties[property.name] = property.value + return properties + + def _GetPropertyFeed(self, uri): + try: + return gdata.apps.PropertyFeedFromString(str(self.Get(uri))) + except gdata.service.RequestError, e: + raise gdata.apps.service.AppsForYourDomainException(e.args[0]) + + def _GetPropertiesList(self, uri): + property_feed = self._GetPropertyFeed(uri) + # pagination + property_feed = self.AddAllElementsFromAllPages( + property_feed, gdata.apps.PropertyFeedFromString) + properties_list = [] + for property_entry in property_feed.entry: + properties_list.append(self._PropertyEntry2Dict(property_entry)) + return properties_list + + def 
_GetProperties(self, uri): + try: + return self._PropertyEntry2Dict(gdata.apps.PropertyEntryFromString( + str(self.Get(uri)))) + except gdata.service.RequestError, e: + raise gdata.apps.service.AppsForYourDomainException(e.args[0]) + + def _PostProperties(self, uri, properties): + property_entry = self._GetPropertyEntry(properties) + try: + return self._PropertyEntry2Dict(gdata.apps.PropertyEntryFromString( + str(self.Post(property_entry, uri)))) + except gdata.service.RequestError, e: + raise gdata.apps.service.AppsForYourDomainException(e.args[0]) + + def _PutProperties(self, uri, properties): + property_entry = self._GetPropertyEntry(properties) + try: + return self._PropertyEntry2Dict(gdata.apps.PropertyEntryFromString( + str(self.Put(property_entry, uri)))) + except gdata.service.RequestError, e: + raise gdata.apps.service.AppsForYourDomainException(e.args[0]) + + def _DeleteProperties(self, uri): + try: + self.Delete(uri) + except gdata.service.RequestError, e: + raise gdata.apps.service.AppsForYourDomainException(e.args[0]) + + +def _bool2str(b): + if b is None: + return None + return str(b is True).lower() diff --git a/gam/gdata/apps/service.pyc b/gam/gdata/apps/service.pyc new file mode 100644 index 0000000000000000000000000000000000000000..00092b49e2b732985f18fa53d7d1703dcfacbe95 GIT binary patch literal 22578 zcmdU1U2t62T|ZYV$y!;C6epJBSV`7#Y|CjPwQkdN+O(-`tsG6{m3vpR<88xcy?Z6E zyxNubUd2{PT3V%3%J9`02y{xBQec1wm;w*X0PpYsGtBV91I+LUFFXJ z>y_+KuiHwxN9Ue~htFKSTKcSzP|#I>ywDDH*f#xT!g& zHeec#*&HyXjB&0uH_f0oXl{<0_BV{2Xkg1`C zM@$Xxj++{uVhVWoQB%WPhfQtFlpZs+L#BkGKVnKpOl{nhCQWU^l#ZI(qo(wPsU0?@ zW2W|)DLrXwkDJnQQ#)cxQ>Hd)N>7>EQB!)_)SfV)dhXKPN^y1hZ2brLnFaxv zU<^q$-|#lQX0X!syn-!0h~iYKpTYgPnGe_h8vpD6g1h?CAd*W=zR%>3nfwWpKV|Yb z=&XK($t07bOztq*Ve&;LKh5MFChs!&5|b}8`9~xH8ZqZS{IO32xH0*5yVZ_KnBzW= z%O64FVg8PBi_^%P=>a4Gi_7I^Wz#E{gHhdVwrblA+}c_wGRpb$$jz?$UfZ9&S#Mw8 znVoOlZZ=w#nm@Z<*?jTb%u6r5xV019Xfs4=N zYsV#gUd82e1jA17B>v5z&O)nwt+m~rZ*5lU&HSCJw-wY|&AX@aDC*m()QoUCz&UZI zRkp~BU0lHB|2&ewm<`9gW6V2_*&Q&>j%R`avytIFczYCCn@nIMbABib&s zYF;pmyS7*HTTKyMz4>;fQLimFx3+`&8<^ZQ(Ig;-*sC||LAmUTMEPv3-P-hncD=c- z20Tl!%62S*j+ zB6c=`#0*S08E4XY$~o#BM{b(qb~zT8H14u;GKdFEpG1I&NAx)murtzU(UNc#xB4L} z&*So6N5z;RO9Q5MGW7=7l4HX^d@-;{;~ zWTctx8%Q!8MNSbzJ^6 zBvvqPSXl@Q2XYY9I$+)b31>*5puTsILWqH2Ap(&(V1B`Y%pR0nL#ZTS84n;%CFx*2 z6eXhk0@&gLBw673xk|430p~J0n#0drd)@z}a=Dxbz0mk%&Wg6&bW7@?(m0!20c^Gb zgEPLZldIHf5LNyQEUgiIVGC@rvJFr+gL<_>?J^TS*{b;d?N+;XkLIIl(Q7s7s=Z!H z=!%iq#@tPV71_)$tSA$mD_8Sa?ZGP6BD(Q86EiX9Jnc+4S^QRM zG4P_Elft8>qZEJ1<_o>UDo$_UAX&yGm3HD*!*0{M|@x4`d3B$4G zqH(Gj<;dutr`4ul&A;GaR#_FzDHZ+p24cxJ1zPC3`C zQTnJPG`xJ-cc5i3D%hYf&B4p4xab81?@q8(Z{8IB6k1J7P0TBX{yIK!!*y;F*=ySbW+nyiD zt8_)Vq=={2$vr1&93xv}gr~3*L z`OQGv#i%i2ma=3)f*?f$In<89;Cj{i7R|~|VzBK>S#{xzJh@hH1EUmi{Xp6jdgz!) 
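A usage sketch for the provisioning calls in gam/gdata/apps/service.py above (editor's addition, not part of the patch). The domain, credentials and user names are placeholders, and the paging behaviour of the generator helpers is assumed to follow GetGeneratorFromLinkFinder in the stock gdata.service, which yields one feed page per iteration.

import gdata.apps.service

apps = gdata.apps.service.AppsService(
    email='admin@example.com', password='secret', domain='example.com')
apps.ProgrammaticLogin()

# Provisioning errors surface as AppsForYourDomainException, carrying the
# numeric error_code constants declared near the top of this module.
try:
    apps.CreateUser('jdoe', 'Doe', 'Jane', 'p4ssw0rd')
except gdata.apps.service.AppsForYourDomainException, e:
    if e.error_code != gdata.apps.service.ENTITY_EXISTS:
        raise

# Each item yielded by the generator is one page (a user feed) of results.
for feed in apps.GetGeneratorForAllUsers():
    for user in feed.entry:
        print user.login.user_name

apps.CreateNickname('jdoe', 'jane')
apps.SuspendUser('jdoe')
apps.RestoreUser('jdoe')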
zt4;Y4@rM63Jh$3wa2&>G7T37dUesKqVj+#JV3D>9JpgQ26gdfs;QiY`f&U!pCJ2cT zAeJQqS0d;q;C5&EO3Ya9$8akJX<2}y0MqsZJhB@?n(lzhFQZSNJZUmx1H+D@9}y`F zA`}!|QWPY*mVqE0Fgs5PLe|_LNaQB8V*&YQ zc7_3u3_>0dNTh3mlb0~Li_G4bJe80*GzRG&x{r2F_y^2cEeBbpI07-+07eIa(X7m% zQ3bv5Mu<^1a*()=<(rr%gK_u-P|Q4_$ed=ATLMaR#qH~UwOyBQ=3DXB44=k7mHQglY%6-$Fq*x@G9B4T5(wX6Mm}ccEoqtHw5r3|M0K^C5OY z2q?qi079AxcEP=&W;%hMTyL`lZjB+7TZ1M^@$8H&oi2=lL1*JPa1%1>2-~A17S?34 z&chH6Jj<>LzBO)sa zP?ASLtJ)fz@f^B3?k@%1ZDz(W@c*1+qr-28A<^OI(9j%%%_38iY)(p|-jQTqM25mv zJ}k)zeu~^x`YeMa06IO^lo?8-agUm@wRs**M&c*e%i4^A=38S2F&YHKwi}gpVu{zC_w+-hl9TVY91~pp4W3(nbC4K_ZAa=?3SC~p0x2@a z0^}s@qY&n>j3Ns^F&{aX1j5jOF+}fXB`empZ4;Q>L$N?M}bQ=lnG7s@Mo z#EBM5XuZ>&j)5qXQ_k(x8!i36jsH}wu} zW5Tl`oxYhgj3WTV@ibOi@znkqkE9KZ>>7l11~PEcVnepWC3zE-yQwL0Nn*1`#%m9= z<_%B;-4G~PxFLI+HIJbiYj<=|!5*GQh;=EM*miMWLmPWrEu2+~Z~JPnP~F+t#6bTy z6!|noOeVHaDAmZeG3&mMn=W80ik6mX3Ymr?F3q=@z4=zbGzde;N&NNT+i!-(C-@e| z=iYovp=4KxIlHS$0Zqio?!}{I-WX1nHM8j=yW-90f&x|U1w6Fohs#_q^MerW zLUu5LGB0kW&+JDEu#k#p_FpIp-HA!XQh(%3NBb;hx@Nt4lit10QuqqdJ&njcyoLMa zI4uoC?e*8tBQ4n}6~g&soVyv1&>cpgwYn^}y zKL0--ARyO8O>{;k@8KB`?qW@av8_`OP2i#W1k{ibIBn=TnvXw5nxVWf!w)<1{PEPN zAY|{3JV&q!k$sQA^IL4L%l`io!p2WWo3oWnBtrAG28b&nDAT74AkrGM<>)K3$5 zQQ#|K*3_n&d*EM|=KUc2G<)h{ICcf1-74$JRUq&84Y3N8g%gjt{}tkq_Kfuy;@~so zQHP7xLctzV0INnP6VB|p){wXZ@k+x+=(K_3;k0$HG$gU;ub|@?UzJ>hv6)fr2}*AT ziyp-rpST4qD&+p6b~t%k(;1y_O(~J6=u)4Ku23$9{!$7 z70FVR`vo@MS3i;_NCe<$w2LGQIg+DfZv`ncs7Lu4JCc8dn=X=&BYEEepwPjD!)~2N z8yFf%1%|SQ`z`FMllE!!a4^T{OfoR^2cRehh8|I12>V>IA3M<@&?rR(B!$b^!Hnpm zhaGDf^9p7eI+ubJ+o_F?YWI0KeH$3_mr88(Q(pZqb_-=AnN7q-T_fUB4HGT zbBOSeHTo(+i7q>Tj(n>VU8i9ZNeHY}8opO$(1{)h0dN7A|365&5GN@L(HAc-)nB;z1gt=9rcyWBNIy3J}N;vXIPKo}Gb=#b2W0$IZrs1oD4^ehxw)BY=C* zY~aM$Xoo;P2!Tvc_;iwEflQ>u^plekN_f}u9ywiRvcK(f<0CdOQWo9739YhrIbs{H ztY-zot2Zjmbr1WL>}GE5rdz9Q)*Cyv4zah6jVa+PlGpW3>;*LuQyle++VrW5x3@d<3Rr_&tFo0kTJx z)+QwyFEPaeGF1Co{{(Sy)-|A(ZKA5EUFhusnvMQ`olSMc$-joOhaV@W#dUX(PoVzGm?4LbXjk$4J3(Gq}+o$pGGiU#^pDVV2>7022Ex&*-@@%XPv`iS?72* zJCYsEJ`$HrpllRnk7Tpiv1~T1c}O+0NXN2=vJ-kbI?Ua7k7W3+>PVgcuNi&Gb21k%M3Qqk0>Kgj3wB= zIjRygh*Q`oImENTRQ&!6NcKMqtU280Ip70mO>rJJeFX{J3u|xln+VoLO&w&Z*}bSp z?Q)OK0q?@Lhx>!tW%#7p(baJXn4dbxqUl2F=%Z{9ileUck2-v5S4AWNRBwv)#9B`= zr%=AmTjg+@96d@k&j(#CCp7!u$dzOId&&rc&1ByDOMi67?n+_x%<(ntMd!1PXKHw(dIWltO zom*FMMiB*3f47c=XBIi44{~O)j`~IiFbWpf&+%UJPWI#(+$u)X@R}~c?4CuE!i!9# z5AgwsSAMV}bnc5Rn_*>KHhFN!p#=UXB8Nc&o-yP}p(mWjqH~5+DU|IaxP%}Oa6mkJ7Tr^*Rr#Yx&qF@9g%ss1kOq|Ek2KMJ#Rzk!D{wZn#oj{%$ zfMe_2de-3YD%L>Yt>T*?@>w$&Ktb^X!AQrfQ@lWSk-~`Nl#Mx)wdy!bifhxiXd(Qy zF(#_1o2^j1CHr#~E(?q6@WV*PQLQk7_NyqeA&qY$*9Sc5C`=hvNiR)570@8+^ zV?f+l@RdRaLl_8YSV*I%0s;lD?z~8dlM57>ntyXjh{_OG+S!@B+0>>AZ?sV1Z-^3&c^Lt!ckY0OJ;B(rG&U0K8X{)hhmW>j>3X| z(<>^J=VYD&;0}@?+!Aw5p{Qusy_}IfT~JcLHSzlCeQK*-n%p?X|4x7!u6l8P9dt%p zicl1Vg`p_{7-W=0Y!!kEg~l1hu7)5nrbL2}36RZr2Vmu1e*QVPUX~EBClT#O2i)%v z5Q+ULE%_TPBbiCIy5B}FMUw=WBty)+YdqdPj^eNc+-)~Fl6?C?t`xEVsY8P%{;f~ww)*K8;m^w~!f!1qC!iwJW z?AWYIQpaMUc~}ROs<EacqEUE)pnIbFU?mLRJCA3+Pv7+CGwk zwtUY`$rKkU`s9dG^!X{=_Cww>R$I1<#R;6AA)H3=#DFS!P_ZygU?vd@_vd(fiivno zS|(vcX`b)$>GzoYHk03B^1DcqSt$454E0|`<9|m*@dPrV(=S&(JB;5^IO`+Xkt!o}IfS41G!2*^Sn1udO;V z>8>XeePDCcvSs;ttJOG%uNa*snkdl>9L7Osny!&d@p748q-wSC{Y6?;cQzX{2#R_h zg`u$@^&{FAH~DoTeTG9Tf#Jtx^ie-HyWMUy>er>wwC07cOKe8pSJFBq3uG^Je0oVk zNcKb7L%SsV*H%_8mzQ5xTdRfFm#-Ge__)Zj8yvyI)neW)BXorr-aNuu7xIObrEBse z^rX02yqqu0=jZi_iuvVBbBl$to-8e1T3jjT-?)&^^D*81@bTQz;#{%Z^_=m!@OjTR z7xHr}t8Tu$T9~^sx41NSekrdhVmym0*U-;maiypchb8mN`C_@S%pu^BJC;k;!1fbU%0sPnnoNw z&jaR5<+=HJ%%>QT?4u{2Tg|)I%9rO{jCloU#5`8>I;*y_yj;FCSGY!uE?!>5eG}~I7+olp>vmSx@3Cy_mL#r_F%<|T;5mg2TVUh uY3Y}c3}fy2apw@k element 
in a feed.""" + _qname = gdata.apps.APPS_TEMPLATE % 'property' + name = 'name' + value = 'value' diff --git a/gam/gdata/auth.py b/gam/gdata/auth.py new file mode 100755 index 00000000000..139c6cd016d --- /dev/null +++ b/gam/gdata/auth.py @@ -0,0 +1,952 @@ +#!/usr/bin/python +# +# Copyright (C) 2007 - 2009 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +import cgi +import math +import random +import re +import time +import types +import urllib +import atom.http_interface +import atom.token_store +import atom.url +import gdata.oauth as oauth +import gdata.oauth.rsa as oauth_rsa +import gdata.tlslite.utils.keyfactory as keyfactory +import gdata.tlslite.utils.cryptomath as cryptomath + +import gdata.gauth + +__author__ = 'api.jscudder (Jeff Scudder)' + + +PROGRAMMATIC_AUTH_LABEL = 'GoogleLogin auth=' +AUTHSUB_AUTH_LABEL = 'AuthSub token=' + + +"""This module provides functions and objects used with Google authentication. + +Details on Google authorization mechanisms used with the Google Data APIs can +be found here: +http://code.google.com/apis/gdata/auth.html +http://code.google.com/apis/accounts/ + +The essential functions are the following. +Related to ClientLogin: + generate_client_login_request_body: Constructs the body of an HTTP request to + obtain a ClientLogin token for a specific + service. + extract_client_login_token: Creates a ClientLoginToken with the token from a + success response to a ClientLogin request. + get_captcha_challenge: If the server responded to the ClientLogin request + with a CAPTCHA challenge, this method extracts the + CAPTCHA URL and identifying CAPTCHA token. + +Related to AuthSub: + generate_auth_sub_url: Constructs a full URL for a AuthSub request. The + user's browser must be sent to this Google Accounts + URL and redirected back to the app to obtain the + AuthSub token. + extract_auth_sub_token_from_url: Once the user's browser has been + redirected back to the web app, use this + function to create an AuthSubToken with + the correct authorization token and scope. + token_from_http_body: Extracts the AuthSubToken value string from the + server's response to an AuthSub session token upgrade + request. +""" + +def generate_client_login_request_body(email, password, service, source, + account_type='HOSTED_OR_GOOGLE', captcha_token=None, + captcha_response=None): + """Creates the body of the autentication request + + See http://code.google.com/apis/accounts/AuthForInstalledApps.html#Request + for more details. + + Args: + email: str + password: str + service: str + source: str + account_type: str (optional) Defaul is 'HOSTED_OR_GOOGLE', other valid + values are 'GOOGLE' and 'HOSTED' + captcha_token: str (optional) + captcha_response: str (optional) + + Returns: + The HTTP body to send in a request for a client login token. 
+ """ + return gdata.gauth.generate_client_login_request_body(email, password, + service, source, account_type, captcha_token, captcha_response) + + +GenerateClientLoginRequestBody = generate_client_login_request_body + + +def GenerateClientLoginAuthToken(http_body): + """Returns the token value to use in Authorization headers. + + Reads the token from the server's response to a Client Login request and + creates header value to use in requests. + + Args: + http_body: str The body of the server's HTTP response to a Client Login + request + + Returns: + The value half of an Authorization header. + """ + token = get_client_login_token(http_body) + if token: + return 'GoogleLogin auth=%s' % token + return None + + +def get_client_login_token(http_body): + """Returns the token value for a ClientLoginToken. + + Reads the token from the server's response to a Client Login request and + creates the token value string to use in requests. + + Args: + http_body: str The body of the server's HTTP response to a Client Login + request + + Returns: + The token value string for a ClientLoginToken. + """ + return gdata.gauth.get_client_login_token_string(http_body) + + +def extract_client_login_token(http_body, scopes): + """Parses the server's response and returns a ClientLoginToken. + + Args: + http_body: str The body of the server's HTTP response to a Client Login + request. It is assumed that the login request was successful. + scopes: list containing atom.url.Urls or strs. The scopes list contains + all of the partial URLs under which the client login token is + valid. For example, if scopes contains ['http://example.com/foo'] + then the client login token would be valid for + http://example.com/foo/bar/baz + + Returns: + A ClientLoginToken which is valid for the specified scopes. + """ + token_string = get_client_login_token(http_body) + token = ClientLoginToken(scopes=scopes) + token.set_token_string(token_string) + return token + + +def get_captcha_challenge(http_body, + captcha_base_url='http://www.google.com/accounts/'): + """Returns the URL and token for a CAPTCHA challenge issued by the server. + + Args: + http_body: str The body of the HTTP response from the server which + contains the CAPTCHA challenge. + captcha_base_url: str This function returns a full URL for viewing the + challenge image which is built from the server's response. This + base_url is used as the beginning of the URL because the server + only provides the end of the URL. For example the server provides + 'Captcha?ctoken=Hi...N' and the URL for the image is + 'http://www.google.com/accounts/Captcha?ctoken=Hi...N' + + Returns: + A dictionary containing the information needed to repond to the CAPTCHA + challenge, the image URL and the ID token of the challenge. The + dictionary is in the form: + {'token': string identifying the CAPTCHA image, + 'url': string containing the URL of the image} + Returns None if there was no CAPTCHA challenge in the response. + """ + return gdata.gauth.get_captcha_challenge(http_body, captcha_base_url) + + +GetCaptchaChallenge = get_captcha_challenge + + +def GenerateOAuthRequestTokenUrl( + oauth_input_params, scopes, + request_token_url='https://www.google.com/accounts/OAuthGetRequestToken', + extra_parameters=None): + """Generate a URL at which a request for OAuth request token is to be sent. + + Args: + oauth_input_params: OAuthInputParams OAuth input parameters. + scopes: list of strings The URLs of the services to be accessed. 
+ request_token_url: string The beginning of the request token URL. This is + normally 'https://www.google.com/accounts/OAuthGetRequestToken' or + '/accounts/OAuthGetRequestToken' + extra_parameters: dict (optional) key-value pairs as any additional + parameters to be included in the URL and signature while making a + request for fetching an OAuth request token. All the OAuth parameters + are added by default. But if provided through this argument, any + default parameters will be overwritten. For e.g. a default parameter + oauth_version 1.0 can be overwritten if + extra_parameters = {'oauth_version': '2.0'} + + Returns: + atom.url.Url OAuth request token URL. + """ + scopes_string = ' '.join([str(scope) for scope in scopes]) + parameters = {'scope': scopes_string} + if extra_parameters: + parameters.update(extra_parameters) + oauth_request = oauth.OAuthRequest.from_consumer_and_token( + oauth_input_params.GetConsumer(), http_url=request_token_url, + parameters=parameters) + oauth_request.sign_request(oauth_input_params.GetSignatureMethod(), + oauth_input_params.GetConsumer(), None) + return atom.url.parse_url(oauth_request.to_url()) + + +def GenerateOAuthAuthorizationUrl( + request_token, + authorization_url='https://www.google.com/accounts/OAuthAuthorizeToken', + callback_url=None, extra_params=None, + include_scopes_in_callback=False, scopes_param_prefix='oauth_token_scope'): + """Generates URL at which user will login to authorize the request token. + + Args: + request_token: gdata.auth.OAuthToken OAuth request token. + authorization_url: string The beginning of the authorization URL. This is + normally 'https://www.google.com/accounts/OAuthAuthorizeToken' or + '/accounts/OAuthAuthorizeToken' + callback_url: string (optional) The URL user will be sent to after + logging in and granting access. + extra_params: dict (optional) Additional parameters to be sent. + include_scopes_in_callback: Boolean (default=False) if set to True, and + if 'callback_url' is present, the 'callback_url' will be modified to + include the scope(s) from the request token as a URL parameter. The + key for the 'callback' URL's scope parameter will be + OAUTH_SCOPE_URL_PARAM_NAME. The benefit of including the scope URL as + a parameter to the 'callback' URL, is that the page which receives + the OAuth token will be able to tell which URLs the token grants + access to. + scopes_param_prefix: string (default='oauth_token_scope') The URL + parameter key which maps to the list of valid scopes for the token. + This URL parameter will be included in the callback URL along with + the scopes of the token as value if include_scopes_in_callback=True. + + Returns: + atom.url.Url OAuth authorization URL. + """ + scopes = request_token.scopes + if isinstance(scopes, list): + scopes = ' '.join(scopes) + if include_scopes_in_callback and callback_url: + if callback_url.find('?') > -1: + callback_url += '&' + else: + callback_url += '?' 
+ callback_url += urllib.urlencode({scopes_param_prefix:scopes}) + oauth_token = oauth.OAuthToken(request_token.key, request_token.secret) + oauth_request = oauth.OAuthRequest.from_token_and_callback( + token=oauth_token, callback=callback_url, + http_url=authorization_url, parameters=extra_params) + return atom.url.parse_url(oauth_request.to_url()) + + +def GenerateOAuthAccessTokenUrl( + authorized_request_token, + oauth_input_params, + access_token_url='https://www.google.com/accounts/OAuthGetAccessToken', + oauth_version='1.0', + oauth_verifier=None): + """Generates URL at which user will login to authorize the request token. + + Args: + authorized_request_token: gdata.auth.OAuthToken OAuth authorized request + token. + oauth_input_params: OAuthInputParams OAuth input parameters. + access_token_url: string The beginning of the authorization URL. This is + normally 'https://www.google.com/accounts/OAuthGetAccessToken' or + '/accounts/OAuthGetAccessToken' + oauth_version: str (default='1.0') oauth_version parameter. + oauth_verifier: str (optional) If present, it is assumed that the client + will use the OAuth v1.0a protocol which includes passing the + oauth_verifier (as returned by the SP) in the access token step. + + Returns: + atom.url.Url OAuth access token URL. + """ + oauth_token = oauth.OAuthToken(authorized_request_token.key, + authorized_request_token.secret) + parameters = {'oauth_version': oauth_version} + if oauth_verifier is not None: + parameters['oauth_verifier'] = oauth_verifier + oauth_request = oauth.OAuthRequest.from_consumer_and_token( + oauth_input_params.GetConsumer(), token=oauth_token, + http_url=access_token_url, parameters=parameters) + oauth_request.sign_request(oauth_input_params.GetSignatureMethod(), + oauth_input_params.GetConsumer(), oauth_token) + return atom.url.parse_url(oauth_request.to_url()) + + +def GenerateAuthSubUrl(next, scope, secure=False, session=True, + request_url='https://www.google.com/accounts/AuthSubRequest', + domain='default'): + """Generate a URL at which the user will login and be redirected back. + + Users enter their credentials on a Google login page and a token is sent + to the URL specified in next. See documentation for AuthSub login at: + http://code.google.com/apis/accounts/AuthForWebApps.html + + Args: + request_url: str The beginning of the request URL. This is normally + 'http://www.google.com/accounts/AuthSubRequest' or + '/accounts/AuthSubRequest' + next: string The URL user will be sent to after logging in. + scope: string The URL of the service to be accessed. + secure: boolean (optional) Determines whether or not the issued token + is a secure token. + session: boolean (optional) Determines whether or not the issued token + can be upgraded to a session token. + domain: str (optional) The Google Apps domain for this account. If this + is not a Google Apps account, use 'default' which is the default + value. + """ + # Translate True/False values for parameters into numeric values acceoted + # by the AuthSub service. 
+ if secure: + secure = 1 + else: + secure = 0 + + if session: + session = 1 + else: + session = 0 + + request_params = urllib.urlencode({'next': next, 'scope': scope, + 'secure': secure, 'session': session, + 'hd': domain}) + if request_url.find('?') == -1: + return '%s?%s' % (request_url, request_params) + else: + # The request URL already contained url parameters so we should add + # the parameters using the & seperator + return '%s&%s' % (request_url, request_params) + + +def generate_auth_sub_url(next, scopes, secure=False, session=True, + request_url='https://www.google.com/accounts/AuthSubRequest', + domain='default', scopes_param_prefix='auth_sub_scopes'): + """Constructs a URL string for requesting a multiscope AuthSub token. + + The generated token will contain a URL parameter to pass along the + requested scopes to the next URL. When the Google Accounts page + redirects the broswser to the 'next' URL, it appends the single use + AuthSub token value to the URL as a URL parameter with the key 'token'. + However, the information about which scopes were requested is not + included by Google Accounts. This method adds the scopes to the next + URL before making the request so that the redirect will be sent to + a page, and both the token value and the list of scopes can be + extracted from the request URL. + + Args: + next: atom.url.URL or string The URL user will be sent to after + authorizing this web application to access their data. + scopes: list containint strings The URLs of the services to be accessed. + secure: boolean (optional) Determines whether or not the issued token + is a secure token. + session: boolean (optional) Determines whether or not the issued token + can be upgraded to a session token. + request_url: atom.url.Url or str The beginning of the request URL. This + is normally 'http://www.google.com/accounts/AuthSubRequest' or + '/accounts/AuthSubRequest' + domain: The domain which the account is part of. This is used for Google + Apps accounts, the default value is 'default' which means that the + requested account is a Google Account (@gmail.com for example) + scopes_param_prefix: str (optional) The requested scopes are added as a + URL parameter to the next URL so that the page at the 'next' URL can + extract the token value and the valid scopes from the URL. The key + for the URL parameter defaults to 'auth_sub_scopes' + + Returns: + An atom.url.Url which the user's browser should be directed to in order + to authorize this application to access their information. + """ + if isinstance(next, (str, unicode)): + next = atom.url.parse_url(next) + scopes_string = ' '.join([str(scope) for scope in scopes]) + next.params[scopes_param_prefix] = scopes_string + + if isinstance(request_url, (str, unicode)): + request_url = atom.url.parse_url(request_url) + request_url.params['next'] = str(next) + request_url.params['scope'] = scopes_string + if session: + request_url.params['session'] = 1 + else: + request_url.params['session'] = 0 + if secure: + request_url.params['secure'] = 1 + else: + request_url.params['secure'] = 0 + request_url.params['hd'] = domain + return request_url + + +def AuthSubTokenFromUrl(url): + """Extracts the AuthSub token from the URL. + + Used after the AuthSub redirect has sent the user to the 'next' page and + appended the token to the URL. This function returns the value to be used + in the Authorization header. + + Args: + url: str The URL of the current page which contains the AuthSub token as + a URL parameter. 
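+
+  Example (the token value below is a made-up placeholder):
+    AuthSubTokenFromUrl('http://www.example.com/?token=CKF50YzIHxCT85KMAg')
+    returns 'AuthSub token=CKF50YzIHxCT85KMAg'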
+ """ + token = TokenFromUrl(url) + if token: + return 'AuthSub token=%s' % token + return None + + +def TokenFromUrl(url): + """Extracts the AuthSub token from the URL. + + Returns the raw token value. + + Args: + url: str The URL or the query portion of the URL string (after the ?) of + the current page which contains the AuthSub token as a URL parameter. + """ + if url.find('?') > -1: + query_params = url.split('?')[1] + else: + query_params = url + for pair in query_params.split('&'): + if pair.startswith('token='): + return pair[6:] + return None + + +def extract_auth_sub_token_from_url(url, + scopes_param_prefix='auth_sub_scopes', rsa_key=None): + """Creates an AuthSubToken and sets the token value and scopes from the URL. + + After the Google Accounts AuthSub pages redirect the user's broswer back to + the web application (using the 'next' URL from the request) the web app must + extract the token from the current page's URL. The token is provided as a + URL parameter named 'token' and if generate_auth_sub_url was used to create + the request, the token's valid scopes are included in a URL parameter whose + name is specified in scopes_param_prefix. + + Args: + url: atom.url.Url or str representing the current URL. The token value + and valid scopes should be included as URL parameters. + scopes_param_prefix: str (optional) The URL parameter key which maps to + the list of valid scopes for the token. + + Returns: + An AuthSubToken with the token value from the URL and set to be valid for + the scopes passed in on the URL. If no scopes were included in the URL, + the AuthSubToken defaults to being valid for no scopes. If there was no + 'token' parameter in the URL, this function returns None. + """ + if isinstance(url, (str, unicode)): + url = atom.url.parse_url(url) + if 'token' not in url.params: + return None + scopes = [] + if scopes_param_prefix in url.params: + scopes = url.params[scopes_param_prefix].split(' ') + token_value = url.params['token'] + if rsa_key: + token = SecureAuthSubToken(rsa_key, scopes=scopes) + else: + token = AuthSubToken(scopes=scopes) + token.set_token_string(token_value) + return token + + +def AuthSubTokenFromHttpBody(http_body): + """Extracts the AuthSub token from an HTTP body string. + + Used to find the new session token after making a request to upgrade a + single use AuthSub token. + + Args: + http_body: str The repsonse from the server which contains the AuthSub + key. For example, this function would find the new session token + from the server's response to an upgrade token request. + + Returns: + The header value to use for Authorization which contains the AuthSub + token. + """ + token_value = token_from_http_body(http_body) + if token_value: + return '%s%s' % (AUTHSUB_AUTH_LABEL, token_value) + return None + + +def token_from_http_body(http_body): + """Extracts the AuthSub token from an HTTP body string. + + Used to find the new session token after making a request to upgrade a + single use AuthSub token. + + Args: + http_body: str The repsonse from the server which contains the AuthSub + key. For example, this function would find the new session token + from the server's response to an upgrade token request. + + Returns: + The raw token value to use in an AuthSubToken object. + """ + for response_line in http_body.splitlines(): + if response_line.startswith('Token='): + # Strip off Token= and return the token value string. 
+ return response_line[6:] + return None + + +TokenFromHttpBody = token_from_http_body + + +def OAuthTokenFromUrl(url, scopes_param_prefix='oauth_token_scope'): + """Creates an OAuthToken and sets token key and scopes (if present) from URL. + + After the Google Accounts OAuth pages redirect the user's broswer back to + the web application (using the 'callback' URL from the request) the web app + can extract the token from the current page's URL. The token is same as the + request token, but it is either authorized (if user grants access) or + unauthorized (if user denies access). The token is provided as a + URL parameter named 'oauth_token' and if it was chosen to use + GenerateOAuthAuthorizationUrl with include_scopes_in_param=True, the token's + valid scopes are included in a URL parameter whose name is specified in + scopes_param_prefix. + + Args: + url: atom.url.Url or str representing the current URL. The token value + and valid scopes should be included as URL parameters. + scopes_param_prefix: str (optional) The URL parameter key which maps to + the list of valid scopes for the token. + + Returns: + An OAuthToken with the token key from the URL and set to be valid for + the scopes passed in on the URL. If no scopes were included in the URL, + the OAuthToken defaults to being valid for no scopes. If there was no + 'oauth_token' parameter in the URL, this function returns None. + """ + if isinstance(url, (str, unicode)): + url = atom.url.parse_url(url) + if 'oauth_token' not in url.params: + return None + scopes = [] + if scopes_param_prefix in url.params: + scopes = url.params[scopes_param_prefix].split(' ') + token_key = url.params['oauth_token'] + token = OAuthToken(key=token_key, scopes=scopes) + return token + + +def OAuthTokenFromHttpBody(http_body): + """Parses the HTTP response body and returns an OAuth token. + + The returned OAuth token will just have key and secret parameters set. + It won't have any knowledge about the scopes or oauth_input_params. It is + your responsibility to make it aware of the remaining parameters. + + Returns: + OAuthToken OAuth token. + """ + token = oauth.OAuthToken.from_string(http_body) + oauth_token = OAuthToken(key=token.key, secret=token.secret) + return oauth_token + + +class OAuthSignatureMethod(object): + """Holds valid OAuth signature methods. + + RSA_SHA1: Class to build signature according to RSA-SHA1 algorithm. + HMAC_SHA1: Class to build signature according to HMAC-SHA1 algorithm. + """ + + HMAC_SHA1 = oauth.OAuthSignatureMethod_HMAC_SHA1 + + class RSA_SHA1(oauth_rsa.OAuthSignatureMethod_RSA_SHA1): + """Provides implementation for abstract methods to return RSA certs.""" + + def __init__(self, private_key, public_cert): + self.private_key = private_key + self.public_cert = public_cert + + def _fetch_public_cert(self, unused_oauth_request): + return self.public_cert + + def _fetch_private_cert(self, unused_oauth_request): + return self.private_key + + +class OAuthInputParams(object): + """Stores OAuth input parameters. + + This class is a store for OAuth input parameters viz. consumer key and secret, + signature method and RSA key. + """ + + def __init__(self, signature_method, consumer_key, consumer_secret=None, + rsa_key=None, requestor_id=None): + """Initializes object with parameters required for using OAuth mechanism. + + NOTE: Though consumer_secret and rsa_key are optional, either of the two + is required depending on the value of the signature_method. 
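+
+    Example (consumer key and secret are placeholder values):
+      input_params = OAuthInputParams(OAuthSignatureMethod.HMAC_SHA1,
+                                      consumer_key='www.example.com',
+                                      consumer_secret='the-consumer-secret')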
+ + Args: + signature_method: class which provides implementation for strategy class + oauth.oauth.OAuthSignatureMethod. Signature method to be used for + signing each request. Valid implementations are provided as the + constants defined by gdata.auth.OAuthSignatureMethod. Currently + they are gdata.auth.OAuthSignatureMethod.RSA_SHA1 and + gdata.auth.OAuthSignatureMethod.HMAC_SHA1. Instead of passing in + the strategy class, you may pass in a string for 'RSA_SHA1' or + 'HMAC_SHA1'. If you plan to use OAuth on App Engine (or another + WSGI environment) I recommend specifying signature method using a + string (the only options are 'RSA_SHA1' and 'HMAC_SHA1'). In these + environments there are sometimes issues with pickling an object in + which a member references a class or function. Storing a string to + refer to the signature method mitigates complications when + pickling. + consumer_key: string Domain identifying third_party web application. + consumer_secret: string (optional) Secret generated during registration. + Required only for HMAC_SHA1 signature method. + rsa_key: string (optional) Private key required for RSA_SHA1 signature + method. + requestor_id: string (optional) User email adress to make requests on + their behalf. This parameter should only be set when performing + 2 legged OAuth requests. + """ + if (signature_method == OAuthSignatureMethod.RSA_SHA1 + or signature_method == 'RSA_SHA1'): + self.__signature_strategy = 'RSA_SHA1' + elif (signature_method == OAuthSignatureMethod.HMAC_SHA1 + or signature_method == 'HMAC_SHA1'): + self.__signature_strategy = 'HMAC_SHA1' + else: + self.__signature_strategy = signature_method + self.rsa_key = rsa_key + self._consumer = oauth.OAuthConsumer(consumer_key, consumer_secret) + self.requestor_id = requestor_id + + def __get_signature_method(self): + if self.__signature_strategy == 'RSA_SHA1': + return OAuthSignatureMethod.RSA_SHA1(self.rsa_key, None) + elif self.__signature_strategy == 'HMAC_SHA1': + return OAuthSignatureMethod.HMAC_SHA1() + else: + return self.__signature_strategy() + + def __set_signature_method(self, signature_method): + if (signature_method == OAuthSignatureMethod.RSA_SHA1 + or signature_method == 'RSA_SHA1'): + self.__signature_strategy = 'RSA_SHA1' + elif (signature_method == OAuthSignatureMethod.HMAC_SHA1 + or signature_method == 'HMAC_SHA1'): + self.__signature_strategy = 'HMAC_SHA1' + else: + self.__signature_strategy = signature_method + + _signature_method = property(__get_signature_method, __set_signature_method, + doc="""Returns object capable of signing the request using RSA of HMAC. + + Replaces the _signature_method member to avoid pickle errors.""") + + def GetSignatureMethod(self): + """Gets the OAuth signature method. + + Returns: + object of supertype + """ + return self._signature_method + + def GetConsumer(self): + """Gets the OAuth consumer. + + Returns: + object of type + """ + return self._consumer + + +class ClientLoginToken(atom.http_interface.GenericToken): + """Stores the Authorization header in auth_header and adds to requests. + + This token will add it's Authorization header to an HTTP request + as it is made. Ths token class is simple but + some Token classes must calculate portions of the Authorization header + based on the request being made, which is why the token is responsible + for making requests via an http_client parameter. + + Args: + auth_header: str The value for the Authorization header. 
+ scopes: list of str or atom.url.Url specifying the beginnings of URLs + for which this token can be used. For example, if scopes contains + 'http://example.com/foo', then this token can be used for a request to + 'http://example.com/foo/bar' but it cannot be used for a request to + 'http://example.com/baz' + """ + def __init__(self, auth_header=None, scopes=None): + self.auth_header = auth_header + self.scopes = scopes or [] + + def __str__(self): + return self.auth_header + + def perform_request(self, http_client, operation, url, data=None, + headers=None): + """Sets the Authorization header and makes the HTTP request.""" + if headers is None: + headers = {'Authorization':self.auth_header} + else: + headers['Authorization'] = self.auth_header + return http_client.request(operation, url, data=data, headers=headers) + + def get_token_string(self): + """Removes PROGRAMMATIC_AUTH_LABEL to give just the token value.""" + return self.auth_header[len(PROGRAMMATIC_AUTH_LABEL):] + + def set_token_string(self, token_string): + self.auth_header = '%s%s' % (PROGRAMMATIC_AUTH_LABEL, token_string) + + def valid_for_scope(self, url): + """Tells the caller if the token authorizes access to the desired URL. + """ + if isinstance(url, (str, unicode)): + url = atom.url.parse_url(url) + for scope in self.scopes: + if scope == atom.token_store.SCOPE_ALL: + return True + if isinstance(scope, (str, unicode)): + scope = atom.url.parse_url(scope) + if scope == url: + return True + # Check the host and the path, but ignore the port and protocol. + elif scope.host == url.host and not scope.path: + return True + elif scope.host == url.host and scope.path and not url.path: + continue + elif scope.host == url.host and url.path.startswith(scope.path): + return True + return False + + +class AuthSubToken(ClientLoginToken): + def get_token_string(self): + """Removes AUTHSUB_AUTH_LABEL to give just the token value.""" + return self.auth_header[len(AUTHSUB_AUTH_LABEL):] + + def set_token_string(self, token_string): + self.auth_header = '%s%s' % (AUTHSUB_AUTH_LABEL, token_string) + + +class OAuthToken(atom.http_interface.GenericToken): + """Stores the token key, token secret and scopes for which token is valid. + + This token adds the authorization header to each request made. It + re-calculates authorization header for every request since the OAuth + signature to be added to the authorization header is dependent on the + request parameters. + + Attributes: + key: str The value for the OAuth token i.e. token key. + secret: str The value for the OAuth token secret. + scopes: list of str or atom.url.Url specifying the beginnings of URLs + for which this token can be used. For example, if scopes contains + 'http://example.com/foo', then this token can be used for a request to + 'http://example.com/foo/bar' but it cannot be used for a request to + 'http://example.com/baz' + oauth_input_params: OAuthInputParams OAuth input parameters. + """ + + def __init__(self, key=None, secret=None, scopes=None, + oauth_input_params=None): + self.key = key + self.secret = secret + self.scopes = scopes or [] + self.oauth_input_params = oauth_input_params + + def __str__(self): + return self.get_token_string() + + def get_token_string(self): + """Returns the token string. + + The token string returned is of format + oauth_token=[0]&oauth_token_secret=[1], where [0] and [1] are some strings. + + Returns: + A token string of format oauth_token=[0]&oauth_token_secret=[1], + where [0] and [1] are some strings. 
If self.secret is absent, it just + returns oauth_token=[0]. If self.key is absent, it just returns + oauth_token_secret=[1]. If both are absent, it returns None. + """ + if self.key and self.secret: + return urllib.urlencode({'oauth_token': self.key, + 'oauth_token_secret': self.secret}) + elif self.key: + return 'oauth_token=%s' % self.key + elif self.secret: + return 'oauth_token_secret=%s' % self.secret + else: + return None + + def set_token_string(self, token_string): + """Sets the token key and secret from the token string. + + Args: + token_string: str Token string of form + oauth_token=[0]&oauth_token_secret=[1]. If oauth_token is not present, + self.key will be None. If oauth_token_secret is not present, + self.secret will be None. + """ + token_params = cgi.parse_qs(token_string, keep_blank_values=False) + if 'oauth_token' in token_params: + self.key = token_params['oauth_token'][0] + if 'oauth_token_secret' in token_params: + self.secret = token_params['oauth_token_secret'][0] + + def GetAuthHeader(self, http_method, http_url, realm=''): + """Get the authentication header. + + Args: + http_method: string HTTP method i.e. operation e.g. GET, POST, PUT, etc. + http_url: string or atom.url.Url HTTP URL to which request is made. + realm: string (default='') realm parameter to be included in the + authorization header. + + Returns: + dict Header to be sent with every subsequent request after + authentication. + """ + if isinstance(http_url, types.StringTypes): + http_url = atom.url.parse_url(http_url) + header = None + token = None + if self.key or self.secret: + token = oauth.OAuthToken(self.key, self.secret) + oauth_request = oauth.OAuthRequest.from_consumer_and_token( + self.oauth_input_params.GetConsumer(), token=token, + http_url=str(http_url), http_method=http_method, + parameters=http_url.params) + oauth_request.sign_request(self.oauth_input_params.GetSignatureMethod(), + self.oauth_input_params.GetConsumer(), token) + header = oauth_request.to_header(realm=realm) + header['Authorization'] = header['Authorization'].replace('+', '%2B') + return header + + def perform_request(self, http_client, operation, url, data=None, + headers=None): + """Sets the Authorization header and makes the HTTP request.""" + if not headers: + headers = {} + if self.oauth_input_params.requestor_id: + url.params['xoauth_requestor_id'] = self.oauth_input_params.requestor_id + headers.update(self.GetAuthHeader(operation, url)) + return http_client.request(operation, url, data=data, headers=headers) + + def valid_for_scope(self, url): + if isinstance(url, (str, unicode)): + url = atom.url.parse_url(url) + for scope in self.scopes: + if scope == atom.token_store.SCOPE_ALL: + return True + if isinstance(scope, (str, unicode)): + scope = atom.url.parse_url(scope) + if scope == url: + return True + # Check the host and the path, but ignore the port and protocol. + elif scope.host == url.host and not scope.path: + return True + elif scope.host == url.host and scope.path and not url.path: + continue + elif scope.host == url.host and url.path.startswith(scope.path): + return True + return False + + +class SecureAuthSubToken(AuthSubToken): + """Stores the rsa private key, token, and scopes for the secure AuthSub token. + + This token adds the authorization header to each request made. It + re-calculates authorization header for every request since the secure AuthSub + signature to be added to the authorization header is dependent on the + request parameters. 
+ + Attributes: + rsa_key: string The RSA private key in PEM format that the token will + use to sign requests + token_string: string (optional) The value for the AuthSub token. + scopes: list of str or atom.url.Url specifying the beginnings of URLs + for which this token can be used. For example, if scopes contains + 'http://example.com/foo', then this token can be used for a request to + 'http://example.com/foo/bar' but it cannot be used for a request to + 'http://example.com/baz' + """ + + def __init__(self, rsa_key, token_string=None, scopes=None): + self.rsa_key = keyfactory.parsePEMKey(rsa_key) + self.token_string = token_string or '' + self.scopes = scopes or [] + + def __str__(self): + return self.get_token_string() + + def get_token_string(self): + return str(self.token_string) + + def set_token_string(self, token_string): + self.token_string = token_string + + def GetAuthHeader(self, http_method, http_url): + """Generates the Authorization header. + + The form of the secure AuthSub Authorization header is + Authorization: AuthSub token="token" sigalg="sigalg" data="data" sig="sig" + and data represents a string in the form + data = http_method http_url timestamp nonce + + Args: + http_method: string HTTP method i.e. operation e.g. GET, POST, PUT, etc. + http_url: string or atom.url.Url HTTP URL to which request is made. + + Returns: + dict Header to be sent with every subsequent request after authentication. + """ + timestamp = int(math.floor(time.time())) + nonce = '%lu' % random.randrange(1, 2**64) + data = '%s %s %d %s' % (http_method, str(http_url), timestamp, nonce) + sig = cryptomath.bytesToBase64(self.rsa_key.hashAndSign(data)) + header = {'Authorization': '%s"%s" data="%s" sig="%s" sigalg="rsa-sha1"' % + (AUTHSUB_AUTH_LABEL, self.token_string, data, sig)} + return header + + def perform_request(self, http_client, operation, url, data=None, + headers=None): + """Sets the Authorization header and makes the HTTP request.""" + if not headers: + headers = {} + headers.update(self.GetAuthHeader(operation, url)) + return http_client.request(operation, url, data=data, headers=headers) diff --git a/gam/gdata/auth.pyc b/gam/gdata/auth.pyc new file mode 100644 index 0000000000000000000000000000000000000000..799425906a6a3bdd596d622052b8d14733cb6ad3 GIT binary patch literal 38695 zcmeHwTaX;rd0x*hu)u-~E(sEVK!RcmBDf0?9PlPcf(22F1w=q10l0S|N}#Cm>`d=s zfZ3VhbT5{m$hgX69^}MMWXX;lC$5SeKSY&O5+#+SQk6p04G<&*FpFWrW^8J@{X7YdS-E(B)pI&Z7?%x#t|5bdl z-b56&@mGtwQFJw&*P^R6e|;jlI^oYJqpOqtd`EP3hdon)meY#k?8uP(e-`N^~a*CkNJy_`-}UdtNZ=M1JTuk zQ5y~ZRMbXO4@GS>@`Q4A?CMOPiP~uDBhg0~griaW(Wt#ow;t2G zo{dl-or~I!M{Tt8K=cusG+(`lHh)TwAB)!z>w*E+qpxshKxojr`J z3piWaSc&ugjkI?!6usoBd(CxRRyAo1Io#y3pSkmcP5!H1b;mD;3Z82kcU_dn`Vit~NKiaVLxC zMhAB8c-+U34C9;4Zl|rcvqAWVafq8}@tn=TT-@xn{qu7w*lG^)*0p9s^QNYiURo(J zOtV41mu+boM zPm(Hj;;`0k=1n^zq2(l(wX~NGK?DtZxS@v|Zps@Zig}Wlp3;ouq&7bpIq$i7$SHF( zO+h|`)AH!&dvT#MG@@vK#Sg_iN<}(kU&h&qE2ug@@xIx^VzT!&%b!=gt~WvJ*y9HCG7Rr@yT}J5bd9>wMh2R2KtCyK8-J6 z7ck0)wde!zN-Y{fnCj%f62$D1#x7gJlWs=D9vWIB2k>XSk)@msl6-&IdC!>UTG|A^ z8%-xTHzumMq>S<`YWGorQ~D12@k&bm?6jr!E6N z>rHgoxDU0hY;?MLJksJ4U?Q)VxY4(VVg|6R4Oyc>TS;MENn$t)haqkDZBs7yA&X&}=!|6?5vWgrERUC( z9OxK@FPwD{!i`pVz{IjDDK!TZ1?6k`dY%SV)%G^-9|B&5I6rGrH!!0f$SBWQ$A!c+HXflmxyFvOY_8x^JQY%7UeTB zE$zBRe6qid15n9z^ijTrvv2Q4`LFaW=C`#ehIiN;F>_?Kss>6!C8T?wTTaQMepy5uH%RTyIG(pxik83 zGOFc!qU&<6u^e74OKxdimgqm2EHHcA6bS%tm}IG1&gy-@A0~`Jm*4|vS{U0ory73+ zQ0mjlY;<~qjl2Qy&|C*LQ7spE36Km|>;kA8S7H@Pb6JE09~D3Y$~5qq3Aun+fo04S zLun5mhi?pQYQUnjZBp3bDyLPw7$%b}gXXu$JXhSYC*EShvy5hXlm%VLK1rUBOEGgm 
zU}7xxaISC$jiWB}mPj4j2x@Rn#S{%Ck4bzZz4g3Vl!InxNbm@ac`I(V+a2@dgclY= z>}IahYjrm$(42trQY!1L^_tL4QZwB&UT@yum^DL9exN9qS5vribi3y!)ZZ1I)Z+!{ zHi{S{qM{L@Hu{UuT+`rfd7bildhj>!SJxZu|~uR%4-hSfRKCnarQM-M$5G7F*%Z3++$k`-t1L_8|)AI!060KaE zCqR@h;CjE)gEWDN%7;Rw8w0qyQq`k23S4T25|eQGL6jz}Xh2iY92quXD;X!{wALUD z7u_8_Orr5_v3#EuSn`DQMhe$YJE7N0?O}J-9=4k93AEaPl3JJdiJ9gpTUFIoX<~?& zu-H*s!Q;{&qqC75VqLUaO|UqKozVZR%k~Oa**0pMif5!ez)~SshTVV8Ha(6*bZ{P^ zdaCwFZMwD(pIMw`FXQ?>=_c=7OI2^qk+Cj`3mfWni(;RnO*&b`C;JaLP-R3Bln~Vv zjec>*&(hc1DmeWmN_+TRH?a`Nsh+pso zC=D8nIuFWjXLJ%zsTf8difOkRF|Fesz8;E=*QL1-yMgjT;D(wHst)f%c__(f#|OJP zc?cCRNOO@gh1j6+9p_UeWSJluvYNe5)7zp@Sp#fK#ts9l5 zmm&vLuGA&PAUl{qz0+KUl2oSa%9eHsp#%m`N+aXB1UbWORi`jI`A5%WN%+{|la?+p{Bs;xyNE?Vr>IrcN<={P_(PlGDn?DjDqo1Oex zXjYh<0GMr9fD#Qff{<^|8>dP5&X-<_w~YZSTV)iQkbDYKScv6Sg?n}StlU<@Exz#y z9Zyol^VD0W+o_{ES%>k_UMm%9;20){Stl*8)lRQ%SZ3Jmc2)! z1hN1JX2;xeDI!IA60=x1g;Bh4_({G7){5rZt`uBmtbbzKgH{AcpohOl!alO9iM+ zIs+Sc1I4CUER;v#I~dps`|dyo{NQab{_td$wmsxF{Cj68Qat5t`9e-`z)b>e1>5&- zx>I_oPLs0*C=XI~8M1JT;q4n{UN%VZ?J&GclypagQzbexyKM}d@n^o#mvnG%ia`A9)HG)jZ!Z~G zZb)-lrq$+1Efku-j8}>AkPIC3f@PiUij zlN@6SMxaLtH#v)o$vGa*@IZrc+odck^@75rY#{|9V`pIWXY^V|2Pska>O&)O){MkD z?%d5tTma-px>Vw*YiF`XIVNFG=6A^Mt z9;$qsL4Zic`@b)V)Of^aN%}2J4zufMRKWNcrtJKR?@G0wSI_d`767G}04c!v|7I8qZQ4e~)uAH!Tza@v2NjUn= z+=RHwn>qbJoWy(09D6fe2{~g7Xlyl3fj&&~jFq|GtSC!cxb@AwiTdo{-2yCCvZ9^f1@U2$y78MD@?Sdf~s4XGlPnZsl8V?OIz@5oc<3s@tT41$C8?J?LMZG;=Kf^OIVFq2m$A`xhwhsji|!Q?==dsR=tCtUV0bzrXhIj(isPnvm#?Y-7dw zG?eU0%Y?(Rh)))w8sJ<&WEfr;HV9Vp?3Lq6bnZoT?$O;hRgPQ11qXK>u(8FhzQ7YW z+~8*;=!PS1D*7_3SA46e{`;k{m4aRffDv$`E(M<`{Nc_63};RDWfEVYTK=Wrw_Vx^ zn7&tDzwn(Z>A#kW>>4S8?@%+Y%H8;~16qS4lKlbH8=ckpaHExzS0$+}cZ;&vO|TqK z@%lzL??|`=87+>N(jfn=xjZ+9e~FER&0IXy)m?Z4Ab<$PkW53{oXh}{ckhEXa*hC} zKuG&+TRnc$=eT%gbqN)y4AFm8VH(XCY-QNbHtEuI70t0G?_&YS4h9I%TT!AyI8DH< zsKwEo>xg*-TooK5#*(%;B67?6p_{{np~=PP`x8`t1w?1U3*yV6@0}M#(k4*X%T^Var!i%xj2TaoFAa;*7f5xtu&K z2<{c?bJm7h=)GDquOBpp>$FX{jT4~&EB2lp*Ets@LJepQQ)w{EAw9!4LUUZz+HCx( zQoa^C0m`(nr}sA9K|SJq{vQPc^K1?NV{|vu6_P;L=c}WGrV-hG;2vf@mvkzetkpEK z+%@B4MqmHG(6eqv%@tm8jM?mI>b4lX6*{OS=!hB0G3GlYB{=R)4AXxs@a5sbG6>jV z0Ao>7v{UJXH4tQU_Xc~M6liI6#-M@h%2*&XDd1zP&e4c<1S9BR9bR|xv2kOl$_)#z_My=StWDsW?!b}@d&#Q$d^&EN|H!_ug zR)x(-vKf6wer(jW(_wuE{Zz|Dh+jh*1z^1#SC;3dfWpJLzn$d8K!IyY`L(}8;-UvE zdZ@JYViMo^S;Qw>#etw+Rsfwc6AI`*r;GTC6kQxAqeEB&1ZNP6;^f`BoZ+}3n1A-| zr|2sJNT+WH&T*_2T97k+Dtdb9orUO4IDnSKy4nB0`|gz(OX5l-Gi+`;8WME~l^4E` zt#fYViv@Z}?Hu%x#tzSpBLvQ-16cD4OX~%%!na$b^^godF2MjhZXuMyUNy1yBFCMA z$%v-I(~I+T!O2#&3?D$YTtO@Z!G?Lp$es}qXHHQmg_qPhmu9EX7&F1Pk+PC+;&neJ z8Ife8>HW0>Fi8)<`8ZQ+u_rCn*m>nqATGG-+vR4djJjj0{HJn2Lrs zM2VohT6756B9?YSC#2CW{FAr=Lz5=r43u{U2ny9|2nzLP84Pq+uwVpcBBDR? zF@jEO__8!lyE$7zHx}IzGPg1dUQ@A?5)_aSU_wIE<^&WifpOXz@&SJfL57alG{O3! zAfhYYw5o`((7YL}B{?lAUr`+iaum?dZ+KsST!I`yraP2#to$s@T&!3PMv87(pTc8h zvIhzPNmEvP2~Kcm`QJ(+Z?bI_s*iifL&M@v)oS_Noz-AGH=5pk0hW!KQcMBQKEex& z)HK&q*K`AvR+C?OQ9^9NaD;Xk;XR<4zD!-VhKRP}ptU+GIB}dnslX$L>yZI&AsiQF z2~uKYoFt{t8~2a2Qo(lxs0zKnU{ppDs}=e5n8uFv?^T}NnvPJk#ka}-pQPoj*-9Hd z%G3mCfJOmiS??$Vp+ry`fx-r6^TaWhTLaZ@@4`#eG`7OcEj=e73L;F4Noj21nne1! 
zDp|&WrPkVFd)6Eb;aTbY12(mHQUXln1#aZ)mwglKtyy;yUhqp&RbKjQhj!3+rbV(GYn0TH<1FQgH(Ohn7Q zbhGMbw@kK>9331?c*uF@l`&oT+dL&2d>N55%HidjV`FK`SxA_S>)#93fjI^rn={bH zVb;LgnCZ$A9174ff!aE`+jlNh6tsc8yR@>1vKpi3`>NUXgRoUbutVe4WK*<|ehV;v zdqi&H!DlLH-TKkYJNxs;NgKS`5Q&8W73VOqZRyIHh7#OfUpRB_^%5FqWHD*ru#K2W z;pQ@9ehqYg9`U_1nB)IO+7~3pQo1R)^|w;`OK8r!2Ees#hv2n{9Gh1dARq1}%>xSo zUhf6Go&<;?3d{c0pF735{Jm9Otd@enga0G;hzuSX+OV-&|i0{)072qkf!#J3arZFJlba4rk1-X7# zl%Eke1Cjzup*Hst8=>P^ev0%ZWiX{MPNAEc-uu^%IbO75Tn?^_Il#Im&fDmXD~a@r z4oo47P8|EG%ni#b(gX&h11+S5(V{5;ASvcU8>%TcWhDe}jiFeFEK7Ogf`3Yktr#V! z&XA%y0Q50;41@#HNjEDWD9$h5>A0jABEzv)l-K?t%&`Gs;D%BR-)GpS}E-RWmW`?2Ouh_8YB?|PVodTO=u0d zGu<9XXQ1;g7h8!`cMQ;GL1ZG!r-TIaiy<{0PDv00hlERY#VHzP5L|~t2D<{?RDj2+ zBKSheOJiYx2pU{~JnQ%7TyZY|qLpY>y8U@i% z6hSEmhb-z><`o4wgSHKZtY}j4cG49SD~kq`t~^XXN#Y`8i1zHOJz0~5p^_u(8e1&F ze{3n6moYgh#m0glEF34IR4ci;Lt-sTvC4awHpZKXJ`%S7HnvDi9GU$yJbAv~MSg>+ z6C;yiMXqH)gi&8wXe^yyc<~hWDPfigCXO3Iz#ZvVUMhws6ABK`ZD*sI z)Yj0l@J4C4uCV!RNObRw=a^c+jEzqQ{ECkq?t2G3t*r44b@R-L{Z`RZ84M!pMuQII zMnm>jqk(078(n_ous0e=?`<@aKF*VOco_OdZew$k8>sXm+eWIGs!dHy@0gxcf?JaD zrBG~9vcUtTYR=93#!0J2V0SH%JGIY|(MjLxx>l)F98ePex0)EOU`x?*1TWu;(kpi^&;CVUsgcQMxNicTys^aThVm zi5hpS#U8PcOE4mBci24?N5nSSZ{o0&BeCpGdsjf9GNT8ZO*SEsaneMPk;!Oub(uIH z-SQ&7*?F(djc2sbzOXyBMvb2ALY(^yIA9G*IC?v9Upf37v4C`b1z#ZI4D2Ao9y_h# zD?~1{?XelH?CJ(==NEi2EaFD7Gb)DoFN$3h8UlTz7(#0P5m?d`n8^FI6;zO)lgupT zMWlslAwUDe82nK`LCY>chG7S>&U+X;^KqDgRR9SH?sqnjrElp)7SXcdT2HY}Os|tk zS-Ay97cVWJJB5MPo)CWI8b~=KViFQFf46oc^$Fg`V`rs0JdE)+Ypg_RJ-}L#F=$F zj^j(LGYC505ed|xr(7H&=h)Z=d|?Wo!&P%kLA9OwhGEY-x3#{x|R zMPb^&)1=(Z(dF==VQ(mXvmP&621d8YV^Ulb-WI&>N(%0Y7QRucnNQvQ&5Su47xB5Fg0`l z8hwM%kUkfm>tQK;iWD_?QIHS|Z%V06Z!W!d0gF{`c82{PG0?I20%i`$Z}+-RQi_^D z#pw7j8%Q34Rn_@El~&c$SB0lJ)SA#y?pTLFS_7{^Quo1!E;&{UKgz`5n{1p8QR}2M zAz@g3a#pJ?Mz-PV4BLz>!=4(6*KeuH#8#-Sw3PxPeiUd$%6WT39Ufx8Hi-(Pv-s*b zBZ-%xnpe_hpujCk#6RaEgX3O**UaVysf_m`vm>ucX{Sx9)Up&?2!5r>pxAe%!zTD|veHARVIJ zSlY8zs_Lb$Iw}$b3tQ|cn(JDj@v=a!kUJC2iWk@r+v4&PL26-0pim<#d;MR);{{l| zf1F0FOC!XbFHbWSh-B8#3#FegIOpJh8$toHZ0$6!xl#9)|D^> z@(U^fUHzw4G}?nA2io)lx|`!oSwiXVn8qnPB4ZSw=<`C(aeb0P z{c5{7JhY`fUhP0-G;j8igDv5o#_4d_$F{kO(vqZaYjkHcQL&(0n$Ss=Ko(9R%acFi zFpIr5WVbdTtGFY7{N=l0zOSfPqHDQFw{+-%kbLqCI{Am3I?_xubq~Yx=GI~HQtEEQ zv2`T8>@4*@!5X#)!{1t7{`ZQ`UYw^}Xw=}AjG;)(=WrOK8pg?o+m#*1De^%8hKn?k zzsM$V7Sy_eS9Ny(vi&{O{2g|go*dfXleO8;P4AxGGc!3oF}*wCj8+r#&H4=L)jdPK#WL7e3 zzTFg=B(|W0TVY0NhxN&Ls1nL7Su?CGte(;UA$nJs#eAQc3`%cu(|~?ByLTi93IS;s z6oK^N7U@Mzexdv5*5eBDa>y}?TdLPc7F(s3jE~z?$BoA4e9#<$c`m0endtF7*M-f> zxl>7SZIcTZ$t#C(aGn~ROhkiyTGI=mG~U5onoI@pY~i%rt=~sgb`2-n(Pz$w3rcJ>{!Nhdwct~ui&tZN9w%^b!B===cQkxj+ZigD z2qAZpS>{N{(svfBVF9wCTlvS}dxYN%J~7<;aALxxn={C@k9Xq~e z@#W;wYuLi>jfLe4i>2H@`UKXn!Cb7ED`kwawuD&SSU^L?_hCmd?GSdGx*iNk(%K_t zP`g}6^iKk97Is7!kb`&tG{=-H%Y((}c$UX=(M2&F(5zY-DW8hYzWqmTsS$TF(Eg!xS6^o0 z*AnZ~&w~_O61-R1l0c+_z&`3@;LrTwzp5wk{W>gNqHPd)Z0gTKmPq5M$AH~mN!cemZ5haBs zhjaT4O@&LzZP_IdW65qa9SgTPFtyB~Qnb#&@)Dj`34m^KJZ%U3m5jHr1&Ei2mK5U5 zKZwL~YzqjAmf8iHW5e|MV0l4=ma{(n0tL2$o0;75rXyTUh(Soo%6p4y3a<{{1qhG! 
zz&LZFcx zk#;thf(ZBWw9UxREwz4syDc?%_(&B$G2?!=m-PD;4ozln?R1K{gE{6-^21l|!#awQypM8osO(0J@iHi6Dox zg8_#fC_sLaXE43-J1@X|E{HLFO_rd_9MCm9{T2;ND~yi08EDxbp|{=-ULZjj ztZ;uDvr0rP2SdhgL43L0Jt2GGYk^$D4;iVb5u@b9>9<~Z``OZWmqc;;try>>ONpx> zQ9_0&E`{K(i@3VAsjAd9(c~4@M0hc8;pWf)#|d|Rx$2DXr3u`vj!AfRr%}*dWClXc zVu$fuMxLw71_Xm4UJNZzY+e;W$|_~2`m`HJ?=31lT^cM^qMf3ejDZ(5_?P;{pz!@u z6_Ct4kHIwDEcOGCkzj5SB_PRKufYVPO9Wv)Ebsq3Pw@2+{3D0J0uWS?KV%2He?|9q z6!*2IT)`hNfG-vmvT=O4Hye?6tvmSZ4+gly+8^i%;bI)GquO@ z$@xvc%0&+Y9a@X{Wd9Qf+yX6GxEWDqWWW`hAeLMLK(tsY#-p;>;9JPfNhZVJ_e(U_ z=^y|thhN;%mgAsR-m>4>!d?9&$%hjE^aSF)_<0h&h5kPu8L#c^dQMwFO-ZsfdvtJ>A7O^=k#WHth};LPYv1*-QAFaO3#b=YlG zioLGodnt{$_Lr7RMO{i50SDDa(R11j|L99+F3wXM$gQJtjE`ojW=ZmCncqkE5|HyZ zuxe>n?U;3ahKFN39O2<99+r9dEDz7%AeRt+mB~iRd5{%Jw@p=r7Jr|mxuQS$13toZ zR4Z@YeIDNnv-iHyqrBgs?FCGw&CjcoV0LCbM|=0d1%wP~xPzWlG75i|fO=|Q?TImd z{Z%{yHp^zY4K4ln9@+u84PO*Dn*P#{4AZmE5=edpH{*u5*-~Hu@oMz+fla}UwwkEo z^`~{7aUz1cV*5u)@s3`9`ljABJ>cj~sX|HNo#)8gd@}3Drjp~5{FsMd$D#21Cx3<4 z{wfc0+7-3n(slv;AE2u5aCS(C(Ingxn2!V4>p}T2xbbtB@Iwq?jB^RQjqqQ_AQ@*a z@$4uMny@*Z$vk6rdh%O5{52kan}^@w;oCe=TbFdSo;}2C_-xla(_^~Id5&4B9IKGFX(fu=u>lc`6s?w~d^H9a|fV0w1uDfnlnlkeb0 z3m^R>%@V}ld)C9G9B5ThPp3yu6 zaWHKL2j-@;MWxK@aPLszjb zu|IUn25B>ap1y$%n3R-CDPy@6e7}WXM{eNycFAQr+I|hSAq{UAl_erDh5o>nrYV z*OU?k{&uNxIIaE`HGv#P&P-k!yR;h#GVq6;6G734;{vF{JT>F-04-ktSKNS2DsvSG z7FOo!j_3eTF;IW_p-!5?uuH8n{jMkMP8cDaCxp0B3x3B1kHIoMX}_W6R?t0x5%Jq@ zSd4od{&iLW-Vh1q%Dq!nRe1V{gz^zeYpjDleZ>BMMEjthKEnJN z-P4UDs*YJWx((s({55bfIu|H)1=TK_s?w|hKmS`Y>+BqxcEnl}lQ-7~@ckgBw8vUI zTys$#XD@#e&2P1I+pJ?n-U@ew*~Mn#b~o%NLC8Fvui}sqC?3sX{9WX zJi{!W)+Il!Q1j1c*P1ULkrmygr_}#@B+}0b$*+Z9Mpi}xT;tbdQLlFU*e8~9nUf_u z7C%dhUD9O*^Aw-8RN_a%zBRlxzy{iUp7Ltt7J_!m{WD0{Jo$>my;=Fq?Ak)Fy#$D$ zsJW%v?8XBo{(qlmG@s4b=GrOJiJBx@k%gxwqld}gLCNjJ?!?!>OM&t*PNLZ-VV@m_ z^*lA!Vm^x^qZTvlVK1o7_=Q6X5<>Q{^eF9N=ut=+e05oH6kGRa50~s-R+jv^8@fN? zAaYvG&^>f-xhFqj+bPt|NT;Dw%&yMv+hZaN<~_DfxhJr3W_QBE;eQ7wYxBRrm;W9Q zf1ih+-~d-We_%WLhrIBIJTShXb@NKtm4pNTU~Lr^Rmdo{A5r6W@*epzqlm-dQ+&k? zwpvqO3|kpBixQ=abuh0;9sf_>yL z7p$B7W0s?&vA3)>VLl~P=x)hBzr)kgNPdWG#=&%FB&0gE&g-?sZU=(x^*&Z}CYEN& zJ^vE+Fk$_P9C+M=LL&(u)?#yzV~O)3mb4+;5Td2dV)Cyq^6WehGU|VkXIFT5lZS8e z@Jl?jcxd7vxBfB%7Uiu>k~Q9^UtjXf7Sq@|p!ax@OkbT%Iq=n`PD|A(bx9dYN-YsJ zZ8wREP`+%ND|}@i+LS@2L|hp(`X7I46p3CFGZRzrCrv#wRhv0DH97s*^dr*;W element identifies an author-or more generally, an entity + responsible for creating the volume in question. Examples of a creator + include a person, an organization, or a service. In the case of + anthologies, proceedings, or other edited works, this field may be used to + indicate editors or other entities responsible for collecting the volume's + contents. + + This element appears as a child of . If there are multiple authors or + contributors to the book, there may be multiple elements in the + volume entry (one for each creator or contributor). + """ + + _tag = 'creator' + _namespace = DC_NAMESPACE + + +class Date(_AtomFromString): #iso 8601 / W3CDTF profile + """ + The element indicates the publication date of the specific volume + in question. If the book is a reprint, this is the reprint date, not the + original publication date. The date is encoded according to the ISO-8601 + standard (and more specifically, the W3CDTF profile). + + The element can appear only as a child of . + + Usually only the year or the year and the month are given. + + YYYY-MM-DDThh:mm:ssTZD TZD = -hh:mm or +hh:mm + """ + + _tag = 'date' + _namespace = DC_NAMESPACE + + +class Description(_AtomFromString): + """ + The element includes text that describes a book or book + result. 
In a search result feed, this may be a search result "snippet" that + contains the words around the user's search term. For a single volume feed, + this element may contain a synopsis of the book. + + The element can appear only as a child of + """ + + _tag = 'description' + _namespace = DC_NAMESPACE + + +class Format(_AtomFromString): + """ + The element describes the physical properties of the volume. + Currently, it indicates the number of pages in the book, but more + information may be added to this field in the future. + + This element can appear only as a child of . + """ + + _tag = 'format' + _namespace = DC_NAMESPACE + + +class Identifier(_AtomFromString): + """ + The element provides an unambiguous reference to a + particular book. + * Every contains at least one child. + * The first identifier is always the unique string Book Search has assigned + to the volume (such as s1gVAAAAYAAJ). This is the ID that appears in the + book's URL in the Book Search GUI, as well as in the URL of that book's + single item feed. + * Many books contain additional elements. These provide + alternate, external identifiers to the volume. Such identifiers may + include the ISBNs, ISSNs, Library of Congress Control Numbers (LCCNs), + and OCLC numbers; they are prepended with a corresponding namespace + prefix (such as "ISBN:"). + * Any can be passed to the Dynamic Links, used to + instantiate an Embedded Viewer, or even used to construct static links to + Book Search. + The element can appear only as a child of . + """ + + _tag = 'identifier' + _namespace = DC_NAMESPACE + + +class Publisher(_AtomFromString): + """ + The element contains the name of the entity responsible for + producing and distributing the volume (usually the specific edition of this + book). Examples of a publisher include a person, an organization, or a + service. + + This element can appear only as a child of . If there is more than + one publisher, multiple elements may appear. + """ + + _tag = 'publisher' + _namespace = DC_NAMESPACE + + +class Subject(_AtomFromString): + """ + The element identifies the topic of the book. Usually this is + a Library of Congress Subject Heading (LCSH) or Book Industry Standards + and Communications Subject Heading (BISAC). + + The element can appear only as a child of . There may + be multiple elements per entry. + """ + + _tag = 'subject' + _namespace = DC_NAMESPACE + + +class Title(_AtomFromString): + """ + The element contains the title of a book as it was published. If + a book has a subtitle, it appears as a second element in the book + result's . + """ + + _tag = 'title' + _namespace = DC_NAMESPACE + + +class Viewability(_AtomFromString): + """ + Google Book Search respects the user's local copyright restrictions. As a + result, previews or full views of some books are not available in all + locations. The element indicates whether a book is fully + viewable, can be previewed, or only has "about the book" information. These + three "viewability modes" are the same ones returned by the Dynamic Links + API. + + The element can appear only as a child of . 
+ + The value attribute will take the form of the following URIs to represent + the relevant viewing capability: + + Full View: http://schemas.google.com/books/2008#view_all_pages + Limited Preview: http://schemas.google.com/books/2008#view_partial + Snippet View/No Preview: http://schemas.google.com/books/2008#view_no_pages + Unknown view: http://schemas.google.com/books/2008#view_unknown + """ + + _tag = 'viewability' + _namespace = BOOK_SEARCH_NAMESPACE + _attributes = atom.AtomBase._attributes.copy() + _attributes['value'] = 'value' + + def __init__(self, value=None, text=None, + extension_elements=None, extension_attributes=None): + self.value = value + _AtomFromString.__init__(self, extension_elements=extension_elements, + extension_attributes=extension_attributes, text=text) + + +class Embeddability(_AtomFromString): + """ + Many of the books found on Google Book Search can be embedded on third-party + sites using the Embedded Viewer. The element indicates + whether a particular book result is available for embedding. By definition, + a book that cannot be previewed on Book Search cannot be embedded on third- + party sites. + + The element can appear only as a child of . + + The value attribute will take on one of the following URIs: + embeddable: http://schemas.google.com/books/2008#embeddable + not embeddable: http://schemas.google.com/books/2008#not_embeddable + """ + + _tag = 'embeddability' + _namespace = BOOK_SEARCH_NAMESPACE + _attributes = atom.AtomBase._attributes.copy() + _attributes['value'] = 'value' + + def __init__(self, value=None, text=None, extension_elements=None, + extension_attributes=None): + self.value = value + _AtomFromString.__init__(self, extension_elements=extension_elements, + extension_attributes=extension_attributes, text=text) + + +class Review(_AtomFromString): + """ + When present, the element contains a user-generated review for + a given book. This element currently appears only in the user library and + user annotation feeds, as a child of . + + type: text, html, xhtml + xml:lang: id of the language, a guess, (always two letters?) + """ + + _tag = 'review' + _namespace = BOOK_SEARCH_NAMESPACE + _attributes = atom.AtomBase._attributes.copy() + _attributes['type'] = 'type' + _attributes['{http://www.w3.org/XML/1998/namespace}lang'] = 'lang' + + def __init__(self, type=None, lang=None, text=None, + extension_elements=None, extension_attributes=None): + self.type = type + self.lang = lang + _AtomFromString.__init__(self, extension_elements=extension_elements, + extension_attributes=extension_attributes, text=text) + + +class Rating(_AtomFromString): + """All attributes must take an integral string between 1 and 5. + The min, max, and average attributes represent 'community' ratings. The + value attribute is the user's (of the feed from which the item is fetched, + not necessarily the authenticated user) rating of the book. 
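+
+  An illustrative (not authoritative) example of such an element:
+    <gd:rating min='1' max='5' average='4.25' value='4'/>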
+ """ + + _tag = 'rating' + _namespace = gdata.GDATA_NAMESPACE + _attributes = atom.AtomBase._attributes.copy() + _attributes['min'] = 'min' + _attributes['max'] = 'max' + _attributes['average'] = 'average' + _attributes['value'] = 'value' + + def __init__(self, min=None, max=None, average=None, value=None, text=None, + extension_elements=None, extension_attributes=None): + self.min = min + self.max = max + self.average = average + self.value = value + _AtomFromString.__init__(self, extension_elements=extension_elements, + extension_attributes=extension_attributes, text=text) + + +class Book(_AtomFromString, gdata.GDataEntry): + """ + Represents an from either a search, annotation, library, or single + item feed. Note that dc_title attribute is the proper title of the volume, + title is an atom element and may not represent the full title. + """ + + _tag = 'entry' + _namespace = atom.ATOM_NAMESPACE + _children = gdata.GDataEntry._children.copy() + for i in (Creator, Identifier, Publisher, Subject,): + _children['{%s}%s' % (i._namespace, i._tag)] = (i._tag, [i]) + for i in (Date, Description, Format, Viewability, Embeddability, + Review, Rating): # Review, Rating maybe only in anno/lib entrys + _children['{%s}%s' % (i._namespace, i._tag)] = (i._tag, i) + # there is an atom title as well, should we clobber that? + del(i) + _children['{%s}%s' % (Title._namespace, Title._tag)] = ('dc_title', [Title]) + + def to_dict(self): + """Returns a dictionary of the book's available metadata. If the data + cannot be discovered, it is not included as a key in the returned dict. + The possible keys are: authors, embeddability, date, description, + format, identifiers, publishers, rating, review, subjects, title, and + viewability. + + Notes: + * Plural keys will be lists + * Singular keys will be strings + * Title, despite usually being a list, joins the title and subtitle + with a space as a single string. + * embeddability and viewability only return the portion of the URI + after # + * identifiers is a list of tuples, where the first item of each tuple + is the type of identifier and the second item is the identifying + string. Note that while doing dict() on this tuple may be possible, + some items may have multiple of the same identifier and converting + to a dict may resulted in collisions/dropped data. + * Rating returns only the user's rating. See Rating class for precise + definition. + """ + d = {} + if self.GetAnnotationLink(): + d['annotation'] = self.GetAnnotationLink().href + if self.creator: + d['authors'] = [x.text for x in self.creator] + if self.embeddability: + d['embeddability'] = self.embeddability.value.split('#')[-1] + if self.date: + d['date'] = self.date.text + if self.description: + d['description'] = self.description.text + if self.format: + d['format'] = self.format.text + if self.identifier: + d['identifiers'] = [('google_id', self.identifier[0].text)] + for x in self.identifier[1:]: + l = x.text.split(':') # should we lower the case of the ids? 
+ d['identifiers'].append((l[0], ':'.join(l[1:]))) + if self.GetInfoLink(): + d['info'] = self.GetInfoLink().href + if self.GetPreviewLink(): + d['preview'] = self.GetPreviewLink().href + if self.publisher: + d['publishers'] = [x.text for x in self.publisher] + if self.rating: + d['rating'] = self.rating.value + if self.review: + d['review'] = self.review.text + if self.subject: + d['subjects'] = [x.text for x in self.subject] + if self.GetThumbnailLink(): + d['thumbnail'] = self.GetThumbnailLink().href + if self.dc_title: + d['title'] = ' '.join([x.text for x in self.dc_title]) + if self.viewability: + d['viewability'] = self.viewability.value.split('#')[-1] + return d + + def __init__(self, creator=None, date=None, + description=None, format=None, author=None, identifier=None, + publisher=None, subject=None, dc_title=None, viewability=None, + embeddability=None, review=None, rating=None, category=None, + content=None, contributor=None, atom_id=None, link=None, + published=None, rights=None, source=None, summary=None, + title=None, control=None, updated=None, text=None, + extension_elements=None, extension_attributes=None): + self.creator = creator + self.date = date + self.description = description + self.format = format + self.identifier = identifier + self.publisher = publisher + self.subject = subject + self.dc_title = dc_title or [] + self.viewability = viewability + self.embeddability = embeddability + self.review = review + self.rating = rating + gdata.GDataEntry.__init__(self, author=author, category=category, + content=content, contributor=contributor, atom_id=atom_id, + link=link, published=published, rights=rights, source=source, + summary=summary, title=title, control=control, updated=updated, + text=text, extension_elements=extension_elements, + extension_attributes=extension_attributes) + + def GetThumbnailLink(self): + """Returns the atom.Link object representing the thumbnail URI.""" + for i in self.link: + if i.rel == THUMBNAIL_REL: + return i + + def GetInfoLink(self): + """ + Returns the atom.Link object representing the human-readable info URI. + """ + for i in self.link: + if i.rel == INFO_REL: + return i + + def GetPreviewLink(self): + """Returns the atom.Link object representing the preview URI.""" + for i in self.link: + if i.rel == PREVIEW_REL: + return i + + def GetAnnotationLink(self): + """ + Returns the atom.Link object representing the Annotation URI. + Note that the use of www.books in the href of this link seems to make + this information useless. Using books.service.ANNOTATION_FEED and + BOOK_SERVER to construct your URI seems to work better. + """ + for i in self.link: + if i.rel == ANNOTATION_REL: + return i + + def set_rating(self, value): + """Set user's rating. 
Must be an integral string between 1 nad 5""" + assert (value in ('1','2','3','4','5')) + if not isinstance(self.rating, Rating): + self.rating = Rating() + self.rating.value = value + + def set_review(self, text, type='text', lang='en'): + """Set user's review text""" + self.review = Review(text=text, type=type, lang=lang) + + def get_label(self): + """Get users label for the item as a string""" + for i in self.category: + if i.scheme == LABEL_SCHEME: + return i.term + + def set_label(self, term): + """Clear pre-existing label for the item and set term as the label.""" + self.remove_label() + self.category.append(atom.Category(term=term, scheme=LABEL_SCHEME)) + + def remove_label(self): + """Clear the user's label for the item""" + ln = len(self.category) + for i, j in enumerate(self.category[::-1]): + if j.scheme == LABEL_SCHEME: + del(self.category[ln-1-i]) + + def clean_annotations(self): + """Clear all annotations from an item. Useful for taking an item from + another user's library/annotation feed and adding it to the + authenticated user's library without adopting annotations.""" + self.remove_label() + self.review = None + self.rating = None + + + def get_google_id(self): + """Get Google's ID of the item.""" + return self.id.text.split('/')[-1] + + +class BookFeed(_AtomFromString, gdata.GDataFeed): + """Represents a feed of entries from a search.""" + + _tag = 'feed' + _namespace = atom.ATOM_NAMESPACE + _children = gdata.GDataFeed._children.copy() + _children['{%s}%s' % (Book._namespace, Book._tag)] = (Book._tag, [Book]) + + +if __name__ == '__main__': + import doctest + doctest.testfile('datamodels.txt') diff --git a/gam/gdata/books/data.py b/gam/gdata/books/data.py new file mode 100755 index 00000000000..3f7f978b34d --- /dev/null +++ b/gam/gdata/books/data.py @@ -0,0 +1,90 @@ +#!/usr/bin/python +# +# Copyright (C) 2009 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""Contains the data classes of the Google Book Search Data API""" + + +__author__ = 'j.s@google.com (Jeff Scudder)' + + +import atom.core +import atom.data +import gdata.data +import gdata.dublincore.data +import gdata.opensearch.data + + +GBS_TEMPLATE = '{http://schemas.google.com/books/2008/}%s' + + +class CollectionEntry(gdata.data.GDEntry): + """Describes an entry in a feed of collections.""" + + +class CollectionFeed(gdata.data.BatchFeed): + """Describes a Book Search collection feed.""" + entry = [CollectionEntry] + + +class Embeddability(atom.core.XmlElement): + """Describes an embeddability.""" + _qname = GBS_TEMPLATE % 'embeddability' + value = 'value' + + +class OpenAccess(atom.core.XmlElement): + """Describes an open access.""" + _qname = GBS_TEMPLATE % 'openAccess' + value = 'value' + + +class Review(atom.core.XmlElement): + """User-provided review.""" + _qname = GBS_TEMPLATE % 'review' + lang = 'lang' + type = 'type' + + +class Viewability(atom.core.XmlElement): + """Describes a viewability.""" + _qname = GBS_TEMPLATE % 'viewability' + value = 'value' + + +class VolumeEntry(gdata.data.GDEntry): + """Describes an entry in a feed of Book Search volumes.""" + comments = gdata.data.Comments + language = [gdata.dublincore.data.Language] + open_access = OpenAccess + format = [gdata.dublincore.data.Format] + dc_title = [gdata.dublincore.data.Title] + viewability = Viewability + embeddability = Embeddability + creator = [gdata.dublincore.data.Creator] + rating = gdata.data.Rating + description = [gdata.dublincore.data.Description] + publisher = [gdata.dublincore.data.Publisher] + date = [gdata.dublincore.data.Date] + subject = [gdata.dublincore.data.Subject] + identifier = [gdata.dublincore.data.Identifier] + review = Review + + +class VolumeFeed(gdata.data.BatchFeed): + """Describes a Book Search volume feed.""" + entry = [VolumeEntry] + + diff --git a/gam/gdata/books/service.py b/gam/gdata/books/service.py new file mode 100755 index 00000000000..cbb846fcd42 --- /dev/null +++ b/gam/gdata/books/service.py @@ -0,0 +1,266 @@ +#!/usr/bin/python + +""" + Extend gdata.service.GDataService to support authenticated CRUD ops on + Books API + + http://code.google.com/apis/books/docs/getting-started.html + http://code.google.com/apis/books/docs/gdata/developers_guide_protocol.html + + TODO: (here and __init__) + * search based on label, review, or other annotations (possible?) + * edit (specifically, Put requests) seem to fail effect a change + + Problems With API: + * Adding a book with a review to the library adds a note, not a review. + This does not get included in the returned item. You see this by + looking at My Library through the website. + * Editing a review never edits a review (unless it is freshly added, but + see above). More generally, + * a Put request with changed annotations (label/rating/review) does NOT + change the data. Note: Put requests only work on the href from + GetEditLink (as per the spec). Do not try to PUT to the annotate or + library feeds, this will cause a 400 Invalid URI Bad Request response. + Attempting to Post to one of the feeds with the updated annotations + does not update them. See the following for (hopefully) a follow up: + google.com/support/forum/p/booksearch-apis/thread?tid=27fd7f68de438fc8 + * Attempts to workaround the edit problem continue to fail. For example, + removing the item, editing the data, readding the item, gives us only + our originally added data (annotations). 
This occurs even if we + completely shut python down, refetch the book from the public feed, + and re-add it. There is some kind of persistence going on that I + cannot change. This is likely due to the annotations being cached in + the annotation feed and the inability to edit (see Put, above) + * GetAnnotationLink has www.books.... as the server, but hitting www... + results in a bad URI error. + * Spec indicates there may be multiple labels, but there does not seem + to be a way to get the server to accept multiple labels, nor does the + web interface have an obvious way to have multiple labels. Multiple + labels are never returned. +""" + +__author__ = "James Sams " +__copyright__ = "Apache License v2.0" + +from shlex import split + +import gdata.service +try: + import books +except ImportError: + import gdata.books as books + + +BOOK_SERVER = "books.google.com" +GENERAL_FEED = "/books/feeds/volumes" +ITEM_FEED = "/books/feeds/volumes/" +LIBRARY_FEED = "/books/feeds/users/%s/collections/library/volumes" +ANNOTATION_FEED = "/books/feeds/users/%s/volumes" +PARTNER_FEED = "/books/feeds/p/%s/volumes" +BOOK_SERVICE = "print" +ACCOUNT_TYPE = "HOSTED_OR_GOOGLE" + + +class BookService(gdata.service.GDataService): + + def __init__(self, email=None, password=None, source=None, + server=BOOK_SERVER, account_type=ACCOUNT_TYPE, + exception_handlers=tuple(), **kwargs): + """source should be of form 'ProgramCompany - ProgramName - Version'""" + + gdata.service.GDataService.__init__(self, email=email, + password=password, service=BOOK_SERVICE, source=source, + server=server, **kwargs) + self.exception_handlers = exception_handlers + + def search(self, q, start_index="1", max_results="10", + min_viewability="none", feed=GENERAL_FEED, + converter=books.BookFeed.FromString): + """ + Query the Public search feed. q is either a search string or a + gdata.service.Query instance with a query set. + + min_viewability must be "none", "partial", or "full". + + If you change the feed to a single item feed, note that you will + probably need to change the converter to be Book.FromString + """ + + if not isinstance(q, gdata.service.Query): + q = gdata.service.Query(text_query=q) + if feed: + q.feed = feed + q['start-index'] = start_index + q['max-results'] = max_results + q['min-viewability'] = min_viewability + return self.Get(uri=q.ToUri(),converter=converter) + + def search_by_keyword(self, q='', feed=GENERAL_FEED, start_index="1", + max_results="10", min_viewability="none", **kwargs): + """ + Query the Public Search Feed by keyword. Non-keyword strings can be + set in q. This is quite fragile. Is there a function somewhere in + the Google library that will parse a query the same way that Google + does? + + Legal Identifiers are listed below and correspond to their meaning + at http://books.google.com/advanced_book_search: + all_words + exact_phrase + at_least_one + without_words + title + author + publisher + subject + isbn + lccn + oclc + seemingly unsupported: + publication_date: a sequence of two, two tuples: + ((min_month,min_year),(max_month,max_year)) + where month is one/two digit month, year is 4 digit, eg: + (('1','2000'),('10','2003')). 
Lower bound is inclusive, + upper bound is exclusive + """ + + for k, v in kwargs.items(): + if not v: + continue + k = k.lower() + if k == 'all_words': + q = "%s %s" % (q, v) + elif k == 'exact_phrase': + q = '%s "%s"' % (q, v.strip('"')) + elif k == 'at_least_one': + q = '%s %s' % (q, ' '.join(['OR "%s"' % x for x in split(v)])) + elif k == 'without_words': + q = '%s %s' % (q, ' '.join(['-"%s"' % x for x in split(v)])) + elif k in ('author','title', 'publisher'): + q = '%s %s' % (q, ' '.join(['in%s:"%s"'%(k,x) for x in split(v)])) + elif k == 'subject': + q = '%s %s' % (q, ' '.join(['%s:"%s"' % (k,x) for x in split(v)])) + elif k == 'isbn': + q = '%s ISBN%s' % (q, v) + elif k == 'issn': + q = '%s ISSN%s' % (q,v) + elif k == 'oclc': + q = '%s OCLC%s' % (q,v) + else: + raise ValueError("Unsupported search keyword") + return self.search(q.strip(),start_index=start_index, feed=feed, + max_results=max_results, + min_viewability=min_viewability) + + def search_library(self, q, id='me', **kwargs): + """Like search, but in a library feed. Default is the authenticated + user's feed. Change by setting id.""" + + if 'feed' in kwargs: + raise ValueError("kwarg 'feed' conflicts with library_id") + feed = LIBRARY_FEED % id + return self.search(q, feed=feed, **kwargs) + + def search_library_by_keyword(self, id='me', **kwargs): + """Hybrid of search_by_keyword and search_library + """ + + if 'feed' in kwargs: + raise ValueError("kwarg 'feed' conflicts with library_id") + feed = LIBRARY_FEED % id + return self.search_by_keyword(feed=feed,**kwargs) + + def search_annotations(self, q, id='me', **kwargs): + """Like search, but in an annotation feed. Default is the authenticated + user's feed. Change by setting id.""" + + if 'feed' in kwargs: + raise ValueError("kwarg 'feed' conflicts with library_id") + feed = ANNOTATION_FEED % id + return self.search(q, feed=feed, **kwargs) + + def search_annotations_by_keyword(self, id='me', **kwargs): + """Hybrid of search_by_keyword and search_annotations + """ + + if 'feed' in kwargs: + raise ValueError("kwarg 'feed' conflicts with library_id") + feed = ANNOTATION_FEED % id + return self.search_by_keyword(feed=feed,**kwargs) + + def add_item_to_library(self, item): + """Add the item, either an XML string or books.Book instance, to the + user's library feed""" + + feed = LIBRARY_FEED % 'me' + return self.Post(data=item, uri=feed, converter=books.Book.FromString) + + def remove_item_from_library(self, item): + """ + Remove the item, a books.Book instance, from the authenticated user's + library feed. Using an item retrieved from a public search will fail. + """ + + return self.Delete(item.GetEditLink().href) + + def add_annotation(self, item): + """ + Add the item, either an XML string or books.Book instance, to the + user's annotation feed. + """ + # do not use GetAnnotationLink, results in 400 Bad URI due to www + return self.Post(data=item, uri=ANNOTATION_FEED % 'me', + converter=books.Book.FromString) + + def edit_annotation(self, item): + """ + Send an edited item, a books.Book instance, to the user's annotation + feed. Note that whereas extra annotations in add_annotations, minus + ratings which are immutable once set, are simply added to the item in + the annotation feed, if an annotation has been removed from the item, + sending an edit request will remove that annotation. This should not + happen with add_annotation. 
+ """ + + return self.Put(data=item, uri=item.GetEditLink().href, + converter=books.Book.FromString) + + def get_by_google_id(self, id): + return self.Get(ITEM_FEED + id, converter=books.Book.FromString) + + def get_library(self, id='me',feed=LIBRARY_FEED, start_index="1", + max_results="100", min_viewability="none", + converter=books.BookFeed.FromString): + """ + Return a generator object that will return gbook.Book instances until + the search feed no longer returns an item from the GetNextLink method. + Thus max_results is not the maximum number of items that will be + returned, but rather the number of items per page of searches. This has + been set high to reduce the required number of network requests. + """ + + q = gdata.service.Query() + q.feed = feed % id + q['start-index'] = start_index + q['max-results'] = max_results + q['min-viewability'] = min_viewability + x = self.Get(uri=q.ToUri(), converter=converter) + while 1: + for entry in x.entry: + yield entry + else: + l = x.GetNextLink() + if l: # hope the server preserves our preferences + x = self.Get(uri=l.href, converter=converter) + else: + break + + def get_annotations(self, id='me', start_index="1", max_results="100", + min_viewability="none", converter=books.BookFeed.FromString): + """ + Like get_library, but for the annotation feed + """ + + return self.get_library(id=id, feed=ANNOTATION_FEED, + max_results=max_results, min_viewability = min_viewability, + converter=converter) diff --git a/gam/gdata/calendar/__init__.py b/gam/gdata/calendar/__init__.py new file mode 100755 index 00000000000..06c041075a9 --- /dev/null +++ b/gam/gdata/calendar/__init__.py @@ -0,0 +1,1044 @@ +#!/usr/bin/python +# +# Copyright (C) 2006 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +"""Contains extensions to ElementWrapper objects used with Google Calendar.""" + + +__author__ = 'api.vli (Vivian Li), api.rboyd (Ryan Boyd)' + + +try: + from xml.etree import cElementTree as ElementTree +except ImportError: + try: + import cElementTree as ElementTree + except ImportError: + try: + from xml.etree import ElementTree + except ImportError: + from elementtree import ElementTree +import atom +import gdata + + +# XML namespaces which are often used in Google Calendar entities. +GCAL_NAMESPACE = 'http://schemas.google.com/gCal/2005' +GCAL_TEMPLATE = '{http://schemas.google.com/gCal/2005}%s' +WEB_CONTENT_LINK_REL = '%s/%s' % (GCAL_NAMESPACE, 'webContent') +GACL_NAMESPACE = gdata.GACL_NAMESPACE +GACL_TEMPLATE = gdata.GACL_TEMPLATE + + + +class ValueAttributeContainer(atom.AtomBase): + """A parent class for all Calendar classes which have a value attribute. 
+ + Children include Color, AccessLevel, Hidden + """ + + _children = atom.AtomBase._children.copy() + _attributes = atom.AtomBase._attributes.copy() + _attributes['value'] = 'value' + + def __init__(self, value=None, extension_elements=None, + extension_attributes=None, text=None): + self.value = value + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + +class Color(ValueAttributeContainer): + """The Google Calendar color element""" + + _tag = 'color' + _namespace = GCAL_NAMESPACE + _children = ValueAttributeContainer._children.copy() + _attributes = ValueAttributeContainer._attributes.copy() + + + +class AccessLevel(ValueAttributeContainer): + """The Google Calendar accesslevel element""" + + _tag = 'accesslevel' + _namespace = GCAL_NAMESPACE + _children = ValueAttributeContainer._children.copy() + _attributes = ValueAttributeContainer._attributes.copy() + + +class Hidden(ValueAttributeContainer): + """The Google Calendar hidden element""" + + _tag = 'hidden' + _namespace = GCAL_NAMESPACE + _children = ValueAttributeContainer._children.copy() + _attributes = ValueAttributeContainer._attributes.copy() + + +class Selected(ValueAttributeContainer): + """The Google Calendar selected element""" + + _tag = 'selected' + _namespace = GCAL_NAMESPACE + _children = ValueAttributeContainer._children.copy() + _attributes = ValueAttributeContainer._attributes.copy() + + +class Timezone(ValueAttributeContainer): + """The Google Calendar timezone element""" + + _tag = 'timezone' + _namespace = GCAL_NAMESPACE + _children = ValueAttributeContainer._children.copy() + _attributes = ValueAttributeContainer._attributes.copy() + + +class Where(atom.AtomBase): + """The Google Calendar Where element""" + + _tag = 'where' + _namespace = gdata.GDATA_NAMESPACE + _children = atom.AtomBase._children.copy() + _attributes = atom.AtomBase._attributes.copy() + _attributes['valueString'] = 'value_string' + + def __init__(self, value_string=None, extension_elements=None, + extension_attributes=None, text=None): + self.value_string = value_string + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + +class CalendarListEntry(gdata.GDataEntry, gdata.LinkFinder): + """A Google Calendar meta Entry flavor of an Atom Entry """ + + _tag = gdata.GDataEntry._tag + _namespace = gdata.GDataEntry._namespace + _children = gdata.GDataEntry._children.copy() + _attributes = gdata.GDataEntry._attributes.copy() + _children['{%s}color' % GCAL_NAMESPACE] = ('color', Color) + _children['{%s}accesslevel' % GCAL_NAMESPACE] = ('access_level', + AccessLevel) + _children['{%s}hidden' % GCAL_NAMESPACE] = ('hidden', Hidden) + _children['{%s}selected' % GCAL_NAMESPACE] = ('selected', Selected) + _children['{%s}timezone' % GCAL_NAMESPACE] = ('timezone', Timezone) + _children['{%s}where' % gdata.GDATA_NAMESPACE] = ('where', Where) + + def __init__(self, author=None, category=None, content=None, + atom_id=None, link=None, published=None, + title=None, updated=None, + color=None, access_level=None, hidden=None, timezone=None, + selected=None, + where=None, + extension_elements=None, extension_attributes=None, text=None): + gdata.GDataEntry.__init__(self, author=author, category=category, + content=content, atom_id=atom_id, link=link, + published=published, title=title, + updated=updated, text=None) + + self.color = color + self.access_level = access_level + self.hidden = hidden + self.selected = 
selected + self.timezone = timezone + self.where = where + + +class CalendarListFeed(gdata.GDataFeed, gdata.LinkFinder): + """A Google Calendar meta feed flavor of an Atom Feed""" + + _tag = gdata.GDataFeed._tag + _namespace = gdata.GDataFeed._namespace + _children = gdata.GDataFeed._children.copy() + _attributes = gdata.GDataFeed._attributes.copy() + _children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry', [CalendarListEntry]) + + +class Scope(atom.AtomBase): + """The Google ACL scope element""" + + _tag = 'scope' + _namespace = GACL_NAMESPACE + _children = atom.AtomBase._children.copy() + _attributes = atom.AtomBase._attributes.copy() + _attributes['value'] = 'value' + _attributes['type'] = 'type' + + def __init__(self, extension_elements=None, value=None, scope_type=None, + extension_attributes=None, text=None): + self.value = value + self.type = scope_type + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + +class Role(ValueAttributeContainer): + """The Google Calendar timezone element""" + + _tag = 'role' + _namespace = GACL_NAMESPACE + _children = ValueAttributeContainer._children.copy() + _attributes = ValueAttributeContainer._attributes.copy() + + +class CalendarAclEntry(gdata.GDataEntry, gdata.LinkFinder): + """A Google Calendar ACL Entry flavor of an Atom Entry """ + + _tag = gdata.GDataEntry._tag + _namespace = gdata.GDataEntry._namespace + _children = gdata.GDataEntry._children.copy() + _attributes = gdata.GDataEntry._attributes.copy() + _children['{%s}scope' % GACL_NAMESPACE] = ('scope', Scope) + _children['{%s}role' % GACL_NAMESPACE] = ('role', Role) + + def __init__(self, author=None, category=None, content=None, + atom_id=None, link=None, published=None, + title=None, updated=None, + scope=None, role=None, + extension_elements=None, extension_attributes=None, text=None): + gdata.GDataEntry.__init__(self, author=author, category=category, + content=content, atom_id=atom_id, link=link, + published=published, title=title, + updated=updated, text=None) + self.scope = scope + self.role = role + + +class CalendarAclFeed(gdata.GDataFeed, gdata.LinkFinder): + """A Google Calendar ACL feed flavor of an Atom Feed""" + + _tag = gdata.GDataFeed._tag + _namespace = gdata.GDataFeed._namespace + _children = gdata.GDataFeed._children.copy() + _attributes = gdata.GDataFeed._attributes.copy() + _children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry', [CalendarAclEntry]) + + +class CalendarEventCommentEntry(gdata.GDataEntry, gdata.LinkFinder): + """A Google Calendar event comments entry flavor of an Atom Entry""" + + _tag = gdata.GDataEntry._tag + _namespace = gdata.GDataEntry._namespace + _children = gdata.GDataEntry._children.copy() + _attributes = gdata.GDataEntry._attributes.copy() + + +class CalendarEventCommentFeed(gdata.GDataFeed, gdata.LinkFinder): + """A Google Calendar event comments feed flavor of an Atom Feed""" + + _tag = gdata.GDataFeed._tag + _namespace = gdata.GDataFeed._namespace + _children = gdata.GDataFeed._children.copy() + _attributes = gdata.GDataFeed._attributes.copy() + _children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry', + [CalendarEventCommentEntry]) + + +class ExtendedProperty(gdata.ExtendedProperty): + """A transparent subclass of gdata.ExtendedProperty added to this module + for backwards compatibility.""" + + +class Reminder(atom.AtomBase): + """The Google Calendar reminder element""" + + _tag = 'reminder' + _namespace = gdata.GDATA_NAMESPACE + _children = 
atom.AtomBase._children.copy() + _attributes = atom.AtomBase._attributes.copy() + _attributes['absoluteTime'] = 'absolute_time' + _attributes['days'] = 'days' + _attributes['hours'] = 'hours' + _attributes['minutes'] = 'minutes' + _attributes['method'] = 'method' + + def __init__(self, absolute_time=None, + days=None, hours=None, minutes=None, method=None, + extension_elements=None, + extension_attributes=None, text=None): + self.absolute_time = absolute_time + if days is not None: + self.days = str(days) + else: + self.days = None + if hours is not None: + self.hours = str(hours) + else: + self.hours = None + if minutes is not None: + self.minutes = str(minutes) + else: + self.minutes = None + self.method = method + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + +class When(atom.AtomBase): + """The Google Calendar When element""" + + _tag = 'when' + _namespace = gdata.GDATA_NAMESPACE + _children = atom.AtomBase._children.copy() + _attributes = atom.AtomBase._attributes.copy() + _children['{%s}reminder' % gdata.GDATA_NAMESPACE] = ('reminder', [Reminder]) + _attributes['startTime'] = 'start_time' + _attributes['endTime'] = 'end_time' + + def __init__(self, start_time=None, end_time=None, reminder=None, + extension_elements=None, extension_attributes=None, text=None): + self.start_time = start_time + self.end_time = end_time + self.reminder = reminder or [] + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + +class Recurrence(atom.AtomBase): + """The Google Calendar Recurrence element""" + + _tag = 'recurrence' + _namespace = gdata.GDATA_NAMESPACE + _children = atom.AtomBase._children.copy() + _attributes = atom.AtomBase._attributes.copy() + + +class UriEnumElement(atom.AtomBase): + + _children = atom.AtomBase._children.copy() + _attributes = atom.AtomBase._attributes.copy() + + def __init__(self, tag, enum_map, attrib_name='value', + extension_elements=None, extension_attributes=None, text=None): + self.tag=tag + self.enum_map=enum_map + self.attrib_name=attrib_name + self.value=None + self.text=text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + def findKey(self, value): + res=[item[0] for item in self.enum_map.items() if item[1] == value] + if res is None or len(res) == 0: + return None + return res[0] + + def _ConvertElementAttributeToMember(self, attribute, value): + # Special logic to use the enum_map to set the value of the object's value member. + if attribute == self.attrib_name and value != '': + self.value = self.enum_map[value] + return + # Find the attribute in this class's list of attributes. + if self.__class__._attributes.has_key(attribute): + # Find the member of this class which corresponds to the XML attribute + # (lookup in current_class._attributes) and set this member to the + # desired value (using self.__dict__). + setattr(self, self.__class__._attributes[attribute], value) + else: + # The current class doesn't map this attribute, so try to parent class. + atom.ExtensionContainer._ConvertElementAttributeToMember(self, + attribute, + value) + + def _AddMembersToElementTree(self, tree): + # Convert the members of this class which are XML child nodes. + # This uses the class's _children dictionary to find the members which + # should become XML child nodes. 
+ member_node_names = [values[0] for tag, values in + self.__class__._children.iteritems()] + for member_name in member_node_names: + member = getattr(self, member_name) + if member is None: + pass + elif isinstance(member, list): + for instance in member: + instance._BecomeChildElement(tree) + else: + member._BecomeChildElement(tree) + # Special logic to set the desired XML attribute. + key = self.findKey(self.value) + if key is not None: + tree.attrib[self.attrib_name]=key + # Convert the members of this class which are XML attributes. + for xml_attribute, member_name in self.__class__._attributes.iteritems(): + member = getattr(self, member_name) + if member is not None: + tree.attrib[xml_attribute] = member + # Lastly, call the parent's _AddMembersToElementTree to get any + # extension elements. + atom.ExtensionContainer._AddMembersToElementTree(self, tree) + + + +class AttendeeStatus(UriEnumElement): + """The Google Calendar attendeeStatus element""" + + _tag = 'attendeeStatus' + _namespace = gdata.GDATA_NAMESPACE + _children = UriEnumElement._children.copy() + _attributes = UriEnumElement._attributes.copy() + + attendee_enum = { + 'http://schemas.google.com/g/2005#event.accepted' : 'ACCEPTED', + 'http://schemas.google.com/g/2005#event.declined' : 'DECLINED', + 'http://schemas.google.com/g/2005#event.invited' : 'INVITED', + 'http://schemas.google.com/g/2005#event.tentative' : 'TENTATIVE'} + + def __init__(self, extension_elements=None, + extension_attributes=None, text=None): + UriEnumElement.__init__(self, 'attendeeStatus', AttendeeStatus.attendee_enum, + extension_elements=extension_elements, + extension_attributes=extension_attributes, + text=text) + + +class AttendeeType(UriEnumElement): + """The Google Calendar attendeeType element""" + + _tag = 'attendeeType' + _namespace = gdata.GDATA_NAMESPACE + _children = UriEnumElement._children.copy() + _attributes = UriEnumElement._attributes.copy() + + attendee_type_enum = { + 'http://schemas.google.com/g/2005#event.optional' : 'OPTIONAL', + 'http://schemas.google.com/g/2005#event.required' : 'REQUIRED' } + + def __init__(self, extension_elements=None, + extension_attributes=None, text=None): + UriEnumElement.__init__(self, 'attendeeType', + AttendeeType.attendee_type_enum, + extension_elements=extension_elements, + extension_attributes=extension_attributes,text=text) + + +class Visibility(UriEnumElement): + """The Google Calendar Visibility element""" + + _tag = 'visibility' + _namespace = gdata.GDATA_NAMESPACE + _children = UriEnumElement._children.copy() + _attributes = UriEnumElement._attributes.copy() + + visibility_enum = { + 'http://schemas.google.com/g/2005#event.confidential' : 'CONFIDENTIAL', + 'http://schemas.google.com/g/2005#event.default' : 'DEFAULT', + 'http://schemas.google.com/g/2005#event.private' : 'PRIVATE', + 'http://schemas.google.com/g/2005#event.public' : 'PUBLIC' } + + def __init__(self, extension_elements=None, + extension_attributes=None, text=None): + UriEnumElement.__init__(self, 'visibility', Visibility.visibility_enum, + extension_elements=extension_elements, + extension_attributes=extension_attributes, + text=text) + + +class Transparency(UriEnumElement): + """The Google Calendar Transparency element""" + + _tag = 'transparency' + _namespace = gdata.GDATA_NAMESPACE + _children = UriEnumElement._children.copy() + _attributes = UriEnumElement._attributes.copy() + + transparency_enum = { + 'http://schemas.google.com/g/2005#event.opaque' : 'OPAQUE', + 'http://schemas.google.com/g/2005#event.transparent' : 
'TRANSPARENT' } + + def __init__(self, extension_elements=None, + extension_attributes=None, text=None): + UriEnumElement.__init__(self, tag='transparency', + enum_map=Transparency.transparency_enum, + extension_elements=extension_elements, + extension_attributes=extension_attributes, + text=text) + + +class Comments(atom.AtomBase): + """The Google Calendar comments element""" + + _tag = 'comments' + _namespace = gdata.GDATA_NAMESPACE + _children = atom.AtomBase._children.copy() + _attributes = atom.AtomBase._attributes.copy() + _children['{%s}feedLink' % gdata.GDATA_NAMESPACE] = ('feed_link', + gdata.FeedLink) + _attributes['rel'] = 'rel' + + def __init__(self, rel=None, feed_link=None, extension_elements=None, + extension_attributes=None, text=None): + self.rel = rel + self.feed_link = feed_link + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + +class EventStatus(UriEnumElement): + """The Google Calendar eventStatus element""" + + _tag = 'eventStatus' + _namespace = gdata.GDATA_NAMESPACE + _children = UriEnumElement._children.copy() + _attributes = UriEnumElement._attributes.copy() + + status_enum = { 'http://schemas.google.com/g/2005#event.canceled' : 'CANCELED', + 'http://schemas.google.com/g/2005#event.confirmed' : 'CONFIRMED', + 'http://schemas.google.com/g/2005#event.tentative' : 'TENTATIVE'} + + def __init__(self, extension_elements=None, + extension_attributes=None, text=None): + UriEnumElement.__init__(self, tag='eventStatus', + enum_map=EventStatus.status_enum, + extension_elements=extension_elements, + extension_attributes=extension_attributes, + text=text) + + +class Who(UriEnumElement): + """The Google Calendar Who element""" + + _tag = 'who' + _namespace = gdata.GDATA_NAMESPACE + _children = UriEnumElement._children.copy() + _attributes = UriEnumElement._attributes.copy() + _children['{%s}attendeeStatus' % gdata.GDATA_NAMESPACE] = ( + 'attendee_status', AttendeeStatus) + _children['{%s}attendeeType' % gdata.GDATA_NAMESPACE] = ('attendee_type', + AttendeeType) + _attributes['valueString'] = 'name' + _attributes['email'] = 'email' + + relEnum = { 'http://schemas.google.com/g/2005#event.attendee' : 'ATTENDEE', + 'http://schemas.google.com/g/2005#event.organizer' : 'ORGANIZER', + 'http://schemas.google.com/g/2005#event.performer' : 'PERFORMER', + 'http://schemas.google.com/g/2005#event.speaker' : 'SPEAKER', + 'http://schemas.google.com/g/2005#message.bcc' : 'BCC', + 'http://schemas.google.com/g/2005#message.cc' : 'CC', + 'http://schemas.google.com/g/2005#message.from' : 'FROM', + 'http://schemas.google.com/g/2005#message.reply-to' : 'REPLY_TO', + 'http://schemas.google.com/g/2005#message.to' : 'TO' } + + def __init__(self, name=None, email=None, attendee_status=None, + attendee_type=None, rel=None, extension_elements=None, + extension_attributes=None, text=None): + UriEnumElement.__init__(self, 'who', Who.relEnum, attrib_name='rel', + extension_elements=extension_elements, + extension_attributes=extension_attributes, + text=text) + self.name = name + self.email = email + self.attendee_status = attendee_status + self.attendee_type = attendee_type + self.rel = rel + + +class OriginalEvent(atom.AtomBase): + """The Google Calendar OriginalEvent element""" + + _tag = 'originalEvent' + _namespace = gdata.GDATA_NAMESPACE + _children = atom.AtomBase._children.copy() + _attributes = atom.AtomBase._attributes.copy() + # TODO: The when tag used to map to a EntryLink, make sure it should really be a When. 
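  # gd:originalEvent appears on entries that are exceptions to a recurring
  # event: its id/href identify the original recurring event, and the nested
  # gd:when carries the start time of the instance that was replaced.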
+ _children['{%s}when' % gdata.GDATA_NAMESPACE] = ('when', When) + _attributes['id'] = 'id' + _attributes['href'] = 'href' + + def __init__(self, id=None, href=None, when=None, + extension_elements=None, extension_attributes=None, text=None): + self.id = id + self.href = href + self.when = when + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + +def GetCalendarEventEntryClass(): + return CalendarEventEntry + + +# This class is not completely defined here, because of a circular reference +# in which CalendarEventEntryLink and CalendarEventEntry refer to one another. +class CalendarEventEntryLink(gdata.EntryLink): + """An entryLink which contains a calendar event entry + + Within an event's recurranceExceptions, an entry link + points to a calendar event entry. This class exists + to capture the calendar specific extensions in the entry. + """ + + _tag = 'entryLink' + _namespace = gdata.GDATA_NAMESPACE + _children = gdata.EntryLink._children.copy() + _attributes = gdata.EntryLink._attributes.copy() + # The CalendarEventEntryLink should like CalendarEventEntry as a child but + # that class hasn't been defined yet, so we will wait until after defining + # CalendarEventEntry to list it in _children. + + +class RecurrenceException(atom.AtomBase): + """The Google Calendar RecurrenceException element""" + + _tag = 'recurrenceException' + _namespace = gdata.GDATA_NAMESPACE + _children = atom.AtomBase._children.copy() + _attributes = atom.AtomBase._attributes.copy() + _children['{%s}entryLink' % gdata.GDATA_NAMESPACE] = ('entry_link', + CalendarEventEntryLink) + _children['{%s}originalEvent' % gdata.GDATA_NAMESPACE] = ('original_event', + OriginalEvent) + _attributes['specialized'] = 'specialized' + + def __init__(self, specialized=None, entry_link=None, + original_event=None, extension_elements=None, + extension_attributes=None, text=None): + self.specialized = specialized + self.entry_link = entry_link + self.original_event = original_event + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + +class SendEventNotifications(atom.AtomBase): + """The Google Calendar sendEventNotifications element""" + + _tag = 'sendEventNotifications' + _namespace = GCAL_NAMESPACE + _children = atom.AtomBase._children.copy() + _attributes = atom.AtomBase._attributes.copy() + _attributes['value'] = 'value' + + def __init__(self, extension_elements=None, + value=None, extension_attributes=None, text=None): + self.value = value + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + +class QuickAdd(atom.AtomBase): + """The Google Calendar quickadd element""" + + _tag = 'quickadd' + _namespace = GCAL_NAMESPACE + _children = atom.AtomBase._children.copy() + _attributes = atom.AtomBase._attributes.copy() + _attributes['value'] = 'value' + + def __init__(self, extension_elements=None, + value=None, extension_attributes=None, text=None): + self.value = value + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + def _TransferToElementTree(self, element_tree): + if self.value: + element_tree.attrib['value'] = self.value + element_tree.tag = GCAL_TEMPLATE % 'quickadd' + atom.AtomBase._TransferToElementTree(self, element_tree) + return element_tree + + def _TakeAttributeFromElementTree(self, attribute, element_tree): 
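    # Claim the 'value' attribute for self.value and strip it from the tree so
    # it is not also captured as an extension attribute; any other attribute
    # falls through to the AtomBase handler.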
+ if attribute == 'value': + self.value = element_tree.attrib[attribute] + del element_tree.attrib[attribute] + else: + atom.AtomBase._TakeAttributeFromElementTree(self, attribute, + element_tree) + + +class SyncEvent(atom.AtomBase): + _tag = 'syncEvent' + _namespace = GCAL_NAMESPACE + _children = atom.AtomBase._children.copy() + _attributes = atom.AtomBase._attributes.copy() + _attributes['value'] = 'value' + + def __init__(self, value='false', extension_elements=None, + extension_attributes=None, text=None): + self.value = value + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + +class UID(atom.AtomBase): + _tag = 'uid' + _namespace = GCAL_NAMESPACE + _children = atom.AtomBase._children.copy() + _attributes = atom.AtomBase._attributes.copy() + _attributes['value'] = 'value' + + def __init__(self, value=None, extension_elements=None, + extension_attributes=None, text=None): + self.value = value + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + +class Sequence(atom.AtomBase): + _tag = 'sequence' + _namespace = GCAL_NAMESPACE + _children = atom.AtomBase._children.copy() + _attributes = atom.AtomBase._attributes.copy() + _attributes['value'] = 'value' + + def __init__(self, value=None, extension_elements=None, + extension_attributes=None, text=None): + self.value = value + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + +class WebContentGadgetPref(atom.AtomBase): + + _tag = 'webContentGadgetPref' + _namespace = GCAL_NAMESPACE + _children = atom.AtomBase._children.copy() + _attributes = atom.AtomBase._attributes.copy() + _attributes['name'] = 'name' + _attributes['value'] = 'value' + + """The Google Calendar Web Content Gadget Preferences element""" + + def __init__(self, name=None, value=None, extension_elements=None, + extension_attributes=None, text=None): + self.name = name + self.value = value + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + +class WebContent(atom.AtomBase): + + _tag = 'webContent' + _namespace = GCAL_NAMESPACE + _children = atom.AtomBase._children.copy() + _attributes = atom.AtomBase._attributes.copy() + _children['{%s}webContentGadgetPref' % GCAL_NAMESPACE] = ('gadget_pref', + [WebContentGadgetPref]) + _attributes['url'] = 'url' + _attributes['width'] = 'width' + _attributes['height'] = 'height' + + def __init__(self, url=None, width=None, height=None, text=None, + gadget_pref=None, extension_elements=None, extension_attributes=None): + self.url = url + self.width = width + self.height = height + self.text = text + self.gadget_pref = gadget_pref or [] + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + +class WebContentLink(atom.Link): + + _tag = 'link' + _namespace = atom.ATOM_NAMESPACE + _children = atom.Link._children.copy() + _attributes = atom.Link._attributes.copy() + _children['{%s}webContent' % GCAL_NAMESPACE] = ('web_content', WebContent) + + def __init__(self, title=None, href=None, link_type=None, + web_content=None): + atom.Link.__init__(self, rel=WEB_CONTENT_LINK_REL, title=title, href=href, + link_type=link_type) + self.web_content = web_content + + +class GuestsCanInviteOthers(atom.AtomBase): + """Indicates whether event attendees may invite others to the event. 
+ + This element may only be changed by the organizer of the event. If not + included as part of the event entry, this element will default to true + during a POST request, and will inherit its previous value during a PUT + request. + """ + _tag = 'guestsCanInviteOthers' + _namespace = GCAL_NAMESPACE + _attributes = atom.AtomBase._attributes.copy() + _attributes['value'] = 'value' + + def __init__(self, value='true', *args, **kwargs): + atom.AtomBase.__init__(self, *args, **kwargs) + self.value = value + + +class GuestsCanSeeGuests(atom.AtomBase): + """Indicates whether attendees can see other people invited to the event. + + The organizer always sees all attendees. Guests always see themselves. This + property affects what attendees see in the event's guest list via both the + Calendar UI and API feeds. + + This element may only be changed by the organizer of the event. + + If not included as part of the event entry, this element will default to + true during a POST request, and will inherit its previous value during a + PUT request. + """ + _tag = 'guestsCanSeeGuests' + _namespace = GCAL_NAMESPACE + _attributes = atom.AtomBase._attributes.copy() + _attributes['value'] = 'value' + + def __init__(self, value='true', *args, **kwargs): + atom.AtomBase.__init__(self, *args, **kwargs) + self.value = value + + +class GuestsCanModify(atom.AtomBase): + """Indicates whether event attendees may modify the original event. + + If yes, changes are visible to organizer and other attendees. Otherwise, + any changes made by attendees will be restricted to that attendee's + calendar. + + This element may only be changed by the organizer of the event, and may + be set to 'true' only if both gCal:guestsCanInviteOthers and + gCal:guestsCanSeeGuests are set to true in the same PUT/POST request. + Otherwise, request fails with HTTP error code 400 (Bad Request). + + If not included as part of the event entry, this element will default to + false during a POST request, and will inherit its previous value during a + PUT request.""" + _tag = 'guestsCanModify' + _namespace = GCAL_NAMESPACE + _attributes = atom.AtomBase._attributes.copy() + _attributes['value'] = 'value' + + def __init__(self, value='false', *args, **kwargs): + atom.AtomBase.__init__(self, *args, **kwargs) + self.value = value + + + +class CalendarEventEntry(gdata.BatchEntry): + """A Google Calendar flavor of an Atom Entry """ + + _tag = gdata.BatchEntry._tag + _namespace = gdata.BatchEntry._namespace + _children = gdata.BatchEntry._children.copy() + _attributes = gdata.BatchEntry._attributes.copy() + # This class also contains WebContentLinks but converting those members + # is handled in a special version of _ConvertElementTreeToMember. 
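  # Each mapping below pairs an XML child tag with (member name, member
  # class); wrapping the class in a list marks the member as repeatable, so
  # parsing appends instances rather than keeping a single value.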
+ _children['{%s}where' % gdata.GDATA_NAMESPACE] = ('where', [Where]) + _children['{%s}when' % gdata.GDATA_NAMESPACE] = ('when', [When]) + _children['{%s}who' % gdata.GDATA_NAMESPACE] = ('who', [Who]) + _children['{%s}extendedProperty' % gdata.GDATA_NAMESPACE] = ( + 'extended_property', [ExtendedProperty]) + _children['{%s}visibility' % gdata.GDATA_NAMESPACE] = ('visibility', + Visibility) + _children['{%s}transparency' % gdata.GDATA_NAMESPACE] = ('transparency', + Transparency) + _children['{%s}eventStatus' % gdata.GDATA_NAMESPACE] = ('event_status', + EventStatus) + _children['{%s}recurrence' % gdata.GDATA_NAMESPACE] = ('recurrence', + Recurrence) + _children['{%s}recurrenceException' % gdata.GDATA_NAMESPACE] = ( + 'recurrence_exception', [RecurrenceException]) + _children['{%s}sendEventNotifications' % GCAL_NAMESPACE] = ( + 'send_event_notifications', SendEventNotifications) + _children['{%s}quickadd' % GCAL_NAMESPACE] = ('quick_add', QuickAdd) + _children['{%s}comments' % gdata.GDATA_NAMESPACE] = ('comments', Comments) + _children['{%s}originalEvent' % gdata.GDATA_NAMESPACE] = ('original_event', + OriginalEvent) + _children['{%s}sequence' % GCAL_NAMESPACE] = ('sequence', Sequence) + _children['{%s}reminder' % gdata.GDATA_NAMESPACE] = ('reminder', [Reminder]) + _children['{%s}syncEvent' % GCAL_NAMESPACE] = ('sync_event', SyncEvent) + _children['{%s}uid' % GCAL_NAMESPACE] = ('uid', UID) + _children['{%s}guestsCanInviteOthers' % GCAL_NAMESPACE] = ( + 'guests_can_invite_others', GuestsCanInviteOthers) + _children['{%s}guestsCanModify' % GCAL_NAMESPACE] = ( + 'guests_can_modify', GuestsCanModify) + _children['{%s}guestsCanSeeGuests' % GCAL_NAMESPACE] = ( + 'guests_can_see_guests', GuestsCanSeeGuests) + + def __init__(self, author=None, category=None, content=None, + atom_id=None, link=None, published=None, + title=None, updated=None, + transparency=None, comments=None, event_status=None, + send_event_notifications=None, visibility=None, + recurrence=None, recurrence_exception=None, + where=None, when=None, who=None, quick_add=None, + extended_property=None, original_event=None, + batch_operation=None, batch_id=None, batch_status=None, + sequence=None, reminder=None, sync_event=None, uid=None, + guests_can_invite_others=None, guests_can_modify=None, + guests_can_see_guests=None, + extension_elements=None, extension_attributes=None, text=None): + + gdata.BatchEntry.__init__(self, author=author, category=category, + content=content, + atom_id=atom_id, link=link, published=published, + batch_operation=batch_operation, batch_id=batch_id, + batch_status=batch_status, + title=title, updated=updated) + + self.transparency = transparency + self.comments = comments + self.event_status = event_status + self.send_event_notifications = send_event_notifications + self.visibility = visibility + self.recurrence = recurrence + self.recurrence_exception = recurrence_exception or [] + self.where = where or [] + self.when = when or [] + self.who = who or [] + self.quick_add = quick_add + self.extended_property = extended_property or [] + self.original_event = original_event + self.sequence = sequence + self.reminder = reminder or [] + self.sync_event = sync_event + self.uid = uid + self.text = text + self.guests_can_invite_others = guests_can_invite_others + self.guests_can_modify = guests_can_modify + self.guests_can_see_guests = guests_can_see_guests + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + # We needed to add special logic to 
_ConvertElementTreeToMember because we + # want to make links with a rel of WEB_CONTENT_LINK_REL into a + # WebContentLink + def _ConvertElementTreeToMember(self, child_tree): + # Special logic to handle Web Content links + if (child_tree.tag == '{%s}link' % atom.ATOM_NAMESPACE and + child_tree.attrib['rel'] == WEB_CONTENT_LINK_REL): + if self.link is None: + self.link = [] + self.link.append(atom._CreateClassFromElementTree(WebContentLink, + child_tree)) + return + # Find the element's tag in this class's list of child members + if self.__class__._children.has_key(child_tree.tag): + member_name = self.__class__._children[child_tree.tag][0] + member_class = self.__class__._children[child_tree.tag][1] + # If the class member is supposed to contain a list, make sure the + # matching member is set to a list, then append the new member + # instance to the list. + if isinstance(member_class, list): + if getattr(self, member_name) is None: + setattr(self, member_name, []) + getattr(self, member_name).append(atom._CreateClassFromElementTree( + member_class[0], child_tree)) + else: + setattr(self, member_name, + atom._CreateClassFromElementTree(member_class, child_tree)) + else: + atom.ExtensionContainer._ConvertElementTreeToMember(self, child_tree) + + + def GetWebContentLink(self): + """Finds the first link with rel set to WEB_CONTENT_REL + + Returns: + A gdata.calendar.WebContentLink or none if none of the links had rel + equal to WEB_CONTENT_REL + """ + + for a_link in self.link: + if a_link.rel == WEB_CONTENT_LINK_REL: + return a_link + return None + + +def CalendarEventEntryFromString(xml_string): + return atom.CreateClassFromXMLString(CalendarEventEntry, xml_string) + + +def CalendarEventCommentEntryFromString(xml_string): + return atom.CreateClassFromXMLString(CalendarEventCommentEntry, xml_string) + + +CalendarEventEntryLink._children = {'{%s}entry' % atom.ATOM_NAMESPACE: + ('entry', CalendarEventEntry)} + + +def CalendarEventEntryLinkFromString(xml_string): + return atom.CreateClassFromXMLString(CalendarEventEntryLink, xml_string) + + +class CalendarEventFeed(gdata.BatchFeed, gdata.LinkFinder): + """A Google Calendar event feed flavor of an Atom Feed""" + + _tag = gdata.BatchFeed._tag + _namespace = gdata.BatchFeed._namespace + _children = gdata.BatchFeed._children.copy() + _attributes = gdata.BatchFeed._attributes.copy() + _children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry', + [CalendarEventEntry]) + _children['{%s}timezone' % GCAL_NAMESPACE] = ('timezone', Timezone) + + def __init__(self, author=None, category=None, contributor=None, + generator=None, icon=None, atom_id=None, link=None, logo=None, + rights=None, subtitle=None, title=None, updated=None, entry=None, + total_results=None, start_index=None, items_per_page=None, + interrupted=None, timezone=None, + extension_elements=None, extension_attributes=None, text=None): + gdata.BatchFeed.__init__(self, author=author, category=category, + contributor=contributor, generator=generator, + icon=icon, atom_id=atom_id, link=link, + logo=logo, rights=rights, subtitle=subtitle, + title=title, updated=updated, entry=entry, + total_results=total_results, + start_index=start_index, + items_per_page=items_per_page, + interrupted=interrupted, + extension_elements=extension_elements, + extension_attributes=extension_attributes, + text=text) + self.timezone = timezone + + +def CalendarListEntryFromString(xml_string): + return atom.CreateClassFromXMLString(CalendarListEntry, xml_string) + + +def CalendarAclEntryFromString(xml_string): + return 
atom.CreateClassFromXMLString(CalendarAclEntry, xml_string)
+
+
+def CalendarListFeedFromString(xml_string):
+  return atom.CreateClassFromXMLString(CalendarListFeed, xml_string)
+
+
+def CalendarAclFeedFromString(xml_string):
+  return atom.CreateClassFromXMLString(CalendarAclFeed, xml_string)
+
+
+def CalendarEventFeedFromString(xml_string):
+  return atom.CreateClassFromXMLString(CalendarEventFeed, xml_string)
+
+
+def CalendarEventCommentFeedFromString(xml_string):
+  return atom.CreateClassFromXMLString(CalendarEventCommentFeed, xml_string)
diff --git a/gam/gdata/calendar/__init__.pyc b/gam/gdata/calendar/__init__.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..f8c63c7c475c3a1da35effa9032f6ba493a2f802
GIT binary patch
literal 40015
diff --git a/gam/gdata/calendar/data.py b/gam/gdata/calendar/data.py
new file mode 100755
index 00000000000..c24f04d9d47
--- /dev/null
+++ b/gam/gdata/calendar/data.py
@@ -0,0 +1,300 @@
+#!/usr/bin/python
+#
+# Copyright (C) 2009 Google Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+ + +"""Contains the data classes of the Google Calendar Data API""" + + +__author__ = 'j.s@google.com (Jeff Scudder)' + + +import atom.core +import atom.data +import gdata.acl.data +import gdata.data +import gdata.geo.data +import gdata.opensearch.data + + +GCAL_TEMPLATE = '{http://schemas.google.com/gCal/2005/}%s' + + +class AccessLevelProperty(atom.core.XmlElement): + """Describes how much a given user may do with an event or calendar""" + _qname = GCAL_TEMPLATE % 'accesslevel' + value = 'value' + + +class AllowGSync2Property(atom.core.XmlElement): + """Whether the user is permitted to run Google Apps Sync""" + _qname = GCAL_TEMPLATE % 'allowGSync2' + value = 'value' + + +class AllowGSyncProperty(atom.core.XmlElement): + """Whether the user is permitted to run Google Apps Sync""" + _qname = GCAL_TEMPLATE % 'allowGSync' + value = 'value' + + +class AnyoneCanAddSelfProperty(atom.core.XmlElement): + """Whether anyone can add self as attendee""" + _qname = GCAL_TEMPLATE % 'anyoneCanAddSelf' + value = 'value' + + +class CalendarAclRole(gdata.acl.data.AclRole): + """Describes the Calendar roles of an entry in the Calendar access control list""" + _qname = gdata.acl.data.GACL_TEMPLATE % 'role' + + +class CalendarCommentEntry(gdata.data.GDEntry): + """Describes an entry in a feed of a Calendar event's comments""" + + +class CalendarCommentFeed(gdata.data.GDFeed): + """Describes feed of a Calendar event's comments""" + entry = [CalendarCommentEntry] + + +class CalendarComments(gdata.data.Comments): + """Describes a container of a feed link for Calendar comment entries""" + _qname = gdata.data.GD_TEMPLATE % 'comments' + + +class CalendarExtendedProperty(gdata.data.ExtendedProperty): + """Defines a value for the realm attribute that is used only in the calendar API""" + _qname = gdata.data.GD_TEMPLATE % 'extendedProperty' + + +class CalendarWhere(gdata.data.Where): + """Extends the base Where class with Calendar extensions""" + _qname = gdata.data.GD_TEMPLATE % 'where' + + +class ColorProperty(atom.core.XmlElement): + """Describes the color of a calendar""" + _qname = GCAL_TEMPLATE % 'color' + value = 'value' + + +class GuestsCanInviteOthersProperty(atom.core.XmlElement): + """Whether guests can invite others to the event""" + _qname = GCAL_TEMPLATE % 'guestsCanInviteOthers' + value = 'value' + + +class GuestsCanModifyProperty(atom.core.XmlElement): + """Whether guests can modify event""" + _qname = GCAL_TEMPLATE % 'guestsCanModify' + value = 'value' + + +class GuestsCanSeeGuestsProperty(atom.core.XmlElement): + """Whether guests can see other attendees""" + _qname = GCAL_TEMPLATE % 'guestsCanSeeGuests' + value = 'value' + + +class HiddenProperty(atom.core.XmlElement): + """Describes whether a calendar is hidden""" + _qname = GCAL_TEMPLATE % 'hidden' + value = 'value' + + +class IcalUIDProperty(atom.core.XmlElement): + """Describes the UID in the ical export of the event""" + _qname = GCAL_TEMPLATE % 'uid' + value = 'value' + + +class OverrideNameProperty(atom.core.XmlElement): + """Describes the override name property of a calendar""" + _qname = GCAL_TEMPLATE % 'overridename' + value = 'value' + + +class PrivateCopyProperty(atom.core.XmlElement): + """Indicates whether this is a private copy of the event, changes to which should not be sent to other calendars""" + _qname = GCAL_TEMPLATE % 'privateCopy' + value = 'value' + + +class QuickAddProperty(atom.core.XmlElement): + """Describes whether gd:content is for quick-add processing""" + _qname = GCAL_TEMPLATE % 'quickadd' + value = 'value' + + 
+class ResourceProperty(atom.core.XmlElement): + """Describes whether gd:who is a resource such as a conference room""" + _qname = GCAL_TEMPLATE % 'resource' + value = 'value' + id = 'id' + + +class EventWho(gdata.data.Who): + """Extends the base Who class with Calendar extensions""" + _qname = gdata.data.GD_TEMPLATE % 'who' + resource = ResourceProperty + + +class SelectedProperty(atom.core.XmlElement): + """Describes whether a calendar is selected""" + _qname = GCAL_TEMPLATE % 'selected' + value = 'value' + + +class SendAclNotificationsProperty(atom.core.XmlElement): + """Describes whether to send ACL notifications to grantees""" + _qname = GCAL_TEMPLATE % 'sendAclNotifications' + value = 'value' + + +class CalendarAclEntry(gdata.data.GDEntry): + """Describes an entry in a feed of a Calendar access control list (ACL)""" + send_acl_notifications = SendAclNotificationsProperty + + +class CalendarAclFeed(gdata.data.GDFeed): + """Describes a Calendar access contorl list (ACL) feed""" + entry = [CalendarAclEntry] + + +class SendEventNotificationsProperty(atom.core.XmlElement): + """Describes whether to send event notifications to other participants of the event""" + _qname = GCAL_TEMPLATE % 'sendEventNotifications' + value = 'value' + + +class SequenceNumberProperty(atom.core.XmlElement): + """Describes sequence number of an event""" + _qname = GCAL_TEMPLATE % 'sequence' + value = 'value' + + +class CalendarRecurrenceExceptionEntry(gdata.data.GDEntry): + """Describes an entry used by a Calendar recurrence exception entry link""" + uid = IcalUIDProperty + sequence = SequenceNumberProperty + + +class CalendarRecurrenceException(gdata.data.RecurrenceException): + """Describes an exception to a recurring Calendar event""" + _qname = gdata.data.GD_TEMPLATE % 'recurrenceException' + + +class SettingsProperty(atom.core.XmlElement): + """User preference name-value pair""" + _qname = GCAL_TEMPLATE % 'settingsProperty' + name = 'name' + value = 'value' + + +class SettingsEntry(gdata.data.GDEntry): + """Describes a Calendar Settings property entry""" + settings_property = SettingsProperty + + +class CalendarSettingsFeed(gdata.data.GDFeed): + """Personal settings for Calendar application""" + entry = [SettingsEntry] + + +class SuppressReplyNotificationsProperty(atom.core.XmlElement): + """Lists notification methods to be suppressed for this reply""" + _qname = GCAL_TEMPLATE % 'suppressReplyNotifications' + methods = 'methods' + + +class SyncEventProperty(atom.core.XmlElement): + """Describes whether this is a sync scenario where the Ical UID and Sequence number are honored during inserts and updates""" + _qname = GCAL_TEMPLATE % 'syncEvent' + value = 'value' + + +class CalendarEventEntry(gdata.data.BatchEntry): + """Describes a Calendar event entry""" + quickadd = QuickAddProperty + send_event_notifications = SendEventNotificationsProperty + sync_event = SyncEventProperty + anyone_can_add_self = AnyoneCanAddSelfProperty + extended_property = [CalendarExtendedProperty] + sequence = SequenceNumberProperty + guests_can_invite_others = GuestsCanInviteOthersProperty + guests_can_modify = GuestsCanModifyProperty + guests_can_see_guests = GuestsCanSeeGuestsProperty + georss_where = gdata.geo.data.GeoRssWhere + private_copy = PrivateCopyProperty + suppress_reply_notifications = SuppressReplyNotificationsProperty + uid = IcalUIDProperty + + +class TimeZoneProperty(atom.core.XmlElement): + """Describes the time zone of a calendar""" + _qname = GCAL_TEMPLATE % 'timezone' + value = 'value' + + +class 
TimesCleanedProperty(atom.core.XmlElement): + """Describes how many times calendar was cleaned via Manage Calendars""" + _qname = GCAL_TEMPLATE % 'timesCleaned' + value = 'value' + + +class CalendarEntry(gdata.data.GDEntry): + """Describes a Calendar entry in the feed of a user's calendars""" + timezone = TimeZoneProperty + overridename = OverrideNameProperty + hidden = HiddenProperty + selected = SelectedProperty + times_cleaned = TimesCleanedProperty + color = ColorProperty + where = [CalendarWhere] + accesslevel = AccessLevelProperty + + +class CalendarEventFeed(gdata.data.BatchFeed): + """Describes a Calendar event feed""" + allow_g_sync2 = AllowGSync2Property + timezone = TimeZoneProperty + entry = [CalendarEventEntry] + times_cleaned = TimesCleanedProperty + allow_g_sync = AllowGSyncProperty + + +class CalendarFeed(gdata.data.GDFeed): + """Describes a feed of Calendars""" + entry = [CalendarEntry] + + +class WebContentGadgetPref(atom.core.XmlElement): + """Describes a single web content gadget preference""" + _qname = GCAL_TEMPLATE % 'webContentGadgetPref' + name = 'name' + value = 'value' + + +class WebContent(atom.core.XmlElement): + """Describes a "web content" extension""" + _qname = GCAL_TEMPLATE % 'webContent' + height = 'height' + width = 'width' + web_content_gadget_pref = [WebContentGadgetPref] + url = 'url' + display = 'display' + + diff --git a/gam/gdata/calendar/service.py b/gam/gdata/calendar/service.py new file mode 100755 index 00000000000..115d1b839bf --- /dev/null +++ b/gam/gdata/calendar/service.py @@ -0,0 +1,598 @@ +#!/usr/bin/python +# +# Copyright (C) 2006 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""CalendarService extends the GDataService to streamline Google Calendar operations. + + CalendarService: Provides methods to query feeds and manipulate items. Extends + GDataService. + + DictionaryToParamList: Function which converts a dictionary into a list of + URL arguments (represented as strings). This is a + utility function used in CRUD operations. +""" + + +__author__ = 'api.vli (Vivian Li)' + + +import urllib +import gdata +import atom.service +import gdata.service +import gdata.calendar +import atom + + +DEFAULT_BATCH_URL = ('http://www.google.com/calendar/feeds/default/private' + '/full/batch') + + +class Error(Exception): + pass + + +class RequestError(Error): + pass + + +class CalendarService(gdata.service.GDataService): + """Client for the Google Calendar service.""" + + def __init__(self, email=None, password=None, source=None, + server='www.google.com', additional_headers=None, **kwargs): + """Creates a client for the Google Calendar service. + + Args: + email: string (optional) The user's email address, used for + authentication. + password: string (optional) The user's password. + source: string (optional) The name of the user's application. + server: string (optional) The name of the server to which a connection + will be opened. Default value: 'www.google.com'. 
+ **kwargs: The other parameters to pass to gdata.service.GDataService + constructor. + """ + gdata.service.GDataService.__init__( + self, email=email, password=password, service='cl', source=source, + server=server, additional_headers=additional_headers, **kwargs) + + def GetCalendarEventFeed(self, uri='/calendar/feeds/default/private/full'): + return self.Get(uri, converter=gdata.calendar.CalendarEventFeedFromString) + + def GetCalendarEventEntry(self, uri): + return self.Get(uri, converter=gdata.calendar.CalendarEventEntryFromString) + + def GetCalendarListFeed(self, uri='/calendar/feeds/default/allcalendars/full'): + return self.Get(uri, converter=gdata.calendar.CalendarListFeedFromString) + + def GetCalendarSettingsFeed(self, uri='/calendar/feeds/default/settings'): + return self.Get(uri) + + def GetAllCalendarsFeed(self, uri='/calendar/feeds/default/allcalendars/full'): + return self.Get(uri, converter=gdata.calendar.CalendarListFeedFromString) + + def GetOwnCalendarsFeed(self, uri='/calendar/feeds/default/owncalendars/full'): + return self.Get(uri, converter=gdata.calendar.CalendarListFeedFromString) + + def GetCalendarListEntry(self, uri): + return self.Get(uri, converter=gdata.calendar.CalendarListEntryFromString) + + def GetCalendarAclFeed(self, uri='/calendar/feeds/default/acl/full'): + return self.Get(uri, converter=gdata.calendar.CalendarAclFeedFromString) + + def GetCalendarAclEntry(self, uri): + return self.Get(uri, converter=gdata.calendar.CalendarAclEntryFromString) + + def GetCalendarEventCommentFeed(self, uri): + return self.Get(uri, converter=gdata.calendar.CalendarEventCommentFeedFromString) + + def GetCalendarEventCommentEntry(self, uri): + return self.Get(uri, converter=gdata.calendar.CalendarEventCommentEntryFromString) + + def Query(self, uri, converter=None): + """Performs a query and returns a resulting feed or entry. + + Args: + feed: string The feed which is to be queried + + Returns: + On success, a GDataFeed or Entry depending on which is sent from the + server. + On failure, a RequestError is raised of the form: + {'status': HTTP status code from server, + 'reason': HTTP reason from the server, + 'body': HTTP body of the server's response} + """ + + if converter: + result = self.Get(uri, converter=converter) + else: + result = self.Get(uri) + return result + + def CalendarQuery(self, query): + if isinstance(query, CalendarEventQuery): + return self.Query(query.ToUri(), + converter=gdata.calendar.CalendarEventFeedFromString) + elif isinstance(query, CalendarListQuery): + return self.Query(query.ToUri(), + converter=gdata.calendar.CalendarListFeedFromString) + elif isinstance(query, CalendarEventCommentQuery): + return self.Query(query.ToUri(), + converter=gdata.calendar.CalendarEventCommentFeedFromString) + else: + return self.Query(query.ToUri()) + + def InsertEvent(self, new_event, insert_uri, url_params=None, + escape_params=True): + """Adds an event to Google Calendar. + + Args: + new_event: atom.Entry or subclass A new event which is to be added to + Google Calendar. + insert_uri: the URL to post new events to the feed + url_params: dict (optional) Additional URL parameters to be included + in the insertion request. + escape_params: boolean (optional) If true, the url_parameters will be + escaped before they are included in the request. 
+ + Returns: + On successful insert, an entry containing the event created + On failure, a RequestError is raised of the form: + {'status': HTTP status code from server, + 'reason': HTTP reason from the server, + 'body': HTTP body of the server's response} + """ + + return self.Post(new_event, insert_uri, url_params=url_params, + escape_params=escape_params, + converter=gdata.calendar.CalendarEventEntryFromString) + + def InsertCalendarSubscription(self, calendar, insert_uri='/calendar/feeds/default/allcalendars/full', + url_params=None, escape_params=True): + """Subscribes user to the provided calendar. + + Args: + calendar: The calendar to which the user should be subscribed. + insert_uri: string The insert URL of the entry to be added. + url_params: dict (optional) Additional URL parameters to be included + in the insertion request. + escape_params: boolean (optional) If true, the url_parameters will be + escaped before they are included in the request. + + Returns: + On successful insert, an entry containing the subscription created + On failure, a RequestError is raised of the form: + {'status': HTTP status code from server, + 'reason': HTTP reason from the server, + 'body': HTTP body of the server's response} + """ + + return self.Post(calendar, insert_uri, url_params=url_params, + escape_params=escape_params, + converter=gdata.calendar.CalendarListEntryFromString) + + def InsertCalendar(self, new_calendar, url_params=None, + escape_params=True): + """Creates a new calendar. + + Args: + new_calendar: The calendar to be created + url_params: dict (optional) Additional URL parameters to be included + in the insertion request. + escape_params: boolean (optional) If true, the url_parameters will be + escaped before they are included in the request. + + Returns: + On successful insert, an entry containing the calendar created + On failure, a RequestError is raised of the form: + {'status': HTTP status code from server, + 'reason': HTTP reason from the server, + 'body': HTTP body of the server's response} + """ + + insert_uri = '/calendar/feeds/default/owncalendars/full' + response = self.Post(new_calendar, insert_uri, url_params=url_params, + escape_params=escape_params, + converter=gdata.calendar.CalendarListEntryFromString) + return response + + def UpdateCalendar(self, calendar, url_params=None, + escape_params=True): + """Updates a calendar. + + Args: + calendar: The calendar which should be updated + url_params: dict (optional) Additional URL parameters to be included + in the insertion request. + escape_params: boolean (optional) If true, the url_parameters will be + escaped before they are included in the request. + + Returns: + On successful insert, an entry containing the calendar created + On failure, a RequestError is raised of the form: + {'status': HTTP status code from server, + 'reason': HTTP reason from the server, + 'body': HTTP body of the server's response} + """ + + update_uri = calendar.GetEditLink().href + response = self.Put(data=calendar, uri=update_uri, url_params=url_params, + escape_params=escape_params, + converter=gdata.calendar.CalendarListEntryFromString) + return response + + def InsertAclEntry(self, new_entry, insert_uri, url_params=None, + escape_params=True): + """Adds an ACL entry (rule) to Google Calendar. + + Args: + new_entry: atom.Entry or subclass A new ACL entry which is to be added to + Google Calendar. 
+ insert_uri: the URL to post new entries to the ACL feed + url_params: dict (optional) Additional URL parameters to be included + in the insertion request. + escape_params: boolean (optional) If true, the url_parameters will be + escaped before they are included in the request. + + Returns: + On successful insert, an entry containing the ACL entry created + On failure, a RequestError is raised of the form: + {'status': HTTP status code from server, + 'reason': HTTP reason from the server, + 'body': HTTP body of the server's response} + """ + + return self.Post(new_entry, insert_uri, url_params=url_params, + escape_params=escape_params, + converter=gdata.calendar.CalendarAclEntryFromString) + + def InsertEventComment(self, new_entry, insert_uri, url_params=None, + escape_params=True): + """Adds an entry to Google Calendar. + + Args: + new_entry: atom.Entry or subclass A new entry which is to be added to + Google Calendar. + insert_uri: the URL to post new entrys to the feed + url_params: dict (optional) Additional URL parameters to be included + in the insertion request. + escape_params: boolean (optional) If true, the url_parameters will be + escaped before they are included in the request. + + Returns: + On successful insert, an entry containing the comment created + On failure, a RequestError is raised of the form: + {'status': HTTP status code from server, + 'reason': HTTP reason from the server, + 'body': HTTP body of the server's response} + """ + + return self.Post(new_entry, insert_uri, url_params=url_params, + escape_params=escape_params, + converter=gdata.calendar.CalendarEventCommentEntryFromString) + + def _RemoveStandardUrlPrefix(self, url): + url_prefix = 'http://%s/' % self.server + if url.startswith(url_prefix): + return url[len(url_prefix) - 1:] + return url + + def DeleteEvent(self, edit_uri, extra_headers=None, + url_params=None, escape_params=True): + """Removes an event with the specified ID from Google Calendar. + + Args: + edit_uri: string The edit URL of the entry to be deleted. Example: + 'http://www.google.com/calendar/feeds/default/private/full/abx' + url_params: dict (optional) Additional URL parameters to be included + in the deletion request. + escape_params: boolean (optional) If true, the url_parameters will be + escaped before they are included in the request. + + Returns: + On successful delete, a httplib.HTTPResponse containing the server's + response to the DELETE request. + On failure, a RequestError is raised of the form: + {'status': HTTP status code from server, + 'reason': HTTP reason from the server, + 'body': HTTP body of the server's response} + """ + + edit_uri = self._RemoveStandardUrlPrefix(edit_uri) + return self.Delete('%s' % edit_uri, + url_params=url_params, escape_params=escape_params) + + def DeleteAclEntry(self, edit_uri, extra_headers=None, + url_params=None, escape_params=True): + """Removes an ACL entry at the given edit_uri from Google Calendar. + + Args: + edit_uri: string The edit URL of the entry to be deleted. Example: + 'http://www.google.com/calendar/feeds/liz%40gmail.com/acl/full/default' + url_params: dict (optional) Additional URL parameters to be included + in the deletion request. + escape_params: boolean (optional) If true, the url_parameters will be + escaped before they are included in the request. + + Returns: + On successful delete, a httplib.HTTPResponse containing the server's + response to the DELETE request. 
+ On failure, a RequestError is raised of the form: + {'status': HTTP status code from server, + 'reason': HTTP reason from the server, + 'body': HTTP body of the server's response} + """ + + edit_uri = self._RemoveStandardUrlPrefix(edit_uri) + return self.Delete('%s' % edit_uri, + url_params=url_params, escape_params=escape_params) + + def DeleteCalendarEntry(self, edit_uri, extra_headers=None, + url_params=None, escape_params=True): + """Removes a calendar entry at the given edit_uri from Google Calendar. + + Args: + edit_uri: string The edit URL of the entry to be deleted. Example: + 'http://www.google.com/calendar/feeds/default/allcalendars/abcdef@group.calendar.google.com' + url_params: dict (optional) Additional URL parameters to be included + in the deletion request. + escape_params: boolean (optional) If true, the url_parameters will be + escaped before they are included in the request. + + Returns: + On successful delete, True is returned + On failure, a RequestError is raised of the form: + {'status': HTTP status code from server, + 'reason': HTTP reason from the server, + 'body': HTTP body of the server's response} + """ + + return self.Delete(edit_uri, url_params=url_params, + escape_params=escape_params) + + def UpdateEvent(self, edit_uri, updated_event, url_params=None, + escape_params=True): + """Updates an existing event. + + Args: + edit_uri: string The edit link URI for the element being updated + updated_event: string, atom.Entry, or subclass containing + the Atom Entry which will replace the event which is + stored at the edit_url + url_params: dict (optional) Additional URL parameters to be included + in the update request. + escape_params: boolean (optional) If true, the url_parameters will be + escaped before they are included in the request. + + Returns: + On successful update, a httplib.HTTPResponse containing the server's + response to the PUT request. + On failure, a RequestError is raised of the form: + {'status': HTTP status code from server, + 'reason': HTTP reason from the server, + 'body': HTTP body of the server's response} + """ + + edit_uri = self._RemoveStandardUrlPrefix(edit_uri) + return self.Put(updated_event, '%s' % edit_uri, + url_params=url_params, + escape_params=escape_params, + converter=gdata.calendar.CalendarEventEntryFromString) + + def UpdateAclEntry(self, edit_uri, updated_rule, url_params=None, + escape_params=True): + """Updates an existing ACL rule. + + Args: + edit_uri: string The edit link URI for the element being updated + updated_rule: string, atom.Entry, or subclass containing + the Atom Entry which will replace the event which is + stored at the edit_url + url_params: dict (optional) Additional URL parameters to be included + in the update request. + escape_params: boolean (optional) If true, the url_parameters will be + escaped before they are included in the request. + + Returns: + On successful update, a httplib.HTTPResponse containing the server's + response to the PUT request. + On failure, a RequestError is raised of the form: + {'status': HTTP status code from server, + 'reason': HTTP reason from the server, + 'body': HTTP body of the server's response} + """ + + edit_uri = self._RemoveStandardUrlPrefix(edit_uri) + return self.Put(updated_rule, '%s' % edit_uri, + url_params=url_params, + escape_params=escape_params, + converter=gdata.calendar.CalendarAclEntryFromString) + + def ExecuteBatch(self, batch_feed, url, + converter=gdata.calendar.CalendarEventFeedFromString): + """Sends a batch request feed to the server. 
+ + The batch request needs to be sent to the batch URL for a particular + calendar. You can find the URL by calling GetBatchLink().href on the + CalendarEventFeed. + + Args: + batch_feed: gdata.calendar.CalendarEventFeed A feed containing batch + request entries. Each entry contains the operation to be performed + on the data contained in the entry. For example an entry with an + operation type of insert will be used as if the individual entry + had been inserted. + url: str The batch URL for the Calendar to which these operations should + be applied. + converter: Function (optional) The function used to convert the server's + response to an object. The default value is + CalendarEventFeedFromString. + + Returns: + The results of the batch request's execution on the server. If the + default converter is used, this is stored in a CalendarEventFeed. + """ + return self.Post(batch_feed, url, converter=converter) + + +class CalendarEventQuery(gdata.service.Query): + + def __init__(self, user='default', visibility='private', projection='full', + text_query=None, params=None, categories=None): + gdata.service.Query.__init__(self, + feed='http://www.google.com/calendar/feeds/%s/%s/%s' % ( + urllib.quote(user), + urllib.quote(visibility), + urllib.quote(projection)), + text_query=text_query, params=params, categories=categories) + + def _GetStartMin(self): + if 'start-min' in self.keys(): + return self['start-min'] + else: + return None + + def _SetStartMin(self, val): + self['start-min'] = val + + start_min = property(_GetStartMin, _SetStartMin, + doc="""The start-min query parameter""") + + def _GetStartMax(self): + if 'start-max' in self.keys(): + return self['start-max'] + else: + return None + + def _SetStartMax(self, val): + self['start-max'] = val + + start_max = property(_GetStartMax, _SetStartMax, + doc="""The start-max query parameter""") + + def _GetOrderBy(self): + if 'orderby' in self.keys(): + return self['orderby'] + else: + return None + + def _SetOrderBy(self, val): + if val is not 'lastmodified' and val is not 'starttime': + raise Error, "Order By must be either 'lastmodified' or 'starttime'" + self['orderby'] = val + + orderby = property(_GetOrderBy, _SetOrderBy, + doc="""The orderby query parameter""") + + def _GetSortOrder(self): + if 'sortorder' in self.keys(): + return self['sortorder'] + else: + return None + + def _SetSortOrder(self, val): + if (val is not 'ascending' and val is not 'descending' + and val is not 'a' and val is not 'd' and val is not 'ascend' + and val is not 'descend'): + raise Error, "Sort order must be either ascending, ascend, " + ( + "a or descending, descend, or d") + self['sortorder'] = val + + sortorder = property(_GetSortOrder, _SetSortOrder, + doc="""The sortorder query parameter""") + + def _GetSingleEvents(self): + if 'singleevents' in self.keys(): + return self['singleevents'] + else: + return None + + def _SetSingleEvents(self, val): + self['singleevents'] = val + + singleevents = property(_GetSingleEvents, _SetSingleEvents, + doc="""The singleevents query parameter""") + + def _GetFutureEvents(self): + if 'futureevents' in self.keys(): + return self['futureevents'] + else: + return None + + def _SetFutureEvents(self, val): + self['futureevents'] = val + + futureevents = property(_GetFutureEvents, _SetFutureEvents, + doc="""The futureevents query parameter""") + + def _GetRecurrenceExpansionStart(self): + if 'recurrence-expansion-start' in self.keys(): + return self['recurrence-expansion-start'] + else: + return None + + def 
_SetRecurrenceExpansionStart(self, val): + self['recurrence-expansion-start'] = val + + recurrence_expansion_start = property(_GetRecurrenceExpansionStart, + _SetRecurrenceExpansionStart, + doc="""The recurrence-expansion-start query parameter""") + + def _GetRecurrenceExpansionEnd(self): + if 'recurrence-expansion-end' in self.keys(): + return self['recurrence-expansion-end'] + else: + return None + + def _SetRecurrenceExpansionEnd(self, val): + self['recurrence-expansion-end'] = val + + recurrence_expansion_end = property(_GetRecurrenceExpansionEnd, + _SetRecurrenceExpansionEnd, + doc="""The recurrence-expansion-end query parameter""") + + def _SetTimezone(self, val): + self['ctz'] = val + + def _GetTimezone(self): + if 'ctz' in self.keys(): + return self['ctz'] + else: + return None + + ctz = property(_GetTimezone, _SetTimezone, + doc="""The ctz query parameter which sets report time on the server.""") + + +class CalendarListQuery(gdata.service.Query): + """Queries the Google Calendar meta feed""" + + def __init__(self, userId=None, text_query=None, + params=None, categories=None): + if userId is None: + userId = 'default' + + gdata.service.Query.__init__(self, feed='http://www.google.com/calendar/feeds/' + +userId, + text_query=text_query, params=params, + categories=categories) + +class CalendarEventCommentQuery(gdata.service.Query): + """Queries the Google Calendar event comments feed""" + + def __init__(self, feed=None): + gdata.service.Query.__init__(self, feed=feed) diff --git a/gam/gdata/calendar/service.pyc b/gam/gdata/calendar/service.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fcf77b707775b6d20a18c9ef268224c39e22c48f GIT binary patch literal 27615 zcmeHQ&2t>bb??OjOMn6e@f(sLazuRqv`D~mtbD+ZERY0E+B8HD09$69Yz8|6V8Gp- z)yymb$fObr%pq}n@qdt1F1e>FmmE{M=HydOIi)I9$t9IaRdR5Czt`O}vkNT0j0GZw zAg0+f)6*ZX-+TQ&x*y8_aqQ^3?Z3TUQ|70P|3AW&{5KL`sXItZ>P|`d1IjO}`hdEF zjLw(UowCgjsyjnAUnx94QpgWU8I&AW{*Ver)GDg2sE11JDD|T|N0om>1xMxSaQ5^i z<&UVKtTsl~#xZ$!G<)~5@?Wy=#^l{-_U;wsAM1Sga`x`H^2a*gy^_5AgT!eB>3YQaBj?k05N|uHL4aGY={pUt8MfMWFAbb9 z4I0U`GjBSB8ascAJ(C8ng*CSB#oJ5KZ7=p3x56Zyb*{IY^4!^64{PgAEo$Bi;uOs} ze)fSAHqjZ+spAVLT6K0cmNmWX-g3Nnt=$NksADP)T5*sdANY=!5ISLVEx9=DEUkx$ zgJ16vb+yy59;O(+?O{7XHDS}a>MmdF0B3R%bLF+d>3j9infgI^FZ7zut?**_EBq`Z z-@@DVG;Ph!%xrFMPOr(tOxL2uOwCN~jDUE?4_3W)J)LRA;XS~3X0=_f&#ZW9Z9N@A z9rJM<#WiH)2biI*;7W#(1fY|Wau=ugYH9#U%E#4e(`y9PYI;;|8j;_w<5ri#BPF@y z268jYNf0M9cf)vPd*)iS*{nyNpUkXzjmuw~{@{blt!-dM^Kuf`oT>LO&aC-ByuAPT zmbf|H+ICs(d${dh66UkhewnTMl>RMWbhoE z1@f`JOsPBpaL+K z)9NAk=SgWxKRKfwf{UKA1> zF+Gcy$;&9)NqjZV<`G<+@$ceFK17l#wNX+V18SqJ?#s%9xyF3n$0W;SVjAxwQ&IOZ zy+_o2+*Z`jOO+C)a&c;e^$Y-MuYwP!V5Xi^d$JyqCp*quyq3%w_7*g}us&;;mNON# zgnia8f_ViFcu_o=I8w&(d>?Esxx}YFVNqa^UK{H~GYxCP`=`yDmX{=(QS3jadRxqX zmPGBi7WDdvyoT(V3zO-e*J{j`GC#tKEhtl>oT)1%cdOM?Vxk7E&I*Lm6L^Z54fV8F*8|T- z#}=jr@mwmGez~kqm~63QhWm%jFa^7pBK#}~>Z>l75|>T7#8#I}gZmN^X+WgYD}>=T^Q zQ>Y$Y+|?H@)Ur^i(ldd>rBEqQaEV<40++%|I)S94x%p-qZ|^mB_cZIk)d%4rW!>Ea zm!1fN=IpCdauI1SjPdGqJDrKhPx}m~*U+|w69w2_7}8fLamETzvi(@VV}l)ugM2R- zB|(}}4$eSBxC%ubLnfoBoe~x_3SkS93dZCl(ANQ;MXQAYE}9M#*x$#izL0=Rc5*=D zGZQ#huUlM7IJTvHY>z~8Nay`T@tgU( zM_}x**!@Tn_GIT-0}6+O1eC#gfI&L6T7UxuP%^}fDZY;z zD5>$ed;y0@j3*F=ACy&dEPsxL9Z#UeY9TrK8!Ym-gBW^ag9@uQ`qA7M2WdNQ@)^`i z7^WaJh%wNCwhdf~x5cp6skN?R9m|wEYN~u-6gU{%piV(mV-sQEYn|8Ctys19~r-y$Yqe9S4^%sG?Ph z#=zJMsXrSHmf)~`a-5$`CaIUUlgU}<#?sPlXyv?vLh56LY*jaOsbEi>gb5&tn)VBQ zmv?cG4^|?7+ZJcuc10J3x{qmSLC*{x$oTXkCuTA-O)b|`JrUVV-il7AC$C>3JJreW z(@q>BBgRNIRw<1SjP0SK`~`}@!UA;-y4nt~by~0z<`t-JWwj#)mvm5V45^I@F?Z3F 
zJ|d-m+`IH}vGj_=Sf^*3B^)8e9AWPQ5G+bEJtrP9Esgm%BClPZAisPy1- zJV-bORT?XuC><-N12xV*FY4J>aV6|mrb5ta1|}pv8rsXs1s#2$yeJr(43?Fe^Tos9 z1k}38B)Ys8MMh&utQl-prTnbprBP#Aa|f`Aq`gwB({?lGpp2ENo1`Ef1~+}OwZKZj_XyiE5Xu4mqyW&hhZD@ zj>Gyo%}?NyVY60mqocbz>Cg|shIF^!R{?JTUL@(X9Z`_fyjGAky%I(B0B(tb9q}ei zb#WW4kQHU!)aEZ^RNJFrfJ0a9gOh_v2K;;*UYESPwsW>ikKyxhbC_{la^PmtOABp% zsRsiV_&ockF{37CRSS>ji=B&;CYy#NB_(!DVy@JHMPkVY>J``wb>=Ct?TLko1esBY zRGnk6OJTwN8b83*4?7O|=1rj(;b=d@t+x4{J`Gd(esGQdg3)5Jy^_@8a0PaD+M7v_ zM3t8IWcp6cxYzXZFSslQc1t4my*2^cJ65B)h0p~nPS&G#o%l}PqMP)sOp$}NL+sIy zg{+N00nP?zDk?`c!hUQG`JBJt5R^Bq)CKZ3lHe0@2s1U}CUKr;Mv;?m&Jo<-0Erd2 z!8y`(#tp>A>wdr@eJ6eD)0*_H7-7g%Cg!j3R&#<=!U@R8=nTl@hp*!XGQ)$xf)oMC z!Fu2*OAK~M5f9*j8-mov!6ttSu8_OlxGr0&?!^PhBW;=8d;p}{iAgVL)rev}wYDds zRSWSh=ssTy`qSBPbIN=SRg3FR)_8ibiuhZog0wEqEvqv2Yo(`4uan&Wj)X{^Nu8{e zNaWB2Etyj;5H*1bl|I>d1&_O-cdajIkGtp#)F8kLK=niPx^0MVxTA(F)e{)W&p`Fd zE$m3q4)xH(2ANb=>7%Wi{a-9>@p&qbAOb!!(=P2rfXtsN;-h4ZyxI3 zt+07lzF3cgRdFoeZWrZxS;;e+*DHBerPdh)Q>mJ|zk_=FN$Hx(?85#_5}&5@vC`|B z(zy)t$`Ua~Pa{D-ot;wK^CedlmignPcP_5o^8W?C$mD<729wdm0Vx8xWMJJ{|Bg=W zvl*aV;1!vI{RjqO3|bj19>X7Wl|6XFF5}p)cAwC6NJf=$hC}2EW4*EkrH0KHRMM-XeEaWx--=c>pJAjB(Bvs_dU5*jG!K9DVb zXC7!*jqz!U>j-Wnu?xU+0|UYM`9)|Hi&Ly)LBtZX>HeK$CZv~4-pGDO5u4!7!9jz~ zFkKg^3x_Opfx-sAdTx`2a29U$OSBqC@2YOli0%c8a5CYrhrb-xZ%dmF!8D8l%J^O~ z>nVjo0n{aAGM@{*yBcV+DH0$#L+c4NDENE_;rJRbewPmcDBUJJys)~9>xJMDBWltL zYT+uJw9d_I+G)9qyX9h+WgHM^wTDEXw2+;(0Uq`;gZe=o?gs4Ho!{~rt$JWBf7Tb{ zOg{U$32$X<()6pp%9{+L=z+1&_)Cck~U*?GS{>f zaIekZnqQjFr8_;P&zB%3`w-tra^|ZYwKqVH;&{I%=v@%QNXDcG5{dTjY^AYhw{{om zLAidpWpt;65x<*r;IYyxxm)JfWGvt5WGuN*?!mPSiGM8wt8pwG{;xBadidEpU;pYF z4`axgEzfMdv!}IQFd5@`egZG=3nt?@lTW$J=MhZCy-7BaX?gBACp6crV-QLIZ>p{G|)V#4jS!~J4pbqQ3N+}i^=Vhh{pxJyVBnBqn; zD~I>gN^nm8E>x47+3``xYho%`30Ps52T1#a?3OdF;b0qSw)N~1x2R$hg5GqUhH}sxJ~ew{9m`tv<_7KJTdWWQH&C1;RiGFG+$ ze`mlPiBJ9&>}XK}2zidgma)X3PwE)|w6^aV*0cmsq_b!f``hhd9yxhqKGUVRmmNF9 z9gjQO5qbgs}tO??fwZH3gXHJhlm)rD^3o@6X5m86s%7@k3L&PHO zA~c@?Cg@-mYRT;OMdT%(CW4e!sLc$9`$Vj96$Q+;eyMcYxeg!(+BBYTj29CUT~w2W=tBF>7%aPm=;L5PwOwQ5V|cPH095$r}M28UKj^LbAwEg(C|~Xs9zD6IwIh z26qt8!`FGxI}Jm8bWNU2u+ zsD{v0WNr&lGjPcdU2->#?E#|(*WP!MeoGo!6ZFQqo+rXl}n@J<8MvMhDcX0No_jz6^e~YF$CB{yergp3`T$wLtaT7y*oM@7K)8Y zv6oeV*jFg_iWEcaD;*gNv9EMwtP@h~xMCbE6gw%!5DH6&$U+1x1jj;cr={2_6(9%} zik*>Sh=7I2Sa^%rSBQy)%(xUgs{(|-La|q+7$RUHCKlcz@Rg2<^|};$O$7*ig<=y@ z3?Z)&4GYB(>9F{>|^h(gf2sntDVwd$>;ea{hLc~ zOAj5;QHL^*-qt_u$2?*jgke69?kaEP8Iye_GL4GaBzjx_oKIr+uWbKIkC`Jw6bj^?Do`N!$=wkZzb*LzlgD}iuK&e=YGzbAs5F&Q zyIc?;<6w1)IHIyONVb&A$`Tje2y1S%3yKo>M(l9UpuWD&#D^Vf}zSlSS5MC+mMWSt3l7#!GJlS1YBXrJT!ItlDRcWrGNUftb?bl}ZjA zXD)}uHM5Q*k;u+{RK2{f6@vX&k= zMNXhQq)0dG(!A}_A{X~_&pNk{8z|a6D7uv~f^K__@$CWzOru+*;oeiU&-g$?*q)7H z**tJ;AaeG=hWV!d-ZSIEBL|5#4+;}xe0yU8#`k^!4=$loJ@EijvkMFGtr!8Q>5913 zOHmeuw3dE$kSiA#{wl`31X2An7-D}rAL*~B^}0K#`M}Xsh6AQ$hx3h2#7uq{o#NyH z%H8v1{2EDszb%1nI+QM{zjWF07p5Fkkz2DxiPzR^*i)6ehD3)1kLf5%w{tbB3QLX~ zvd{(#Wfq=l!`pJ}d>%2Oy5xS!WQz%@y+E98){Xs^bM;R24^|D1NA!#WsaS1m_W7< zdu&Oar9l=qg+D@&nPq7rU2s1^>i!-Q>DLq4WxMU5?4m|m(uxF_&rPrSc4KXXg_ZTD z-(3Sf)A+EjMjoM|8zfiuOATAPa2b5l1{0SVf!SjLaaqIZg5h=&2pVUl@5{V0WZT|Y)@~x%n zmAR#>H>%hNA&h^1s}@MeBzhc`OID*vk@f(WbFGQ;J?4mnf_42wby>8&ssD)3huqCC cCX-6J0w$aND^~{3;NQfr)GsD}I&tiO0ZE>N0ssI2 literal 0 HcmV?d00001 diff --git a/gam/gdata/calendar_resource/__init__.py b/gam/gdata/calendar_resource/__init__.py new file mode 100755 index 00000000000..8b137891791 --- /dev/null +++ b/gam/gdata/calendar_resource/__init__.py @@ -0,0 +1 @@ + diff --git a/gam/gdata/calendar_resource/client.py b/gam/gdata/calendar_resource/client.py new file mode 100755 index 
00000000000..5f3f7bcb2ff --- /dev/null +++ b/gam/gdata/calendar_resource/client.py @@ -0,0 +1,199 @@ +#!/usr/bin/python +# +# Copyright 2009 Google Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""CalendarResourceClient simplifies Calendar Resources API calls. + +CalendarResourceClient extends gdata.client.GDClient to ease interaction with +the Google Apps Calendar Resources API. These interactions include the ability +to create, retrieve, update, and delete calendar resources in a Google Apps +domain. +""" + + +__author__ = 'Vic Fryzel ' + + +import gdata.calendar_resource.data +import gdata.client +import urllib + + +# Feed URI template. This must end with a / +# The strings in this template are eventually replaced with the API version +# and Google Apps domain name, respectively. +RESOURCE_FEED_TEMPLATE = '/a/feeds/calendar/resource/%s/%s/' + + +class CalendarResourceClient(gdata.client.GDClient): + """Client extension for the Google Calendar Resource API service. + + Attributes: + host: string The hostname for the Calendar Resouce API service. + api_version: string The version of the Calendar Resource API. + """ + + host = 'apps-apis.google.com' + api_version = '2.0' + auth_service = 'apps' + auth_scopes = gdata.gauth.AUTH_SCOPES['apps'] + + def __init__(self, domain, auth_token=None, **kwargs): + """Constructs a new client for the Calendar Resource API. + + Args: + domain: string The Google Apps domain with Calendar Resources. + auth_token: (optional) gdata.gauth.ClientLoginToken, AuthSubToken, or + OAuthToken which authorizes this client to edit the calendar resource + data. + kwargs: The other parameters to pass to the gdata.client.GDClient + constructor. + """ + gdata.client.GDClient.__init__(self, auth_token=auth_token, **kwargs) + self.domain = domain + + def make_resource_feed_uri(self, resource_id=None, params=None): + """Creates a resource feed URI for the Calendar Resource API. + + Using this client's Google Apps domain, create a feed URI for calendar + resources in that domain. If a resource_id is provided, return a URI + for that specific resource. If params are provided, append them as GET + params. + + Args: + resource_id: string (optional) The ID of the calendar resource for which + to make a feed URI. + params: dict (optional) key -> value params to append as GET vars to the + URI. Example: params={'start': 'my-resource-id'} + Returns: + A string giving the URI for calendar resources for this client's Google + Apps domain. + """ + uri = RESOURCE_FEED_TEMPLATE % (self.api_version, self.domain) + if resource_id: + uri += resource_id + if params: + uri += '?' + urllib.urlencode(params) + return uri + + MakeResourceFeedUri = make_resource_feed_uri + + def get_resource_feed(self, uri=None, **kwargs): + """Fetches a ResourceFeed of calendar resources at the given URI. + + Args: + uri: string The URI of the feed to pull. + kwargs: The other parameters to pass to gdata.client.GDClient.get_feed(). 
+ + Returns: + A ResourceFeed object representing the feed at the given URI. + """ + + if uri is None: + uri = self.MakeResourceFeedUri() + return self.get_feed( + uri, + desired_class=gdata.calendar_resource.data.CalendarResourceFeed, + **kwargs) + + GetResourceFeed = get_resource_feed + + def get_resource(self, uri=None, resource_id=None, **kwargs): + """Fetches a single calendar resource by resource ID. + + Args: + uri: string The base URI of the feed from which to fetch the resource. + resource_id: string The string ID of the Resource to fetch. + kwargs: The other parameters to pass to gdata.client.GDClient.get_entry(). + + Returns: + A Resource object representing the calendar resource with the given + base URI and resource ID. + """ + + if uri is None: + uri = self.MakeResourceFeedUri(resource_id) + return self.get_entry( + uri, + desired_class=gdata.calendar_resource.data.CalendarResourceEntry, + **kwargs) + + GetResource = get_resource + + def create_resource(self, resource_id, resource_common_name=None, + resource_description=None, resource_type=None, **kwargs): + """Creates a calendar resource with the given properties. + + Args: + resource_id: string The resource ID of the calendar resource. + resource_common_name: string (optional) The common name of the resource. + resource_description: string (optional) The description of the resource. + resource_type: string (optional) The type of the resource. + kwargs: The other parameters to pass to gdata.client.GDClient.post(). + + Returns: + gdata.calendar_resource.data.CalendarResourceEntry of the new resource. + """ + new_resource = gdata.calendar_resource.data.CalendarResourceEntry( + resource_id=resource_id, + resource_common_name=resource_common_name, + resource_description=resource_description, + resource_type=resource_type) + return self.post(new_resource, self.MakeResourceFeedUri(), **kwargs) + + CreateResource = create_resource + + def update_resource(self, resource_id, resource_common_name=None, + resource_description=None, resource_type=None, **kwargs): + """Updates the calendar resource with the given resource ID. + + Args: + resource_id: string The resource ID of the calendar resource to update. + resource_common_name: string (optional) The common name to give the + resource. + resource_description: string (optional) The description to give the + resource. + resource_type: string (optional) The type to give the resource. + kwargs: The other parameters to pass to gdata.client.GDClient.update(). + + Returns: + gdata.calendar_resource.data.CalendarResourceEntry of the updated + resource. + """ + new_resource = gdata.calendar_resource.data.CalendarResourceEntry( + resource_id=resource_id, + resource_common_name=resource_common_name, + resource_description=resource_description, + resource_type=resource_type) + return self.update( + new_resource, + **kwargs) + + UpdateResource = update_resource + + def delete_resource(self, resource_id, **kwargs): + """Deletes the calendar resource with the given resource ID. + + Args: + resource_id: string The resource ID of the calendar resource to delete. + kwargs: The other parameters to pass to gdata.client.GDClient.delete() + + Returns: + An HTTP response object. See gdata.client.request(). 
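+
+ Example (illustrative sketch only, not part of the original library; the
+ domain, credentials, application name, and resource ID are placeholders):
+
+   client = gdata.calendar_resource.client.CalendarResourceClient(
+       domain='example.com')
+   client.ClientLogin('admin@example.com', 'password', source='example-app')
+   client.DeleteResource('conference-room-1')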
+ """ + + return self.delete(self.MakeResourceFeedUri(resource_id), **kwargs) + + DeleteResource = delete_resource diff --git a/gam/gdata/calendar_resource/data.py b/gam/gdata/calendar_resource/data.py new file mode 100755 index 00000000000..527fd484b89 --- /dev/null +++ b/gam/gdata/calendar_resource/data.py @@ -0,0 +1,193 @@ +#!/usr/bin/python +# +# Copyright 2009 Google Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Data model for parsing and generating XML for the Calendar Resource API.""" + + +__author__ = 'Vic Fryzel ' + + +import atom.core +import atom.data +import gdata.apps +import gdata.apps_property +import gdata.data + + +# This is required to work around a naming conflict between the Google +# Spreadsheets API and Python's built-in property function +pyproperty = property + + +# The apps:property name of the resourceId property +RESOURCE_ID_NAME = 'resourceId' +# The apps:property name of the resourceCommonName property +RESOURCE_COMMON_NAME_NAME = 'resourceCommonName' +# The apps:property name of the resourceDescription property +RESOURCE_DESCRIPTION_NAME = 'resourceDescription' +# The apps:property name of the resourceType property +RESOURCE_TYPE_NAME = 'resourceType' + + +class CalendarResourceEntry(gdata.data.GDEntry): + """Represents a Calendar Resource entry in object form.""" + + property = [gdata.apps_property.AppsProperty] + + def _GetProperty(self, name): + """Get the apps:property value with the given name. + + Args: + name: string Name of the apps:property value to get. + + Returns: + The apps:property value with the given name, or None if the name was + invalid. + """ + + for p in self.property: + if p.name == name: + return p.value + return None + + def _SetProperty(self, name, value): + """Set the apps:property value with the given name to the given value. + + Args: + name: string Name of the apps:property value to set. + value: string Value to give the apps:property value with the given name. + """ + + for i in range(len(self.property)): + if self.property[i].name == name: + self.property[i].value = value + return + self.property.append(gdata.apps_property.AppsProperty(name=name, value=value)) + + def GetResourceId(self): + """Get the resource ID of this Calendar Resource object. + + Returns: + The resource ID of this Calendar Resource object as a string or None. + """ + + return self._GetProperty(RESOURCE_ID_NAME) + + def SetResourceId(self, value): + """Set the resource ID of this Calendar Resource object. + + Args: + value: string The new resource ID value to give this object. + """ + + self._SetProperty(RESOURCE_ID_NAME, value) + + resource_id = pyproperty(GetResourceId, SetResourceId) + + def GetResourceCommonName(self): + """Get the common name of this Calendar Resource object. + + Returns: + The common name of this Calendar Resource object as a string or None. + """ + + return self._GetProperty(RESOURCE_COMMON_NAME_NAME) + + def SetResourceCommonName(self, value): + """Set the common name of this Calendar Resource object. 
+ + Args: + value: string The new common name value to give this object. + """ + + self._SetProperty(RESOURCE_COMMON_NAME_NAME, value) + + resource_common_name = pyproperty( + GetResourceCommonName, + SetResourceCommonName) + + def GetResourceDescription(self): + """Get the description of this Calendar Resource object. + + Returns: + The description of this Calendar Resource object as a string or None. + """ + + return self._GetProperty(RESOURCE_DESCRIPTION_NAME) + + def SetResourceDescription(self, value): + """Set the description of this Calendar Resource object. + + Args: + value: string The new description value to give this object. + """ + + self._SetProperty(RESOURCE_DESCRIPTION_NAME, value) + + resource_description = pyproperty( + GetResourceDescription, + SetResourceDescription) + + def GetResourceType(self): + """Get the type of this Calendar Resource object. + + Returns: + The type of this Calendar Resource object as a string or None. + """ + + return self._GetProperty(RESOURCE_TYPE_NAME) + + def SetResourceType(self, value): + """Set the type value of this Calendar Resource object. + + Args: + value: string The new type value to give this object. + """ + + self._SetProperty(RESOURCE_TYPE_NAME, value) + + resource_type = pyproperty(GetResourceType, SetResourceType) + + def __init__(self, resource_id=None, resource_common_name=None, + resource_description=None, resource_type=None, *args, **kwargs): + """Constructs a new CalendarResourceEntry object with the given arguments. + + Args: + resource_id: string (optional) The resource ID to give this new object. + resource_common_name: string (optional) The common name to give this new + object. + resource_description: string (optional) The description to give this new + object. + resource_type: string (optional) The type to give this new object. + args: The other parameters to pass to gdata.entry.GDEntry constructor. + kwargs: The other parameters to pass to gdata.entry.GDEntry constructor. + """ + super(CalendarResourceEntry, self).__init__(*args, **kwargs) + if resource_id: + self.resource_id = resource_id + if resource_common_name: + self.resource_common_name = resource_common_name + if resource_description: + self.resource_description = resource_description + if resource_type: + self.resource_type = resource_type + + +class CalendarResourceFeed(gdata.data.GDFeed): + """Represents a feed of CalendarResourceEntry objects.""" + + # Override entry so that this feed knows how to type its list of entries. + entry = [CalendarResourceEntry] diff --git a/gam/gdata/client.py b/gam/gdata/client.py new file mode 100755 index 00000000000..7e2314c3c37 --- /dev/null +++ b/gam/gdata/client.py @@ -0,0 +1,1126 @@ +#!/usr/bin/env python +# +# Copyright (C) 2008, 2009 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +# This module is used for version 2 of the Google Data APIs. + + +"""Provides a client to interact with Google Data API servers. + +This module is used for version 2 of the Google Data APIs. 
The primary class +in this module is GDClient. + + GDClient: handles auth and CRUD operations when communicating with servers. + GDataClient: deprecated client for version one services. Will be removed. +""" + + +__author__ = 'j.s@google.com (Jeff Scudder)' + + +import re +import atom.client +import atom.core +import atom.http_core +import gdata.gauth +import gdata.data + + +class Error(Exception): + pass + + +class RequestError(Error): + status = None + reason = None + body = None + headers = None + + +class RedirectError(RequestError): + pass + + +class CaptchaChallenge(RequestError): + captcha_url = None + captcha_token = None + + +class ClientLoginTokenMissing(Error): + pass + + +class MissingOAuthParameters(Error): + pass + + +class ClientLoginFailed(RequestError): + pass + + +class UnableToUpgradeToken(RequestError): + pass + + +class Unauthorized(Error): + pass + + +class BadAuthenticationServiceURL(RedirectError): + pass + + +class BadAuthentication(RequestError): + pass + + +class NotModified(RequestError): + pass + +class NotImplemented(RequestError): + pass + + +def error_from_response(message, http_response, error_class, + response_body=None): + + """Creates a new exception and sets the HTTP information in the error. + + Args: + message: str human readable message to be displayed if the exception is + not caught. + http_response: The response from the server, contains error information. + error_class: The exception to be instantiated and populated with + information from the http_response + response_body: str (optional) specify if the response has already been read + from the http_response object. + """ + if response_body is None: + body = http_response.read() + else: + body = response_body + error = error_class('%s: %i, %s' % (message, http_response.status, body)) + error.status = http_response.status + error.reason = http_response.reason + error.body = body + error.headers = atom.http_core.get_headers(http_response) + return error + + +def get_xml_version(version): + """Determines which XML schema to use based on the client API version. + + Args: + version: string which is converted to an int. The version string is in + the form 'Major.Minor.x.y.z' and only the major version number + is considered. If None is provided assume version 1. + """ + if version is None: + return 1 + return int(version.split('.')[0]) + + +class GDClient(atom.client.AtomPubClient): + """Communicates with Google Data servers to perform CRUD operations. + + This class is currently experimental and may change in backwards + incompatible ways. + + This class exists to simplify the following three areas involved in using + the Google Data APIs. + + CRUD Operations: + + The client provides a generic 'request' method for making HTTP requests. + There are a number of convenience methods which are built on top of + request, which include get_feed, get_entry, get_next, post, update, and + delete. These methods contact the Google Data servers. + + Auth: + + Reading user-specific private data requires authorization from the user as + do any changes to user data. An auth_token object can be passed into any + of the HTTP requests to set the Authorization header in the request. + + You may also want to set the auth_token member to a an object which can + use modify_request to set the Authorization header in the HTTP request. + + If you are authenticating using the email address and password, you can + use the client_login method to obtain an auth token and set the + auth_token member. 
+ + If you are using browser redirects, specifically AuthSub, you will want + to use gdata.gauth.AuthSubToken.from_url to obtain the token after the + redirect, and you will probably want to upgrade this single-use token + to a multiple-use (session) token using the upgrade_token method. + + API Versions: + + This client is multi-version capable and can be used with Google Data API + version 1 and version 2. The version should be specified by setting the + api_version member to a string, either '1' or '2'. + """ + + # The gsessionid is used by Google Calendar to prevent redirects. + __gsessionid = None + api_version = None + # Name of the Google Data service when making a ClientLogin request. + auth_service = None + # URL prefixes which should be requested for AuthSub and OAuth. + auth_scopes = None + + def request(self, method=None, uri=None, auth_token=None, + http_request=None, converter=None, desired_class=None, + redirects_remaining=4, **kwargs): + """Make an HTTP request to the server. + + See also documentation for atom.client.AtomPubClient.request. + + If a 302 redirect is sent from the server to the client, this client + assumes that the redirect is in the form used by the Google Calendar API. + The same request URI and method will be used as in the original request, + but a gsessionid URL parameter will be added to the request URI with + the value provided in the server's 302 redirect response. If the 302 + redirect is not in the format specified by the Google Calendar API, a + RedirectError will be raised containing the body of the server's + response. + + The method calls the client's modify_request method to make any changes + required by the client before the request is made. For example, a + version 2 client could add a GData-Version: 2 header to the request in + its modify_request method. + + Args: + method: str The HTTP verb for this request, usually 'GET', 'POST', + 'PUT', or 'DELETE' + uri: atom.http_core.Uri, str, or unicode The URL being requested. + auth_token: An object which sets the Authorization HTTP header in its + modify_request method. Recommended classes include + gdata.gauth.ClientLoginToken and gdata.gauth.AuthSubToken + among others. + http_request: (optional) atom.http_core.HttpRequest + converter: function which takes the body of the response as its only + argument and returns the desired object. + desired_class: class descended from atom.core.XmlElement to which a + successful response should be converted. If there is no + converter function specified (converter=None) then the + desired_class will be used in calling the + atom.core.parse function. If neither + the desired_class nor the converter is specified, an + HTTP response object will be returned. + redirects_remaining: (optional) int, if this number is 0 and the + server sends a 302 redirect, the request method + will raise an exception. This parameter is used in + recursive request calls to avoid an infinite loop. + + Any additional arguments are passed through to + atom.client.AtomPubClient.request. + + Returns: + An HTTP response object (see atom.http_core.HttpResponse for a + description of the object's interface) if no converter was + specified and no desired_class was specified. If a converter function + was provided, the results of calling the converter are returned. If no + converter was specified but a desired_class was provided, the response + body will be converted to the class using + atom.core.parse.
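+
+ Example (illustrative sketch only; the feed URL is a placeholder and the
+ client is assumed to already hold a valid auth_token):
+
+   feed = client.request(method='GET',
+       uri='https://www.google.com/calendar/feeds/default/private/full',
+       desired_class=gdata.data.GDFeed)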
+ """ + if isinstance(uri, (str, unicode)): + uri = atom.http_core.Uri.parse_uri(uri) + + # Add the gsession ID to the URL to prevent further redirects. + # TODO: If different sessions are using the same client, there will be a + # multitude of redirects and session ID shuffling. + # If the gsession ID is in the URL, adopt it as the standard location. + if uri is not None and uri.query is not None and 'gsessionid' in uri.query: + self.__gsessionid = uri.query['gsessionid'] + # The gsession ID could also be in the HTTP request. + elif (http_request is not None and http_request.uri is not None + and http_request.uri.query is not None + and 'gsessionid' in http_request.uri.query): + self.__gsessionid = http_request.uri.query['gsessionid'] + # If the gsession ID is stored in the client, and was not present in the + # URI then add it to the URI. + elif self.__gsessionid is not None: + uri.query['gsessionid'] = self.__gsessionid + + # The AtomPubClient should call this class' modify_request before + # performing the HTTP request. + #http_request = self.modify_request(http_request) + + response = atom.client.AtomPubClient.request(self, method=method, + uri=uri, auth_token=auth_token, http_request=http_request, **kwargs) + # On success, convert the response body using the desired converter + # function if present. + if response is None: + return None + if response.status == 200 or response.status == 201: + if converter is not None: + return converter(response) + elif desired_class is not None: + if self.api_version is not None: + return atom.core.parse(response.read(), desired_class, + version=get_xml_version(self.api_version)) + else: + # No API version was specified, so allow parse to + # use the default version. + return atom.core.parse(response.read(), desired_class) + else: + return response + # TODO: move the redirect logic into the Google Calendar client once it + # exists since the redirects are only used in the calendar API. + elif response.status == 302: + if redirects_remaining > 0: + location = (response.getheader('Location') + or response.getheader('location')) + if location is not None: + m = re.compile('[\?\&]gsessionid=(\w*)').search(location) + if m is not None: + self.__gsessionid = m.group(1) + # Make a recursive call with the gsession ID in the URI to follow + # the redirect. + return self.request(method=method, uri=uri, auth_token=auth_token, + http_request=http_request, converter=converter, + desired_class=desired_class, + redirects_remaining=redirects_remaining-1, + **kwargs) + else: + raise error_from_response('302 received without Location header', + response, RedirectError) + else: + raise error_from_response('Too many redirects from server', + response, RedirectError) + elif response.status == 401: + raise error_from_response('Unauthorized - Server responded with', + response, Unauthorized) + elif response.status == 304: + raise error_from_response('Entry Not Modified - Server responded with', + response, NotModified) + elif response.status == 501: + raise error_from_response( + 'This API operation is not implemented. - Server responded with', + response, NotImplemented) + # If the server's response was not a 200, 201, 302, 304, 401, or 501, raise + # an exception. 
+ else: + raise error_from_response('Server responded with', response, + RequestError) + + Request = request + + def request_client_login_token( + self, email, password, source, service=None, + account_type='HOSTED_OR_GOOGLE', + auth_url=atom.http_core.Uri.parse_uri( + 'https://www.google.com/accounts/ClientLogin'), + captcha_token=None, captcha_response=None): + service = service or self.auth_service + # Set the target URL. + http_request = atom.http_core.HttpRequest(uri=auth_url, method='POST') + http_request.add_body_part( + gdata.gauth.generate_client_login_request_body(email=email, + password=password, service=service, source=source, + account_type=account_type, captcha_token=captcha_token, + captcha_response=captcha_response), + 'application/x-www-form-urlencoded') + + # Use the underlying http_client to make the request. + response = self.http_client.request(http_request) + + response_body = response.read() + if response.status == 200: + token_string = gdata.gauth.get_client_login_token_string(response_body) + if token_string is not None: + return gdata.gauth.ClientLoginToken(token_string) + else: + raise ClientLoginTokenMissing( + 'Received a 200 response to client login request,' + ' but no token was present. %s' % (response_body,)) + elif response.status == 403: + captcha_challenge = gdata.gauth.get_captcha_challenge(response_body) + if captcha_challenge: + challenge = CaptchaChallenge('CAPTCHA required') + challenge.captcha_url = captcha_challenge['url'] + challenge.captcha_token = captcha_challenge['token'] + raise challenge + elif response_body.splitlines()[0] == 'Error=BadAuthentication': + raise BadAuthentication('Incorrect username or password') + else: + raise error_from_response('Server responded with a 403 code', + response, RequestError, response_body) + elif response.status == 302: + # Google tries to redirect all bad URLs back to + # http://www.google.<locale>.com. If a redirect + # attempt is made, assume the user has supplied an incorrect + # authentication URL + raise error_from_response('Server responded with a redirect', + response, BadAuthenticationServiceURL, + response_body) + else: + raise error_from_response('Server responded to ClientLogin request', + response, ClientLoginFailed, response_body) + + RequestClientLoginToken = request_client_login_token + + def client_login(self, email, password, source, service=None, + account_type='HOSTED_OR_GOOGLE', + auth_url=atom.http_core.Uri.parse_uri( + 'https://www.google.com/accounts/ClientLogin'), + captcha_token=None, captcha_response=None): + """Performs an auth request using the user's email address and password. + + In order to modify user specific data and read user private data, your + application must be authorized by the user. One way to demonstrate + authorization is by including a Client Login token in the Authorization + HTTP header of all requests. This method requests the Client Login token + by sending the user's email address, password, the name of the + application, and the service code for the service which will be accessed + by the application. If the username and password are correct, the server + will respond with the client login code and a new ClientLoginToken + object will be set in the client's auth_token member. With the auth_token + set, future requests from this client will include the Client Login + token.
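+
+ Example (illustrative sketch only; the address, password, and application
+ name are placeholders):
+
+   client = gdata.client.GDClient()
+   client.client_login('user@example.com', 'secret', source='example-app',
+                       service='cl')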
+ + For a list of service names, see + http://code.google.com/apis/gdata/faq.html#clientlogin + For more information on Client Login, see: + http://code.google.com/apis/accounts/docs/AuthForInstalledApps.html + + Args: + email: str The user's email address or username. + password: str The password for the user's account. + source: str The name of your application. This can be anything you + like but should give some indication of which app is + making the request. + service: str The service code for the service you would like to access. + For example, 'cp' for contacts, 'cl' for calendar. For a full + list see + http://code.google.com/apis/gdata/faq.html#clientlogin + If you are using a subclass of the gdata.client.GDClient, the + service will usually be filled in for you so you do not need + to specify it. For example see BloggerClient, + SpreadsheetsClient, etc. + account_type: str (optional) The type of account which is being + authenticated. This can be either 'GOOGLE' for a Google + Account, 'HOSTED' for a Google Apps Account, or the + default 'HOSTED_OR_GOOGLE' which will select the Google + Apps Account if the same email address is used for both + a Google Account and a Google Apps Account. + auth_url: str (optional) The URL to which the login request should be + sent. + captcha_token: str (optional) If a previous login attempt was responded + to with a CAPTCHA challenge, this is the token which + identifies the challenge (from the CAPTCHA's URL). + captcha_response: str (optional) If a previous login attempt was + responded to with a CAPTCHA challenge, this is the + response text which was contained in the challenge. + + Returns: + None + + Raises: + A RequestError or one of its subclasses: BadAuthentication, + BadAuthenticationServiceURL, ClientLoginFailed, + ClientLoginTokenMissing, or CaptchaChallenge + """ + service = service or self.auth_service + self.auth_token = self.request_client_login_token(email, password, + source, service=service, account_type=account_type, auth_url=auth_url, + captcha_token=captcha_token, captcha_response=captcha_response) + + ClientLogin = client_login + + def upgrade_token(self, token=None, url=atom.http_core.Uri.parse_uri( + 'https://www.google.com/accounts/AuthSubSessionToken')): + """Asks the Google auth server for a multi-use AuthSub token. + + For details on AuthSub, see: + http://code.google.com/apis/accounts/docs/AuthSub.html + + Args: + token: gdata.gauth.AuthSubToken or gdata.gauth.SecureAuthSubToken + (optional) If no token is passed in, the client's auth_token member + is used to request the new token. The token object will be modified + to contain the new session token string. + url: str or atom.http_core.Uri (optional) The URL to which the token + upgrade request should be sent. Defaults to: + https://www.google.com/accounts/AuthSubSessionToken + + Returns: + The upgraded gdata.gauth.AuthSubToken object. + """ + # Default to using the auth_token member if no token is provided. + if token is None: + token = self.auth_token + # We cannot upgrade a None token. + if token is None: + raise UnableToUpgradeToken('No token was provided.') + if not isinstance(token, gdata.gauth.AuthSubToken): + raise UnableToUpgradeToken( + 'Cannot upgrade the token because it is not an AuthSubToken object.') + http_request = atom.http_core.HttpRequest(uri=url, method='GET') + token.modify_request(http_request) + # Use the lower level HttpClient to make the request.
+ response = self.http_client.request(http_request) + if response.status == 200: + token._upgrade_token(response.read()) + return token + else: + raise UnableToUpgradeToken( + 'Server responded to token upgrade request with %s: %s' % ( + response.status, response.read())) + + UpgradeToken = upgrade_token + + def revoke_token(self, token=None, url=atom.http_core.Uri.parse_uri( + 'https://www.google.com/accounts/AuthSubRevokeToken')): + """Requests that the token be invalidated. + + This method can be used for both AuthSub and OAuth tokens (to invalidate + a ClientLogin token, the user must change their password). + + Returns: + True if the server responded with a 200. + + Raises: + A RequestError if the server responds with a non-200 status. + """ + # Default to using the auth_token member if no token is provided. + if token is None: + token = self.auth_token + + http_request = atom.http_core.HttpRequest(uri=url, method='GET') + token.modify_request(http_request) + response = self.http_client.request(http_request) + if response.status != 200: + raise error_from_response('Server sent non-200 to revoke token', + response, RequestError, response.read()) + + return True + + RevokeToken = revoke_token + + def get_oauth_token(self, scopes, next, consumer_key, consumer_secret=None, + rsa_private_key=None, + url=gdata.gauth.REQUEST_TOKEN_URL): + """Obtains an OAuth request token to allow the user to authorize this app. + + Once this client has a request token, the user can authorize the request + token by visiting the authorization URL in their browser. After being + redirected back to this app at the 'next' URL, this app can then exchange + the authorized request token for an access token. + + For more information see the documentation on Google Accounts with OAuth: + http://code.google.com/apis/accounts/docs/OAuth.html#AuthProcess + + Args: + scopes: list of strings or atom.http_core.Uri objects which specify the + URL prefixes which this app will be accessing. For example, to access + the Google Calendar API, you would want to use scopes: + ['https://www.google.com/calendar/feeds/', + 'http://www.google.com/calendar/feeds/'] + next: str or atom.http_core.Uri object, The URL which the user's browser + should be sent to after they authorize access to their data. This + should be a URL in your application which will read the token + information from the URL and upgrade the request token to an access + token. + consumer_key: str This is the identifier for this application which you + should have received when you registered your application with Google + to use OAuth. + consumer_secret: str (optional) The shared secret between your app and + Google which provides evidence that this request is coming from you + application and not another app. If present, this libraries assumes + you want to use an HMAC signature to verify requests. Keep this data + a secret. + rsa_private_key: str (optional) The RSA private key which is used to + generate a digital signature which is checked by Google's server. If + present, this library assumes that you want to use an RSA signature + to verify requests. Keep this data a secret. + url: The URL to which a request for a token should be made. The default + is Google's OAuth request token provider. 
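+
+ Example (illustrative sketch only; the consumer key, shared secret, and
+ callback URL are placeholders):
+
+   request_token = client.get_oauth_token(
+       ['https://www.google.com/calendar/feeds/'],
+       next='http://example.com/oauth_callback',
+       consumer_key='example.com',
+       consumer_secret='the-shared-secret')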
+ """ + http_request = None + if rsa_private_key is not None: + http_request = gdata.gauth.generate_request_for_request_token( + consumer_key, gdata.gauth.RSA_SHA1, scopes, + rsa_key=rsa_private_key, auth_server_url=url, next=next) + elif consumer_secret is not None: + http_request = gdata.gauth.generate_request_for_request_token( + consumer_key, gdata.gauth.HMAC_SHA1, scopes, + consumer_secret=consumer_secret, auth_server_url=url, next=next) + else: + raise MissingOAuthParameters( + 'To request an OAuth token, you must provide your consumer secret' + ' or your private RSA key.') + + response = self.http_client.request(http_request) + response_body = response.read() + + if response.status != 200: + raise error_from_response('Unable to obtain OAuth request token', + response, RequestError, response_body) + + if rsa_private_key is not None: + return gdata.gauth.rsa_token_from_body(response_body, consumer_key, + rsa_private_key, + gdata.gauth.REQUEST_TOKEN) + elif consumer_secret is not None: + return gdata.gauth.hmac_token_from_body(response_body, consumer_key, + consumer_secret, + gdata.gauth.REQUEST_TOKEN) + + GetOAuthToken = get_oauth_token + + def get_access_token(self, request_token, + url=gdata.gauth.ACCESS_TOKEN_URL): + """Exchanges an authorized OAuth request token for an access token. + + Contacts the Google OAuth server to upgrade a previously authorized + request token. Once the request token is upgraded to an access token, + the access token may be used to access the user's data. + + For more details, see the Google Accounts OAuth documentation: + http://code.google.com/apis/accounts/docs/OAuth.html#AccessToken + + Args: + request_token: An OAuth token which has been authorized by the user. + url: (optional) The URL to which the upgrade request should be sent. + Defaults to: https://www.google.com/accounts/OAuthAuthorizeToken + """ + http_request = gdata.gauth.generate_request_for_access_token( + request_token, auth_server_url=url) + response = self.http_client.request(http_request) + response_body = response.read() + if response.status != 200: + raise error_from_response( + 'Unable to upgrade OAuth request token to access token', + response, RequestError, response_body) + + return gdata.gauth.upgrade_to_access_token(request_token, response_body) + + GetAccessToken = get_access_token + + def modify_request(self, http_request): + """Adds or changes request before making the HTTP request. + + This client will add the API version if it is specified. + Subclasses may override this method to add their own request + modifications before the request is made. 
+ """ + http_request = atom.client.AtomPubClient.modify_request(self, + http_request) + if self.api_version is not None: + http_request.headers['GData-Version'] = self.api_version + return http_request + + ModifyRequest = modify_request + + def get_feed(self, uri, auth_token=None, converter=None, + desired_class=gdata.data.GDFeed, **kwargs): + return self.request(method='GET', uri=uri, auth_token=auth_token, + converter=converter, desired_class=desired_class, + **kwargs) + + GetFeed = get_feed + + def get_entry(self, uri, auth_token=None, converter=None, + desired_class=gdata.data.GDEntry, etag=None, **kwargs): + http_request = atom.http_core.HttpRequest() + # Conditional retrieval + if etag is not None: + http_request.headers['If-None-Match'] = etag + return self.request(method='GET', uri=uri, auth_token=auth_token, + http_request=http_request, converter=converter, + desired_class=desired_class, **kwargs) + + GetEntry = get_entry + + def get_next(self, feed, auth_token=None, converter=None, + desired_class=None, **kwargs): + """Fetches the next set of results from the feed. + + When requesting a feed, the number of entries returned is capped at a + service specific default limit (often 25 entries). You can specify your + own entry-count cap using the max-results URL query parameter. If there + are more results than could fit under max-results, the feed will contain + a next link. This method performs a GET against this next results URL. + + Returns: + A new feed object containing the next set of entries in this feed. + """ + if converter is None and desired_class is None: + desired_class = feed.__class__ + return self.get_feed(feed.find_next_link(), auth_token=auth_token, + converter=converter, desired_class=desired_class, + **kwargs) + + GetNext = get_next + + # TODO: add a refresh method to re-fetch the entry/feed from the server + # if it has been updated. + + def post(self, entry, uri, auth_token=None, converter=None, + desired_class=None, **kwargs): + if converter is None and desired_class is None: + desired_class = entry.__class__ + http_request = atom.http_core.HttpRequest() + http_request.add_body_part( + entry.to_string(get_xml_version(self.api_version)), + 'application/atom+xml') + return self.request(method='POST', uri=uri, auth_token=auth_token, + http_request=http_request, converter=converter, + desired_class=desired_class, **kwargs) + + Post = post + + def update(self, entry, auth_token=None, force=False, **kwargs): + """Edits the entry on the server by sending the XML for this entry. + + Performs a PUT and converts the response to a new entry object with a + matching class to the entry passed in. + + Args: + entry: + auth_token: + force: boolean stating whether an update should be forced. Defaults to + False. Normally, if a change has been made since the passed in + entry was obtained, the server will not overwrite the entry since + the changes were based on an obsolete version of the entry. + Setting force to True will cause the update to silently + overwrite whatever version is present. + + Returns: + A new Entry object of a matching type to the entry which was passed in. + """ + http_request = atom.http_core.HttpRequest() + http_request.add_body_part( + entry.to_string(get_xml_version(self.api_version)), + 'application/atom+xml') + # Include the ETag in the request if present. 
+ if force: + http_request.headers['If-Match'] = '*' + elif hasattr(entry, 'etag') and entry.etag: + http_request.headers['If-Match'] = entry.etag + + return self.request(method='PUT', uri=entry.find_edit_link(), + auth_token=auth_token, http_request=http_request, + desired_class=entry.__class__, **kwargs) + + Update = update + + def delete(self, entry_or_uri, auth_token=None, force=False, **kwargs): + http_request = atom.http_core.HttpRequest() + + # Include the ETag in the request if present. + if force: + http_request.headers['If-Match'] = '*' + elif hasattr(entry_or_uri, 'etag') and entry_or_uri.etag: + http_request.headers['If-Match'] = entry_or_uri.etag + + # If the user passes in a URL, just delete directly, may not work as + # the service might require an ETag. + if isinstance(entry_or_uri, (str, unicode, atom.http_core.Uri)): + return self.request(method='DELETE', uri=entry_or_uri, + http_request=http_request, auth_token=auth_token, + **kwargs) + + return self.request(method='DELETE', uri=entry_or_uri.find_edit_link(), + http_request=http_request, auth_token=auth_token, + **kwargs) + + Delete = delete + + #TODO: implement batch requests. + #def batch(feed, uri, auth_token=None, converter=None, **kwargs): + # pass + + # TODO: add a refresh method to request a conditional update to an entry + # or feed. + + +def _add_query_param(param_string, value, http_request): + if value: + http_request.uri.query[param_string] = value + + +class Query(object): + + def __init__(self, text_query=None, categories=None, author=None, alt=None, + updated_min=None, updated_max=None, pretty_print=False, + published_min=None, published_max=None, start_index=None, + max_results=None, strict=False): + """Constructs a Google Data Query to filter feed contents serverside. + + Args: + text_query: Full text search str (optional) + categories: list of strings (optional). Each string is a required + category. To include an 'or' query, put a | in the string between + terms. For example, to find everything in the Fitz category and + the Laurie or Jane category (Fitz and (Laurie or Jane)) you would + set categories to ['Fitz', 'Laurie|Jane']. + author: str (optional) The service returns entries where the author + name and/or email address match your query string. + alt: str (optional) for the Alternative representation type you'd like + the feed in. If you don't specify an alt parameter, the service + returns an Atom feed. This is equivalent to alt='atom'. + alt='rss' returns an RSS 2.0 result feed. + alt='json' returns a JSON representation of the feed. + alt='json-in-script' Requests a response that wraps JSON in a script + tag. + alt='atom-in-script' Requests an Atom response that wraps an XML + string in a script tag. + alt='rss-in-script' Requests an RSS response that wraps an XML + string in a script tag. + updated_min: str (optional), RFC 3339 timestamp format, lower bounds. + For example: 2005-08-09T10:57:00-08:00 + updated_max: str (optional) updated time must be earlier than timestamp. + pretty_print: boolean (optional) If True the server's XML response will + be indented to make it more human readable. Defaults to False. + published_min: str (optional), Similar to updated_min but for published + time. + published_max: str (optional), Similar to updated_max but for published + time. + start_index: int or str (optional) 1-based index of the first result to + be retrieved. Note that this isn't a general cursoring mechanism. 
+ If you first send a query with ?start-index=1&max-results=10 and + then send another query with ?start-index=11&max-results=10, the + service cannot guarantee that the results are equivalent to + ?start-index=1&max-results=20, because insertions and deletions + could have taken place in between the two queries. + max_results: int or str (optional) Maximum number of results to be + retrieved. Each service has a default max (usually 25) which can + vary from service to service. There is also a service-specific + limit to the max_results you can fetch in a request. + strict: boolean (optional) If True, the server will return an error if + the server does not recognize any of the parameters in the request + URL. Defaults to False. + """ + self.text_query = text_query + self.categories = categories or [] + self.author = author + self.alt = alt + self.updated_min = updated_min + self.updated_max = updated_max + self.pretty_print = pretty_print + self.published_min = published_min + self.published_max = published_max + self.start_index = start_index + self.max_results = max_results + self.strict = strict + + def modify_request(self, http_request): + _add_query_param('q', self.text_query, http_request) + if self.categories: + http_request.uri.query['categories'] = ','.join(self.categories) + _add_query_param('author', self.author, http_request) + _add_query_param('alt', self.alt, http_request) + _add_query_param('updated-min', self.updated_min, http_request) + _add_query_param('updated-max', self.updated_max, http_request) + if self.pretty_print: + http_request.uri.query['prettyprint'] = 'true' + _add_query_param('published-min', self.published_min, http_request) + _add_query_param('published-max', self.published_max, http_request) + if self.start_index is not None: + http_request.uri.query['start-index'] = str(self.start_index) + if self.max_results is not None: + http_request.uri.query['max-results'] = str(self.max_results) + if self.strict: + http_request.uri.query['strict'] = 'true' + + + ModifyRequest = modify_request + + +class GDQuery(atom.http_core.Uri): + + def _get_text_query(self): + return self.query['q'] + + def _set_text_query(self, value): + self.query['q'] = value + + text_query = property(_get_text_query, _set_text_query, + doc='The q parameter for searching for an exact text match on content') + + +class ResumableUploader(object): + """Resumable upload helper for the Google Data protocol.""" + + DEFAULT_CHUNK_SIZE = 5242880 # 5MB + + def __init__(self, client, file_handle, content_type, total_file_size, + chunk_size=None, desired_class=None): + """Starts a resumable upload to a service that supports the protocol. + + Args: + client: gdata.client.GDClient A Google Data API service. + file_handle: object A file-like object containing the file to upload. + content_type: str The mimetype of the file to upload. + total_file_size: int The file's total size in bytes. + chunk_size: int The size of each upload chunk. If None, the + DEFAULT_CHUNK_SIZE will be used. + desired_class: object (optional) The type of gdata.data.GDEntry to parse + the completed entry as. This should be specific to the API. + """ + self.client = client + self.file_handle = file_handle + self.content_type = content_type + self.total_file_size = total_file_size + self.chunk_size = chunk_size or self.DEFAULT_CHUNK_SIZE + self.desired_class = desired_class or gdata.data.GDEntry + self.upload_uri = None + + # Send the entire file if the chunk size is less than fize's total size. 
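+    # (That is, a file no larger than one chunk is sent in a single request.)
+    #
+    # Illustrative usage (a sketch only; the file name, MIME type and the
+    # resumable-create-media link are placeholders, not values supplied by
+    # this class):
+    #   f = open('movie.avi', 'rb')
+    #   uploader = gdata.client.ResumableUploader(
+    #       client, f, 'video/avi', os.path.getsize('movie.avi'))
+    #   entry = uploader.upload_file(create_media_link, auth_token=token)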
+ if self.total_file_size <= self.chunk_size: + self.chunk_size = total_file_size + + def _init_session(self, resumable_media_link, entry=None, headers=None, + auth_token=None): + """Starts a new resumable upload to a service that supports the protocol. + + The method makes a request to initiate a new upload session. The unique + upload uri returned by the server (and set in this method) should be used + to send upload chunks to the server. + + Args: + resumable_media_link: str The full URL for the #resumable-create-media or + #resumable-edit-media link for starting a resumable upload request or + updating media using a resumable PUT. + entry: A (optional) gdata.data.GDEntry containging metadata to create the + upload from. + headers: dict (optional) Additional headers to send in the initial request + to create the resumable upload request. These headers will override + any default headers sent in the request. For example: + headers={'Slug': 'MyTitle'}. + auth_token: (optional) An object which sets the Authorization HTTP header + in its modify_request method. Recommended classes include + gdata.gauth.ClientLoginToken and gdata.gauth.AuthSubToken + among others. + + Returns: + The final Atom entry as created on the server. The entry will be + parsed accoring to the class specified in self.desired_class. + + Raises: + RequestError if the unique upload uri is not set or the + server returns something other than an HTTP 308 when the upload is + incomplete. + """ + http_request = atom.http_core.HttpRequest() + + # Send empty POST if Atom XML wasn't specified. + if entry is None: + http_request.add_body_part('', self.content_type, size=0) + else: + http_request.add_body_part(str(entry), 'application/atom+xml', + size=len(str(entry))) + http_request.headers['X-Upload-Content-Type'] = self.content_type + http_request.headers['X-Upload-Content-Length'] = self.total_file_size + + if headers is not None: + http_request.headers.update(headers) + + response = self.client.request(method='POST', + uri=resumable_media_link, + auth_token=auth_token, + http_request=http_request) + + self.upload_uri = (response.getheader('location') or + response.getheader('Location')) + + _InitSession = _init_session + + def upload_chunk(self, start_byte, content_bytes): + """Uploads a byte range (chunk) to the resumable upload server. + + Args: + start_byte: int The byte offset of the total file where the byte range + passed in lives. + content_bytes: str The file contents of this chunk. + + Returns: + The final Atom entry created on the server. The entry object's type will + be the class specified in self.desired_class. + + Raises: + RequestError if the unique upload uri is not set or the + server returns something other than an HTTP 308 when the upload is + incomplete. + """ + if self.upload_uri is None: + raise RequestError('Resumable upload request not initialized.') + + # Adjustment if last byte range is less than defined chunk size. 
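+    # For example, the second 5 MB chunk of a 12 MB file is sent with
+    # 'Content-Range: bytes 5242880-10485759/12582912'.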
+ chunk_size = self.chunk_size + if len(content_bytes) <= chunk_size: + chunk_size = len(content_bytes) + + http_request = atom.http_core.HttpRequest() + http_request.add_body_part(content_bytes, self.content_type, + size=len(content_bytes)) + http_request.headers['Content-Range'] = ('bytes %s-%s/%s' + % (start_byte, + start_byte + chunk_size - 1, + self.total_file_size)) + + try: + response = self.client.request(method='POST', uri=self.upload_uri, + http_request=http_request, + desired_class=self.desired_class) + return response + except RequestError, error: + if error.status == 308: + return None + else: + raise error + + UploadChunk = upload_chunk + + def upload_file(self, resumable_media_link, entry=None, headers=None, + auth_token=None): + """Uploads an entire file in chunks using the resumable upload protocol. + + If you are interested in pausing an upload or controlling the chunking + yourself, use the upload_chunk() method instead. + + Args: + resumable_media_link: str The full URL for the #resumable-create-media for + starting a resumable upload request. + entry: A (optional) gdata.data.GDEntry containging metadata to create the + upload from. + headers: dict Additional headers to send in the initial request to create + the resumable upload request. These headers will override any default + headers sent in the request. For example: headers={'Slug': 'MyTitle'}. + auth_token: (optional) An object which sets the Authorization HTTP header + in its modify_request method. Recommended classes include + gdata.gauth.ClientLoginToken and gdata.gauth.AuthSubToken + among others. + + Returns: + The final Atom entry created on the server. The entry object's type will + be the class specified in self.desired_class. + + Raises: + RequestError if anything other than a HTTP 308 is returned + when the request raises an exception. + """ + self._init_session(resumable_media_link, headers=headers, + auth_token=auth_token, entry=entry) + + start_byte = 0 + entry = None + + while not entry: + entry = self.upload_chunk( + start_byte, self.file_handle.read(self.chunk_size)) + start_byte += self.chunk_size + + return entry + + UploadFile = upload_file + + def update_file(self, entry_or_resumable_edit_link, headers=None, force=False, + auth_token=None): + """Updates the contents of an existing file using the resumable protocol. + + If you are interested in pausing an upload or controlling the chunking + yourself, use the upload_chunk() method instead. + + Args: + entry_or_resumable_edit_link: object or string A gdata.data.GDEntry for + the entry/file to update or the full uri of the link with rel + #resumable-edit-media. + headers: dict Additional headers to send in the initial request to create + the resumable upload request. These headers will override any default + headers sent in the request. For example: headers={'Slug': 'MyTitle'}. + force boolean (optional) True to force an update and set the If-Match + header to '*'. If False and entry_or_resumable_edit_link is a + gdata.data.GDEntry object, its etag value is used. Otherwise this + parameter should be set to True to force the update. + auth_token: (optional) An object which sets the Authorization HTTP header + in its modify_request method. Recommended classes include + gdata.gauth.ClientLoginToken and gdata.gauth.AuthSubToken + among others. + + Returns: + The final Atom entry created on the server. The entry object's type will + be the class specified in self.desired_class. 
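+
+    Example (an illustrative sketch only; the entry, file handle and size
+    below are placeholders, not values defined by this class):
+
+      uploader = gdata.client.ResumableUploader(
+          client, new_file_handle, 'image/jpeg', new_size)
+      updated_entry = uploader.update_file(old_entry, auth_token=token)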
+ + Raises: + RequestError if anything other than a HTTP 308 is returned + when the request raises an exception. + """ + # Need to override the POST request for a resumable update (required). + customer_headers = {'X-HTTP-Method-Override': 'PUT'} + + if headers is not None: + customer_headers.update(headers) + + if isinstance(entry_or_resumable_edit_link, gdata.data.GDEntry): + resumable_edit_link = entry_or_resumable_edit_link.find_url( + 'http://schemas.google.com/g/2005#resumable-edit-media') + customer_headers['If-Match'] = entry_or_resumable_edit_link.etag + else: + resumable_edit_link = entry_or_resumable_edit_link + + if force: + customer_headers['If-Match'] = '*' + + return self.upload_file(resumable_edit_link, headers=customer_headers, + auth_token=auth_token) + + UpdateFile = update_file + + def query_upload_status(self, uri=None): + """Queries the current status of a resumable upload request. + + Args: + uri: str (optional) A resumable upload uri to query and override the one + that is set in this object. + + Returns: + An integer representing the file position (byte) to resume the upload from + or True if the upload is complete. + + Raises: + RequestError if anything other than a HTTP 308 is returned + when the request raises an exception. + """ + # Override object's unique upload uri. + if uri is None: + uri = self.upload_uri + + http_request = atom.http_core.HttpRequest() + http_request.headers['Content-Length'] = '0' + http_request.headers['Content-Range'] = 'bytes */%s' % self.total_file_size + + try: + response = self.client.request( + method='POST', uri=uri, http_request=http_request) + if response.status == 201: + return True + else: + raise error_from_response( + '%s returned by server' % response.status, response, RequestError) + except RequestError, error: + if error.status == 308: + for pair in error.headers: + if pair[0].capitalize() == 'Range': + return int(pair[1].split('-')[1]) + 1 + else: + raise error + + QueryUploadStatus = query_upload_status diff --git a/gam/gdata/codesearch/__init__.py b/gam/gdata/codesearch/__init__.py new file mode 100755 index 00000000000..fa23ef021dc --- /dev/null +++ b/gam/gdata/codesearch/__init__.py @@ -0,0 +1,136 @@ +# -*- coding: utf-8 -*- +# +# Copyright (c) 2007 Benoit Chesneau +# +# Permission to use, copy, modify, and distribute this software for any +# purpose with or without fee is hereby granted, provided that the above +# copyright notice and this permission notice appear in all copies. +# +# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES +# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR +# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF +# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ + +"""Contains extensions to Atom objects used by Google Codesearch""" + +__author__ = 'Benoit Chesneau' + + +import atom +import gdata + + +CODESEARCH_NAMESPACE='http://schemas.google.com/codesearch/2006' +CODESEARCH_TEMPLATE='{http://shema.google.com/codesearch/2006}%s' + + +class Match(atom.AtomBase): + """ The Google Codesearch match element """ + _tag = 'match' + _namespace = CODESEARCH_NAMESPACE + _children = atom.AtomBase._children.copy() + _attributes = atom.AtomBase._attributes.copy() + _attributes['lineNumber'] = 'line_number' + _attributes['type'] = 'type' + + def __init__(self, line_number=None, type=None, extension_elements=None, + extension_attributes=None, text=None): + self.text = text + self.type = type + self.line_number = line_number + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + +class File(atom.AtomBase): + """ The Google Codesearch file element""" + _tag = 'file' + _namespace = CODESEARCH_NAMESPACE + _children = atom.AtomBase._children.copy() + _attributes = atom.AtomBase._attributes.copy() + _attributes['name'] = 'name' + + def __init__(self, name=None, extension_elements=None, + extension_attributes=None, text=None): + self.text = text + self.name = name + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + +class Package(atom.AtomBase): + """ The Google Codesearch package element""" + _tag = 'package' + _namespace = CODESEARCH_NAMESPACE + _children = atom.AtomBase._children.copy() + _attributes = atom.AtomBase._attributes.copy() + _attributes['name'] = 'name' + _attributes['uri'] = 'uri' + + def __init__(self, name=None, uri=None, extension_elements=None, + extension_attributes=None, text=None): + self.text = text + self.name = name + self.uri = uri + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + +class CodesearchEntry(gdata.GDataEntry): + """ Google codesearch atom entry""" + _tag = gdata.GDataEntry._tag + _namespace = gdata.GDataEntry._namespace + _children = gdata.GDataEntry._children.copy() + _attributes = gdata.GDataEntry._attributes.copy() + + _children['{%s}file' % CODESEARCH_NAMESPACE] = ('file', File) + _children['{%s}package' % CODESEARCH_NAMESPACE] = ('package', Package) + _children['{%s}match' % CODESEARCH_NAMESPACE] = ('match', [Match]) + + def __init__(self, author=None, category=None, content=None, + atom_id=None, link=None, published=None, + title=None, updated=None, + match=None, + extension_elements=None, extension_attributes=None, text=None): + + gdata.GDataEntry.__init__(self, author=author, category=category, + content=content, atom_id=atom_id, link=link, + published=published, title=title, + updated=updated, text=None) + + self.match = match or [] + + +def CodesearchEntryFromString(xml_string): + """Converts an XML string into a CodesearchEntry object. + + Args: + xml_string: string The XML describing a Codesearch feed entry. + + Returns: + A CodesearchEntry object corresponding to the given XML. 
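+
+  Example (illustrative; assumes xml_string holds a single Atom <entry>
+  from a Codesearch results feed):
+
+    entry = CodesearchEntryFromString(xml_string)
+    for match in entry.match:
+      print match.line_number, match.text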
+ """ + return atom.CreateClassFromXMLString(CodesearchEntry, xml_string) + + +class CodesearchFeed(gdata.GDataFeed): + """feed containing list of Google codesearch Items""" + _tag = gdata.GDataFeed._tag + _namespace = gdata.GDataFeed._namespace + _children = gdata.GDataFeed._children.copy() + _attributes = gdata.GDataFeed._attributes.copy() + _children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry', [CodesearchEntry]) + + +def CodesearchFeedFromString(xml_string): + """Converts an XML string into a CodesearchFeed object. + Args: + xml_string: string The XML describing a Codesearch feed. + Returns: + A CodeseartchFeed object corresponding to the given XML. + """ + return atom.CreateClassFromXMLString(CodesearchFeed, xml_string) diff --git a/gam/gdata/codesearch/service.py b/gam/gdata/codesearch/service.py new file mode 100755 index 00000000000..d6e23359a4a --- /dev/null +++ b/gam/gdata/codesearch/service.py @@ -0,0 +1,109 @@ +# -*- coding: utf-8 -*- +# +# Copyright (c) 2007 Benoit Chesneau +# +# Permission to use, copy, modify, and distribute this software for any +# purpose with or without fee is hereby granted, provided that the above +# copyright notice and this permission notice appear in all copies. +# +# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES +# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR +# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF +# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + + +"""CodesearchService extends GDataService to streamline Google Codesearch +operations""" + + +__author__ = 'Benoit Chesneau' + + +import atom +import gdata.service +import gdata.codesearch + + +class CodesearchService(gdata.service.GDataService): + """Client extension for Google codesearch service""" + + def __init__(self, email=None, password=None, source=None, + server='www.google.com', additional_headers=None, **kwargs): + """Creates a client for the Google codesearch service. + + Args: + email: string (optional) The user's email address, used for + authentication. + password: string (optional) The user's password. + source: string (optional) The name of the user's application. + server: string (optional) The name of the server to which a connection + will be opened. Default value: 'www.google.com'. + **kwargs: The other parameters to pass to gdata.service.GDataService + constructor. + """ + gdata.service.GDataService.__init__( + self, email=email, password=password, service='codesearch', + source=source, server=server, additional_headers=additional_headers, + **kwargs) + + def Query(self, uri, converter=gdata.codesearch.CodesearchFeedFromString): + """Queries the Codesearch feed and returns the resulting feed of + entries. + + Args: + uri: string The full URI to be queried. This can contain query + parameters, a hostname, or simply the relative path to a Document + List feed. The DocumentQuery object is useful when constructing + query parameters. + converter: func (optional) A function which will be executed on the + retrieved item, generally to render it into a Python object. + By default the CodesearchFeedFromString function is used to + return a CodesearchFeed object. This is because most feed + queries will result in a feed and not a single entry. 
+ + Returns : + A CodesearchFeed objects representing the feed returned by the server + """ + return self.Get(uri, converter=converter) + + def GetSnippetsFeed(self, text_query=None): + """Retrieve Codesearch feed for a keyword + + Args: + text_query : string (optional) The contents of the q query parameter. This + string is URL escaped upon conversion to a URI. + Returns: + A CodesearchFeed objects representing the feed returned by the server + """ + + query=gdata.codesearch.service.CodesearchQuery(text_query=text_query) + feed = self.Query(query.ToUri()) + return feed + + +class CodesearchQuery(gdata.service.Query): + """Object used to construct the query to the Google Codesearch feed. here only as a shorcut""" + + def __init__(self, feed='/codesearch/feeds/search', text_query=None, + params=None, categories=None): + """Constructor for Codesearch Query. + + Args: + feed: string (optional) The path for the feed. (e.g. '/codesearch/feeds/search') + text_query: string (optional) The contents of the q query parameter. This + string is URL escaped upon conversion to a URI. + params: dict (optional) Parameter value string pairs which become URL + params when translated to a URI. These parameters are added to + the query's items. + categories: list (optional) List of category strings which should be + included as query categories. See gdata.service.Query for + additional documentation. + + Yelds: + A CodesearchQuery object to construct a URI based on Codesearch feed + """ + + gdata.service.Query.__init__(self, feed, text_query, params, categories) diff --git a/gam/gdata/contacts/__init__.py b/gam/gdata/contacts/__init__.py new file mode 100755 index 00000000000..363a4a52402 --- /dev/null +++ b/gam/gdata/contacts/__init__.py @@ -0,0 +1,741 @@ +#!/usr/bin/env python +# +# Copyright 2009 Google Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""Contains extensions to ElementWrapper objects used with Google Contacts.""" + +__author__ = 'dbrattli (Dag Brattli)' + + +import atom +import gdata + + +## Constants from http://code.google.com/apis/gdata/elements.html ## +REL_HOME = 'http://schemas.google.com/g/2005#home' +REL_WORK = 'http://schemas.google.com/g/2005#work' +REL_OTHER = 'http://schemas.google.com/g/2005#other' + +# AOL Instant Messenger protocol +IM_AIM = 'http://schemas.google.com/g/2005#AIM' +IM_MSN = 'http://schemas.google.com/g/2005#MSN' # MSN Messenger protocol +IM_YAHOO = 'http://schemas.google.com/g/2005#YAHOO' # Yahoo Messenger protocol +IM_SKYPE = 'http://schemas.google.com/g/2005#SKYPE' # Skype protocol +IM_QQ = 'http://schemas.google.com/g/2005#QQ' # QQ protocol +# Google Talk protocol +IM_GOOGLE_TALK = 'http://schemas.google.com/g/2005#GOOGLE_TALK' +IM_ICQ = 'http://schemas.google.com/g/2005#ICQ' # ICQ protocol +IM_JABBER = 'http://schemas.google.com/g/2005#JABBER' # Jabber protocol +IM_NETMEETING = 'http://schemas.google.com/g/2005#netmeeting' # NetMeeting + +PHOTO_LINK_REL = 'http://schemas.google.com/contacts/2008/rel#photo' +PHOTO_EDIT_LINK_REL = 'http://schemas.google.com/contacts/2008/rel#edit-photo' + +# Different phone types, for moro info see: +# http://code.google.com/apis/gdata/docs/2.0/elements.html#gdPhoneNumber +PHONE_CAR = 'http://schemas.google.com/g/2005#car' +PHONE_FAX = 'http://schemas.google.com/g/2005#fax' +PHONE_GENERAL = 'http://schemas.google.com/g/2005#general' +PHONE_HOME = REL_HOME +PHONE_HOME_FAX = 'http://schemas.google.com/g/2005#home_fax' +PHONE_INTERNAL = 'http://schemas.google.com/g/2005#internal-extension' +PHONE_MOBILE = 'http://schemas.google.com/g/2005#mobile' +PHONE_OTHER = REL_OTHER +PHONE_PAGER = 'http://schemas.google.com/g/2005#pager' +PHONE_SATELLITE = 'http://schemas.google.com/g/2005#satellite' +PHONE_VOIP = 'http://schemas.google.com/g/2005#voip' +PHONE_WORK = REL_WORK +PHONE_WORK_FAX = 'http://schemas.google.com/g/2005#work_fax' +PHONE_WORK_MOBILE = 'http://schemas.google.com/g/2005#work_mobile' +PHONE_WORK_PAGER = 'http://schemas.google.com/g/2005#work_pager' +PHONE_MAIN = 'http://schemas.google.com/g/2005#main' +PHONE_ASSISTANT = 'http://schemas.google.com/g/2005#assistant' +PHONE_CALLBACK = 'http://schemas.google.com/g/2005#callback' +PHONE_COMPANY_MAIN = 'http://schemas.google.com/g/2005#company_main' +PHONE_ISDN = 'http://schemas.google.com/g/2005#isdn' +PHONE_OTHER_FAX = 'http://schemas.google.com/g/2005#other_fax' +PHONE_RADIO = 'http://schemas.google.com/g/2005#radio' +PHONE_TELEX = 'http://schemas.google.com/g/2005#telex' +PHONE_TTY_TDD = 'http://schemas.google.com/g/2005#tty_tdd' + +EXTERNAL_ID_ORGANIZATION = 'organization' + +RELATION_MANAGER = 'manager' + +CONTACTS_NAMESPACE = 'http://schemas.google.com/contact/2008' + + +class GDataBase(atom.AtomBase): + """The Google Contacts intermediate class from atom.AtomBase.""" + + _namespace = gdata.GDATA_NAMESPACE + _children = atom.AtomBase._children.copy() + _attributes = atom.AtomBase._attributes.copy() + + def __init__(self, text=None, + extension_elements=None, extension_attributes=None): + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + +class ContactsBase(GDataBase): + """The Google Contacts intermediate class for Contacts namespace.""" + + _namespace = CONTACTS_NAMESPACE + + +class OrgName(GDataBase): + """The Google Contacts OrgName element.""" + + _tag = 'orgName' + + +class OrgTitle(GDataBase): + """The Google 
Contacts OrgTitle element.""" + + _tag = 'orgTitle' + + +class OrgDepartment(GDataBase): + """The Google Contacts OrgDepartment element.""" + + _tag = 'orgDepartment' + + +class OrgJobDescription(GDataBase): + """The Google Contacts OrgJobDescription element.""" + + _tag = 'orgJobDescription' + + +class Where(GDataBase): + """The Google Contacts Where element.""" + + _tag = 'where' + _children = GDataBase._children.copy() + _attributes = GDataBase._attributes.copy() + _attributes['rel'] = 'rel' + _attributes['label'] = 'label' + _attributes['valueString'] = 'value_string' + + def __init__(self, value_string=None, rel=None, label=None, + text=None, extension_elements=None, extension_attributes=None): + GDataBase.__init__(self, text=text, extension_elements=extension_elements, + extension_attributes=extension_attributes) + self.rel = rel + self.label = label + self.value_string = value_string + + +class When(GDataBase): + """The Google Contacts When element.""" + + _tag = 'when' + _children = GDataBase._children.copy() + _attributes = GDataBase._attributes.copy() + _attributes['startTime'] = 'start_time' + _attributes['endTime'] = 'end_time' + _attributes['label'] = 'label' + + def __init__(self, start_time=None, end_time=None, label=None, + text=None, extension_elements=None, extension_attributes=None): + GDataBase.__init__(self, text=text, extension_elements=extension_elements, + extension_attributes=extension_attributes) + self.start_time = start_time + self.end_time = end_time + self.label = label + + +class Organization(GDataBase): + """The Google Contacts Organization element.""" + + _tag = 'organization' + _children = GDataBase._children.copy() + _attributes = GDataBase._attributes.copy() + _attributes['label'] = 'label' + _attributes['rel'] = 'rel' + _attributes['primary'] = 'primary' + _children['{%s}orgName' % GDataBase._namespace] = ( + 'org_name', OrgName) + _children['{%s}orgTitle' % GDataBase._namespace] = ( + 'org_title', OrgTitle) + _children['{%s}orgDepartment' % GDataBase._namespace] = ( + 'org_department', OrgDepartment) + _children['{%s}orgJobDescription' % GDataBase._namespace] = ( + 'org_job_description', OrgJobDescription) + #_children['{%s}where' % GDataBase._namespace] = ('where', Where) + + def __init__(self, label=None, rel=None, primary='false', org_name=None, + org_title=None, org_department=None, org_job_description=None, + where=None, text=None, + extension_elements=None, extension_attributes=None): + GDataBase.__init__(self, text=text, extension_elements=extension_elements, + extension_attributes=extension_attributes) + self.label = label + self.rel = rel or REL_OTHER + self.primary = primary + self.org_name = org_name + self.org_title = org_title + self.org_department = org_department + self.org_job_description = org_job_description + self.where = where + + +class PostalAddress(GDataBase): + """The Google Contacts PostalAddress element.""" + + _tag = 'postalAddress' + _children = GDataBase._children.copy() + _attributes = GDataBase._attributes.copy() + _attributes['rel'] = 'rel' + _attributes['primary'] = 'primary' + + def __init__(self, primary=None, rel=None, text=None, + extension_elements=None, extension_attributes=None): + GDataBase.__init__(self, text=text, extension_elements=extension_elements, + extension_attributes=extension_attributes) + self.rel = rel or REL_OTHER + self.primary = primary + + +class FormattedAddress(GDataBase): + """The Google Contacts FormattedAddress element.""" + + _tag = 'formattedAddress' + + +class 
StructuredPostalAddress(GDataBase): + """The Google Contacts StructuredPostalAddress element.""" + + _tag = 'structuredPostalAddress' + _children = GDataBase._children.copy() + _attributes = GDataBase._attributes.copy() + _attributes['rel'] = 'rel' + _attributes['primary'] = 'primary' + _children['{%s}formattedAddress' % GDataBase._namespace] = ( + 'formatted_address', FormattedAddress) + + def __init__(self, rel=None, primary=None, + formatted_address=None, text=None, + extension_elements=None, extension_attributes=None): + GDataBase.__init__(self, text=text, extension_elements=extension_elements, + extension_attributes=extension_attributes) + self.rel = rel or REL_OTHER + self.primary = primary + self.formatted_address = formatted_address + + +class IM(GDataBase): + """The Google Contacts IM element.""" + + _tag = 'im' + _children = GDataBase._children.copy() + _attributes = GDataBase._attributes.copy() + _attributes['address'] = 'address' + _attributes['primary'] = 'primary' + _attributes['protocol'] = 'protocol' + _attributes['label'] = 'label' + _attributes['rel'] = 'rel' + + def __init__(self, primary='false', rel=None, address=None, protocol=None, + label=None, text=None, + extension_elements=None, extension_attributes=None): + GDataBase.__init__(self, text=text, extension_elements=extension_elements, + extension_attributes=extension_attributes) + self.protocol = protocol + self.address = address + self.primary = primary + self.rel = rel or REL_OTHER + self.label = label + + +class Email(GDataBase): + """The Google Contacts Email element.""" + + _tag = 'email' + _children = GDataBase._children.copy() + _attributes = GDataBase._attributes.copy() + _attributes['address'] = 'address' + _attributes['primary'] = 'primary' + _attributes['rel'] = 'rel' + _attributes['label'] = 'label' + + def __init__(self, label=None, rel=None, address=None, primary='false', + text=None, extension_elements=None, extension_attributes=None): + GDataBase.__init__(self, text=text, extension_elements=extension_elements, + extension_attributes=extension_attributes) + self.label = label + self.rel = rel or REL_OTHER + self.address = address + self.primary = primary + + +class PhoneNumber(GDataBase): + """The Google Contacts PhoneNumber element.""" + + _tag = 'phoneNumber' + _children = GDataBase._children.copy() + _attributes = GDataBase._attributes.copy() + _attributes['label'] = 'label' + _attributes['rel'] = 'rel' + _attributes['uri'] = 'uri' + _attributes['primary'] = 'primary' + + def __init__(self, label=None, rel=None, uri=None, primary='false', + text=None, extension_elements=None, extension_attributes=None): + GDataBase.__init__(self, text=text, extension_elements=extension_elements, + extension_attributes=extension_attributes) + self.label = label + self.rel = rel or REL_OTHER + self.uri = uri + self.primary = primary + + +class Nickname(ContactsBase): + """The Google Contacts Nickname element.""" + + _tag = 'nickname' + + +class Occupation(ContactsBase): + """The Google Contacts Occupation element.""" + + _tag = 'occupation' + + +class Gender(ContactsBase): + """The Google Contacts Gender element.""" + + _tag = 'gender' + _children = ContactsBase._children.copy() + _attributes = ContactsBase._attributes.copy() + _attributes['value'] = 'value' + + def __init__(self, value=None, + text=None, extension_elements=None, extension_attributes=None): + ContactsBase.__init__(self, text=text, + extension_elements=extension_elements, + extension_attributes=extension_attributes) + self.value = value + + +class 
Birthday(ContactsBase): + """The Google Contacts Birthday element.""" + + _tag = 'birthday' + _children = ContactsBase._children.copy() + _attributes = ContactsBase._attributes.copy() + _attributes['when'] = 'when' + + def __init__(self, when=None, + text=None, extension_elements=None, extension_attributes=None): + ContactsBase.__init__(self, text=text, + extension_elements=extension_elements, + extension_attributes=extension_attributes) + self.when = when + + +class Relation(ContactsBase): + """The Google Contacts Relation element.""" + + _tag = 'relation' + _children = ContactsBase._children.copy() + _attributes = ContactsBase._attributes.copy() + _attributes['label'] = 'label' + _attributes['rel'] = 'rel' + + def __init__(self, label=None, rel=None, + text=None, extension_elements=None, extension_attributes=None): + ContactsBase.__init__(self, text=text, + extension_elements=extension_elements, + extension_attributes=extension_attributes) + self.label = label + self.rel = rel + + +def RelationFromString(xml_string): + return atom.CreateClassFromXMLString(Relation, xml_string) + + +class UserDefinedField(ContactsBase): + """The Google Contacts UserDefinedField element.""" + + _tag = 'userDefinedField' + _children = ContactsBase._children.copy() + _attributes = ContactsBase._attributes.copy() + _attributes['key'] = 'key' + _attributes['value'] = 'value' + + def __init__(self, key=None, value=None, + text=None, extension_elements=None, extension_attributes=None): + ContactsBase.__init__(self, text=text, + extension_elements=extension_elements, + extension_attributes=extension_attributes) + self.key = key + self.value = value + + +def UserDefinedFieldFromString(xml_string): + return atom.CreateClassFromXMLString(UserDefinedField, xml_string) + + +class Website(ContactsBase): + """The Google Contacts Website element.""" + + _tag = 'website' + _children = ContactsBase._children.copy() + _attributes = ContactsBase._attributes.copy() + _attributes['href'] = 'href' + _attributes['label'] = 'label' + _attributes['primary'] = 'primary' + _attributes['rel'] = 'rel' + + def __init__(self, href=None, label=None, primary='false', rel=None, + text=None, extension_elements=None, extension_attributes=None): + ContactsBase.__init__(self, text=text, + extension_elements=extension_elements, + extension_attributes=extension_attributes) + self.href = href + self.label = label + self.primary = primary + self.rel = rel + + +def WebsiteFromString(xml_string): + return atom.CreateClassFromXMLString(Website, xml_string) + + +class ExternalId(ContactsBase): + """The Google Contacts ExternalId element.""" + + _tag = 'externalId' + _children = ContactsBase._children.copy() + _attributes = ContactsBase._attributes.copy() + _attributes['label'] = 'label' + _attributes['rel'] = 'rel' + _attributes['value'] = 'value' + + def __init__(self, label=None, rel=None, value=None, + text=None, extension_elements=None, extension_attributes=None): + ContactsBase.__init__(self, text=text, + extension_elements=extension_elements, + extension_attributes=extension_attributes) + self.label = label + self.rel = rel + self.value = value + + +def ExternalIdFromString(xml_string): + return atom.CreateClassFromXMLString(ExternalId, xml_string) + + +class Event(ContactsBase): + """The Google Contacts Event element.""" + + _tag = 'event' + _children = ContactsBase._children.copy() + _attributes = ContactsBase._attributes.copy() + _attributes['label'] = 'label' + _attributes['rel'] = 'rel' + _children['{%s}when' % ContactsBase._namespace] = 
('when', When) + + def __init__(self, label=None, rel=None, when=None, + text=None, extension_elements=None, extension_attributes=None): + ContactsBase.__init__(self, text=text, + extension_elements=extension_elements, + extension_attributes=extension_attributes) + self.label = label + self.rel = rel + self.when = when + + +def EventFromString(xml_string): + return atom.CreateClassFromXMLString(Event, xml_string) + + +class Deleted(GDataBase): + """The Google Contacts Deleted element.""" + + _tag = 'deleted' + + +class GroupMembershipInfo(ContactsBase): + """The Google Contacts GroupMembershipInfo element.""" + + _tag = 'groupMembershipInfo' + + _children = ContactsBase._children.copy() + _attributes = ContactsBase._attributes.copy() + _attributes['deleted'] = 'deleted' + _attributes['href'] = 'href' + + def __init__(self, deleted=None, href=None, text=None, + extension_elements=None, extension_attributes=None): + ContactsBase.__init__(self, text=text, + extension_elements=extension_elements, + extension_attributes=extension_attributes) + self.deleted = deleted + self.href = href + + +class PersonEntry(gdata.BatchEntry): + """Base class for ContactEntry and ProfileEntry.""" + + _children = gdata.BatchEntry._children.copy() + _children['{%s}organization' % gdata.GDATA_NAMESPACE] = ( + 'organization', [Organization]) + _children['{%s}phoneNumber' % gdata.GDATA_NAMESPACE] = ( + 'phone_number', [PhoneNumber]) + _children['{%s}nickname' % CONTACTS_NAMESPACE] = ('nickname', Nickname) + _children['{%s}occupation' % CONTACTS_NAMESPACE] = ('occupation', Occupation) + _children['{%s}gender' % CONTACTS_NAMESPACE] = ('gender', Gender) + _children['{%s}birthday' % CONTACTS_NAMESPACE] = ('birthday', Birthday) + _children['{%s}postalAddress' % gdata.GDATA_NAMESPACE] = ('postal_address', + [PostalAddress]) + _children['{%s}structuredPostalAddress' % gdata.GDATA_NAMESPACE] = ( + 'structured_pstal_address', [StructuredPostalAddress]) + _children['{%s}email' % gdata.GDATA_NAMESPACE] = ('email', [Email]) + _children['{%s}im' % gdata.GDATA_NAMESPACE] = ('im', [IM]) + _children['{%s}relation' % CONTACTS_NAMESPACE] = ('relation', [Relation]) + _children['{%s}userDefinedField' % CONTACTS_NAMESPACE] = ( + 'user_defined_field', [UserDefinedField]) + _children['{%s}website' % CONTACTS_NAMESPACE] = ('website', [Website]) + _children['{%s}externalId' % CONTACTS_NAMESPACE] = ( + 'external_id', [ExternalId]) + _children['{%s}event' % CONTACTS_NAMESPACE] = ('event', [Event]) + # The following line should be removed once the Python support + # for GData 2.0 is mature. 
+ _attributes = gdata.BatchEntry._attributes.copy() + _attributes['{%s}etag' % gdata.GDATA_NAMESPACE] = 'etag' + + def __init__(self, author=None, category=None, content=None, + atom_id=None, link=None, published=None, + title=None, updated=None, organization=None, phone_number=None, + nickname=None, occupation=None, gender=None, birthday=None, + postal_address=None, structured_pstal_address=None, email=None, + im=None, relation=None, user_defined_field=None, website=None, + external_id=None, event=None, batch_operation=None, + batch_id=None, batch_status=None, text=None, + extension_elements=None, extension_attributes=None, etag=None): + gdata.BatchEntry.__init__(self, author=author, category=category, + content=content, atom_id=atom_id, link=link, + published=published, + batch_operation=batch_operation, + batch_id=batch_id, batch_status=batch_status, + title=title, updated=updated) + self.organization = organization or [] + self.phone_number = phone_number or [] + self.nickname = nickname + self.occupation = occupation + self.gender = gender + self.birthday = birthday + self.postal_address = postal_address or [] + self.structured_pstal_address = structured_pstal_address or [] + self.email = email or [] + self.im = im or [] + self.relation = relation or [] + self.user_defined_field = user_defined_field or [] + self.website = website or [] + self.external_id = external_id or [] + self.event = event or [] + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + # The following line should be removed once the Python support + # for GData 2.0 is mature. + self.etag = etag + + +class ContactEntry(PersonEntry): + """A Google Contact flavor of an Atom Entry.""" + + _children = PersonEntry._children.copy() + + _children['{%s}deleted' % gdata.GDATA_NAMESPACE] = ('deleted', Deleted) + _children['{%s}groupMembershipInfo' % CONTACTS_NAMESPACE] = ( + 'group_membership_info', [GroupMembershipInfo]) + _children['{%s}extendedProperty' % gdata.GDATA_NAMESPACE] = ( + 'extended_property', [gdata.ExtendedProperty]) + # Overwrite the organization rule in PersonEntry so that a ContactEntry + # may only contain one element. 
+ _children['{%s}organization' % gdata.GDATA_NAMESPACE] = ( + 'organization', Organization) + + def __init__(self, author=None, category=None, content=None, + atom_id=None, link=None, published=None, + title=None, updated=None, organization=None, phone_number=None, + nickname=None, occupation=None, gender=None, birthday=None, + postal_address=None, structured_pstal_address=None, email=None, + im=None, relation=None, user_defined_field=None, website=None, + external_id=None, event=None, batch_operation=None, + batch_id=None, batch_status=None, text=None, + extension_elements=None, extension_attributes=None, etag=None, + deleted=None, extended_property=None, + group_membership_info=None): + PersonEntry.__init__(self, author=author, category=category, + content=content, atom_id=atom_id, link=link, + published=published, title=title, updated=updated, + organization=organization, phone_number=phone_number, + nickname=nickname, occupation=occupation, + gender=gender, birthday=birthday, + postal_address=postal_address, + structured_pstal_address=structured_pstal_address, + email=email, im=im, relation=relation, + user_defined_field=user_defined_field, + website=website, external_id=external_id, event=event, + batch_operation=batch_operation, batch_id=batch_id, + batch_status=batch_status, text=text, + extension_elements=extension_elements, + extension_attributes=extension_attributes, etag=etag) + self.deleted = deleted + self.extended_property = extended_property or [] + self.group_membership_info = group_membership_info or [] + + def GetPhotoLink(self): + for a_link in self.link: + if a_link.rel == PHOTO_LINK_REL: + return a_link + return None + + def GetPhotoEditLink(self): + for a_link in self.link: + if a_link.rel == PHOTO_EDIT_LINK_REL: + return a_link + return None + + +def ContactEntryFromString(xml_string): + return atom.CreateClassFromXMLString(ContactEntry, xml_string) + + +class ContactsFeed(gdata.BatchFeed, gdata.LinkFinder): + """A Google Contacts feed flavor of an Atom Feed.""" + + _children = gdata.BatchFeed._children.copy() + + _children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry', [ContactEntry]) + + def __init__(self, author=None, category=None, contributor=None, + generator=None, icon=None, atom_id=None, link=None, logo=None, + rights=None, subtitle=None, title=None, updated=None, + entry=None, total_results=None, start_index=None, + items_per_page=None, extension_elements=None, + extension_attributes=None, text=None): + gdata.BatchFeed.__init__(self, author=author, category=category, + contributor=contributor, generator=generator, + icon=icon, atom_id=atom_id, link=link, + logo=logo, rights=rights, subtitle=subtitle, + title=title, updated=updated, entry=entry, + total_results=total_results, + start_index=start_index, + items_per_page=items_per_page, + extension_elements=extension_elements, + extension_attributes=extension_attributes, + text=text) + + +def ContactsFeedFromString(xml_string): + return atom.CreateClassFromXMLString(ContactsFeed, xml_string) + + +class GroupEntry(gdata.BatchEntry): + """Represents a contact group.""" + _children = gdata.BatchEntry._children.copy() + _children['{%s}extendedProperty' % gdata.GDATA_NAMESPACE] = ( + 'extended_property', [gdata.ExtendedProperty]) + + def __init__(self, author=None, category=None, content=None, + contributor=None, atom_id=None, link=None, published=None, + rights=None, source=None, summary=None, control=None, + title=None, updated=None, + extended_property=None, batch_operation=None, batch_id=None, + 
batch_status=None, + extension_elements=None, extension_attributes=None, text=None): + gdata.BatchEntry.__init__(self, author=author, category=category, + content=content, + atom_id=atom_id, link=link, published=published, + batch_operation=batch_operation, + batch_id=batch_id, batch_status=batch_status, + title=title, updated=updated) + self.extended_property = extended_property or [] + + +def GroupEntryFromString(xml_string): + return atom.CreateClassFromXMLString(GroupEntry, xml_string) + + +class GroupsFeed(gdata.BatchFeed): + """A Google contact groups feed flavor of an Atom Feed.""" + _children = gdata.BatchFeed._children.copy() + _children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry', [GroupEntry]) + + +def GroupsFeedFromString(xml_string): + return atom.CreateClassFromXMLString(GroupsFeed, xml_string) + + +class ProfileEntry(PersonEntry): + """A Google Profiles flavor of an Atom Entry.""" + + +def ProfileEntryFromString(xml_string): + """Converts an XML string into a ProfileEntry object. + + Args: + xml_string: string The XML describing a Profile entry. + + Returns: + A ProfileEntry object corresponding to the given XML. + """ + return atom.CreateClassFromXMLString(ProfileEntry, xml_string) + + +class ProfilesFeed(gdata.BatchFeed, gdata.LinkFinder): + """A Google Profiles feed flavor of an Atom Feed.""" + + _children = gdata.BatchFeed._children.copy() + _children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry', [ProfileEntry]) + + def __init__(self, author=None, category=None, contributor=None, + generator=None, icon=None, atom_id=None, link=None, logo=None, + rights=None, subtitle=None, title=None, updated=None, + entry=None, total_results=None, start_index=None, + items_per_page=None, extension_elements=None, + extension_attributes=None, text=None): + gdata.BatchFeed.__init__(self, author=author, category=category, + contributor=contributor, generator=generator, + icon=icon, atom_id=atom_id, link=link, + logo=logo, rights=rights, subtitle=subtitle, + title=title, updated=updated, entry=entry, + total_results=total_results, + start_index=start_index, + items_per_page=items_per_page, + extension_elements=extension_elements, + extension_attributes=extension_attributes, + text=text) + + +def ProfilesFeedFromString(xml_string): + """Converts an XML string into a ProfilesFeed object. + + Args: + xml_string: string The XML describing a Profiles feed. + + Returns: + A ProfilesFeed object corresponding to the given XML. 
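+
+  Example (illustrative; assumes xml_string is the body of a profiles
+  feed response):
+
+    feed = ProfilesFeedFromString(xml_string)
+    for entry in feed.entry:
+      print entry.title.text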
+ """ + return atom.CreateClassFromXMLString(ProfilesFeed, xml_string) + diff --git a/gam/gdata/contacts/__init__.pyc b/gam/gdata/contacts/__init__.pyc new file mode 100644 index 0000000000000000000000000000000000000000..919580e35efd407769510f2e8c22e0c53e8bb7ed GIT binary patch literal 29275 zcmdsAU2I%eUjL8nII-h=If?Vtw3DW3owl*twApU6*|uYQ;<&NL&b8Y#)9%iW=Um&9 zjAy2EuajoiyDXdRE^J|uU|HaS2M|a+AV5ff5JE^u2*d*tLLi~#2?@aiLP9+7gy8r4 zpSd418D}SnJxPguPtM1^=lsvP|L^-h$KC(6tN;16Kbk8@{a-i!e+QSh)+1s>vPct> zO^E3d(=DYg$s%L(-IDDV(<9j)G20}&O-!$3d&O**>~=9bB)daQpJe;Q^h>s1%udPf z6f+>%0WrHIyGzV&$?g`jN3wgw?3L_ZG5aLDPt2fX2gU4{?0zu^Bzr(yjB-%SLCFq@ z8ItTFF^433SX_*FM9g8y9u;#$vd6?6mF!dE0-obyj!E`uF;7YM88OEtn-uf3WKW2B zMzYU}NlNylm=ltHPRz5CJtgL(WS>Lro!4c#f$~D<6-T2F%v;;I;{PIn2SN}WLSGa%q4M0bkZF*1jcX zI;foqYu^@=32JA<+IPgv1+`bg+LRcFh3uG^`Jj0rY#tZ$o;FX2c|T~r8aAiJe4x!2 z#as)Tvtje3m=Cr2l9-Qz<}ZcKQ)0fX&F_lI1q<<&u=%o>eDKg>SUWAIAa1u@pV1XE zVM9iYD{e+>m%`dvG0QAu=fte&b5}HVWC=uD4DXv4b6xLS5OX8ee<{4--lvu>U|%P!^{ugBKx_7WV}-L z^To24bU*RkvRAAi>sONLlDq1b{j0Tnwd&TAmBs6B!S|ADo@E9oa&pWTP8a_d|1#bw+_2*N)$^mHUSY*u&3hxu+Q~?vvO2mv zdiIr9&YfJTth(*pd8<;p;XU6@pA~<_t$EM2b8Bj9ro9_y<}>XLel2xrcDB7+=PzHI zOM55Vf%D20@8x#xo1C4UoKEK!Qqz~)o7dF%74L<1dVV)GHkNk0m)g0v?E0&&>le$* z-mCY$zd&RJHW3#-KU#B3C#x$Jzv6xF0iSYB(LZA!ZR@oc@@?YBQvMTf_`bl@@p#!S zyS04DJKfIBMEV@NeyyFmi)G)fmGh-D5yiIk##Sqf#gf||DpvE$Aek520msYxZmCrC z+a%kYm14EMRUv`vsgGo>%nzeA#bro`rm=w3sj4 z@XoZe+~AefeED`x!D$QdqG#F!Tp6VZc|Sjv_uNng??LStE^h{j3sDZCi?mBc zDD>cM)OxRPm+L#^dLQ*p=-Ks}{-6fhHpIRCO~_qn>I34;4|lWM^GFs}+-5PERN`F) ze*)Dc3nidRa;a8XP3HZ|>PQL+p&Tg?yxFpdmYi)! z$OLWTO(8e>o(Fz0dZSodygfQmxm7Mz^2Qro&aa+1J96&and)tSrBXiQ)e6bsS5J>F z8@!LvklW{S#d6WlyRr3W`7m4pcdk@}&;!duxQY@Jo zmWq=vQ~={njuI`DHe@Pg*WvKd%4lwLm&fVC2y?hRl1Fc%7Yj3(80_j!D7^&QSOcmP z6%^I=MK_{4tV&=k@9^G2(k{kUYN6tn4A*dEm@Dq^1v(@;hwny_ZhSVgkQ!f@kCxxz zo7~!!;%vW+%j0X~N-1*8)|NAP=>^p3zYQ0M26;CZhbykSQ>(?PcbBXa(}<1%uEjBw zewvqqz~ub=^488>b#cNS&Yf=sz%CU1(gRQa6rSB+^4ecCdyazHYi+CPJBM+@)=vEh zM!1eEnmSPpQ=f3F`5N@lvj3njpF)3w`A3~bi@@=)2vO5xT8U$Tqlzn9iG3(xCEl$p zPPks7R;*I4KIm$Uppy+&qp{;?MK~^2q_O!it;%u0auZjyD&$&MVIf9zK{l=&^9C|* zjAL4+06*FFZF0R=)dDCAC_+)NMfl{PLuh&dhBkBEa>4XyZX0r_X!`AyyKu1ek=KxZ zyl0WLT404ST2z94i(vbd%&?VI;Fa==$Wb`l%$L^OoCj%MURH^v74upyaE81%;5aVj z{yr#2RSZmX9qz_1S%y-_K={p$8qo|Uu!-?&QKKx{cS#Dz8%De1T_Og{o?NFt1 z_A}vBwwAL!gAuUq;&D(4v|i{$dlLQpff}G%2yi4KXoB>pXxxary^6-YKx25Fn|`-P zXiVwv@l|ht#^nf$i8R3Cvg%_nnreQ|FRr?(d1-NhC8DWYHnxe=LV*p6N)Lz37IlHl z7OR2G7ONisnO^`P&S@sZ=om6LHZ+q2?zNb*$*LjOVH6H6M1tFkQ%&v2VJ=Muz{~Es2U$%@QcqeHgj*fTkhi9&2EYT@)z|Tw4$H*s)TgpaBVDOCEckSTCEjV^EGQx zLHg!YQK#whwNu{LQ9&J5>5Pp&6+<;D>aD87`MYsDn@y-ZdE|TbdmlVlq_BLpArxivq`mMeykbx9QJpmPO zqX1(~FJ%}r+bPDF*&*x5Bhx4A$RmS(HFM0;!lkrhh1Scw?FSB&6-!wj`w8p{AWfHQb>F z>QLe8l!$x@WBocV?`b5>BJxm|{&WvuUcCvQYWiFSGNzO=Fz-DtWHD4ol*Q0|+!PiA zeiHxfk-PYGsu~~1Qmr~>=r71GidP-vhfd`$)|LCk?;DUaN^akPt5oJ# zd9hMkg_p@S%?O%6qarBD_Fe=HoEGFqtQd9FkI7Cu2T&;DBRlO7N-$s6 z3jSKnH4U^8k}h>1U71gixp!qA&T6J}eh zc}7I$Qw)c%Dwnq`O!+v4bJ!A~4i|=3$QLG1Xy6Mi48DXk@CC~tVlsAPv8gPe3qx45 zuaQAGZ=xZlE-QmjR>6i&%D#WacvfBRrtG8S%UEg6b&9;08{2+FQ!DX0WCgx~0so0$ zllB|g#vxdQ5NurbshN-@XpAXId>fg~A_=%K>IyS*bZbPGAu6D*-TS*k?jl-Ag&x_l zM{~9neN4?nq`;{ZS5=#>W4J>+QG2jj!dS|erp*QXsgr|W7_1#xUZ-a;r71GEKz~#d=OExW?J_43Y+{ zhXM-x1~fqK?GI=}ZH(-cDNXi~6QOvB_UhSeHi5#{PNMh$C{JqV2JSw~^q)ixkzeHc z2<|}s)0@)Jd!`4n>{OQjQ#=nErd-}c@Pac;k7H-g)6{0(yXvZEph1*)8V$ffNha9C` zALTLGKV;&saVdI`p>$IZ`D~%ER@HEr2c7&28%#dxF`7P^VI>^tF)ctEK)l(o05mEl z;rWj-I!Z>BXesxBTNe*Ys54J6ezrr z4tB3^na+;}Ue?059Q#hQ4htH0Z1}B(k}k&7o@Ma_l9)p|tm`BOuJE-|CQ6;X-8Vwu 
zF*3MmfQWd|Rxah9F6;eh&tDkq<;mg?~)!>HeiZ`>M97~m^Gv2c9MId^eP>o{jH>V&&gEW74n z(Jf(bPyJuVX>KDId^3a*?eV2xmrVowYW>`BZ#xw1ohHM1jHv_1$S|zyBpH5#Z~3!8 zhSWvZ2rS=aOwT}=&cl+Sli^!n$4YL_EOWTFgvgGf-)01ji0!*EVx#xxs=Mf6_eV%$ zs!5@~6YjORZw*eiX7}220Hc*|RR=jDI?m&kori`|t<>BlEBMrBdjieYxy;E0m-0~R zlno;A{%$ra0>evq!n34fJT(b32@Jfoj|0FT2aXEc#$NP9O*J`Ajg(p zHA#i=h2vK>(vTcCrb1eRF)EDIP_?`XLbG$b(>D0;@~sa55Q)*=eIg$oBjsHKkiSWA z2a#!_Jj6ac3f-~m4$vJJFT&P{^!_+TdPJG@&G>c$qKQ)7W`1~jxZngLa7z*-z!CLv z4lb)=gh%^dojr^*VCZg#<8KvlmEs~QBuue@g2skUO2EHtO@*Y(mO*r_sPGhTW^`Jo zqUJurSby4@9uGYtJ+kW#&?AEsmDVCs{M#5Q5?dy)MGK)+p_+9Svo-0zGKQhB+ptC2)`QcL3=EbLgqR;%Si5O{C*aqnRCK6KQ+SPOPy z;c0C_IA0jokNBmX^;rKfdLOa=T8BXYT|vhP^w&B>`R@+u5aq9RyTsiS8)JJ>KDra6D50_d#*@$6AK!!ygj&K&%?)HYrp`#61|RK3cCn zChkzI8mBqw-N(f}6svyPR@WE&aInkmFi&@KktQRkp5^&YTk?BC?&3tLql~OP36${Y zRO1fkVE=ry8(HxunJ=5|A6mtzGRj{9Ni>Vm+yn_4b3HbD4EP55Y$q-Cbb=xx)ZXT68*mJic3< zuu({8uF9zO@DWDY^O5L=hS*r=ZI0bnzsZIXF(_O&*7LdIs>5KR5WK+iug+GwBC(-E zeW|%6I#a_K(`GNF!$3Qa#LQj;cM8ja0hcS9aKVDQDdT*J&xHU|-Bc%xi*3@OqRB#m z41t&Wr1=m(xcC~{cU_G+Zd}Ac4r<%+dn&#*AhHiOAj0?HWI^~IOjGRs0On%r2hKFZ z9uS;q*00fkJ8d-J&cu43R6h)kS3ocQQoSA&(H-uC*@zwof|$TvnnAz2HG{7ANcC5P zM=-`-ss46Q#9;fR`X@mVy$wqBuY)4`+%MIC42tOafK>l2D5C#^QvL6sxJTB9q}rQU zwnsngmGwhXJs1@C$@(#cXHeFklIoeDe!r|Am+FN?(LQxR)}PkfaP%9!P{h)VPt_p5 zhC>@_`f0QP|Nn9Yn~A9A@Z1KzB<6W$0$!`Wuu`#nLpR4%*A`1fZ^bow^1@=?FRbJ$ zI7Z5z<3VHH*5NshW(&Y+f6dcR=eik5yI-r~qzKmx>&6nIl746yV~Fr>RWSA&t&HL-j-C$B;vdW~) zq{75w0(2DRH=6`dmHmCpq{gJ1Ne`24OcXw3w!#mrYdZkZ>z(BYeaCRp1k}pJKw_6} zJhjoGJeZ^TN9(#X!{kiOW)EMYv&MwFfDHn8i8*3|^A3}7CI^|MnM^Wym&t2P7y#|i z&*LxvR6jwfW89(7&7lLpq1DmRt+~{w|AYDsL89tC*mbn)XyQQqr#o>7CH`OslD)bI z6Z@e1^kFr*ABh(Zi5?4G4Ga3+RogKdvi6+$&sp`a_AE4kIbeR3BEM&i)Qez}Wj zVXx#)&FrNLK+9M+N2Zz%*GVpw@;9LiRF83=PBe8jsS%?!V#d5LO22x z!UsoyLgwHIP>32F{s{?#!#^QdaQG+W36A}wxICib@~DbSNEI9b`V^%K4*Y~L!GWKU zA$Jl_C)S@)Q33&+lQ(YH&D)0wD=Ex}*k-PRRVn{c=@t20Rwg2|Y-y+&zJus?l?oW*262(^Hwt zIc&{U&zYXwqp!(oGdb!f!+2mT7DHM0r0XNB*{@7vAben`$HXP}V(L4#YV!1s*vY3S zrWQ6@0q3j1v~>YC_v8WHtQgEajiVPif46!5nPJ*C&FX;CSfmN$c#EnCst)8?B%{_L z(sjNYa#z(GxB!+)zsB6xkwh9dMyi!;Y&SktY?$QHRkrF$KlD=*HZs^rDX8@<7V16_oJ2oEE+sJ!B91h^i(J zlOkpp2U+XpOR{cn1WB`gfRpwBaSJVh6B$v%16ED#c4UgGH;OakMO0Ej{T*bipnfo; zbMs{Noy7ixvO~fZ6QSx!qjjby_`!6eQ4J3SEiaVFHj8ME>=tXbRhBEtA#26u6`TjS z1Gjl=i)w_$Y?_}$wQfE4EA&2K$NX9ecavE4pcd#(2Ko;jA_xar^>WaFay(x%X0CLj zQ4b|l8dvbtmg5^x8Xo41nnmlkSa*lXT_%y$^=%gUI-Kt^iR>@J>iiUwpJwtsCMq;k zt}7I%+6p zrOy~H?^{URR+d)-V}p-&13&ab^ENyP_J$Ms`)U3tdsD;Ot%3%@vAkqHNutNW$w@U? 
zMpRYZ&?qTzO1wfh8m83-M}q?mPA`q^;O1GlyI@Vg`hYb7>jTyVbTwEUus(Ld7JwUV zcY_;E85ZS7jA6BOqcnLuLaMfEpOVQMy0J2!TJA6>)66tVtm8Y2@eB3RDEsb3gI8Ir z;p9(xvAnfaJt~KJ<)7GA9FA)h|0bw9$W49#1hr9B+mF@NYPw)D!@3mY*0GK1(egw} z)i-#H>QfzJ8Q;f%-zU`M=Hz&TUB|&P=<4W&hSdE?As+0!15AUUN9-VC8Nb1K;c8$R zjk*wVV+@yigMK{AXyO-jT?nL3Ipw{MB-CUY$o&5Lj6)gNsLHfF21OC(Q`{R9J6vKF zJI>D_7fBY)ZL)az1&r`pTvL*y;>sgo2fOPeb~Fi;-{DMf5j#k6@y}2NR`Xv1*BCA( z>NtMjowH5P0}9`Hy$ihU4cuE+p=#sy_5kUTaFw+MLY4-jMkB(Q$H{mve+$txr zmmrA`S2&}RM`KqY%Hm5_|GN_-{r$;gsw9nvt@GOH0*p)qcY?0lCJtbAE z*FGEINh0th9JfzXoKF3sg-xQ8%HH-Uj z%=l0b!7;fyo#mKKV){oB#~jwrzBK;S0?g&?SMOZTA`NLvJt92t@zU%}IxJkBbuQ~s zC+a(yn#tjB2L)ODbs)VDwfgsef`a~qAN4e$<;oRxmLM1VD?LFE_&Yr69zm=9OFQa% zK`E17m`SG>rZSVxZNTP`o$H-&0%y{>@stxQT}*vo&pFh#$#f>|q^5PA_HLaJC0SI= z@tLSFKf5 z_+^c8u}_W9&djAU*K+!F)Y;VhM7%SdtAa+%uunRviK*Gx{4v{feBBoouH_acCNzpC z{efN2+|)#F)|pIYrn0Gpso9J=xPfC@01O$fq5TNXh$MT&4n>_qrr3PErbZ~7V#7;J zUSo2O$s0`cJGp)_e}_d1%H}WY)2y3ga)rqPllPc>z(hZ)f5_ZNOuo!SJ@t9!)|eES z++^|-Og=@Tua}CWuUa)^S zmb9%5^{eRoW+V+0B@HBcovU{jE{MkRWf9T}UK9mm* x9maJ~TlNm^9UAQG(Q63*AE;lwL;H_R44v=mdFE(e&k?j7=|xLV-@r3R{tu^`J~aRU literal 0 HcmV?d00001 diff --git a/gam/gdata/contacts/client.py b/gam/gdata/contacts/client.py new file mode 100755 index 00000000000..ef712ce709b --- /dev/null +++ b/gam/gdata/contacts/client.py @@ -0,0 +1,474 @@ +#!/usr/bin/env python +# +# Copyright (C) 2009 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from types import ListType, DictionaryType + + +"""Contains a client to communicate with the Contacts servers. + +For documentation on the Contacts API, see: +http://code.google.com/apis/contatcs/ +""" + +__author__ = 'vinces1979@gmail.com (Vince Spicer)' + + +import gdata.client +import gdata.contacts.data +import atom.data +import atom.http_core +import gdata.gauth + + +class ContactsClient(gdata.client.GDClient): + api_version = '3' + auth_service = 'cp' + server = "www.google.com" + contact_list = "default" + auth_scopes = gdata.gauth.AUTH_SCOPES['cp'] + + def get_feed_uri(self, kind='contacts', contact_list=None, projection='full', + scheme="http"): + """Builds a feed URI. + + Args: + kind: The type of feed to return, typically 'groups' or 'contacts'. + Default value: 'contacts'. + contact_list: The contact list to return a feed for. + Default value: self.contact_list. + projection: The projection to apply to the feed contents, for example + 'full', 'base', 'base/12345', 'full/batch'. Default value: 'full'. + scheme: The URL scheme such as 'http' or 'https', None to return a + relative URI without hostname. + + Returns: + A feed URI using the given kind, contact list, and projection. + Example: '/m8/feeds/contacts/default/full'. 
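The docstring above fully determines how feed URIs are assembled from kind, contact list, projection and scheme. A minimal sketch of the resulting strings, assuming the gam/gdata packages added by this patch are importable and using the class defaults (server 'www.google.com', contact_list 'default'):

    from gdata.contacts.client import ContactsClient

    client = ContactsClient()

    # Default kind/projection: the authenticated user's full contacts feed.
    print(client.get_feed_uri())
    # -> 'http://www.google.com/m8/feeds/contacts/default/full'

    # Profile feeds are rooted under domain/<contact_list>.
    print(client.get_feed_uri(kind='profiles', contact_list='example.com'))
    # -> 'http://www.google.com/m8/feeds/profiles/domain/example.com/full'

    # scheme=None drops the scheme://host prefix and yields a relative URI.
    print(client.get_feed_uri(kind='groups', scheme=None))
    # -> '/m8/feeds/groups/default/full'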
+ """ + contact_list = contact_list or self.contact_list + if kind == 'profiles': + contact_list = 'domain/%s' % contact_list + prefix = scheme and '%s://%s' % (scheme, self.server) or '' + return '%s/m8/feeds/%s/%s/%s' % (prefix, kind, contact_list, projection) + + GetFeedUri = get_feed_uri + + def get_contact(self, uri, desired_class=gdata.contacts.data.ContactEntry, + auth_token=None, **kwargs): + return self.get_feed(uri, auth_token=auth_token, + desired_class=desired_class, **kwargs) + + + GetContact = get_contact + + + def create_contact(self, new_contact, insert_uri=None, auth_token=None, **kwargs): + """Adds an new contact to Google Contacts. + + Args: + new_contact: atom.Entry or subclass A new contact which is to be added to + Google Contacts. + insert_uri: the URL to post new contacts to the feed + url_params: dict (optional) Additional URL parameters to be included + in the insertion request. + escape_params: boolean (optional) If true, the url_parameters will be + escaped before they are included in the request. + + Returns: + On successful insert, an entry containing the contact created + On failure, a RequestError is raised of the form: + {'status': HTTP status code from server, + 'reason': HTTP reason from the server, + 'body': HTTP body of the server's response} + """ + insert_uri = insert_uri or self.GetFeedUri() + return self.Post(new_contact, insert_uri, + auth_token=auth_token, **kwargs) + + CreateContact = create_contact + + def add_contact(self, new_contact, insert_uri=None, auth_token=None, + billing_information=None, birthday=None, calendar_link=None, **kwargs): + """Adds an new contact to Google Contacts. + + Args: + new_contact: atom.Entry or subclass A new contact which is to be added to + Google Contacts. + insert_uri: the URL to post new contacts to the feed + url_params: dict (optional) Additional URL parameters to be included + in the insertion request. + escape_params: boolean (optional) If true, the url_parameters will be + escaped before they are included in the request. 
+ + Returns: + On successful insert, an entry containing the contact created + On failure, a RequestError is raised of the form: + {'status': HTTP status code from server, + 'reason': HTTP reason from the server, + 'body': HTTP body of the server's response} + """ + + contact = gdata.contacts.data.ContactEntry() + + if billing_information is not None: + if not isinstance(billing_information, gdata.contacts.data.BillingInformation): + billing_information = gdata.contacts.data.BillingInformation(text=billing_information) + + contact.billing_information = billing_information + + if birthday is not None: + if not isinstance(birthday, gdata.contacts.data.Birthday): + birthday = gdata.contacts.data.Birthday(when=birthday) + + contact.birthday = birthday + + if calendar_link is not None: + if type(calendar_link) is not ListType: + calendar_link = [calendar_link] + + for link in calendar_link: + if not isinstance(link, gdata.contacts.data.CalendarLink): + if type(link) is not DictionaryType: + raise TypeError, "calendar_link Requires dictionary not %s" % type(link) + + link = gdata.contacts.data.CalendarLink( + rel=link.get("rel", None), + label=link.get("label", None), + primary=link.get("primary", None), + href=link.get("href", None), + ) + + contact.calendar_link.append(link) + + insert_uri = insert_uri or self.GetFeedUri() + return self.Post(contact, insert_uri, + auth_token=auth_token, **kwargs) + + AddContact = add_contact + + def get_contacts(self, desired_class=gdata.contacts.data.ContactsFeed, + auth_token=None, **kwargs): + """Obtains a feed with the contacts belonging to the current user. + + Args: + auth_token: An object which sets the Authorization HTTP header in its + modify_request method. Recommended classes include + gdata.gauth.ClientLoginToken and gdata.gauth.AuthSubToken + among others. Represents the current user. Defaults to None + and if None, this method will look for a value in the + auth_token member of SpreadsheetsClient. + desired_class: class descended from atom.core.XmlElement to which a + successful response should be converted. If there is no + converter function specified (desired_class=None) then the + desired_class will be used in calling the + atom.core.parse function. If neither + the desired_class nor the converter is specified, an + HTTP reponse object will be returned. Defaults to + gdata.spreadsheets.data.SpreadsheetsFeed. 
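add_contact, by contrast, builds its own ContactEntry and wraps plain Python values in the matching gdata.contacts.data elements; note that the new_contact argument is never consulted in the body shown above. A sketch under the same authenticated-client assumption (Python 2, like the rest of the module, since the type checks rely on the types module):

    import gdata.contacts.client

    client = gdata.contacts.client.ContactsClient()
    # Authentication is assumed to have happened already.

    created = client.add_contact(
        None,                               # ignored by the code above
        billing_information='account 42',   # wrapped in BillingInformation(text=...)
        birthday='1815-12-10',              # wrapped in Birthday(when=...)
        calendar_link={                     # a dict (or list of dicts) becomes a CalendarLink
            'rel': 'work',
            'href': 'https://www.google.com/calendar/feeds/example',
            'primary': 'true'})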
+ """ + return self.get_feed(self.GetFeedUri(), auth_token=auth_token, + desired_class=desired_class, **kwargs) + + GetContacts = get_contacts + + def get_group(self, uri=None, desired_class=gdata.contacts.data.GroupEntry, + auth_token=None, **kwargs): + """ Get a single groups details + Args: + uri: the group uri or id + """ + return self.get(uri, desired_class=desired_class, auth_token=auth_token, **kwargs) + + GetGroup = get_group + + def get_groups(self, uri=None, desired_class=gdata.contacts.data.GroupsFeed, + auth_token=None, **kwargs): + uri = uri or self.GetFeedUri('groups') + return self.get_feed(uri, desired_class=desired_class, auth_token=auth_token, **kwargs) + + GetGroups = get_groups + + def create_group(self, new_group, insert_uri=None, url_params=None, + desired_class=None): + insert_uri = insert_uri or self.GetFeedUri('groups') + return self.Post(new_group, insert_uri, url_params=url_params, + desired_class=desired_class) + + CreateGroup = create_group + + def update_group(self, edit_uri, updated_group, url_params=None, + escape_params=True, desired_class=None): + return self.Put(updated_group, self._CleanUri(edit_uri), + url_params=url_params, + escape_params=escape_params, + desired_class=desired_class) + + UpdateGroup = update_group + + def delete_group(self, edit_uri, extra_headers=None, + url_params=None, escape_params=True): + return self.Delete(self._CleanUri(edit_uri), + url_params=url_params, escape_params=escape_params) + + DeleteGroup = delete_group + + def change_photo(self, media, contact_entry_or_url, content_type=None, + content_length=None): + """Change the photo for the contact by uploading a new photo. + + Performs a PUT against the photo edit URL to send the binary data for the + photo. + + Args: + media: filename, file-like-object, or a gdata.MediaSource object to send. + contact_entry_or_url: ContactEntry or str If it is a ContactEntry, this + method will search for an edit photo link URL and + perform a PUT to the URL. + content_type: str (optional) the mime type for the photo data. This is + necessary if media is a file or file name, but if media + is a MediaSource object then the media object can contain + the mime type. If media_type is set, it will override the + mime type in the media object. + content_length: int or str (optional) Specifying the content length is + only required if media is a file-like object. If media + is a filename, the length is determined using + os.path.getsize. If media is a MediaSource object, it is + assumed that it already contains the content length. + """ + if isinstance(contact_entry_or_url, gdata.contacts.data.ContactEntry): + url = contact_entry_or_url.GetPhotoEditLink().href + else: + url = contact_entry_or_url + if isinstance(media, gdata.MediaSource): + payload = media + # If the media object is a file-like object, then use it as the file + # handle in the in the MediaSource. + elif hasattr(media, 'read'): + payload = gdata.MediaSource(file_handle=media, + content_type=content_type, content_length=content_length) + # Assume that the media object is a file name. + else: + payload = gdata.MediaSource(content_type=content_type, + content_length=content_length, file_path=media) + return self.Put(payload, url) + + ChangePhoto = change_photo + + def get_photo(self, contact_entry_or_url): + """Retrives the binary data for the contact's profile photo as a string. + + Args: + contact_entry_or_url: a gdata.contacts.ContactEntry objecr or a string + containing the photo link's URL. 
If the contact entry does not + contain a photo link, the image will not be fetched and this method + will return None. + """ + # TODO: add the ability to write out the binary image data to a file, + # reading and writing a chunk at a time to avoid potentially using up + # large amounts of memory. + url = None + if isinstance(contact_entry_or_url, gdata.contacts.data.ContactEntry): + photo_link = contact_entry_or_url.GetPhotoLink() + if photo_link: + url = photo_link.href + else: + url = contact_entry_or_url + if url: + return self.Get(url, desired_class=str) + else: + return None + + GetPhoto = get_photo + + def delete_photo(self, contact_entry_or_url): + url = None + if isinstance(contact_entry_or_url, gdata.contacts.data.ContactEntry): + url = contact_entry_or_url.GetPhotoEditLink().href + else: + url = contact_entry_or_url + if url: + self.Delete(url) + + DeletePhoto = delete_photo + + def get_profiles_feed(self, uri=None): + """Retrieves a feed containing all domain's profiles. + + Args: + uri: string (optional) the URL to retrieve the profiles feed, + for example /m8/feeds/profiles/default/full + + Returns: + On success, a ProfilesFeed containing the profiles. + On failure, raises a RequestError. + """ + + uri = uri or self.GetFeedUri('profiles') + return self.Get(uri, + desired_class=gdata.contacts.data.ProfilesFeedFromString) + + GetProfilesFeed = get_profiles_feed + + def get_profile(self, uri): + """Retrieves a domain's profile for the user. + + Args: + uri: string the URL to retrieve the profiles feed, + for example /m8/feeds/profiles/default/full/username + + Returns: + On success, a ProfileEntry containing the profile for the user. + On failure, raises a RequestError + """ + return self.Get(uri, + desired_class=gdata.contacts.data.ProfileEntryFromString) + + GetProfile = get_profile + + def update_profile(self, edit_uri, updated_profile, auth_token=None, **kwargs): + """Updates an existing profile. + + Args: + edit_uri: string The edit link URI for the element being updated + updated_profile: string atom.Entry or subclass containing + the Atom Entry which will replace the profile which is + stored at the edit_url. + url_params: dict (optional) Additional URL parameters to be included + in the update request. + escape_params: boolean (optional) If true, the url_params will be + escaped before they are included in the request. + + Returns: + On successful update, a httplib.HTTPResponse containing the server's + response to the PUT request. + On failure, raises a RequestError. + """ + return self.Put(updated_profile, self._CleanUri(edit_uri), + desired_class=gdata.contacts.data.ProfileEntryFromString) + + UpdateProfile = update_profile + + def execute_batch(self, batch_feed, url, desired_class=None): + """Sends a batch request feed to the server. + + Args: + batch_feed: gdata.contacts.ContactFeed A feed containing batch + request entries. Each entry contains the operation to be performed + on the data contained in the entry. For example an entry with an + operation type of insert will be used as if the individual entry + had been inserted. + url: str The batch URL to which these operations should be applied. + converter: Function (optional) The function used to convert the server's + response to an object. + + Returns: + The results of the batch request's execution on the server. If the + default converter is used, this is stored in a ContactsFeed. 
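Profile feeds only exist for Google Apps domains, so contact_list has to name the domain before GetFeedUri('profiles') yields a useful URI. A sketch of the two profile getters documented above, assuming an authenticated client and a hypothetical domain example.com:

    import gdata.contacts.client

    client = gdata.contacts.client.ContactsClient()
    client.contact_list = 'example.com'   # profiles live under domain/<contact_list>
    # Authentication is assumed.

    feed = client.get_profiles_feed()     # /m8/feeds/profiles/domain/example.com/full
    for profile in feed.entry:
        print(profile.title.text)

    one = client.get_profile(
        'https://www.google.com/m8/feeds/profiles/domain/example.com/full/username')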
+ """ + return self.Post(batch_feed, url, desired_class=desired_class) + + ExecuteBatch = execute_batch + + def execute_batch_profiles(self, batch_feed, url, + desired_class=gdata.contacts.data.ProfilesFeedFromString): + """Sends a batch request feed to the server. + + Args: + batch_feed: gdata.profiles.ProfilesFeed A feed containing batch + request entries. Each entry contains the operation to be performed + on the data contained in the entry. For example an entry with an + operation type of insert will be used as if the individual entry + had been inserted. + url: string The batch URL to which these operations should be applied. + converter: Function (optional) The function used to convert the server's + response to an object. The default value is + gdata.profiles.ProfilesFeedFromString. + + Returns: + The results of the batch request's execution on the server. If the + default converter is used, this is stored in a ProfilesFeed. + """ + return self.Post(batch_feed, url, desired_class=desired_class) + + ExecuteBatchProfiles = execute_batch_profiles + + +class ContactsQuery(gdata.client.Query): + """ + Create a custom Contacts Query + + Full specs can be found at: U{Contacts query parameters reference + } + """ + + def __init__(self, feed=None, group=None, orderby=None, showdeleted=None, + sortorder=None, requirealldeleted=None, **kwargs): + """ + @param max_results: The maximum number of entries to return. If you want + to receive all of the contacts, rather than only the default maximum, you + can specify a very large number for max-results. + @param start-index: The 1-based index of the first result to be retrieved. + @param updated-min: The lower bound on entry update dates. + @param group: Constrains the results to only the contacts belonging to the + group specified. Value of this parameter specifies group ID + @param orderby: Sorting criterion. The only supported value is + lastmodified. + @param showdeleted: Include deleted contacts in the returned contacts feed + @pram sortorder: Sorting order direction. Can be either ascending or + descending. + @param requirealldeleted: Only relevant if showdeleted and updated-min + are also provided. It dictates the behavior of the server in case it + detects that placeholders of some entries deleted since the point in + time specified as updated-min may have been lost. + """ + gdata.client.Query.__init__(self, **kwargs) + self.group = group + self.orderby = orderby + self.sortorder = sortorder + self.showdeleted = showdeleted + + def modify_request(self, http_request): + if self.group: + gdata.client._add_query_param('group', self.group, http_request) + if self.orderby: + gdata.client._add_query_param('orderby', self.orderby, http_request) + if self.sortorder: + gdata.client._add_query_param('sortorder', self.sortorder, http_request) + if self.showdeleted: + gdata.client._add_query_param('showdeleted', self.showdeleted, http_request) + gdata.client.Query.modify_request(self, http_request) + + ModifyRequest = modify_request + + +class ProfilesQuery(gdata.client.Query): + def __init__(self, feed=None): + self.feed = feed or 'http://www.google.com/m8/feeds/profiles/default/full' + + + def _CleanUri(self, uri): + """Sanitizes a feed URI. + + Args: + uri: The URI to sanitize, can be relative or absolute. + + Returns: + The given URI without its http://server prefix, if any. + Keeps the leading slash of the URI. 
+ """ + url_prefix = 'http://%s' % self.server + if uri.startswith(url_prefix): + uri = uri[len(url_prefix):] + return uri diff --git a/gam/gdata/contacts/data.py b/gam/gdata/contacts/data.py new file mode 100755 index 00000000000..782a05dae33 --- /dev/null +++ b/gam/gdata/contacts/data.py @@ -0,0 +1,474 @@ +#!/usr/bin/env python +# +# Copyright (C) 2009 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Data model classes for parsing and generating XML for the Contacts API.""" + + +__author__ = 'vinces1979@gmail.com (Vince Spicer)' + + +import atom.core +import gdata +import gdata.data + + +PHOTO_LINK_REL = 'http://schemas.google.com/contacts/2008/rel#photo' +PHOTO_EDIT_LINK_REL = 'http://schemas.google.com/contacts/2008/rel#edit-photo' + +EXTERNAL_ID_ORGANIZATION = 'organization' + +RELATION_MANAGER = 'manager' + +CONTACTS_NAMESPACE = 'http://schemas.google.com/contact/2008' +CONTACTS_TEMPLATE = '{%s}%%s' % CONTACTS_NAMESPACE + + +class BillingInformation(atom.core.XmlElement): + """ + gContact:billingInformation + Specifies billing information of the entity represented by the contact. The element cannot be repeated. + """ + + _qname = CONTACTS_TEMPLATE % 'billingInformation' + + +class Birthday(atom.core.XmlElement): + """ + Stores birthday date of the person represented by the contact. The element cannot be repeated. + """ + + _qname = CONTACTS_TEMPLATE % 'birthday' + when = 'when' + + +class CalendarLink(atom.core.XmlElement): + """ + Storage for URL of the contact's calendar. The element can be repeated. + """ + + _qname = CONTACTS_TEMPLATE % 'calendarLink' + rel = 'rel' + label = 'label' + primary = 'primary' + href = 'href' + + +class DirectoryServer(atom.core.XmlElement): + """ + A directory server associated with this contact. + May not be repeated. + """ + + _qname = CONTACTS_TEMPLATE % 'directoryServer' + + +class Event(atom.core.XmlElement): + """ + These elements describe events associated with a contact. + They may be repeated + """ + + _qname = CONTACTS_TEMPLATE % 'event' + label = 'label' + rel = 'rel' + when = gdata.data.When + + +class ExternalId(atom.core.XmlElement): + """ + Describes an ID of the contact in an external system of some kind. + This element may be repeated. + """ + + _qname = CONTACTS_TEMPLATE % 'externalId' + + +def ExternalIdFromString(xml_string): + return atom.core.parse(ExternalId, xml_string) + + +class Gender(atom.core.XmlElement): + """ + Specifies the gender of the person represented by the contact. + The element cannot be repeated. + """ + + _qname = CONTACTS_TEMPLATE % 'directoryServer' + value = 'value' + + +class Hobby(atom.core.XmlElement): + """ + Describes an ID of the contact in an external system of some kind. + This element may be repeated. + """ + + _qname = CONTACTS_TEMPLATE % 'hobby' + + +class Initials(atom.core.XmlElement): + """ Specifies the initials of the person represented by the contact. The + element cannot be repeated. 
""" + + _qname = CONTACTS_TEMPLATE % 'initials' + + +class Jot(atom.core.XmlElement): + """ + Storage for arbitrary pieces of information about the contact. Each jot + has a type specified by the rel attribute and a text value. + The element can be repeated. + """ + + _qname = CONTACTS_TEMPLATE % 'jot' + rel = 'rel' + + +class Language(atom.core.XmlElement): + """ + Specifies the preferred languages of the contact. + The element can be repeated. + + The language must be specified using one of two mutually exclusive methods: + using the freeform @label attribute, or using the @code attribute, whose value + must conform to the IETF BCP 47 specification. + """ + + _qname = CONTACTS_TEMPLATE % 'language' + code = 'code' + label = 'label' + + +class MaidenName(atom.core.XmlElement): + """ + Specifies maiden name of the person represented by the contact. + The element cannot be repeated. + """ + + _qname = CONTACTS_TEMPLATE % 'maidenName' + + +class Mileage(atom.core.XmlElement): + """ + Specifies the mileage for the entity represented by the contact. + Can be used for example to document distance needed for reimbursement + purposes. The value is not interpreted. The element cannot be repeated. + """ + + _qname = CONTACTS_TEMPLATE % 'mileage' + + +class NickName(atom.core.XmlElement): + """ + Specifies the nickname of the person represented by the contact. + The element cannot be repeated. + """ + + _qname = CONTACTS_TEMPLATE % 'nickname' + + +class Occupation(atom.core.XmlElement): + """ + Specifies the occupation/profession of the person specified by the contact. + The element cannot be repeated. + """ + + _qname = CONTACTS_TEMPLATE % 'occupation' + + +class Priority(atom.core.XmlElement): + """ + Classifies importance of the contact into 3 categories: + * Low + * Normal + * High + + The priority element cannot be repeated. + """ + + _qname = CONTACTS_TEMPLATE % 'priority' + + +class Relation(atom.core.XmlElement): + """ + This element describe another entity (usually a person) that is in a + relation of some kind with the contact. + """ + + _qname = CONTACTS_TEMPLATE % 'relation' + rel = 'rel' + label = 'label' + + +class Sensitivity(atom.core.XmlElement): + """ + Classifies sensitivity of the contact into the following categories: + * Confidential + * Normal + * Personal + * Private + + The sensitivity element cannot be repeated. + """ + + _qname = CONTACTS_TEMPLATE % 'sensitivity' + rel = 'rel' + + +class UserDefinedField(atom.core.XmlElement): + """ + Represents an arbitrary key-value pair attached to the contact. + """ + + _qname = CONTACTS_TEMPLATE % 'userDefinedField' + key = 'key' + value = 'value' + + +def UserDefinedFieldFromString(xml_string): + return atom.core.parse(UserDefinedField, xml_string) + + +class Website(atom.core.XmlElement): + """ + Describes websites associated with the contact, including links. + May be repeated. + """ + + _qname = CONTACTS_TEMPLATE % 'website' + + href = 'href' + label = 'label' + primary = 'primary' + rel = 'rel' + + +def WebsiteFromString(xml_string): + return atom.core.parse(Website, xml_string) + + +class HouseName(atom.core.XmlElement): + """ + Used in places where houses or buildings have names (and + not necessarily numbers), eg. "The Pillars". + """ + + _qname = CONTACTS_TEMPLATE % 'housename' + + +class Street(atom.core.XmlElement): + """ + Can be street, avenue, road, etc. This element also includes the house + number and room/apartment/flat/floor number. 
+ """ + + _qname = CONTACTS_TEMPLATE % 'street' + + +class POBox(atom.core.XmlElement): + """ + Covers actual P.O. boxes, drawers, locked bags, etc. This is usually but not + always mutually exclusive with street + """ + + _qname = CONTACTS_TEMPLATE % 'pobox' + + +class Neighborhood(atom.core.XmlElement): + """ + This is used to disambiguate a street address when a city contains more than + one street with the same name, or to specify a small place whose mail is + routed through a larger postal town. In China it could be a county or a + minor city. + """ + + _qname = CONTACTS_TEMPLATE % 'neighborhood' + + +class City(atom.core.XmlElement): + """ + Can be city, village, town, borough, etc. This is the postal town and not + necessarily the place of residence or place of business. + """ + + _qname = CONTACTS_TEMPLATE % 'city' + + +class SubRegion(atom.core.XmlElement): + """ + Handles administrative districts such as U.S. or U.K. counties that are not + used for mail addressing purposes. Subregion is not intended for + delivery addresses. + """ + + _qname = CONTACTS_TEMPLATE % 'subregion' + + +class Region(atom.core.XmlElement): + """ + A state, province, county (in Ireland), Land (in Germany), + departement (in France), etc. + """ + + _qname = CONTACTS_TEMPLATE % 'region' + + +class PostalCode(atom.core.XmlElement): + """ + Postal code. Usually country-wide, but sometimes specific to the + city (e.g. "2" in "Dublin 2, Ireland" addresses). + """ + + _qname = CONTACTS_TEMPLATE % 'postcode' + + +class Country(atom.core.XmlElement): + """ The name or code of the country. """ + + _qname = CONTACTS_TEMPLATE % 'country' + + +class PersonEntry(gdata.data.BatchEntry): + """Represents a google contact""" + + billing_information = BillingInformation + birthday = Birthday + calendar_link = [CalendarLink] + directory_server = DirectoryServer + event = [Event] + external_id = [ExternalId] + gender = Gender + hobby = [Hobby] + initals = Initials + jot = [Jot] + language= [Language] + maiden_name = MaidenName + mileage = Mileage + nickname = NickName + occupation = Occupation + priority = Priority + relation = [Relation] + sensitivity = Sensitivity + user_defined_field = [UserDefinedField] + website = [Website] + + name = gdata.data.Name + phone_number = [gdata.data.PhoneNumber] + organization = gdata.data.Organization + postal_address = [gdata.data.PostalAddress] + email = [gdata.data.Email] + im = [gdata.data.Im] + structured_postal_address = [gdata.data.StructuredPostalAddress] + extended_property = [gdata.data.ExtendedProperty] + + +class Deleted(atom.core.XmlElement): + """If present, indicates that this contact has been deleted.""" + _qname = gdata.GDATA_TEMPLATE % 'deleted' + + +class GroupMembershipInfo(atom.core.XmlElement): + """ + Identifies the group to which the contact belongs or belonged. + The group is referenced by its id. 
+ """ + + _qname = CONTACTS_TEMPLATE % 'groupMembershipInfo' + + href = 'href' + deleted = 'deleted' + + +class ContactEntry(PersonEntry): + """A Google Contacts flavor of an Atom Entry.""" + + deleted = Deleted + group_membership_info = [GroupMembershipInfo] + organization = gdata.data.Organization + + def GetPhotoLink(self): + for a_link in self.link: + if a_link.rel == PHOTO_LINK_REL: + return a_link + return None + + def GetPhotoEditLink(self): + for a_link in self.link: + if a_link.rel == PHOTO_EDIT_LINK_REL: + return a_link + return None + + +class ContactsFeed(gdata.data.BatchFeed): + """A collection of Contacts.""" + entry = [ContactEntry] + + +class SystemGroup(atom.core.XmlElement): + """The contacts systemGroup element. + + When used within a contact group entry, indicates that the group in + question is one of the predefined system groups.""" + + _qname = CONTACTS_TEMPLATE % 'systemGroup' + id = 'id' + + +class GroupEntry(gdata.data.BatchEntry): + """Represents a contact group.""" + extended_property = [gdata.data.ExtendedProperty] + system_group = SystemGroup + + +class GroupsFeed(gdata.data.BatchFeed): + """A Google contact groups feed flavor of an Atom Feed.""" + entry = [GroupEntry] + + +class ProfileEntry(PersonEntry): + """A Google Profiles flavor of an Atom Entry.""" + + +def ProfileEntryFromString(xml_string): + """Converts an XML string into a ProfileEntry object. + + Args: + xml_string: string The XML describing a Profile entry. + + Returns: + A ProfileEntry object corresponding to the given XML. + """ + return atom.core.parse(ProfileEntry, xml_string) + + +class ProfilesFeed(gdata.data.BatchFeed): + """A Google Profiles feed flavor of an Atom Feed.""" + _qname = atom.data.ATOM_TEMPLATE % 'feed' + entry = [ProfileEntry] + + +def ProfilesFeedFromString(xml_string): + """Converts an XML string into a ProfilesFeed object. + + Args: + xml_string: string The XML describing a Profiles feed. + + Returns: + A ProfilesFeed object corresponding to the given XML. + """ + return atom.core.parse(ProfilesFeed, xml_string) + + diff --git a/gam/gdata/contacts/service.py b/gam/gdata/contacts/service.py new file mode 100755 index 00000000000..a9270177e4d --- /dev/null +++ b/gam/gdata/contacts/service.py @@ -0,0 +1,427 @@ +#!/usr/bin/env python +# +# Copyright 2009 Google Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""ContactsService extends the GDataService for Google Contacts operations. + + ContactsService: Provides methods to query feeds and manipulate items. + Extends GDataService. + + DictionaryToParamList: Function which converts a dictionary into a list of + URL arguments (represented as strings). This is a + utility function used in CRUD operations. 
+""" + +__author__ = 'dbrattli (Dag Brattli)' + + +import gdata +import gdata.calendar +import gdata.service + + +DEFAULT_BATCH_URL = ('http://www.google.com/m8/feeds/contacts/default/full' + '/batch') +DEFAULT_PROFILES_BATCH_URL = ('http://www.google.com' + '/m8/feeds/profiles/default/full/batch') + +GDATA_VER_HEADER = 'GData-Version' + + +class Error(Exception): + pass + + +class RequestError(Error): + pass + + +class ContactsService(gdata.service.GDataService): + """Client for the Google Contacts service.""" + + def __init__(self, email=None, password=None, source=None, + server='www.google.com', additional_headers=None, + contact_list='default', **kwargs): + """Creates a client for the Contacts service. + + Args: + email: string (optional) The user's email address, used for + authentication. + password: string (optional) The user's password. + source: string (optional) The name of the user's application. + server: string (optional) The name of the server to which a connection + will be opened. Default value: 'www.google.com'. + contact_list: string (optional) The name of the default contact list to + use when no URI is specified to the methods of the service. + Default value: 'default' (the logged in user's contact list). + **kwargs: The other parameters to pass to gdata.service.GDataService + constructor. + """ + + self.contact_list = contact_list + gdata.service.GDataService.__init__( + self, email=email, password=password, service='cp', source=source, + server=server, additional_headers=additional_headers, **kwargs) + + def GetFeedUri(self, kind='contacts', contact_list=None, projection='full', + scheme=None): + """Builds a feed URI. + + Args: + kind: The type of feed to return, typically 'groups' or 'contacts'. + Default value: 'contacts'. + contact_list: The contact list to return a feed for. + Default value: self.contact_list. + projection: The projection to apply to the feed contents, for example + 'full', 'base', 'base/12345', 'full/batch'. Default value: 'full'. + scheme: The URL scheme such as 'http' or 'https', None to return a + relative URI without hostname. + + Returns: + A feed URI using the given kind, contact list, and projection. + Example: '/m8/feeds/contacts/default/full'. + """ + contact_list = contact_list or self.contact_list + if kind == 'profiles': + contact_list = 'domain/%s' % contact_list + prefix = scheme and '%s://%s' % (scheme, self.server) or '' + return '%s/m8/feeds/%s/%s/%s' % (prefix, kind, contact_list, projection) + + def GetContactsFeed(self, uri=None): + uri = uri or self.GetFeedUri() + return self.Get(uri, converter=gdata.contacts.ContactsFeedFromString) + + def GetContact(self, uri): + return self.Get(uri, converter=gdata.contacts.ContactEntryFromString) + + def CreateContact(self, new_contact, insert_uri=None, url_params=None, + escape_params=True): + """Adds an new contact to Google Contacts. + + Args: + new_contact: atom.Entry or subclass A new contact which is to be added to + Google Contacts. + insert_uri: the URL to post new contacts to the feed + url_params: dict (optional) Additional URL parameters to be included + in the insertion request. + escape_params: boolean (optional) If true, the url_parameters will be + escaped before they are included in the request. 
+ + Returns: + On successful insert, an entry containing the contact created + On failure, a RequestError is raised of the form: + {'status': HTTP status code from server, + 'reason': HTTP reason from the server, + 'body': HTTP body of the server's response} + """ + insert_uri = insert_uri or self.GetFeedUri() + return self.Post(new_contact, insert_uri, url_params=url_params, + escape_params=escape_params, + converter=gdata.contacts.ContactEntryFromString) + + def UpdateContact(self, edit_uri, updated_contact, url_params=None, + escape_params=True): + """Updates an existing contact. + + Args: + edit_uri: string The edit link URI for the element being updated + updated_contact: string, atom.Entry or subclass containing + the Atom Entry which will replace the contact which is + stored at the edit_url + url_params: dict (optional) Additional URL parameters to be included + in the update request. + escape_params: boolean (optional) If true, the url_parameters will be + escaped before they are included in the request. + + Returns: + On successful update, a httplib.HTTPResponse containing the server's + response to the PUT request. + On failure, a RequestError is raised of the form: + {'status': HTTP status code from server, + 'reason': HTTP reason from the server, + 'body': HTTP body of the server's response} + """ + return self.Put(updated_contact, self._CleanUri(edit_uri), + url_params=url_params, + escape_params=escape_params, + converter=gdata.contacts.ContactEntryFromString) + + def DeleteContact(self, edit_uri, extra_headers=None, + url_params=None, escape_params=True): + """Removes an contact with the specified ID from Google Contacts. + + Args: + edit_uri: string The edit URL of the entry to be deleted. Example: + '/m8/feeds/contacts/default/full/xxx/yyy' + url_params: dict (optional) Additional URL parameters to be included + in the deletion request. + escape_params: boolean (optional) If true, the url_parameters will be + escaped before they are included in the request. + + Returns: + On successful delete, a httplib.HTTPResponse containing the server's + response to the DELETE request. + On failure, a RequestError is raised of the form: + {'status': HTTP status code from server, + 'reason': HTTP reason from the server, + 'body': HTTP body of the server's response} + """ + return self.Delete(self._CleanUri(edit_uri), + url_params=url_params, escape_params=escape_params) + + def GetGroupsFeed(self, uri=None): + uri = uri or self.GetFeedUri('groups') + return self.Get(uri, converter=gdata.contacts.GroupsFeedFromString) + + def CreateGroup(self, new_group, insert_uri=None, url_params=None, + escape_params=True): + insert_uri = insert_uri or self.GetFeedUri('groups') + return self.Post(new_group, insert_uri, url_params=url_params, + escape_params=escape_params, + converter=gdata.contacts.GroupEntryFromString) + + def UpdateGroup(self, edit_uri, updated_group, url_params=None, + escape_params=True): + return self.Put(updated_group, self._CleanUri(edit_uri), + url_params=url_params, + escape_params=escape_params, + converter=gdata.contacts.GroupEntryFromString) + + def DeleteGroup(self, edit_uri, extra_headers=None, + url_params=None, escape_params=True): + return self.Delete(self._CleanUri(edit_uri), + url_params=url_params, escape_params=escape_params) + + def ChangePhoto(self, media, contact_entry_or_url, content_type=None, + content_length=None): + """Change the photo for the contact by uploading a new photo. 
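Updates and deletes both address the entry through its Atom edit link, which _CleanUri later strips back to a server-relative path. A sketch continuing the ContactsService setup above and assuming the feed contains at least one contact:

    # 'service' and 'feed' come from the previous sketch.
    entry = feed.entry[0]
    entry.title.text = 'Updated display name'

    updated = service.UpdateContact(entry.GetEditLink().href, entry)
    service.DeleteContact(updated.GetEditLink().href)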
+ + Performs a PUT against the photo edit URL to send the binary data for the + photo. + + Args: + media: filename, file-like-object, or a gdata.MediaSource object to send. + contact_entry_or_url: ContactEntry or str If it is a ContactEntry, this + method will search for an edit photo link URL and + perform a PUT to the URL. + content_type: str (optional) the mime type for the photo data. This is + necessary if media is a file or file name, but if media + is a MediaSource object then the media object can contain + the mime type. If media_type is set, it will override the + mime type in the media object. + content_length: int or str (optional) Specifying the content length is + only required if media is a file-like object. If media + is a filename, the length is determined using + os.path.getsize. If media is a MediaSource object, it is + assumed that it already contains the content length. + """ + if isinstance(contact_entry_or_url, gdata.contacts.ContactEntry): + url = contact_entry_or_url.GetPhotoEditLink().href + else: + url = contact_entry_or_url + if isinstance(media, gdata.MediaSource): + payload = media + # If the media object is a file-like object, then use it as the file + # handle in the in the MediaSource. + elif hasattr(media, 'read'): + payload = gdata.MediaSource(file_handle=media, + content_type=content_type, content_length=content_length) + # Assume that the media object is a file name. + else: + payload = gdata.MediaSource(content_type=content_type, + content_length=content_length, file_path=media) + return self.Put(payload, url, extra_headers={'If-Match': '*'}) + + def GetPhoto(self, contact_entry_or_url): + """Retrives the binary data for the contact's profile photo as a string. + + Args: + contact_entry_or_url: a gdata.contacts.ContactEntry objecr or a string + containing the photo link's URL. If the contact entry does not + contain a photo link, the image will not be fetched and this method + will return None. + """ + # TODO: add the ability to write out the binary image data to a file, + # reading and writing a chunk at a time to avoid potentially using up + # large amounts of memory. + url = None + if isinstance(contact_entry_or_url, gdata.contacts.ContactEntry): + photo_link = contact_entry_or_url.GetPhotoLink() + if photo_link: + url = photo_link.href + else: + url = contact_entry_or_url + if url: + return self.Get(url, converter=str) + else: + return None + + def DeletePhoto(self, contact_entry_or_url): + url = None + if isinstance(contact_entry_or_url, gdata.contacts.ContactEntry): + url = contact_entry_or_url.GetPhotoEditLink().href + else: + url = contact_entry_or_url + if url: + self.Delete(url, extra_headers={'If-Match': '*'}) + + def GetProfilesFeed(self, uri=None): + """Retrieves a feed containing all domain's profiles. + + Args: + uri: string (optional) the URL to retrieve the profiles feed, + for example /m8/feeds/profiles/default/full + + Returns: + On success, a ProfilesFeed containing the profiles. + On failure, raises a RequestError. + """ + + uri = uri or self.GetFeedUri('profiles') + return self.Get(uri, + converter=gdata.contacts.ProfilesFeedFromString) + + def GetProfile(self, uri): + """Retrieves a domain's profile for the user. + + Args: + uri: string the URL to retrieve the profiles feed, + for example /m8/feeds/profiles/default/full/username + + Returns: + On success, a ProfileEntry containing the profile for the user. 
+ On failure, raises a RequestError + """ + return self.Get(uri, + converter=gdata.contacts.ProfileEntryFromString) + + def UpdateProfile(self, edit_uri, updated_profile, url_params=None, + escape_params=True): + """Updates an existing profile. + + Args: + edit_uri: string The edit link URI for the element being updated + updated_profile: string atom.Entry or subclass containing + the Atom Entry which will replace the profile which is + stored at the edit_url. + url_params: dict (optional) Additional URL parameters to be included + in the update request. + escape_params: boolean (optional) If true, the url_params will be + escaped before they are included in the request. + + Returns: + On successful update, a httplib.HTTPResponse containing the server's + response to the PUT request. + On failure, raises a RequestError. + """ + return self.Put(updated_profile, self._CleanUri(edit_uri), + url_params=url_params, escape_params=escape_params, + converter=gdata.contacts.ProfileEntryFromString) + + def ExecuteBatch(self, batch_feed, url, + converter=gdata.contacts.ContactsFeedFromString): + """Sends a batch request feed to the server. + + Args: + batch_feed: gdata.contacts.ContactFeed A feed containing batch + request entries. Each entry contains the operation to be performed + on the data contained in the entry. For example an entry with an + operation type of insert will be used as if the individual entry + had been inserted. + url: str The batch URL to which these operations should be applied. + converter: Function (optional) The function used to convert the server's + response to an object. The default value is ContactsFeedFromString. + + Returns: + The results of the batch request's execution on the server. If the + default converter is used, this is stored in a ContactsFeed. + """ + return self.Post(batch_feed, url, converter=converter) + + def ExecuteBatchProfiles(self, batch_feed, url, + converter=gdata.contacts.ProfilesFeedFromString): + """Sends a batch request feed to the server. + + Args: + batch_feed: gdata.profiles.ProfilesFeed A feed containing batch + request entries. Each entry contains the operation to be performed + on the data contained in the entry. For example an entry with an + operation type of insert will be used as if the individual entry + had been inserted. + url: string The batch URL to which these operations should be applied. + converter: Function (optional) The function used to convert the server's + response to an object. The default value is + gdata.profiles.ProfilesFeedFromString. + + Returns: + The results of the batch request's execution on the server. If the + default converter is used, this is stored in a ProfilesFeed. + """ + return self.Post(batch_feed, url, converter=converter) + + def _CleanUri(self, uri): + """Sanitizes a feed URI. + + Args: + uri: The URI to sanitize, can be relative or absolute. + + Returns: + The given URI without its http://server prefix, if any. + Keeps the leading slash of the URI. 
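The sanitizer described above only trims this service's own http://server prefix; everything else, including the leading slash and any https:// form, passes through untouched. A worked example of the same logic in isolation:

    server = 'www.google.com'
    prefix = 'http://%s' % server
    uri = 'http://www.google.com/m8/feeds/contacts/default/full/abc'
    if uri.startswith(prefix):
        uri = uri[len(prefix):]
    print(uri)   # '/m8/feeds/contacts/default/full/abc'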
+ """ + url_prefix = 'http://%s' % self.server + if uri.startswith(url_prefix): + uri = uri[len(url_prefix):] + return uri + +class ContactsQuery(gdata.service.Query): + + def __init__(self, feed=None, text_query=None, params=None, + categories=None, group=None): + self.feed = feed or '/m8/feeds/contacts/default/full' + if group: + self._SetGroup(group) + gdata.service.Query.__init__(self, feed=self.feed, text_query=text_query, + params=params, categories=categories) + + def _GetGroup(self): + if 'group' in self: + return self['group'] + else: + return None + + def _SetGroup(self, group_id): + self['group'] = group_id + + group = property(_GetGroup, _SetGroup, + doc='The group query parameter to find only contacts in this group') + +class GroupsQuery(gdata.service.Query): + + def __init__(self, feed=None, text_query=None, params=None, + categories=None): + self.feed = feed or '/m8/feeds/groups/default/full' + gdata.service.Query.__init__(self, feed=self.feed, text_query=text_query, + params=params, categories=categories) + + +class ProfilesQuery(gdata.service.Query): + """Constructs a query object for the profiles feed.""" + + def __init__(self, feed=None, text_query=None, params=None, + categories=None): + self.feed = feed or '/m8/feeds/profiles/default/full' + gdata.service.Query.__init__(self, feed=self.feed, text_query=text_query, + params=params, categories=categories) diff --git a/gam/gdata/contacts/service.pyc b/gam/gdata/contacts/service.pyc new file mode 100644 index 0000000000000000000000000000000000000000..dc047123be6b39d243e78500f9f07b04b0c1c569 GIT binary patch literal 19267 zcmeHP-E$mQR==Z>Wy_YMME;1AHA!cKksNGkv%3k4DnMjO{veK`ZOK{IThwf8rX{sK z>YnY}V_V?7U;|GSPgGGw@jw*?e*h2gz#DJ8@InqPH2?kV>0j#q?PlA!|C;#wF%J80ICYGAu!raG& zUN>vklX8vo3DY@Y?oXM{l(~P>bWWQ1l-b4Sr_6I>o*DDw`_rZ~ZQ^Npdb)gi+H}sC zc)~n5V;;=NyP5LcS<^Y|-@PL5UMb(rn$GOtyI0G1ubR$lgYV|bcdv0cTMOqn)<10+ zvznzv)Gq8+oIgt1aS%T#;`~H*ZEz45`x$G^f}i!{{4m&!W86k*Cpd`Gq}T68MI0nWe1MvQqyMeD zUe$XfO|B(vwjbq(+u3H6M+bKkTPz1R`>8w+9`7gZ{h*ztkK(*QgF&bKAV^YlCkndw zBFJ_}HLD%D7v2q`e6N2Hr>J5fk9&D+aUFMp$Og8^lXTBsT@1GO6C2=PbbM9)BIzat zhVT2=x2PpagVpff+5kE2MKatLk2OnJT9fHZy>o#u(3-4cD z+UtOOD-z&aGWcTeFl4pAjKi`QQ;lKPHEBBdhbKTVoTtolMKfHVG|z#7z%&6d z!WtgFL1Y7xJqHW|()1R1280Bbl?Bd7fiv<1h=zJ*YH!cVTi~014p_tIuha_6N&$eJ zRM-Kwp@85T;PgH~Oc3qU@gpZSMH`qbTy*Q0Ki@}=WYVauP_R-h* zK_-~@V{tg#qN@2ay>X1gJ`lfEMHN}CBN#J!8mB>;1^2=`R6Dj8x0Brjvw~5OUA@wE zWGZLBQm@0^ckRvx3;e#D?d_>{aulw2=c*sud+$Ab4DD!_g;z3smvoD-k z{uMx8^xded^4h@A1~h8FU1YhoAbLf3PxOUn6A>_1S3wAF_lhZsg0PKV%Ip_&cnYpf zl%-qyanwP_HWsL_1p4c)4w{?SMES%|qJv&HF2!V?b2xu3nBR$P?Efyk|K0C>{{!At zR^I$kh>}lzw`_YqJ^*l$)HHMTKCpcl&9H6fX}YXb;)P5gz5a()o&N*%!O z3oKv96^bLywzbrsoecUfUAkf|#rKMZNqxl@xHR^>Jg6$ZV(|+9#DWbe*&^t|-{EVx z6&Yja<)xUlRhJc)gy?b!PhvNDGA2Nfwr|D7O~}r@JlWuIh)iZ~7SM3E@phw4;N(F# z5E!6Y49V@yIv98cB!D1`kkc$_7=W$XD$g2?Euq3jK^lT%>0`yY0BiUyJSv(vp#vf9 zqmV9VL9b?2<-@XHH}mXZOFSqgs8Eof_48z0WT6>`Ue&v?i$|8=Xc}`BRro-t@&+yx z799<|$yU)Iyo@pn{384Xo<uL@=9UcKp2hU7bi}(4()e*H!T^n-_`wL|#|JHN z60M%)AS$whMWGqN-S&6dU78Xr1J$a%2Kxu~!!m=W7fn%OrMGsDw5>0a6h=hR0=q2> z_z(qAQxC9JZ$_&3=MC1Xl+QB6KrJ+uc5TdZe2HqXBG4 zI|j!Z>PK8t4|lO`N4>agc_+)dag^2@zXQ8C@5j)RsHg0u+GEaw9nmi6p;dQ4M<_YK z{C<3h$Vb&%-?y?yQq2&if0j~zwBaN`)wp3?3xM4L6dFQ5GQBXM9NN3t4%*_rc!Hpk zUAQFu9HWSWx^qrbH;)o}AI`8vUk|(^69m6IZ;Pnt+xg|-_V)HBd>Y=N)eclE&_=wV zIKNg=U>+^oEcGAgot7m5X=DzT-^n_MzBI3WN3mi;)_ZEEg1b!6x$MMbx9El-*D%2XE z6AOnAVbrA$#X*6I~Rt>f2t_o^>8y60>ycIMAS*SFXIZy<{1f+%on!n)ymxU6iQkS?j( zOizkD^6|T`2Q9Qm4lVwIGEUpfoWD>xiKXkjh!=F7=Nw+ZQvy;9c+V3xe~I2RQDZ{_ 
z0UE@rz*P*vVNa5=h+7ivI?i<-s~<_D4}NpigTKQA=QV>MB*cI-`D=JUXL5)jHRF%| zByQnPN-PoK!z#K!f0EU0gftwh=^m!h6~3*!?UF`fV%2K<(pF zJv7+WU>5TIJ#JO;Ok8;B=JLi!VqN7pDOwe=!tdjDl%#phtQ2~eAC1$SAaPVL3vvDd zw~jb3K`Ym&p&=&9=_rf|m-ymq8LYyJ4E7&6SOFDqg2)ghtDQ}Y?LwkR~Xmoew6O1U(?&q zV4`@p%Uee~hw!b4?1B2Cwy5s74H+dLe&vqENzs3kuyfTOANDCmctcAde!L>b^aL9D)Ioa8aSm~H8Sv;v75vWn{G8%GH~fV z1lp03v~iRpWKY`CKc=6n)N$S}7OlESP9 zoLKfCK}J^6HGYP*2b_a)ZOq64AEj!34_(CJ=5<-re zALdG>n^M198*VT7kg2$zql6{Hu;^nYE%1kbRvb^$Bqxt zVXV?31RySgd5O9R<{>`k<`_X-bdom}v6SwTv|-tTx`=~n0k=Bvq?o{UU&yoqUpe<*}MXq8((6|Agjc(VKbwLSt@X3W=EM=4@lOF(+q9RZ&m!SY_EZPS9RR zNrT!@OEuHclyD6jAgtI_f1b~t86Vy@&L?$(IYipFlV)XVk19|KaqFdutc# z45DJ1t6tKuWF=(sMHOS$ZcrDXZv=HhnU@7FFo8T!U1k_uniav3iqMs+4$Uhh2NCqT zARj+LPGdI)n=>XUN*M5<3s9=cT%?m3e90p#F~p$XQ1Rg8GQ%8xjEhP-!WG==S_~%< zl!kDXg_vyBDhx^C`6A#q~dMZMp2k%uK<$pFv=0 z77RVrI5#0|+{BwQz=V~?TyQ*^jjva1tgK5rujJd$2l$p2m@FCl5bWDXrB+SJZK!Gc z8)#_|lGy3+6P`Zisq~_R-r)^C6B^TG7C8_^sc1VcViJVaM&2a+%|iGrf!KhC3JwU> zF>lR-_{LsZ_7cPy_BZS4>roE4z3yc@mcU)L00X9`7^N_d7}Ju@^w;VLyCC*1O(x%@gOODI<+D>+Q1q^*&r8A5rkk45hNpl%IMbJ|Ew_;N7+ZaG`qJajHz|K<)dt!b?emReNrgt|US6`{1-Z-r zN)fyZ;gHZ^O&g2~G;X2XaGIBL^kJ73jNVx&`F4wa>Brb0-9N)f9k6KovoM_uiFt*ZTq%aahy z(HfBGbT1{s#)#TBD$t2=x>eUvpkAz*FMERZ@>rkRWo5Gb7VGAHO&F}MWUTEYtHoYe z|KR>D%(44MdVgfGK!AzS1wkY?6o3o)NLT$@f7wF7u*a%;=tIjRYpgS5?y*0pAKSaE z&j%0iA&l1eN!-TPGt8))RL73a4RjRrJt@b}-f(6#7|A;a+hBq+Tq*t3-YEz`piFoL zr^*`>AFSeX@yoO@ij8A^5}k#08FYMu!Iw`dh~@A!gmTug2}A+o7FZ zbQmNg4E|rdVO73Ryy=jA!zH|iX1XP2BtKNfurm+;hlqWhXI~)p;g2A2;}>>@HS5Ce zdl;2Mf36I>K$O**{~j*5mKj+rBl}tyFBAojqKiN)z`olCtf-FQBR3TwQoajabLh3&Cvn_U<&TwGl5DeBO}+0`hV-6pootA36=ItM z2_`!n1j7-w#x`mycnP^7{#^?T(VLf2V!n@aXM?0~A8~pa!7``MB20X)v9TaA2+=>p zdI@5;TAi%jYH5^^*52D8HJJbi~JGDyhnRN)VJ`cWped$w|{XU?6mzDCqx@3rV%Zhxrl9G zug^?QH*tNb*_e5=*_gdJGl{z?+~Js=#NYYpOViUcuTH;$>(}Sb&z+YAH{KHYA-C+e zQLFsN0Ll#odxbNmlO{%CNjJlc#7I(y-qR2N$q7)R|*1#xzwSMA-a3$4V2O7&UuxdTP2k(~y*=OrRaa8+zh5a9EVxl8dN*p{vSx~e3KaIuZI~)Sh`}Q-^K)dP2AK)Q-A5ZqgK!YQ?ViY zAbPsh;_p;oO;$^!Q5JNlABGL(2;{-SWt31K(WJ(&#uU_EYwI^x?%myP-B{UPz1>1^ zso)QV_@|rUXE*QMUEiv`ri|TMTiIS|eZC&HZm+Mbt%qU}tUqbT@)H}hAWcXnm7g03 z850df3I!ermLcUM + representing a contact might have a nested entry from a contact feed. + """ + _qname = GDATA_TEMPLATE % 'entryLink' + entry = GDEntry + rel = 'rel' + read_only = 'readOnly' + href = 'href' + + +class FeedLink(atom.core.XmlElement): + """The gd:feedLink element. + + Represents a logically nested feed. For example, a calendar feed might + have a nested feed representing all comments on entries. + """ + _qname = GDATA_TEMPLATE % 'feedLink' + feed = GDFeed + rel = 'rel' + read_only = 'readOnly' + count_hint = 'countHint' + href = 'href' + + +class AdditionalName(atom.core.XmlElement): + """The gd:additionalName element. + + Specifies additional (eg. middle) name of the person. + Contains an attribute for the phonetic representaton of the name. + """ + _qname = GDATA_TEMPLATE % 'additionalName' + yomi = 'yomi' + + +class Comments(atom.core.XmlElement): + """The gd:comments element. + + Contains a comments feed for the enclosing entry (such as a calendar event). + """ + _qname = GDATA_TEMPLATE % 'comments' + rel = 'rel' + feed_link = FeedLink + + +class Country(atom.core.XmlElement): + """The gd:country element. + + Country name along with optional country code. 
The country code is + given in accordance with ISO 3166-1 alpha-2: + http://www.iso.org/iso/iso-3166-1_decoding_table + """ + _qname = GDATA_TEMPLATE % 'country' + code = 'code' + + +class EmailImParent(atom.core.XmlElement): + address = 'address' + label = 'label' + rel = 'rel' + primary = 'primary' + + +class Email(EmailImParent): + """The gd:email element. + + An email address associated with the containing entity (which is + usually an entity representing a person or a location). + """ + _qname = GDATA_TEMPLATE % 'email' + display_name = 'displayName' + + +class FamilyName(atom.core.XmlElement): + """The gd:familyName element. + + Specifies family name of the person, eg. "Smith". + """ + _qname = GDATA_TEMPLATE % 'familyName' + yomi = 'yomi' + + +class Im(EmailImParent): + """The gd:im element. + + An instant messaging address associated with the containing entity. + """ + _qname = GDATA_TEMPLATE % 'im' + protocol = 'protocol' + + +class GivenName(atom.core.XmlElement): + """The gd:givenName element. + + Specifies given name of the person, eg. "John". + """ + _qname = GDATA_TEMPLATE % 'givenName' + yomi = 'yomi' + + +class NamePrefix(atom.core.XmlElement): + """The gd:namePrefix element. + + Honorific prefix, eg. 'Mr' or 'Mrs'. + """ + _qname = GDATA_TEMPLATE % 'namePrefix' + + +class NameSuffix(atom.core.XmlElement): + """The gd:nameSuffix element. + + Honorific suffix, eg. 'san' or 'III'. + """ + _qname = GDATA_TEMPLATE % 'nameSuffix' + + +class FullName(atom.core.XmlElement): + """The gd:fullName element. + + Unstructured representation of the name. + """ + _qname = GDATA_TEMPLATE % 'fullName' + + +class Name(atom.core.XmlElement): + """The gd:name element. + + Allows storing person's name in a structured way. Consists of + given name, additional name, family name, prefix, suffix and full name. + """ + _qname = GDATA_TEMPLATE % 'name' + given_name = GivenName + additional_name = AdditionalName + family_name = FamilyName + name_prefix = NamePrefix + name_suffix = NameSuffix + full_name = FullName + + +class OrgDepartment(atom.core.XmlElement): + """The gd:orgDepartment element. + + Describes a department within an organization. Must appear within a + gd:organization element. + """ + _qname = GDATA_TEMPLATE % 'orgDepartment' + + +class OrgJobDescription(atom.core.XmlElement): + """The gd:orgJobDescription element. + + Describes a job within an organization. Must appear within a + gd:organization element. + """ + _qname = GDATA_TEMPLATE % 'orgJobDescription' + + +class OrgName(atom.core.XmlElement): + """The gd:orgName element. + + The name of the organization. Must appear within a gd:organization + element. + + Contains a Yomigana attribute (Japanese reading aid) for the + organization name. + """ + _qname = GDATA_TEMPLATE % 'orgName' + yomi = 'yomi' + + +class OrgSymbol(atom.core.XmlElement): + """The gd:orgSymbol element. + + Provides a symbol of an organization. Must appear within a + gd:organization element. + """ + _qname = GDATA_TEMPLATE % 'orgSymbol' + + +class OrgTitle(atom.core.XmlElement): + """The gd:orgTitle element. + + The title of a person within an organization. Must appear within a + gd:organization element. + """ + _qname = GDATA_TEMPLATE % 'orgTitle' + + +class Organization(atom.core.XmlElement): + """The gd:organization element. + + An organization, typically associated with a contact. 
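gd:name is purely structural: each part is its own child element, and the yomi attributes carry phonetic readings where they apply. A construction sketch with placeholder values (Email is shown alongside it since the two usually travel together on a contact):

    import gdata.data

    name = gdata.data.Name(
        name_prefix=gdata.data.NamePrefix(text='Dr'),
        given_name=gdata.data.GivenName(text='Ada'),      # yomi='...' is also accepted
        family_name=gdata.data.FamilyName(text='Lovelace'),
        full_name=gdata.data.FullName(text='Dr Ada Lovelace'))

    work_email = gdata.data.Email(
        address='ada@example.com',
        display_name='Ada Lovelace',
        rel='http://schemas.google.com/g/2005#work',
        primary='true')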
+ """ + _qname = GDATA_TEMPLATE % 'organization' + label = 'label' + primary = 'primary' + rel = 'rel' + department = OrgDepartment + job_description = OrgJobDescription + name = OrgName + symbol = OrgSymbol + title = OrgTitle + + +class When(atom.core.XmlElement): + """The gd:when element. + + Represents a period of time or an instant. + """ + _qname = GDATA_TEMPLATE % 'when' + end = 'endTime' + start = 'startTime' + value = 'valueString' + + +class OriginalEvent(atom.core.XmlElement): + """The gd:originalEvent element. + + Equivalent to the Recurrence ID property specified in section 4.8.4.4 + of RFC 2445. Appears in every instance of a recurring event, to identify + the original event. + + Contains a element specifying the original start time of the + instance that has become an exception. + """ + _qname = GDATA_TEMPLATE % 'originalEvent' + id = 'id' + href = 'href' + when = When + + +class PhoneNumber(atom.core.XmlElement): + """The gd:phoneNumber element. + + A phone number associated with the containing entity (which is usually + an entity representing a person or a location). + """ + _qname = GDATA_TEMPLATE % 'phoneNumber' + label = 'label' + rel = 'rel' + uri = 'uri' + primary = 'primary' + + +class PostalAddress(atom.core.XmlElement): + """The gd:postalAddress element.""" + _qname = GDATA_TEMPLATE % 'postalAddress' + label = 'label' + rel = 'rel' + uri = 'uri' + primary = 'primary' + + +class Rating(atom.core.XmlElement): + """The gd:rating element. + + Represents a numeric rating of the enclosing entity, such as a + comment. Each rating supplies its own scale, although it may be + normalized by a service; for example, some services might convert all + ratings to a scale from 1 to 5. + """ + _qname = GDATA_TEMPLATE % 'rating' + average = 'average' + max = 'max' + min = 'min' + num_raters = 'numRaters' + rel = 'rel' + value = 'value' + + +class Recurrence(atom.core.XmlElement): + """The gd:recurrence element. + + Represents the dates and times when a recurring event takes place. + + The string that defines the recurrence consists of a set of properties, + each of which is defined in the iCalendar standard (RFC 2445). + + Specifically, the string usually begins with a DTSTART property that + indicates the starting time of the first instance of the event, and + often a DTEND property or a DURATION property to indicate when the + first instance ends. Next come RRULE, RDATE, EXRULE, and/or EXDATE + properties, which collectively define a recurring event and its + exceptions (but see below). (See section 4.8.5 of RFC 2445 for more + information about these recurrence component properties.) Last comes a + VTIMEZONE component, providing detailed timezone rules for any timezone + ID mentioned in the preceding properties. + + Google services like Google Calendar don't generally generate EXRULE + and EXDATE properties to represent exceptions to recurring events; + instead, they generate elements. However, + Google services may include EXRULE and/or EXDATE properties anyway; + for example, users can import events and exceptions into Calendar, and + if those imported events contain EXRULE or EXDATE properties, then + Calendar will provide those properties when it sends a + element. + + Note the the use of means that you can't be + sure just from examining a element whether there are + any exceptions to the recurrence description. To ensure that you find + all exceptions, look for elements in the feed, + and use their elements to match them up with + elements. 
+ """ + _qname = GDATA_TEMPLATE % 'recurrence' + + +class RecurrenceException(atom.core.XmlElement): + """The gd:recurrenceException element. + + Represents an event that's an exception to a recurring event-that is, + an instance of a recurring event in which one or more aspects of the + recurring event (such as attendance list, time, or location) have been + changed. + + Contains a element that specifies the original + recurring event that this event is an exception to. + + When you change an instance of a recurring event, that instance becomes + an exception. Depending on what change you made to it, the exception + behaves in either of two different ways when the original recurring + event is changed: + + - If you add, change, or remove comments, attendees, or attendee + responses, then the exception remains tied to the original event, and + changes to the original event also change the exception. + - If you make any other changes to the exception (such as changing the + time or location) then the instance becomes "specialized," which means + that it's no longer as tightly tied to the original event. If you + change the original event, specialized exceptions don't change. But + see below. + + For example, say you have a meeting every Tuesday and Thursday at + 2:00 p.m. If you change the attendance list for this Thursday's meeting + (but not for the regularly scheduled meeting), then it becomes an + exception. If you change the time for this Thursday's meeting (but not + for the regularly scheduled meeting), then it becomes specialized. + + Regardless of whether an exception is specialized or not, if you do + something that deletes the instance that the exception was derived from, + then the exception is deleted. Note that changing the day or time of a + recurring event deletes all instances, and creates new ones. + + For example, after you've specialized this Thursday's meeting, say you + change the recurring meeting to happen on Monday, Wednesday, and Friday. + That change deletes all of the recurring instances of the + Tuesday/Thursday meeting, including the specialized one. + + If a particular instance of a recurring event is deleted, then that + instance appears as a containing a + that has its set to + "http://schemas.google.com/g/2005#event.canceled". (For more + information about canceled events, see RFC 2445.) + """ + _qname = GDATA_TEMPLATE % 'recurrenceException' + specialized = 'specialized' + entry_link = EntryLink + original_event = OriginalEvent + + +class Reminder(atom.core.XmlElement): + """The gd:reminder element. + + A time interval, indicating how long before the containing entity's start + time or due time attribute a reminder should be issued. Alternatively, + may specify an absolute time at which a reminder should be issued. Also + specifies a notification method, indicating what medium the system + should use to remind the user. + """ + _qname = GDATA_TEMPLATE % 'reminder' + absolute_time = 'absoluteTime' + method = 'method' + days = 'days' + hours = 'hours' + minutes = 'minutes' + + +class Agent(atom.core.XmlElement): + """The gd:agent element. + + The agent who actually receives the mail. Used in work addresses. + Also for 'in care of' or 'c/o'. + """ + _qname = GDATA_TEMPLATE % 'agent' + + +class HouseName(atom.core.XmlElement): + """The gd:housename element. + + Used in places where houses or buildings have names (and not + necessarily numbers), eg. "The Pillars". 
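As a rough illustration of the gd:recurrence and gd:reminder elements described above: the recurrence text is a raw iCalendar (RFC 2445) fragment, abbreviated here (no VTIMEZONE component) and with invented dates:

import gdata.data

# Weekly Tuesday/Thursday event expressed as raw iCalendar properties.
recurrence = gdata.data.Recurrence(
    text='DTSTART;VALUE=DATE:20130319\r\nRRULE:FREQ=WEEKLY;BYDAY=TU,TH\r\n')

# Remind attendees by email ten minutes before each instance.
reminder = gdata.data.Reminder(minutes='10', method='email')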
+ """ + _qname = GDATA_TEMPLATE % 'housename' + + +class Street(atom.core.XmlElement): + """The gd:street element. + + Can be street, avenue, road, etc. This element also includes the + house number and room/apartment/flat/floor number. + """ + _qname = GDATA_TEMPLATE % 'street' + + +class PoBox(atom.core.XmlElement): + """The gd:pobox element. + + Covers actual P.O. boxes, drawers, locked bags, etc. This is usually + but not always mutually exclusive with street. + """ + _qname = GDATA_TEMPLATE % 'pobox' + + +class Neighborhood(atom.core.XmlElement): + """The gd:neighborhood element. + + This is used to disambiguate a street address when a city contains more + than one street with the same name, or to specify a small place whose + mail is routed through a larger postal town. In China it could be a + county or a minor city. + """ + _qname = GDATA_TEMPLATE % 'neighborhood' + + +class City(atom.core.XmlElement): + """The gd:city element. + + Can be city, village, town, borough, etc. This is the postal town and + not necessarily the place of residence or place of business. + """ + _qname = GDATA_TEMPLATE % 'city' + + +class Subregion(atom.core.XmlElement): + """The gd:subregion element. + + Handles administrative districts such as U.S. or U.K. counties that are + not used for mail addressing purposes. Subregion is not intended for + delivery addresses. + """ + _qname = GDATA_TEMPLATE % 'subregion' + + +class Region(atom.core.XmlElement): + """The gd:region element. + + A state, province, county (in Ireland), Land (in Germany), + departement (in France), etc. + """ + _qname = GDATA_TEMPLATE % 'region' + + +class Postcode(atom.core.XmlElement): + """The gd:postcode element. + + Postal code. Usually country-wide, but sometimes specific to the + city (e.g. "2" in "Dublin 2, Ireland" addresses). + """ + _qname = GDATA_TEMPLATE % 'postcode' + + +class Country(atom.core.XmlElement): + """The gd:country element. + + The name or code of the country. + """ + _qname = GDATA_TEMPLATE % 'country' + + +class FormattedAddress(atom.core.XmlElement): + """The gd:formattedAddress element. + + The full, unstructured postal address. + """ + _qname = GDATA_TEMPLATE % 'formattedAddress' + + +class StructuredPostalAddress(atom.core.XmlElement): + """The gd:structuredPostalAddress element. + + Postal address split into components. It allows to store the address + in locale independent format. The fields can be interpreted and used + to generate formatted, locale dependent address. The following elements + reperesent parts of the address: agent, house name, street, P.O. box, + neighborhood, city, subregion, region, postal code, country. The + subregion element is not used for postal addresses, it is provided for + extended uses of addresses only. In order to store postal address in an + unstructured form formatted address field is provided. + """ + _qname = GDATA_TEMPLATE % 'structuredPostalAddress' + rel = 'rel' + mail_class = 'mailClass' + usage = 'usage' + label = 'label' + primary = 'primary' + agent = Agent + house_name = HouseName + street = Street + po_box = PoBox + neighborhood = Neighborhood + city = City + subregion = Subregion + region = Region + postcode = Postcode + country = Country + formatted_address = FormattedAddress + + +class Where(atom.core.XmlElement): + """The gd:where element. + + A place (such as an event location) associated with the containing + entity. 
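A sketch of a gd:structuredPostalAddress assembled from the component elements above (all values invented), again relying on the XmlElement keyword constructor:

import gdata.data

address = gdata.data.StructuredPostalAddress(
    street=gdata.data.Street(text='1600 Amphitheatre Pkwy'),
    city=gdata.data.City(text='Mountain View'),
    region=gdata.data.Region(text='CA'),
    postcode=gdata.data.Postcode(text='94043'),
    country=gdata.data.Country(text='USA'),
    primary='true')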
The type of the association is determined by the rel attribute; + the details of the location are contained in an embedded or linked-to + Contact entry. + + A element is more general than a element. The + former identifies a place using a text description and/or a Contact + entry, while the latter identifies a place using a specific geographic + location. + """ + _qname = GDATA_TEMPLATE % 'where' + label = 'label' + rel = 'rel' + value = 'valueString' + entry_link = EntryLink + + +class AttendeeType(atom.core.XmlElement): + """The gd:attendeeType element.""" + _qname = GDATA_TEMPLATE % 'attendeeType' + value = 'value' + + +class AttendeeStatus(atom.core.XmlElement): + """The gd:attendeeStatus element.""" + _qname = GDATA_TEMPLATE % 'attendeeStatus' + value = 'value' + + +class Who(atom.core.XmlElement): + """The gd:who element. + + A person associated with the containing entity. The type of the + association is determined by the rel attribute; the details about the + person are contained in an embedded or linked-to Contact entry. + + The element can be used to specify email senders and + recipients, calendar event organizers, and so on. + """ + _qname = GDATA_TEMPLATE % 'who' + email = 'email' + rel = 'rel' + value = 'valueString' + attendee_status = AttendeeStatus + attendee_type = AttendeeType + entry_link = EntryLink + + +class Deleted(atom.core.XmlElement): + """gd:deleted when present, indicates the containing entry is deleted.""" + _qname = GD_TEMPLATE % 'deleted' + + +class Money(atom.core.XmlElement): + """Describes money""" + _qname = GD_TEMPLATE % 'money' + amount = 'amount' + currency_code = 'currencyCode' + + +class MediaSource(object): + """GData Entries can refer to media sources, so this class provides a + place to store references to these objects along with some metadata. + """ + + def __init__(self, file_handle=None, content_type=None, content_length=None, + file_path=None, file_name=None): + """Creates an object of type MediaSource. + + Args: + file_handle: A file handle pointing to the file to be encapsulated in the + MediaSource. + content_type: string The MIME type of the file. Required if a file_handle + is given. + content_length: int The size of the file. Required if a file_handle is + given. + file_path: string (optional) A full path name to the file. Used in + place of a file_handle. + file_name: string The name of the file without any path information. + Required if a file_handle is given. + """ + self.file_handle = file_handle + self.content_type = content_type + self.content_length = content_length + self.file_name = file_name + + if (file_handle is None and content_type is not None and + file_path is not None): + self.set_file_handle(file_path, content_type) + + def set_file_handle(self, file_name, content_type): + """A helper function which can create a file handle from a given filename + and set the content type and length all at once. 
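A sketch of building a MediaSource for an upload; per the constructor above, supplying file_path together with content_type opens the file and fills in content_length and file_name (the path is hypothetical and must exist):

import gdata.data

media = gdata.data.MediaSource(file_path='/tmp/report.txt',
                               content_type='text/plain')
# Equivalent to calling set_file_handle('/tmp/report.txt', 'text/plain'),
# which also derives content_length and file_name from the file on disk.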
+ + Args: + file_name: string The path and file name to the file containing the media + content_type: string A MIME type representing the type of the media + """ + + self.file_handle = open(file_name, 'rb') + self.content_type = content_type + self.content_length = os.path.getsize(file_name) + self.file_name = os.path.basename(file_name) + + SetFileHandle = set_file_handle + + def modify_request(self, http_request): + http_request.add_body_part(self.file_handle, self.content_type, + self.content_length) + return http_request + + ModifyRequest = modify_request diff --git a/gam/gdata/docs/__init__.py b/gam/gdata/docs/__init__.py new file mode 100755 index 00000000000..8031bc9b7ac --- /dev/null +++ b/gam/gdata/docs/__init__.py @@ -0,0 +1,269 @@ +#!/usr/bin/python +# +# Copyright 2009 Google Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Contains extensions to Atom objects used with Google Documents.""" + +__author__ = ('api.jfisher (Jeff Fisher), ' + 'api.eric@google.com (Eric Bidelman)') + +import atom +import gdata + + +DOCUMENTS_NAMESPACE = 'http://schemas.google.com/docs/2007' + + +class Scope(atom.AtomBase): + """The DocList ACL scope element""" + + _tag = 'scope' + _namespace = gdata.GACL_NAMESPACE + _children = atom.AtomBase._children.copy() + _attributes = atom.AtomBase._attributes.copy() + _attributes['value'] = 'value' + _attributes['type'] = 'type' + + def __init__(self, value=None, type=None, extension_elements=None, + extension_attributes=None, text=None): + self.value = value + self.type = type + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + +class Role(atom.AtomBase): + """The DocList ACL role element""" + + _tag = 'role' + _namespace = gdata.GACL_NAMESPACE + _children = atom.AtomBase._children.copy() + _attributes = atom.AtomBase._attributes.copy() + _attributes['value'] = 'value' + + def __init__(self, value=None, extension_elements=None, + extension_attributes=None, text=None): + self.value = value + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + +class FeedLink(atom.AtomBase): + """The DocList gd:feedLink element""" + + _tag = 'feedLink' + _namespace = gdata.GDATA_NAMESPACE + _attributes = atom.AtomBase._attributes.copy() + _attributes['rel'] = 'rel' + _attributes['href'] = 'href' + + def __init__(self, href=None, rel=None, text=None, extension_elements=None, + extension_attributes=None): + self.href = href + self.rel = rel + atom.AtomBase.__init__(self, extension_elements=extension_elements, + extension_attributes=extension_attributes, text=text) + + +class ResourceId(atom.AtomBase): + """The DocList gd:resourceId element""" + + _tag = 'resourceId' + _namespace = gdata.GDATA_NAMESPACE + _children = atom.AtomBase._children.copy() + _attributes = atom.AtomBase._attributes.copy() + _attributes['value'] = 'value' + + def __init__(self, value=None, extension_elements=None, + 
extension_attributes=None, text=None): + self.value = value + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + +class LastModifiedBy(atom.Person): + """The DocList gd:lastModifiedBy element""" + + _tag = 'lastModifiedBy' + _namespace = gdata.GDATA_NAMESPACE + + +class LastViewed(atom.Person): + """The DocList gd:lastViewed element""" + + _tag = 'lastViewed' + _namespace = gdata.GDATA_NAMESPACE + + +class WritersCanInvite(atom.AtomBase): + """The DocList docs:writersCanInvite element""" + + _tag = 'writersCanInvite' + _namespace = DOCUMENTS_NAMESPACE + _attributes = atom.AtomBase._attributes.copy() + _attributes['value'] = 'value' + + +class DocumentListEntry(gdata.GDataEntry): + """The Google Documents version of an Atom Entry""" + + _tag = gdata.GDataEntry._tag + _namespace = atom.ATOM_NAMESPACE + _children = gdata.GDataEntry._children.copy() + _attributes = gdata.GDataEntry._attributes.copy() + _children['{%s}feedLink' % gdata.GDATA_NAMESPACE] = ('feedLink', FeedLink) + _children['{%s}resourceId' % gdata.GDATA_NAMESPACE] = ('resourceId', + ResourceId) + _children['{%s}lastModifiedBy' % gdata.GDATA_NAMESPACE] = ('lastModifiedBy', + LastModifiedBy) + _children['{%s}lastViewed' % gdata.GDATA_NAMESPACE] = ('lastViewed', + LastViewed) + _children['{%s}writersCanInvite' % DOCUMENTS_NAMESPACE] = ( + 'writersCanInvite', WritersCanInvite) + + def __init__(self, resourceId=None, feedLink=None, lastViewed=None, + lastModifiedBy=None, writersCanInvite=None, author=None, + category=None, content=None, atom_id=None, link=None, + published=None, title=None, updated=None, text=None, + extension_elements=None, extension_attributes=None): + self.feedLink = feedLink + self.lastViewed = lastViewed + self.lastModifiedBy = lastModifiedBy + self.resourceId = resourceId + self.writersCanInvite = writersCanInvite + gdata.GDataEntry.__init__( + self, author=author, category=category, content=content, + atom_id=atom_id, link=link, published=published, title=title, + updated=updated, extension_elements=extension_elements, + extension_attributes=extension_attributes, text=text) + + def GetAclLink(self): + """Extracts the DocListEntry's . + + Returns: + A FeedLink object. + """ + return self.feedLink + + def GetDocumentType(self): + """Extracts the type of document from the DocListEntry. + + This method returns the type of document the DocListEntry + represents. Possible values are document, presentation, + spreadsheet, folder, or pdf. + + Returns: + A string representing the type of document. + """ + if self.category: + for category in self.category: + if category.scheme == gdata.GDATA_NAMESPACE + '#kind': + return category.label + else: + return None + + +def DocumentListEntryFromString(xml_string): + """Converts an XML string into a DocumentListEntry object. + + Args: + xml_string: string The XML describing a Document List feed entry. + + Returns: + A DocumentListEntry object corresponding to the given XML. 
+ """ + return atom.CreateClassFromXMLString(DocumentListEntry, xml_string) + + +class DocumentListAclEntry(gdata.GDataEntry): + """A DocList ACL Entry flavor of an Atom Entry""" + + _tag = gdata.GDataEntry._tag + _namespace = gdata.GDataEntry._namespace + _children = gdata.GDataEntry._children.copy() + _attributes = gdata.GDataEntry._attributes.copy() + _children['{%s}scope' % gdata.GACL_NAMESPACE] = ('scope', Scope) + _children['{%s}role' % gdata.GACL_NAMESPACE] = ('role', Role) + + def __init__(self, category=None, atom_id=None, link=None, + title=None, updated=None, scope=None, role=None, + extension_elements=None, extension_attributes=None, text=None): + gdata.GDataEntry.__init__(self, author=None, category=category, + content=None, atom_id=atom_id, link=link, + published=None, title=title, + updated=updated, text=None) + self.scope = scope + self.role = role + + +def DocumentListAclEntryFromString(xml_string): + """Converts an XML string into a DocumentListAclEntry object. + + Args: + xml_string: string The XML describing a Document List ACL feed entry. + + Returns: + A DocumentListAclEntry object corresponding to the given XML. + """ + return atom.CreateClassFromXMLString(DocumentListAclEntry, xml_string) + + +class DocumentListFeed(gdata.GDataFeed): + """A feed containing a list of Google Documents Items""" + + _tag = gdata.GDataFeed._tag + _namespace = atom.ATOM_NAMESPACE + _children = gdata.GDataFeed._children.copy() + _attributes = gdata.GDataFeed._attributes.copy() + _children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry', + [DocumentListEntry]) + + +def DocumentListFeedFromString(xml_string): + """Converts an XML string into a DocumentListFeed object. + + Args: + xml_string: string The XML describing a DocumentList feed. + + Returns: + A DocumentListFeed object corresponding to the given XML. + """ + return atom.CreateClassFromXMLString(DocumentListFeed, xml_string) + + +class DocumentListAclFeed(gdata.GDataFeed): + """A DocList ACL feed flavor of a Atom feed""" + + _tag = gdata.GDataFeed._tag + _namespace = atom.ATOM_NAMESPACE + _children = gdata.GDataFeed._children.copy() + _attributes = gdata.GDataFeed._attributes.copy() + _children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry', + [DocumentListAclEntry]) + + +def DocumentListAclFeedFromString(xml_string): + """Converts an XML string into a DocumentListAclFeed object. + + Args: + xml_string: string The XML describing a DocumentList feed. + + Returns: + A DocumentListFeed object corresponding to the given XML. + """ + return atom.CreateClassFromXMLString(DocumentListAclFeed, xml_string) diff --git a/gam/gdata/docs/client.py b/gam/gdata/docs/client.py new file mode 100755 index 00000000000..3ade9fb13f4 --- /dev/null +++ b/gam/gdata/docs/client.py @@ -0,0 +1,608 @@ +#!/usr/bin/python +# +# Copyright 2009 Google Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
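A sketch of turning a previously fetched Atom response into the feed objects defined above (the XML is assumed to have been saved to a local file; the path is invented):

import gdata.docs

xml_string = open('doclist_feed.xml').read()
feed = gdata.docs.DocumentListFeedFromString(xml_string)
for entry in feed.entry:
    print entry.title.text, entry.GetDocumentType()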
+ +"""DocsClient extends gdata.client.GDClient to streamline DocList API calls.""" + + +__author__ = 'e.bidelman (Eric Bidelman)' + +import mimetypes +import urllib +import atom.data +import atom.http_core +import gdata.client +import gdata.docs.data +import gdata.gauth + + +# Feed URI templates +DOCLIST_FEED_URI = '/feeds/default/private/full/' +FOLDERS_FEED_TEMPLATE = DOCLIST_FEED_URI + '%s/contents' +ACL_FEED_TEMPLATE = DOCLIST_FEED_URI + '%s/acl' +REVISIONS_FEED_TEMPLATE = DOCLIST_FEED_URI + '%s/revisions' + + +class DocsClient(gdata.client.GDClient): + """Client extension for the Google Documents List API.""" + + host = 'docs.google.com' # default server for the API + api_version = '3.0' # default major version for the service. + auth_service = 'writely' + auth_scopes = gdata.gauth.AUTH_SCOPES['writely'] + + def __init__(self, auth_token=None, **kwargs): + """Constructs a new client for the DocList API. + + Args: + auth_token: (optional) gdata.gauth.ClientLoginToken, AuthSubToken, or + OAuthToken which authorizes this client to edit the user's data. + kwargs: The other parameters to pass to gdata.client.GDClient constructor. + """ + gdata.client.GDClient.__init__(self, auth_token=auth_token, **kwargs) + + def get_file_content(self, uri, auth_token=None, **kwargs): + """Fetches the file content from the specified uri. + + This method is useful for downloading/exporting a file within enviornments + like Google App Engine, where the user does not have the ability to write + the file to a local disk. + + Args: + uri: str The full URL to fetch the file contents from. + auth_token: (optional) gdata.gauth.ClientLoginToken, AuthSubToken, or + OAuthToken which authorizes this client to edit the user's data. + kwargs: Other parameters to pass to self.request(). + + Returns: + The binary file content. + + Raises: + gdata.client.RequestError: on error response from server. + """ + server_response = self.request('GET', uri, auth_token=auth_token, **kwargs) + if server_response.status != 200: + raise gdata.client.RequestError, {'status': server_response.status, + 'reason': server_response.reason, + 'body': server_response.read()} + return server_response.read() + + GetFileContent = get_file_content + + def _download_file(self, uri, file_path, auth_token=None, **kwargs): + """Downloads a file to disk from the specified URI. + + Note: to download a file in memory, use the GetFileContent() method. + + Args: + uri: str The full URL to download the file from. + file_path: str The full path to save the file to. + auth_token: (optional) gdata.gauth.ClientLoginToken, AuthSubToken, or + OAuthToken which authorizes this client to edit the user's data. + kwargs: Other parameters to pass to self.get_file_content(). + + Raises: + gdata.client.RequestError: on error response from server. + """ + f = open(file_path, 'wb') + try: + f.write(self.get_file_content(uri, auth_token=auth_token, **kwargs)) + except gdata.client.RequestError, e: + f.close() + raise e + f.flush() + f.close() + + _DownloadFile = _download_file + + def get_doclist(self, uri=None, limit=None, auth_token=None, **kwargs): + """Retrieves the main doclist feed containing the user's items. + + Args: + uri: str (optional) A URI to query the doclist feed. + limit: int (optional) A maximum cap for the number of results to + return in the feed. By default, the API returns a maximum of 100 + per page. 
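A sketch of constructing and authenticating the client; ClientLogin and the ssl flag come from the gdata.client/atom.client base classes, and the credentials and source string are placeholders:

import gdata.docs.client

client = gdata.docs.client.DocsClient(source='example-docs-app-v1')
client.ssl = True  # force HTTPS for all requests
client.ClientLogin('user@example.com', 'password', 'example-docs-app-v1')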
Thus, if you set limit=5000, you will get <= 5000 + documents (guarenteed no more than 5000), and will need to follow the + feed's next links (feed.GetNextLink()) to the rest. See + get_everything(). Similarly, if you set limit=50, only <= 50 + documents are returned. Note: if the max-results parameter is set in + the uri parameter, it is chosen over a value set for limit. + auth_token: (optional) gdata.gauth.ClientLoginToken, AuthSubToken, or + OAuthToken which authorizes this client to edit the user's data. + kwargs: Other parameters to pass to self.get_feed(). + + Returns: + gdata.docs.data.DocList feed. + """ + if uri is None: + uri = DOCLIST_FEED_URI + + if isinstance(uri, (str, unicode)): + uri = atom.http_core.Uri.parse_uri(uri) + + # Add max-results param if it wasn't included in the uri. + if limit is not None and not 'max-results' in uri.query: + uri.query['max-results'] = limit + + return self.get_feed(uri, desired_class=gdata.docs.data.DocList, + auth_token=auth_token, **kwargs) + + GetDocList = get_doclist + + def get_doc(self, resource_id, etag=None, auth_token=None, **kwargs): + """Retrieves a particular document given by its resource id. + + Args: + resource_id: str The document/item's resource id. Example spreadsheet: + 'spreadsheet%3A0A1234567890'. + etag: str (optional) The document/item's etag value to be used in a + conditional GET. See http://code.google.com/apis/documents/docs/3.0/ + developers_guide_protocol.html#RetrievingCached. + auth_token: (optional) gdata.gauth.ClientLoginToken, AuthSubToken, or + OAuthToken which authorizes this client to edit the user's data. + kwargs: Other parameters to pass to self.get_entry(). + + Returns: + A gdata.docs.data.DocsEntry object representing the retrieved entry. + + Raises: + ValueError if the resource_id is not a valid format. + """ + match = gdata.docs.data.RESOURCE_ID_PATTERN.match(resource_id) + if match is None: + raise ValueError, 'Invalid resource id: %s' % resource_id + return self.get_entry( + DOCLIST_FEED_URI + resource_id, etag=etag, + desired_class=gdata.docs.data.DocsEntry, + auth_token=auth_token, **kwargs) + + GetDoc = get_doc + + def get_everything(self, uri=None, auth_token=None, **kwargs): + """Retrieves the user's entire doc list. + + The method makes multiple HTTP requests (by following the feed's next links) + in order to fetch the user's entire document list. + + Args: + uri: str (optional) A URI to query the doclist feed with. + auth_token: (optional) gdata.gauth.ClientLoginToken, AuthSubToken, or + OAuthToken which authorizes this client to edit the user's data. + kwargs: Other parameters to pass to self.GetDocList(). + + Returns: + A list of gdata.docs.data.DocsEntry objects representing the retrieved + entries. + """ + if uri is None: + uri = DOCLIST_FEED_URI + + feed = self.GetDocList(uri=uri, auth_token=auth_token, **kwargs) + entries = feed.entry + + while feed.GetNextLink() is not None: + feed = self.GetDocList( + feed.GetNextLink().href, auth_token=auth_token, **kwargs) + entries.extend(feed.entry) + + return entries + + GetEverything = get_everything + + def get_acl_permissions(self, resource_id, auth_token=None, **kwargs): + """Retrieves a the ACL sharing permissions for a document. + + Args: + resource_id: str The document/item's resource id. Example for pdf: + 'pdf%3A0A1234567890'. + auth_token: (optional) gdata.gauth.ClientLoginToken, AuthSubToken, or + OAuthToken which authorizes this client to edit the user's data. + kwargs: Other parameters to pass to self.get_feed(). 
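Continuing with the authenticated client from the sketch above, the list calls can be combined roughly like this (the resource id is invented):

# Fetch the entire document list; get_everything follows the feed's
# next links on our behalf.
for entry in client.GetEverything():
    print entry.title.text, entry.resource_id.text

# Or fetch a single entry directly by its resource id.
doc = client.GetDoc('document%3A0A1234567890')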
+ + Returns: + A gdata.docs.data.AclFeed object representing the document's ACL entries. + + Raises: + ValueError if the resource_id is not a valid format. + """ + match = gdata.docs.data.RESOURCE_ID_PATTERN.match(resource_id) + if match is None: + raise ValueError, 'Invalid resource id: %s' % resource_id + + return self.get_feed( + ACL_FEED_TEMPLATE % resource_id, desired_class=gdata.docs.data.AclFeed, + auth_token=auth_token, **kwargs) + + GetAclPermissions = get_acl_permissions + + def get_revisions(self, resource_id, auth_token=None, **kwargs): + """Retrieves the revision history for a document. + + Args: + resource_id: str The document/item's resource id. Example for pdf: + 'pdf%3A0A1234567890'. + auth_token: (optional) gdata.gauth.ClientLoginToken, AuthSubToken, or + OAuthToken which authorizes this client to edit the user's data. + kwargs: Other parameters to pass to self.get_feed(). + + Returns: + A gdata.docs.data.RevisionFeed representing the document's revisions. + + Raises: + ValueError if the resource_id is not a valid format. + """ + match = gdata.docs.data.RESOURCE_ID_PATTERN.match(resource_id) + if match is None: + raise ValueError, 'Invalid resource id: %s' % resource_id + + return self.get_feed( + REVISIONS_FEED_TEMPLATE % resource_id, + desired_class=gdata.docs.data.RevisionFeed, auth_token=auth_token, + **kwargs) + + GetRevisions = get_revisions + + def create(self, doc_type, title, folder_or_id=None, writers_can_invite=None, + auth_token=None, **kwargs): + """Creates a new item in the user's doclist. + + Args: + doc_type: str The type of object to create. For example: 'document', + 'spreadsheet', 'folder', 'presentation'. + title: str A title for the document. + folder_or_id: gdata.docs.data.DocsEntry or str (optional) Folder entry or + the resouce id of a folder to create the object under. Note: A valid + resource id for a folder is of the form: folder%3Afolder_id. + writers_can_invite: bool (optional) False prevents collaborators from + being able to invite others to edit or view the document. + auth_token: (optional) gdata.gauth.ClientLoginToken, AuthSubToken, or + OAuthToken which authorizes this client to edit the user's data. + kwargs: Other parameters to pass to self.post(). + + Returns: + gdata.docs.data.DocsEntry containing information newly created item. + """ + entry = gdata.docs.data.DocsEntry(title=atom.data.Title(text=title)) + entry.category.append(gdata.docs.data.make_kind_category(doc_type)) + + if isinstance(writers_can_invite, gdata.docs.data.WritersCanInvite): + entry.writers_can_invite = writers_can_invite + elif isinstance(writers_can_invite, bool): + entry.writers_can_invite = gdata.docs.data.WritersCanInvite( + value=str(writers_can_invite).lower()) + + uri = DOCLIST_FEED_URI + + if folder_or_id is not None: + if isinstance(folder_or_id, gdata.docs.data.DocsEntry): + # Verify that we're uploading the resource into to a folder. + if folder_or_id.get_document_type() == gdata.docs.data.FOLDER_LABEL: + uri = folder_or_id.content.src + else: + raise gdata.client.Error, 'Trying to upload item to a non-folder.' + else: + uri = FOLDERS_FEED_TEMPLATE % folder_or_id + + return self.post(entry, uri, auth_token=auth_token, **kwargs) + + Create = create + + def copy(self, source_entry, title, auth_token=None, **kwargs): + """Copies a native Google document, spreadsheet, or presentation. + + Note: arbitrary file types and PDFs do not support this feature. + + Args: + source_entry: gdata.docs.data.DocsEntry An object representing the source + document/folder. 
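A sketch of reading a document's sharing settings with the ACL call above; the scope and role attributes follow gdata.acl.data, and the resource id is invented:

acl_feed = client.GetAclPermissions('document%3A0A1234567890')
for acl in acl_feed.entry:
    # e.g. 'user user@example.com writer'
    print acl.scope.type, acl.scope.value, acl.role.value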
+ title: str A title for the new document. + auth_token: (optional) gdata.gauth.ClientLoginToken, AuthSubToken, or + OAuthToken which authorizes this client to edit the user's data. + kwargs: Other parameters to pass to self.post(). + + Returns: + A gdata.docs.data.DocsEntry of the duplicated document. + """ + entry = gdata.docs.data.DocsEntry( + title=atom.data.Title(text=title), + id=atom.data.Id(text=source_entry.GetSelfLink().href)) + return self.post(entry, DOCLIST_FEED_URI, auth_token=auth_token, **kwargs) + + Copy = copy + + def move(self, source_entry, folder_entry=None, + keep_in_folders=False, auth_token=None, **kwargs): + """Moves an item into a different folder (or to the root document list). + + Args: + source_entry: gdata.docs.data.DocsEntry An object representing the source + document/folder. + folder_entry: gdata.docs.data.DocsEntry (optional) An object representing + the destination folder. If None, set keep_in_folders to + True to remove the item from all parent folders. + keep_in_folders: boolean (optional) If True, the source entry + is not removed from any existing parent folders it is in. + auth_token: (optional) gdata.gauth.ClientLoginToken, AuthSubToken, or + OAuthToken which authorizes this client to edit the user's data. + kwargs: Other parameters to pass to self.post(). + + Returns: + A gdata.docs.data.DocsEntry of the moved entry or True if just moving the + item out of all folders (e.g. Move(source_entry)). + """ + entry = gdata.docs.data.DocsEntry(id=source_entry.id) + + # Remove the item from any folders it is already in. + if not keep_in_folders: + for folder in source_entry.InFolders(): + self.delete( + '%s/contents/%s' % (folder.href, source_entry.resource_id.text), + force=True) + + # If we're moving the resource into a folder, verify it is a folder entry. + if folder_entry is not None: + if folder_entry.get_document_type() == gdata.docs.data.FOLDER_LABEL: + return self.post(entry, folder_entry.content.src, + auth_token=auth_token, **kwargs) + else: + raise gdata.client.Error, 'Trying to move item into a non-folder.' + + return True + + Move = move + + def upload(self, media, title, folder_or_uri=None, content_type=None, + auth_token=None, **kwargs): + """Uploads a file to Google Docs. + + Args: + media: A gdata.data.MediaSource object containing the file to be + uploaded or a string of the filepath. + title: str The title of the document on the server after being + uploaded. + folder_or_uri: gdata.docs.data.DocsEntry or str (optional) An object with + a link to the folder or the uri to upload the file to. + Note: A valid uri for a folder is of the form: + /feeds/default/private/full/folder%3Afolder_id/contents + content_type: str (optional) The file's mimetype. If not provided, the + one in the media source object is used or the mimetype is inferred + from the filename (if media is a string). When media is a filename, + it is always recommended to pass in a content type. + auth_token: (optional) gdata.gauth.ClientLoginToken, AuthSubToken, or + OAuthToken which authorizes this client to edit the user's data. + kwargs: Other parameters to pass to self.post(). + + Returns: + A gdata.docs.data.DocsEntry containing information about uploaded doc. + """ + uri = None + if folder_or_uri is not None: + if isinstance(folder_or_uri, gdata.docs.data.DocsEntry): + # Verify that we're uploading the resource into to a folder. 
+ if folder_or_uri.get_document_type() == gdata.docs.data.FOLDER_LABEL: + uri = folder_or_uri.content.src + else: + raise gdata.client.Error, 'Trying to upload item to a non-folder.' + else: + uri = folder_or_uri + else: + uri = DOCLIST_FEED_URI + + # Create media source if media is a filepath. + if isinstance(media, (str, unicode)): + mimetype = mimetypes.guess_type(media)[0] + if mimetype is None and content_type is None: + raise ValueError, ("Unknown mimetype. Please pass in the file's " + "content_type") + else: + media = gdata.data.MediaSource(file_path=media, + content_type=content_type) + + entry = gdata.docs.data.DocsEntry(title=atom.data.Title(text=title)) + + return self.post(entry, uri, media_source=media, + desired_class=gdata.docs.data.DocsEntry, + auth_token=auth_token, **kwargs) + + Upload = upload + + def download(self, entry_or_id_or_url, file_path, extra_params=None, + auth_token=None, **kwargs): + """Downloads a file from the Document List to local disk. + + Note: to download a file in memory, use the GetFileContent() method. + + Args: + entry_or_id_or_url: gdata.docs.data.DocsEntry or string representing a + resource id or URL to download the document from (such as the content + src link). + file_path: str The full path to save the file to. + extra_params: dict (optional) A map of any further parameters to control + how the document is downloaded/exported. For example, exporting a + spreadsheet as a .csv: extra_params={'gid': 0, 'exportFormat': 'csv'} + auth_token: (optional) gdata.gauth.ClientLoginToken, AuthSubToken, or + OAuthToken which authorizes this client to edit the user's data. + kwargs: Other parameters to pass to self._download_file(). + + Raises: + gdata.client.RequestError if the download URL is malformed or the server's + response was not successful. + ValueError if entry_or_id_or_url was a resource id for a filetype + in which the download link cannot be manually constructed (e.g. pdf). + """ + if isinstance(entry_or_id_or_url, gdata.docs.data.DocsEntry): + url = entry_or_id_or_url.content.src + else: + if gdata.docs.data.RESOURCE_ID_PATTERN.match(entry_or_id_or_url): + url = gdata.docs.data.make_content_link_from_resource_id( + entry_or_id_or_url) + else: + url = entry_or_id_or_url + + if extra_params is not None: + if 'exportFormat' in extra_params and url.find('/Export?') == -1: + raise gdata.client.Error, ('This entry type cannot be exported ' + 'as a different format.') + + if 'gid' in extra_params and url.find('spreadsheets') == -1: + raise gdata.client.Error, 'gid param is not valid for this doc type.' + + url += '&' + urllib.urlencode(extra_params) + + self._download_file(url, file_path, auth_token=auth_token, **kwargs) + + Download = download + + def export(self, entry_or_id_or_url, file_path, gid=None, auth_token=None, + **kwargs): + """Exports a document from the Document List in a different format. + + Args: + entry_or_id_or_url: gdata.docs.data.DocsEntry or string representing a + resource id or URL to download the document from (such as the content + src link). + file_path: str The full path to save the file to. The export + format is inferred from the the file extension. + gid: str (optional) grid id for downloading a single grid of a + spreadsheet. The param should only be used for .csv and .tsv + spreadsheet exports. + auth_token: (optional) gdata.gauth.ClientLoginToken, AuthSubToken, or + OAuthToken which authorizes this client to edit the user's data. + kwargs: Other parameters to pass to self.download(). 
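A sketch of the upload/copy/export round trip with the client from above (paths and titles invented; assumes the uploaded file is converted to a native document, so its content link is an export URL):

entry = client.Upload('/tmp/report.txt', 'Quarterly report',
                      content_type='text/plain')

# Duplicate the native document under a new title.
copy_entry = client.Copy(entry, 'Copy of Quarterly report')

# Export the document as PDF; Export infers the format from the extension.
client.Export(entry, '/tmp/report.pdf')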
+ + Raises: + gdata.client.RequestError if the download URL is malformed or the server's + response was not successful. + """ + extra_params = {} + + match = gdata.docs.data.FILE_EXT_PATTERN.match(file_path) + if match: + extra_params['exportFormat'] = match.group(1) + + if gid is not None: + extra_params['gid'] = gid + + self.download(entry_or_id_or_url, file_path, extra_params, + auth_token=auth_token, **kwargs) + + Export = export + + +class DocsQuery(gdata.client.Query): + + def __init__(self, title=None, title_exact=None, opened_min=None, + opened_max=None, edited_min=None, edited_max=None, owner=None, + writer=None, reader=None, show_folders=None, + show_deleted=None, ocr=None, target_language=None, + source_language=None, convert=None, **kwargs): + """Constructs a query URL for the Google Documents List API. + + Args: + title: str (optional) Specifies the search terms for the title of a + document. This parameter used without title_exact will only + submit partial queries, not exact queries. + title_exact: str (optional) Meaningless without title. Possible values + are 'true' and 'false'. Note: Matches are case-insensitive. + opened_min: str (optional) Lower bound on the last time a document was + opened by the current user. Use the RFC 3339 timestamp + format. For example: opened_min='2005-08-09T09:57:00-08:00'. + opened_max: str (optional) Upper bound on the last time a document was + opened by the current user. (See also opened_min.) + edited_min: str (optional) Lower bound on the last time a document was + edited by the current user. This value corresponds to the + edited.text value in the doc's entry object, which + represents changes to the document's content or metadata. + Use the RFC 3339 timestamp format. For example: + edited_min='2005-08-09T09:57:00-08:00' + edited_max: str (optional) Upper bound on the last time a document was + edited by the user. (See also edited_min.) + owner: str (optional) Searches for documents with a specific owner. Use + the email address of the owner. For example: + owner='user@gmail.com' + writer: str (optional) Searches for documents which can be written to + by specific users. Use a single email address or a comma + separated list of email addresses. For example: + writer='user1@gmail.com,user@example.com' + reader: str (optional) Searches for documents which can be read by + specific users. (See also writer.) + show_folders: str (optional) Specifies whether the query should return + folders as well as documents. Possible values are 'true' + and 'false'. Default is false. + show_deleted: str (optional) Specifies whether the query should return + documents which are in the trash as well as other + documents. Possible values are 'true' and 'false'. + Default is false. + ocr: str (optional) Specifies whether to attempt OCR on a .jpg, .png, or + .gif upload. Possible values are 'true' and 'false'. Default is + false. See OCR in the Protocol Guide: + http://code.google.com/apis/documents/docs/3.0/developers_guide_protocol.html#OCR + target_language: str (optional) Specifies the language to translate a + document into. See Document Translation in the Protocol + Guide for a table of possible values: + http://code.google.com/apis/documents/docs/3.0/developers_guide_protocol.html#DocumentTranslation + source_language: str (optional) Specifies the source language of the + original document. Optional when using the translation + service. If not provided, Google will attempt to + auto-detect the source language. 
See Document + Translation in the Protocol Guide for a table of + possible values (link in target_language). + convert: str (optional) Used when uploading arbitrary file types to + specity if document-type uploads should convert to a native + Google Docs format. Possible values are 'true' and 'false'. + The default is 'true'. + """ + gdata.client.Query.__init__(self, **kwargs) + self.convert = convert + self.title = title + self.title_exact = title_exact + self.opened_min = opened_min + self.opened_max = opened_max + self.edited_min = edited_min + self.edited_max = edited_max + self.owner = owner + self.writer = writer + self.reader = reader + self.show_folders = show_folders + self.show_deleted = show_deleted + self.ocr = ocr + self.target_language = target_language + self.source_language = source_language + + def modify_request(self, http_request): + gdata.client._add_query_param('convert', self.convert, http_request) + gdata.client._add_query_param('title', self.title, http_request) + gdata.client._add_query_param('title-exact', self.title_exact, + http_request) + gdata.client._add_query_param('opened-min', self.opened_min, http_request) + gdata.client._add_query_param('opened-max', self.opened_max, http_request) + gdata.client._add_query_param('edited-min', self.edited_min, http_request) + gdata.client._add_query_param('edited-max', self.edited_max, http_request) + gdata.client._add_query_param('owner', self.owner, http_request) + gdata.client._add_query_param('writer', self.writer, http_request) + gdata.client._add_query_param('reader', self.reader, http_request) + gdata.client._add_query_param('showfolders', self.show_folders, + http_request) + gdata.client._add_query_param('showdeleted', self.show_deleted, + http_request) + gdata.client._add_query_param('ocr', self.ocr, http_request) + gdata.client._add_query_param('targetLanguage', self.target_language, + http_request) + gdata.client._add_query_param('sourceLanguage', self.source_language, + http_request) + gdata.client.Query.modify_request(self, http_request) + + ModifyRequest = modify_request diff --git a/gam/gdata/docs/data.py b/gam/gdata/docs/data.py new file mode 100755 index 00000000000..3bee41079f4 --- /dev/null +++ b/gam/gdata/docs/data.py @@ -0,0 +1,280 @@ +#!/usr/bin/python +# +# Copyright 2009 Google Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Data model classes for parsing and generating XML for the DocList Data API""" + +__author__ = 'e.bidelman (Eric Bidelman)' + + +import re +import atom.core +import atom.data +import gdata.acl.data +import gdata.data + +DOCUMENTS_NS = 'http://schemas.google.com/docs/2007' +DOCUMENTS_TEMPLATE = '{http://schemas.google.com/docs/2007}%s' +ACL_FEEDLINK_REL = 'http://schemas.google.com/acl/2007#accessControlList' +REVISION_FEEDLINK_REL = DOCUMENTS_NS + '/revisions' + +# XML Namespaces used in Google Documents entities. 
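A sketch of how the DocsQuery fields above end up as request parameters via modify_request (the URI and search terms are invented):

import atom.http_core
import gdata.docs.client

query = gdata.docs.client.DocsQuery(title='budget', title_exact='true',
                                    show_folders='true')
request = atom.http_core.HttpRequest(
    uri=atom.http_core.Uri.parse_uri('/feeds/default/private/full/'))
query.modify_request(request)
# request.uri.query now carries the title, title-exact and showfolders
# parameters.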
+DATA_KIND_SCHEME = 'http://schemas.google.com/g/2005#kind' +DOCUMENT_LABEL = 'document' +SPREADSHEET_LABEL = 'spreadsheet' +PRESENTATION_LABEL = 'presentation' +FOLDER_LABEL = 'folder' +PDF_LABEL = 'pdf' + +LABEL_SCHEME = 'http://schemas.google.com/g/2005/labels' +STARRED_LABEL_TERM = LABEL_SCHEME + '#starred' +TRASHED_LABEL_TERM = LABEL_SCHEME + '#trashed' +HIDDEN_LABEL_TERM = LABEL_SCHEME + '#hidden' +MINE_LABEL_TERM = LABEL_SCHEME + '#mine' +PRIVATE_LABEL_TERM = LABEL_SCHEME + '#private' +SHARED_WITH_DOMAIN_LABEL_TERM = LABEL_SCHEME + '#shared-with-domain' +VIEWED_LABEL_TERM = LABEL_SCHEME + '#viewed' + +DOCS_PARENT_LINK_REL = DOCUMENTS_NS + '#parent' +DOCS_PUBLISH_LINK_REL = DOCUMENTS_NS + '#publish' + +FILE_EXT_PATTERN = re.compile('.*\.([a-zA-Z]{3,}$)') +RESOURCE_ID_PATTERN = re.compile('^([a-z]*)(:|%3A)([\w-]*)$') + +# File extension/mimetype pairs of common format. +MIMETYPES = { + 'CSV': 'text/csv', + 'TSV': 'text/tab-separated-values', + 'TAB': 'text/tab-separated-values', + 'DOC': 'application/msword', + 'DOCX': ('application/vnd.openxmlformats-officedocument.' + 'wordprocessingml.document'), + 'ODS': 'application/x-vnd.oasis.opendocument.spreadsheet', + 'ODT': 'application/vnd.oasis.opendocument.text', + 'RTF': 'application/rtf', + 'SXW': 'application/vnd.sun.xml.writer', + 'TXT': 'text/plain', + 'XLS': 'application/vnd.ms-excel', + 'XLSX': 'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet', + 'PDF': 'application/pdf', + 'PNG': 'image/png', + 'PPT': 'application/vnd.ms-powerpoint', + 'PPS': 'application/vnd.ms-powerpoint', + 'HTM': 'text/html', + 'HTML': 'text/html', + 'ZIP': 'application/zip', + 'SWF': 'application/x-shockwave-flash' + } + + +def make_kind_category(label): + """Builds the appropriate atom.data.Category for the label passed in. + + Args: + label: str The value for the category entry. + + Returns: + An atom.data.Category or None if label is None. + """ + if label is None: + return None + + return atom.data.Category( + scheme=DATA_KIND_SCHEME, term='%s#%s' % (DOCUMENTS_NS, label), label=label) + +MakeKindCategory = make_kind_category + +def make_content_link_from_resource_id(resource_id): + """Constructs export URL for a given resource. + + Args: + resource_id: str The document/item's resource id. Example presentation: + 'presentation%3A0A1234567890'. + + Raises: + gdata.client.ValueError if the resource_id is not a valid format. 
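A sketch pairing the helpers above: look up a MIME type by extension and build the kind category for a new item:

import gdata.docs.data

content_type = gdata.docs.data.MIMETYPES['CSV']              # 'text/csv'
category = gdata.docs.data.make_kind_category('spreadsheet')
# category.term ends in '#spreadsheet' under the docs 2007 namespace.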
+ """ + match = RESOURCE_ID_PATTERN.match(resource_id) + + if match: + label = match.group(1) + doc_id = match.group(3) + if label == DOCUMENT_LABEL: + return '/feeds/download/documents/Export?docId=%s' % doc_id + if label == PRESENTATION_LABEL: + return '/feeds/download/presentations/Export?docId=%s' % doc_id + if label == SPREADSHEET_LABEL: + return ('http://spreadsheets.google.com/feeds/download/spreadsheets/' + 'Export?key=%s' % doc_id) + raise ValueError, ('Invalid resource id: %s, or manually creating the ' + 'download url for this type of doc is not possible' + % resource_id) + +MakeContentLinkFromResourceId = make_content_link_from_resource_id + + +class ResourceId(atom.core.XmlElement): + """The DocList gd:resourceId element.""" + _qname = gdata.data.GDATA_TEMPLATE % 'resourceId' + + +class LastModifiedBy(atom.data.Person): + """The DocList gd:lastModifiedBy element.""" + _qname = gdata.data.GDATA_TEMPLATE % 'lastModifiedBy' + + +class LastViewed(atom.data.Person): + """The DocList gd:lastViewed element.""" + _qname = gdata.data.GDATA_TEMPLATE % 'lastViewed' + + +class WritersCanInvite(atom.core.XmlElement): + """The DocList docs:writersCanInvite element.""" + _qname = DOCUMENTS_TEMPLATE % 'writersCanInvite' + value = 'value' + + +class QuotaBytesUsed(atom.core.XmlElement): + """The DocList gd:quotaBytesUsed element.""" + _qname = gdata.data.GDATA_TEMPLATE % 'quotaBytesUsed' + + +class Publish(atom.core.XmlElement): + """The DocList docs:publish element.""" + _qname = DOCUMENTS_TEMPLATE % 'publish' + value = 'value' + + +class PublishAuto(atom.core.XmlElement): + """The DocList docs:publishAuto element.""" + _qname = DOCUMENTS_TEMPLATE % 'publishAuto' + value = 'value' + + +class PublishOutsideDomain(atom.core.XmlElement): + """The DocList docs:publishOutsideDomain element.""" + _qname = DOCUMENTS_TEMPLATE % 'publishOutsideDomain' + value = 'value' + + +class DocsEntry(gdata.data.GDEntry): + """A DocList version of an Atom Entry.""" + + last_viewed = LastViewed + last_modified_by = LastModifiedBy + resource_id = ResourceId + writers_can_invite = WritersCanInvite + quota_bytes_used = QuotaBytesUsed + feed_link = [gdata.data.FeedLink] + + def get_document_type(self): + """Extracts the type of document this DocsEntry is. + + This method returns the type of document the DocsEntry represents. Possible + values are document, presentation, spreadsheet, folder, or pdf. + + Returns: + A string representing the type of document. + """ + if self.category: + for category in self.category: + if category.scheme == DATA_KIND_SCHEME: + return category.label + else: + return None + + GetDocumentType = get_document_type + + def get_acl_feed_link(self): + """Extracts the DocsEntry's ACL feed . + + Returns: + A gdata.data.FeedLink object. + """ + for feed_link in self.feed_link: + if feed_link.rel == ACL_FEEDLINK_REL: + return feed_link + return None + + GetAclFeedLink = get_acl_feed_link + + def get_revisions_feed_link(self): + """Extracts the DocsEntry's revisions feed . + + Returns: + A gdata.data.FeedLink object. 
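A sketch of the export-link helper above, using the resource id format from its docstring:

import gdata.docs.data

url = gdata.docs.data.make_content_link_from_resource_id(
    'presentation%3A0A1234567890')
# url == '/feeds/download/presentations/Export?docId=0A1234567890'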
+ """ + for feed_link in self.feed_link: + if feed_link.rel == REVISION_FEEDLINK_REL: + return feed_link + return None + + GetRevisionsFeedLink = get_revisions_feed_link + + def in_folders(self): + """Returns the parents link(s) (folders) of this entry.""" + links = [] + for link in self.link: + if link.rel == DOCS_PARENT_LINK_REL and link.href: + links.append(link) + return links + + InFolders = in_folders + + +class Acl(gdata.acl.data.AclEntry): + """A document ACL entry.""" + + +class DocList(gdata.data.GDFeed): + """The main DocList feed containing a list of Google Documents.""" + entry = [DocsEntry] + + +class AclFeed(gdata.acl.data.AclFeed): + """A DocList ACL feed.""" + entry = [Acl] + + +class Revision(gdata.data.GDEntry): + """A document Revision entry.""" + publish = Publish + publish_auto = PublishAuto + publish_outside_domain = PublishOutsideDomain + + def find_publish_link(self): + """Get the link that points to the published document on the web. + + Returns: + A str for the URL in the link with a rel ending in #publish. + """ + return self.find_url(DOCS_PUBLISH_LINK_REL) + + FindPublishLink = find_publish_link + + def get_publish_link(self): + """Get the link that points to the published document on the web. + + Returns: + A gdata.data.Link for the link with a rel ending in #publish. + """ + return self.get_link(DOCS_PUBLISH_LINK_REL) + + GetPublishLink = get_publish_link + + +class RevisionFeed(gdata.data.GDFeed): + """A DocList Revision feed.""" + entry = [Revision] diff --git a/gam/gdata/docs/service.py b/gam/gdata/docs/service.py new file mode 100755 index 00000000000..019528a820c --- /dev/null +++ b/gam/gdata/docs/service.py @@ -0,0 +1,611 @@ +#!/usr/bin/python +# +# Copyright 2009 Google Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""DocsService extends the GDataService to streamline Google Documents + operations. + + DocsService: Provides methods to query feeds and manipulate items. + Extends GDataService. + + DocumentQuery: Queries a Google Document list feed. + + DocumentAclQuery: Queries a Google Document Acl feed. +""" + + +__author__ = ('api.jfisher (Jeff Fisher), ' + 'e.bidelman (Eric Bidelman)') + +import re +import atom +import gdata.service +import gdata.docs +import urllib + +# XML Namespaces used in Google Documents entities. 
+DATA_KIND_SCHEME = gdata.GDATA_NAMESPACE + '#kind' +DOCUMENT_LABEL = 'document' +SPREADSHEET_LABEL = 'spreadsheet' +PRESENTATION_LABEL = 'presentation' +FOLDER_LABEL = 'folder' +PDF_LABEL = 'pdf' + +LABEL_SCHEME = gdata.GDATA_NAMESPACE + '/labels' +STARRED_LABEL_TERM = LABEL_SCHEME + '#starred' +TRASHED_LABEL_TERM = LABEL_SCHEME + '#trashed' +HIDDEN_LABEL_TERM = LABEL_SCHEME + '#hidden' +MINE_LABEL_TERM = LABEL_SCHEME + '#mine' +PRIVATE_LABEL_TERM = LABEL_SCHEME + '#private' +SHARED_WITH_DOMAIN_LABEL_TERM = LABEL_SCHEME + '#shared-with-domain' +VIEWED_LABEL_TERM = LABEL_SCHEME + '#viewed' + +FOLDERS_SCHEME_PREFIX = gdata.docs.DOCUMENTS_NAMESPACE + '/folders/' + +# File extensions of documents that are permitted to be uploaded or downloaded. +SUPPORTED_FILETYPES = { + 'CSV': 'text/csv', + 'TSV': 'text/tab-separated-values', + 'TAB': 'text/tab-separated-values', + 'DOC': 'application/msword', + 'DOCX': ('application/vnd.openxmlformats-officedocument.' + 'wordprocessingml.document'), + 'ODS': 'application/x-vnd.oasis.opendocument.spreadsheet', + 'ODT': 'application/vnd.oasis.opendocument.text', + 'RTF': 'application/rtf', + 'SXW': 'application/vnd.sun.xml.writer', + 'TXT': 'text/plain', + 'XLS': 'application/vnd.ms-excel', + 'XLSX': 'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet', + 'PDF': 'application/pdf', + 'PNG': 'image/png', + 'PPT': 'application/vnd.ms-powerpoint', + 'PPS': 'application/vnd.ms-powerpoint', + 'HTM': 'text/html', + 'HTML': 'text/html', + 'ZIP': 'application/zip', + 'SWF': 'application/x-shockwave-flash' + } + + +class DocsService(gdata.service.GDataService): + + """Client extension for the Google Documents service Document List feed.""" + + __FILE_EXT_PATTERN = re.compile('.*\.([a-zA-Z]{3,}$)') + __RESOURCE_ID_PATTERN = re.compile('^([a-z]*)(:|%3A)([\w-]*)$') + + def __init__(self, email=None, password=None, source=None, + server='docs.google.com', additional_headers=None, **kwargs): + """Creates a client for the Google Documents service. + + Args: + email: string (optional) The user's email address, used for + authentication. + password: string (optional) The user's password. + source: string (optional) The name of the user's application. + server: string (optional) The name of the server to which a connection + will be opened. Default value: 'docs.google.com'. + **kwargs: The other parameters to pass to gdata.service.GDataService + constructor. + """ + gdata.service.GDataService.__init__( + self, email=email, password=password, service='writely', source=source, + server=server, additional_headers=additional_headers, **kwargs) + + def _MakeKindCategory(self, label): + if label is None: + return None + return atom.Category(scheme=DATA_KIND_SCHEME, + term=gdata.docs.DOCUMENTS_NAMESPACE + '#' + label, label=label) + + def _MakeContentLinkFromId(self, resource_id): + match = self.__RESOURCE_ID_PATTERN.match(resource_id) + label = match.group(1) + doc_id = match.group(3) + if label == DOCUMENT_LABEL: + return '/feeds/download/documents/Export?docId=%s' % doc_id + if label == PRESENTATION_LABEL: + return '/feeds/download/presentations/Export?docId=%s' % doc_id + if label == SPREADSHEET_LABEL: + return ('http://spreadsheets.google.com/feeds/download/spreadsheets/' + 'Export?key=%s' % doc_id) + raise ValueError, 'Invalid resource id: %s' % resource_id + + def _UploadFile(self, media_source, title, category, folder_or_uri=None): + """Uploads a file to the Document List feed. 
+ + Args: + media_source: A gdata.MediaSource object containing the file to be + uploaded. + title: string The title of the document on the server after being + uploaded. + category: An atom.Category object specifying the appropriate document + type. + folder_or_uri: DocumentListEntry or string (optional) An object with a + link to a folder or a uri to a folder to upload to. + Note: A valid uri for a folder is of the form: + /feeds/folders/private/full/folder%3Afolder_id + + Returns: + A DocumentListEntry containing information about the document created on + the Google Documents service. + """ + if folder_or_uri: + try: + uri = folder_or_uri.content.src + except AttributeError: + uri = folder_or_uri + else: + uri = '/feeds/documents/private/full' + + entry = gdata.docs.DocumentListEntry() + entry.title = atom.Title(text=title) + if category is not None: + entry.category.append(category) + entry = self.Post(entry, uri, media_source=media_source, + extra_headers={'Slug': media_source.file_name}, + converter=gdata.docs.DocumentListEntryFromString) + return entry + + def _DownloadFile(self, uri, file_path): + """Downloads a file. + + Args: + uri: string The full Export URL to download the file from. + file_path: string The full path to save the file to. + + Raises: + RequestError: on error response from server. + """ + server_response = self.request('GET', uri) + response_body = server_response.read() + if server_response.status != 200: + raise gdata.service.RequestError, {'status': server_response.status, + 'reason': server_response.reason, + 'body': response_body} + f = open(file_path, 'wb') + f.write(response_body) + f.flush() + f.close() + + def MoveIntoFolder(self, source_entry, folder_entry): + """Moves a document into a folder in the Document List Feed. + + Args: + source_entry: DocumentListEntry An object representing the source + document/folder. + folder_entry: DocumentListEntry An object with a link to the destination + folder. + + Returns: + A DocumentListEntry containing information about the document created on + the Google Documents service. + """ + entry = gdata.docs.DocumentListEntry() + entry.id = source_entry.id + entry = self.Post(entry, folder_entry.content.src, + converter=gdata.docs.DocumentListEntryFromString) + return entry + + def Query(self, uri, converter=gdata.docs.DocumentListFeedFromString): + """Queries the Document List feed and returns the resulting feed of + entries. + + Args: + uri: string The full URI to be queried. This can contain query + parameters, a hostname, or simply the relative path to a Document + List feed. The DocumentQuery object is useful when constructing + query parameters. + converter: func (optional) A function which will be executed on the + retrieved item, generally to render it into a Python object. + By default the DocumentListFeedFromString function is used to + return a DocumentListFeed object. This is because most feed + queries will result in a feed and not a single entry. + """ + return self.Get(uri, converter=converter) + + def QueryDocumentListFeed(self, uri): + """Retrieves a DocumentListFeed by retrieving a URI based off the Document + List feed, including any query parameters. A DocumentQuery object can + be used to construct these parameters. + + Args: + uri: string The URI of the feed being retrieved possibly with query + parameters. + + Returns: + A DocumentListFeed object representing the feed returned by the server. 
+ """ + return self.Get(uri, converter=gdata.docs.DocumentListFeedFromString) + + def GetDocumentListEntry(self, uri): + """Retrieves a particular DocumentListEntry by its unique URI. + + Args: + uri: string The unique URI of an entry in a Document List feed. + + Returns: + A DocumentListEntry object representing the retrieved entry. + """ + return self.Get(uri, converter=gdata.docs.DocumentListEntryFromString) + + def GetDocumentListFeed(self, uri=None): + """Retrieves a feed containing all of a user's documents. + + Args: + uri: string A full URI to query the Document List feed. + """ + if not uri: + uri = gdata.docs.service.DocumentQuery().ToUri() + return self.QueryDocumentListFeed(uri) + + def GetDocumentListAclEntry(self, uri): + """Retrieves a particular DocumentListAclEntry by its unique URI. + + Args: + uri: string The unique URI of an entry in a Document List feed. + + Returns: + A DocumentListAclEntry object representing the retrieved entry. + """ + return self.Get(uri, converter=gdata.docs.DocumentListAclEntryFromString) + + def GetDocumentListAclFeed(self, uri): + """Retrieves a feed containing all of a user's documents. + + Args: + uri: string The URI of a document's Acl feed to retrieve. + + Returns: + A DocumentListAclFeed object representing the ACL feed + returned by the server. + """ + return self.Get(uri, converter=gdata.docs.DocumentListAclFeedFromString) + + def Upload(self, media_source, title, folder_or_uri=None, label=None): + """Uploads a document inside of a MediaSource object to the Document List + feed with the given title. + + Args: + media_source: MediaSource The gdata.MediaSource object containing a + document file to be uploaded. + title: string The title of the document on the server after being + uploaded. + folder_or_uri: DocumentListEntry or string (optional) An object with a + link to a folder or a uri to a folder to upload to. + Note: A valid uri for a folder is of the form: + /feeds/folders/private/full/folder%3Afolder_id + label: optional label describing the type of the document to be created. + + Returns: + A DocumentListEntry containing information about the document created + on the Google Documents service. + """ + + return self._UploadFile(media_source, title, self._MakeKindCategory(label), + folder_or_uri) + + def Download(self, entry_or_id_or_url, file_path, export_format=None, + gid=None, extra_params=None): + """Downloads a document from the Document List. + + Args: + entry_or_id_or_url: a DocumentListEntry, or the resource id of an entry, + or a url to download from (such as the content src). + file_path: string The full path to save the file to. + export_format: the format to convert to, if conversion is required. 
+ gid: grid id, for downloading a single grid of a spreadsheet + extra_params: a map of any further parameters to control how the document + is downloaded + + Raises: + RequestError if the service does not respond with success + """ + + if isinstance(entry_or_id_or_url, gdata.docs.DocumentListEntry): + url = entry_or_id_or_url.content.src + else: + if self.__RESOURCE_ID_PATTERN.match(entry_or_id_or_url): + url = self._MakeContentLinkFromId(entry_or_id_or_url) + else: + url = entry_or_id_or_url + + if export_format is not None: + if url.find('/Export?') == -1: + raise gdata.service.Error, ('This entry cannot be exported ' + 'as a different format') + url += '&exportFormat=%s' % export_format + + if gid is not None: + if url.find('spreadsheets') == -1: + raise gdata.service.Error, 'grid id param is not valid for this entry' + url += '&gid=%s' % gid + + if extra_params: + url += '&' + urllib.urlencode(extra_params) + + self._DownloadFile(url, file_path) + + def Export(self, entry_or_id_or_url, file_path, gid=None, extra_params=None): + """Downloads a document from the Document List in a different format. + + Args: + entry_or_id_or_url: a DocumentListEntry, or the resource id of an entry, + or a url to download from (such as the content src). + file_path: string The full path to save the file to. The export + format is inferred from the the file extension. + gid: grid id, for downloading a single grid of a spreadsheet + extra_params: a map of any further parameters to control how the document + is downloaded + + Raises: + RequestError if the service does not respond with success + """ + ext = None + match = self.__FILE_EXT_PATTERN.match(file_path) + if match: + ext = match.group(1) + self.Download(entry_or_id_or_url, file_path, ext, gid, extra_params) + + def CreateFolder(self, title, folder_or_uri=None): + """Creates a folder in the Document List feed. + + Args: + title: string The title of the folder on the server after being created. + folder_or_uri: DocumentListEntry or string (optional) An object with a + link to a folder or a uri to a folder to upload to. + Note: A valid uri for a folder is of the form: + /feeds/folders/private/full/folder%3Afolder_id + + Returns: + A DocumentListEntry containing information about the folder created on + the Google Documents service. + """ + if folder_or_uri: + try: + uri = folder_or_uri.content.src + except AttributeError: + uri = folder_or_uri + else: + uri = '/feeds/documents/private/full' + + folder_entry = gdata.docs.DocumentListEntry() + folder_entry.title = atom.Title(text=title) + folder_entry.category.append(self._MakeKindCategory(FOLDER_LABEL)) + folder_entry = self.Post(folder_entry, uri, + converter=gdata.docs.DocumentListEntryFromString) + + return folder_entry + + + def MoveOutOfFolder(self, source_entry): + """Moves a document into a folder in the Document List Feed. + + Args: + source_entry: DocumentListEntry An object representing the source + document/folder. + + Returns: + True if the entry was moved out. + """ + return self.Delete(source_entry.GetEditLink().href) + + # Deprecated methods + + #@atom.deprecated('Please use Upload instead') + def UploadPresentation(self, media_source, title, folder_or_uri=None): + """Uploads a presentation inside of a MediaSource object to the Document + List feed with the given title. + + This method is deprecated, use Upload instead. + + Args: + media_source: MediaSource The MediaSource object containing a + presentation file to be uploaded. 
+ title: string The title of the presentation on the server after being + uploaded. + folder_or_uri: DocumentListEntry or string (optional) An object with a + link to a folder or a uri to a folder to upload to. + Note: A valid uri for a folder is of the form: + /feeds/folders/private/full/folder%3Afolder_id + + Returns: + A DocumentListEntry containing information about the presentation created + on the Google Documents service. + """ + return self._UploadFile( + media_source, title, self._MakeKindCategory(PRESENTATION_LABEL), + folder_or_uri=folder_or_uri) + + UploadPresentation = atom.deprecated('Please use Upload instead')( + UploadPresentation) + + #@atom.deprecated('Please use Upload instead') + def UploadSpreadsheet(self, media_source, title, folder_or_uri=None): + """Uploads a spreadsheet inside of a MediaSource object to the Document + List feed with the given title. + + This method is deprecated, use Upload instead. + + Args: + media_source: MediaSource The MediaSource object containing a spreadsheet + file to be uploaded. + title: string The title of the spreadsheet on the server after being + uploaded. + folder_or_uri: DocumentListEntry or string (optional) An object with a + link to a folder or a uri to a folder to upload to. + Note: A valid uri for a folder is of the form: + /feeds/folders/private/full/folder%3Afolder_id + + Returns: + A DocumentListEntry containing information about the spreadsheet created + on the Google Documents service. + """ + return self._UploadFile( + media_source, title, self._MakeKindCategory(SPREADSHEET_LABEL), + folder_or_uri=folder_or_uri) + + UploadSpreadsheet = atom.deprecated('Please use Upload instead')( + UploadSpreadsheet) + + #@atom.deprecated('Please use Upload instead') + def UploadDocument(self, media_source, title, folder_or_uri=None): + """Uploads a document inside of a MediaSource object to the Document List + feed with the given title. + + This method is deprecated, use Upload instead. + + Args: + media_source: MediaSource The gdata.MediaSource object containing a + document file to be uploaded. + title: string The title of the document on the server after being + uploaded. + folder_or_uri: DocumentListEntry or string (optional) An object with a + link to a folder or a uri to a folder to upload to. + Note: A valid uri for a folder is of the form: + /feeds/folders/private/full/folder%3Afolder_id + + Returns: + A DocumentListEntry containing information about the document created + on the Google Documents service. 
+    """
+    return self._UploadFile(
+        media_source, title, self._MakeKindCategory(DOCUMENT_LABEL),
+        folder_or_uri=folder_or_uri)
+
+  UploadDocument = atom.deprecated('Please use Upload instead')(
+      UploadDocument)
+
+  """Calling any of these functions is the same as calling Export"""
+  DownloadDocument = atom.deprecated('Please use Export instead')(Export)
+  DownloadPresentation = atom.deprecated('Please use Export instead')(Export)
+  DownloadSpreadsheet = atom.deprecated('Please use Export instead')(Export)
+
+  """Calling any of these functions is the same as calling MoveIntoFolder"""
+  MoveDocumentIntoFolder = atom.deprecated(
+      'Please use MoveIntoFolder instead')(MoveIntoFolder)
+  MovePresentationIntoFolder = atom.deprecated(
+      'Please use MoveIntoFolder instead')(MoveIntoFolder)
+  MoveSpreadsheetIntoFolder = atom.deprecated(
+      'Please use MoveIntoFolder instead')(MoveIntoFolder)
+  MoveFolderIntoFolder = atom.deprecated(
+      'Please use MoveIntoFolder instead')(MoveIntoFolder)
+
+
+class DocumentQuery(gdata.service.Query):
+
+  """Object used to construct a URI to query the Google Document List feed"""
+
+  def __init__(self, feed='/feeds/documents', visibility='private',
+               projection='full', text_query=None, params=None,
+               categories=None):
+    """Constructor for Document List Query
+
+    Args:
+      feed: string (optional) The path for the feed. (e.g. '/feeds/documents')
+      visibility: string (optional) The visibility chosen for the current feed.
+      projection: string (optional) The projection chosen for the current feed.
+      text_query: string (optional) The contents of the q query parameter. This
+          string is URL escaped upon conversion to a URI.
+      params: dict (optional) Parameter value string pairs which become URL
+          params when translated to a URI. These parameters are added to
+          the query's items.
+      categories: list (optional) List of category strings which should be
+          included as query categories. See gdata.service.Query for
+          additional documentation.
+
+    Yields:
+      A DocumentQuery object used to construct a URI based on the Document
+      List feed.
+    """
+    self.visibility = visibility
+    self.projection = projection
+    gdata.service.Query.__init__(self, feed, text_query, params, categories)
+
+  def ToUri(self):
+    """Generates a URI from the query parameters set in the object.
+
+    Returns:
+      A string containing the URI used to retrieve entries from the Document
+      List feed.
+    """
+    old_feed = self.feed
+    self.feed = '/'.join([old_feed, self.visibility, self.projection])
+    new_feed = gdata.service.Query.ToUri(self)
+    self.feed = old_feed
+    return new_feed
+
+  def AddNamedFolder(self, email, folder_name):
+    """Adds a named folder category, qualified by a schema.
+
+    This function lets you query for documents that are contained inside a
+    named folder without fear of collision with other categories.
+
+    Args:
+      email: string The email of the user who owns the folder.
+      folder_name: string The name of the folder.
+
+    Returns:
+      The string of the category that was added to the object.
+    """
+
+    category = '{%s%s}%s' % (FOLDERS_SCHEME_PREFIX, email, folder_name)
+    self.categories.append(category)
+    return category
+
+  def RemoveNamedFolder(self, email, folder_name):
+    """Removes a named folder category, qualified by a schema.
+
+    Args:
+      email: string The email of the user who owns the folder.
+      folder_name: string The name of the folder.
+
+    Returns:
+      The string of the category that was removed from the object.
+ """ + category = '{%s%s}%s' % (FOLDERS_SCHEME_PREFIX, email, folder_name) + self.categories.remove(category) + return category + + +class DocumentAclQuery(gdata.service.Query): + + """Object used to construct a URI to query a Document's ACL feed""" + + def __init__(self, resource_id, feed='/feeds/acl/private/full'): + """Constructor for Document ACL Query + + Args: + resource_id: string The resource id. (e.g. 'document%3Adocument_id', + 'spreadsheet%3Aspreadsheet_id', etc.) + feed: string (optional) The path for the feed. + (e.g. '/feeds/acl/private/full') + + Yields: + A DocumentAclQuery object used to construct a URI based on the Document + ACL feed. + """ + self.resource_id = resource_id + gdata.service.Query.__init__(self, feed) + + def ToUri(self): + """Generates a URI from the query parameters set in the object. + + Returns: + A string containing the URI used to retrieve entries from the Document + ACL feed. + """ + return '%s/%s' % (gdata.service.Query.ToUri(self), self.resource_id) diff --git a/gam/gdata/dublincore/__init__.py b/gam/gdata/dublincore/__init__.py new file mode 100755 index 00000000000..22071f7a11e --- /dev/null +++ b/gam/gdata/dublincore/__init__.py @@ -0,0 +1,15 @@ +#!/usr/bin/python +# +# Copyright (C) 2009 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/gam/gdata/dublincore/data.py b/gam/gdata/dublincore/data.py new file mode 100755 index 00000000000..c6345c16fba --- /dev/null +++ b/gam/gdata/dublincore/data.py @@ -0,0 +1,78 @@ +#!/usr/bin/python +# +# Copyright (C) 2009 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
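The DocsService and DocumentQuery classes added above cover the whole v1 Document List workflow: authenticate, upload, query, download. The following is a minimal usage sketch, not part of the patch itself; the credentials, file name, and query string are placeholders.

  import gdata
  import gdata.docs.service

  client = gdata.docs.service.DocsService(source='example-agent')
  client.ClientLogin('user@example.com', 'password')

  # Upload a plain-text file as a new Google Document.
  ms = gdata.MediaSource(file_path='notes.txt', content_type='text/plain')
  entry = client.Upload(ms, 'My Notes')

  # Run a full-text query over the Document List feed.
  query = gdata.docs.service.DocumentQuery(text_query='budget')
  feed = client.Query(query.ToUri())
  for doc in feed.entry:
    print doc.title.text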
+ +"""Contains the data classes of the Dublin Core Metadata Initiative (DCMI) Extension""" + + +__author__ = 'j.s@google.com (Jeff Scudder)' + + +import atom.core + + +DC_TEMPLATE = '{http://purl.org/dc/terms/}%s' + + +class Creator(atom.core.XmlElement): + """Entity primarily responsible for making the resource.""" + _qname = DC_TEMPLATE % 'creator' + + +class Date(atom.core.XmlElement): + """Point or period of time associated with an event in the lifecycle of the resource.""" + _qname = DC_TEMPLATE % 'date' + + +class Description(atom.core.XmlElement): + """Account of the resource.""" + _qname = DC_TEMPLATE % 'description' + + +class Format(atom.core.XmlElement): + """File format, physical medium, or dimensions of the resource.""" + _qname = DC_TEMPLATE % 'format' + + +class Identifier(atom.core.XmlElement): + """An unambiguous reference to the resource within a given context.""" + _qname = DC_TEMPLATE % 'identifier' + + +class Language(atom.core.XmlElement): + """Language of the resource.""" + _qname = DC_TEMPLATE % 'language' + + +class Publisher(atom.core.XmlElement): + """Entity responsible for making the resource available.""" + _qname = DC_TEMPLATE % 'publisher' + + +class Rights(atom.core.XmlElement): + """Information about rights held in and over the resource.""" + _qname = DC_TEMPLATE % 'rights' + + +class Subject(atom.core.XmlElement): + """Topic of the resource.""" + _qname = DC_TEMPLATE % 'subject' + + +class Title(atom.core.XmlElement): + """Name given to the resource.""" + _qname = DC_TEMPLATE % 'title' + + diff --git a/gam/gdata/exif/__init__.py b/gam/gdata/exif/__init__.py new file mode 100755 index 00000000000..7f1f9c2abd8 --- /dev/null +++ b/gam/gdata/exif/__init__.py @@ -0,0 +1,217 @@ +# -*-*- encoding: utf-8 -*-*- +# +# This is gdata.photos.exif, implementing the exif namespace in gdata +# +# $Id: __init__.py 81 2007-10-03 14:41:42Z havard.gulldahl $ +# +# Copyright 2007 HÃ¥vard Gulldahl +# Portions copyright 2007 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""This module maps elements from the {EXIF} namespace[1] to GData objects. +These elements describe image data, using exif attributes[2]. + +Picasa Web Albums uses the exif namespace to represent Exif data encoded +in a photo [3]. + +Picasa Web Albums uses the following exif elements: +exif:distance +exif:exposure +exif:flash +exif:focallength +exif:fstop +exif:imageUniqueID +exif:iso +exif:make +exif:model +exif:tags +exif:time + +[1]: http://schemas.google.com/photos/exif/2007. +[2]: http://en.wikipedia.org/wiki/Exif +[3]: http://code.google.com/apis/picasaweb/reference.html#exif_reference +""" + + +__author__ = u'havard@gulldahl.no'# (HÃ¥vard Gulldahl)' #BUG: pydoc chokes on non-ascii chars in __author__ +__license__ = 'Apache License v2' + + +import atom +import gdata + +EXIF_NAMESPACE = 'http://schemas.google.com/photos/exif/2007' + +class ExifBaseElement(atom.AtomBase): + """Base class for elements in the EXIF_NAMESPACE (%s). 
To add new elements, you only need to add the element tag name to self._tag + """ % EXIF_NAMESPACE + + _tag = '' + _namespace = EXIF_NAMESPACE + _children = atom.AtomBase._children.copy() + _attributes = atom.AtomBase._attributes.copy() + + def __init__(self, name=None, extension_elements=None, + extension_attributes=None, text=None): + self.name = name + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + +class Distance(ExifBaseElement): + "(float) The distance to the subject, e.g. 0.0" + + _tag = 'distance' +def DistanceFromString(xml_string): + return atom.CreateClassFromXMLString(Distance, xml_string) + +class Exposure(ExifBaseElement): + "(float) The exposure time used, e.g. 0.025 or 8.0E4" + + _tag = 'exposure' +def ExposureFromString(xml_string): + return atom.CreateClassFromXMLString(Exposure, xml_string) + +class Flash(ExifBaseElement): + """(string) Boolean value indicating whether the flash was used. + The .text attribute will either be `true' or `false' + + As a convenience, this object's .bool method will return what you want, + so you can say: + + flash_used = bool(Flash) + + """ + + _tag = 'flash' + def __bool__(self): + if self.text.lower() in ('true','false'): + return self.text.lower() == 'true' +def FlashFromString(xml_string): + return atom.CreateClassFromXMLString(Flash, xml_string) + +class Focallength(ExifBaseElement): + "(float) The focal length used, e.g. 23.7" + + _tag = 'focallength' +def FocallengthFromString(xml_string): + return atom.CreateClassFromXMLString(Focallength, xml_string) + +class Fstop(ExifBaseElement): + "(float) The fstop value used, e.g. 5.0" + + _tag = 'fstop' +def FstopFromString(xml_string): + return atom.CreateClassFromXMLString(Fstop, xml_string) + +class ImageUniqueID(ExifBaseElement): + "(string) The unique image ID for the photo. Generated by Google Photo servers" + + _tag = 'imageUniqueID' +def ImageUniqueIDFromString(xml_string): + return atom.CreateClassFromXMLString(ImageUniqueID, xml_string) + +class Iso(ExifBaseElement): + "(int) The iso equivalent value used, e.g. 200" + + _tag = 'iso' +def IsoFromString(xml_string): + return atom.CreateClassFromXMLString(Iso, xml_string) + +class Make(ExifBaseElement): + "(string) The make of the camera used, e.g. Fictitious Camera Company" + + _tag = 'make' +def MakeFromString(xml_string): + return atom.CreateClassFromXMLString(Make, xml_string) + +class Model(ExifBaseElement): + "(string) The model of the camera used,e.g AMAZING-100D" + + _tag = 'model' +def ModelFromString(xml_string): + return atom.CreateClassFromXMLString(Model, xml_string) + +class Time(ExifBaseElement): + """(int) The date/time the photo was taken, e.g. 1180294337000. + Represented as the number of milliseconds since January 1st, 1970. + + The value of this element will always be identical to the value + of the . + + Look at this object's .isoformat() for a human friendly datetime string: + + photo_epoch = Time.text # 1180294337000 + photo_isostring = Time.isoformat() # '2007-05-27T19:32:17.000Z' + + Alternatively: + photo_datetime = Time.datetime() # (requires python >= 2.3) + """ + + _tag = 'time' + def isoformat(self): + """(string) Return the timestamp as a ISO 8601 formatted string, + e.g. 
'2007-05-27T19:32:17.000Z' + """ + import time + epoch = float(self.text)/1000 + return time.strftime('%Y-%m-%dT%H:%M:%S.000Z', time.gmtime(epoch)) + + def datetime(self): + """(datetime.datetime) Return the timestamp as a datetime.datetime object + + Requires python 2.3 + """ + import datetime + epoch = float(self.text)/1000 + return datetime.datetime.fromtimestamp(epoch) + +def TimeFromString(xml_string): + return atom.CreateClassFromXMLString(Time, xml_string) + +class Tags(ExifBaseElement): + """The container for all exif elements. + The element can appear as a child of a photo entry. + """ + + _tag = 'tags' + _children = atom.AtomBase._children.copy() + _children['{%s}fstop' % EXIF_NAMESPACE] = ('fstop', Fstop) + _children['{%s}make' % EXIF_NAMESPACE] = ('make', Make) + _children['{%s}model' % EXIF_NAMESPACE] = ('model', Model) + _children['{%s}distance' % EXIF_NAMESPACE] = ('distance', Distance) + _children['{%s}exposure' % EXIF_NAMESPACE] = ('exposure', Exposure) + _children['{%s}flash' % EXIF_NAMESPACE] = ('flash', Flash) + _children['{%s}focallength' % EXIF_NAMESPACE] = ('focallength', Focallength) + _children['{%s}iso' % EXIF_NAMESPACE] = ('iso', Iso) + _children['{%s}time' % EXIF_NAMESPACE] = ('time', Time) + _children['{%s}imageUniqueID' % EXIF_NAMESPACE] = ('imageUniqueID', ImageUniqueID) + + def __init__(self, extension_elements=None, extension_attributes=None, text=None): + ExifBaseElement.__init__(self, extension_elements=extension_elements, + extension_attributes=extension_attributes, + text=text) + self.fstop=None + self.make=None + self.model=None + self.distance=None + self.exposure=None + self.flash=None + self.focallength=None + self.iso=None + self.time=None + self.imageUniqueID=None +def TagsFromString(xml_string): + return atom.CreateClassFromXMLString(Tags, xml_string) + diff --git a/gam/gdata/finance/__init__.py b/gam/gdata/finance/__init__.py new file mode 100755 index 00000000000..28ab898d06a --- /dev/null +++ b/gam/gdata/finance/__init__.py @@ -0,0 +1,486 @@ +#!/usr/bin/python +# +# Copyright (C) 2009 Tan Swee Heng +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
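Before the finance module below, a rough sketch of how the exif classes just defined are typically exercised. It is not part of the vendored file; the XML literal is invented for illustration, and the import path assumes the package is importable as gdata.exif, matching its location in this tree.

  import gdata.exif

  xml = ('<exif:tags xmlns:exif="http://schemas.google.com/photos/exif/2007">'
         '<exif:fstop>5.0</exif:fstop><exif:iso>200</exif:iso>'
         '<exif:time>1180294337000</exif:time></exif:tags>')
  tags = gdata.exif.TagsFromString(xml)
  print tags.fstop.text        # '5.0'
  print tags.iso.text          # '200'
  print tags.time.isoformat()  # '2007-05-27T19:32:17.000Z'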
+ + +"""Contains extensions to Atom objects used with Google Finance.""" + + +__author__ = 'thesweeheng@gmail.com' + + +import atom +import gdata + + +GD_NAMESPACE = 'http://schemas.google.com/g/2005' +GF_NAMESPACE = 'http://schemas.google.com/finance/2007' + + +class Money(atom.AtomBase): + """The element.""" + _tag = 'money' + _namespace = GD_NAMESPACE + _attributes = atom.AtomBase._attributes.copy() + _attributes['amount'] = 'amount' + _attributes['currencyCode'] = 'currency_code' + + def __init__(self, amount=None, currency_code=None, **kwargs): + self.amount = amount + self.currency_code = currency_code + atom.AtomBase.__init__(self, **kwargs) + + def __str__(self): + return "%s %s" % (self.amount, self.currency_code) + + +def MoneyFromString(xml_string): + return atom.CreateClassFromXMLString(Money, xml_string) + + +class _Monies(atom.AtomBase): + """An element containing multiple in multiple currencies.""" + _namespace = GF_NAMESPACE + _children = atom.AtomBase._children.copy() + _children['{%s}money' % GD_NAMESPACE] = ('money', [Money]) + + def __init__(self, money=None, **kwargs): + self.money = money or [] + atom.AtomBase.__init__(self, **kwargs) + + def __str__(self): + return " / ".join(["%s" % i for i in self.money]) + + +class CostBasis(_Monies): + """The element.""" + _tag = 'costBasis' + + +def CostBasisFromString(xml_string): + return atom.CreateClassFromXMLString(CostBasis, xml_string) + + +class DaysGain(_Monies): + """The element.""" + _tag = 'daysGain' + + +def DaysGainFromString(xml_string): + return atom.CreateClassFromXMLString(DaysGain, xml_string) + + +class Gain(_Monies): + """The element.""" + _tag = 'gain' + + +def GainFromString(xml_string): + return atom.CreateClassFromXMLString(Gain, xml_string) + + +class MarketValue(_Monies): + """The element.""" + _tag = 'gain' + _tag = 'marketValue' + + +def MarketValueFromString(xml_string): + return atom.CreateClassFromXMLString(MarketValue, xml_string) + + +class Commission(_Monies): + """The element.""" + _tag = 'commission' + + +def CommissionFromString(xml_string): + return atom.CreateClassFromXMLString(Commission, xml_string) + + +class Price(_Monies): + """The element.""" + _tag = 'price' + + +def PriceFromString(xml_string): + return atom.CreateClassFromXMLString(Price, xml_string) + + +class Symbol(atom.AtomBase): + """The element.""" + _tag = 'symbol' + _namespace = GF_NAMESPACE + _attributes = atom.AtomBase._attributes.copy() + _attributes['fullName'] = 'full_name' + _attributes['exchange'] = 'exchange' + _attributes['symbol'] = 'symbol' + + def __init__(self, full_name=None, exchange=None, symbol=None, **kwargs): + self.full_name = full_name + self.exchange = exchange + self.symbol = symbol + atom.AtomBase.__init__(self, **kwargs) + + def __str__(self): + return "%s:%s (%s)" % (self.exchange, self.symbol, self.full_name) + + +def SymbolFromString(xml_string): + return atom.CreateClassFromXMLString(Symbol, xml_string) + + +class TransactionData(atom.AtomBase): + """The element.""" + _tag = 'transactionData' + _namespace = GF_NAMESPACE + _attributes = atom.AtomBase._attributes.copy() + _attributes['type'] = 'type' + _attributes['date'] = 'date' + _attributes['shares'] = 'shares' + _attributes['notes'] = 'notes' + _children = atom.AtomBase._children.copy() + _children['{%s}commission' % GF_NAMESPACE] = ('commission', Commission) + _children['{%s}price' % GF_NAMESPACE] = ('price', Price) + + def __init__(self, type=None, date=None, shares=None, + notes=None, commission=None, price=None, **kwargs): + self.type = 
type + self.date = date + self.shares = shares + self.notes = notes + self.commission = commission + self.price = price + atom.AtomBase.__init__(self, **kwargs) + + +def TransactionDataFromString(xml_string): + return atom.CreateClassFromXMLString(TransactionData, xml_string) + + +class TransactionEntry(gdata.GDataEntry): + """An entry of the transaction feed. + + A TransactionEntry contains TransactionData such as the transaction + type (Buy, Sell, Sell Short, or Buy to Cover), the number of units, + the date, the price, any commission, and any notes. + """ + _tag = 'entry' + _namespace = atom.ATOM_NAMESPACE + _children = gdata.GDataEntry._children.copy() + _children['{%s}transactionData' % GF_NAMESPACE] = ( + 'transaction_data', TransactionData) + + def __init__(self, transaction_data=None, **kwargs): + self.transaction_data = transaction_data + gdata.GDataEntry.__init__(self, **kwargs) + + def transaction_id(self): + return self.id.text.split("/")[-1] + + transaction_id = property(transaction_id, doc='The transaction ID.') + + +def TransactionEntryFromString(xml_string): + return atom.CreateClassFromXMLString(TransactionEntry, xml_string) + + +class TransactionFeed(gdata.GDataFeed): + """A feed that lists all of the transactions that have been recorded for + a particular position. + + A transaction is a collection of information about an instance of + buying or selling a particular security. The TransactionFeed lists all + of the transactions that have been recorded for a particular position + as a list of TransactionEntries. + """ + _tag = 'feed' + _namespace = atom.ATOM_NAMESPACE + _children = gdata.GDataFeed._children.copy() + _children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry', [TransactionEntry]) + + +def TransactionFeedFromString(xml_string): + return atom.CreateClassFromXMLString(TransactionFeed, xml_string) + + +class TransactionFeedLink(atom.AtomBase): + """Link to TransactionFeed embedded in PositionEntry. + + If a PositionFeed is queried with transactions='true', TransactionFeeds + are inlined in the returned PositionEntries. These TransactionFeeds are + accessible via TransactionFeedLink's feed attribute. 
+ """ + _tag = 'feedLink' + _namespace = GD_NAMESPACE + _attributes = atom.AtomBase._attributes.copy() + _attributes['href'] = 'href' + _children = atom.AtomBase._children.copy() + _children['{%s}feed' % atom.ATOM_NAMESPACE] = ( + 'feed', TransactionFeed) + + def __init__(self, href=None, feed=None, **kwargs): + self.href = href + self.feed = feed + atom.AtomBase.__init__(self, **kwargs) + + +class PositionData(atom.AtomBase): + """The element.""" + _tag = 'positionData' + _namespace = GF_NAMESPACE + _attributes = atom.AtomBase._attributes.copy() + _attributes['gainPercentage'] = 'gain_percentage' + _attributes['return1w'] = 'return1w' + _attributes['return4w'] = 'return4w' + _attributes['return3m'] = 'return3m' + _attributes['returnYTD'] = 'returnYTD' + _attributes['return1y'] = 'return1y' + _attributes['return3y'] = 'return3y' + _attributes['return5y'] = 'return5y' + _attributes['returnOverall'] = 'return_overall' + _attributes['shares'] = 'shares' + _children = atom.AtomBase._children.copy() + _children['{%s}costBasis' % GF_NAMESPACE] = ('cost_basis', CostBasis) + _children['{%s}daysGain' % GF_NAMESPACE] = ('days_gain', DaysGain) + _children['{%s}gain' % GF_NAMESPACE] = ('gain', Gain) + _children['{%s}marketValue' % GF_NAMESPACE] = ('market_value', MarketValue) + + def __init__(self, gain_percentage=None, + return1w=None, return4w=None, return3m=None, returnYTD=None, + return1y=None, return3y=None, return5y=None, return_overall=None, + shares=None, cost_basis=None, days_gain=None, + gain=None, market_value=None, **kwargs): + self.gain_percentage = gain_percentage + self.return1w = return1w + self.return4w = return4w + self.return3m = return3m + self.returnYTD = returnYTD + self.return1y = return1y + self.return3y = return3y + self.return5y = return5y + self.return_overall = return_overall + self.shares = shares + self.cost_basis = cost_basis + self.days_gain = days_gain + self.gain = gain + self.market_value = market_value + atom.AtomBase.__init__(self, **kwargs) + + +def PositionDataFromString(xml_string): + return atom.CreateClassFromXMLString(PositionData, xml_string) + + +class PositionEntry(gdata.GDataEntry): + """An entry of the position feed. + + A PositionEntry contains the ticker exchange and Symbol for a stock, + mutual fund, or other security, along with PositionData such as the + number of units of that security that the user holds, and performance + statistics. + """ + _tag = 'entry' + _namespace = atom.ATOM_NAMESPACE + _children = gdata.GDataEntry._children.copy() + _children['{%s}positionData' % GF_NAMESPACE] = ( + 'position_data', PositionData) + _children['{%s}symbol' % GF_NAMESPACE] = ('symbol', Symbol) + _children['{%s}feedLink' % GD_NAMESPACE] = ( + 'feed_link', TransactionFeedLink) + + def __init__(self, position_data=None, symbol=None, feed_link=None, + **kwargs): + self.position_data = position_data + self.symbol = symbol + self.feed_link = feed_link + gdata.GDataEntry.__init__(self, **kwargs) + + def position_title(self): + return self.title.text + + position_title = property(position_title, + doc='The position title as a string (i.e. 
position.title.text).') + + def ticker_id(self): + return self.id.text.split("/")[-1] + + ticker_id = property(ticker_id, doc='The position TICKER ID.') + + def transactions(self): + if self.feed_link.feed: + return self.feed_link.feed.entry + else: + return None + + transactions = property(transactions, doc=""" + Inlined TransactionEntries are returned if PositionFeed is queried + with transactions='true'.""") + + +def PositionEntryFromString(xml_string): + return atom.CreateClassFromXMLString(PositionEntry, xml_string) + + +class PositionFeed(gdata.GDataFeed): + """A feed that lists all of the positions in a particular portfolio. + + A position is a collection of information about a security that the + user holds. The PositionFeed lists all of the positions in a particular + portfolio as a list of PositionEntries. + """ + _tag = 'feed' + _namespace = atom.ATOM_NAMESPACE + _children = gdata.GDataFeed._children.copy() + _children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry', [PositionEntry]) + + +def PositionFeedFromString(xml_string): + return atom.CreateClassFromXMLString(PositionFeed, xml_string) + + +class PositionFeedLink(atom.AtomBase): + """Link to PositionFeed embedded in PortfolioEntry. + + If a PortfolioFeed is queried with positions='true', the PositionFeeds + are inlined in the returned PortfolioEntries. These PositionFeeds are + accessible via PositionFeedLink's feed attribute. + """ + _tag = 'feedLink' + _namespace = GD_NAMESPACE + _attributes = atom.AtomBase._attributes.copy() + _attributes['href'] = 'href' + _children = atom.AtomBase._children.copy() + _children['{%s}feed' % atom.ATOM_NAMESPACE] = ( + 'feed', PositionFeed) + + def __init__(self, href=None, feed=None, **kwargs): + self.href = href + self.feed = feed + atom.AtomBase.__init__(self, **kwargs) + + +class PortfolioData(atom.AtomBase): + """The element.""" + _tag = 'portfolioData' + _namespace = GF_NAMESPACE + _attributes = atom.AtomBase._attributes.copy() + _attributes['currencyCode'] = 'currency_code' + _attributes['gainPercentage'] = 'gain_percentage' + _attributes['return1w'] = 'return1w' + _attributes['return4w'] = 'return4w' + _attributes['return3m'] = 'return3m' + _attributes['returnYTD'] = 'returnYTD' + _attributes['return1y'] = 'return1y' + _attributes['return3y'] = 'return3y' + _attributes['return5y'] = 'return5y' + _attributes['returnOverall'] = 'return_overall' + _children = atom.AtomBase._children.copy() + _children['{%s}costBasis' % GF_NAMESPACE] = ('cost_basis', CostBasis) + _children['{%s}daysGain' % GF_NAMESPACE] = ('days_gain', DaysGain) + _children['{%s}gain' % GF_NAMESPACE] = ('gain', Gain) + _children['{%s}marketValue' % GF_NAMESPACE] = ('market_value', MarketValue) + + def __init__(self, currency_code=None, gain_percentage=None, + return1w=None, return4w=None, return3m=None, returnYTD=None, + return1y=None, return3y=None, return5y=None, return_overall=None, + cost_basis=None, days_gain=None, gain=None, market_value=None, **kwargs): + self.currency_code = currency_code + self.gain_percentage = gain_percentage + self.return1w = return1w + self.return4w = return4w + self.return3m = return3m + self.returnYTD = returnYTD + self.return1y = return1y + self.return3y = return3y + self.return5y = return5y + self.return_overall = return_overall + self.cost_basis = cost_basis + self.days_gain = days_gain + self.gain = gain + self.market_value = market_value + atom.AtomBase.__init__(self, **kwargs) + + +def PortfolioDataFromString(xml_string): + return atom.CreateClassFromXMLString(PortfolioData, 
xml_string) + + +class PortfolioEntry(gdata.GDataEntry): + """An entry of the PortfolioFeed. + + A PortfolioEntry contains the portfolio's title along with PortfolioData + such as currency, total market value, and overall performance statistics. + """ + _tag = 'entry' + _namespace = atom.ATOM_NAMESPACE + _children = gdata.GDataEntry._children.copy() + _children['{%s}portfolioData' % GF_NAMESPACE] = ( + 'portfolio_data', PortfolioData) + _children['{%s}feedLink' % GD_NAMESPACE] = ( + 'feed_link', PositionFeedLink) + + def __init__(self, portfolio_data=None, feed_link=None, **kwargs): + self.portfolio_data = portfolio_data + self.feed_link = feed_link + gdata.GDataEntry.__init__(self, **kwargs) + + def portfolio_title(self): + return self.title.text + + def set_portfolio_title(self, portfolio_title): + self.title = atom.Title(text=portfolio_title, title_type='text') + + portfolio_title = property(portfolio_title, set_portfolio_title, + doc='The portfolio title as a string (i.e. portfolio.title.text).') + + def portfolio_id(self): + return self.id.text.split("/")[-1] + + portfolio_id = property(portfolio_id, + doc='The portfolio ID. Do not confuse with portfolio.id.') + + def positions(self): + if self.feed_link.feed: + return self.feed_link.feed.entry + else: + return None + + positions = property(positions, doc=""" + Inlined PositionEntries are returned if PortfolioFeed was queried + with positions='true'.""") + + +def PortfolioEntryFromString(xml_string): + return atom.CreateClassFromXMLString(PortfolioEntry, xml_string) + + +class PortfolioFeed(gdata.GDataFeed): + """A feed that lists all of the user's portfolios. + + A portfolio is a collection of positions that the user holds in various + securities, plus metadata. The PortfolioFeed lists all of the user's + portfolios as a list of PortfolioEntries. + """ + _tag = 'feed' + _namespace = atom.ATOM_NAMESPACE + _children = gdata.GDataFeed._children.copy() + _children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry', [PortfolioEntry]) + + +def PortfolioFeedFromString(xml_string): + return atom.CreateClassFromXMLString(PortfolioFeed, xml_string) + + diff --git a/gam/gdata/finance/data.py b/gam/gdata/finance/data.py new file mode 100755 index 00000000000..5e0caa8920b --- /dev/null +++ b/gam/gdata/finance/data.py @@ -0,0 +1,156 @@ +#!/usr/bin/python +# +# Copyright (C) 2009 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
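The v1 finance classes above nest: a PortfolioEntry can inline a PositionFeed, and each PositionEntry can inline a TransactionFeed. A small sketch of walking that hierarchy follows; it is illustrative only, and assumes feed_xml (a placeholder variable) holds a portfolio feed fetched with returns=true and positions=true so that the inlined data is present.

  import gdata.finance

  feed = gdata.finance.PortfolioFeedFromString(feed_xml)
  for portfolio in feed.entry:
    print portfolio.portfolio_title, portfolio.portfolio_data.currency_code
    for position in (portfolio.positions or []):
      print ' ', position.symbol, position.position_data.shares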
+ + +"""Contains the data classes of the Google Finance Portfolio Data API""" + + +__author__ = 'j.s@google.com (Jeff Scudder)' + + +import atom.core +import atom.data +import gdata.data +import gdata.opensearch.data + + +GF_TEMPLATE = '{http://schemas.google.com/finance/2007/}%s' + + +class Commission(atom.core.XmlElement): + """Commission for the transaction""" + _qname = GF_TEMPLATE % 'commission' + money = [gdata.data.Money] + + +class CostBasis(atom.core.XmlElement): + """Cost basis for the portfolio or position""" + _qname = GF_TEMPLATE % 'costBasis' + money = [gdata.data.Money] + + +class DaysGain(atom.core.XmlElement): + """Today's gain for the portfolio or position""" + _qname = GF_TEMPLATE % 'daysGain' + money = [gdata.data.Money] + + +class Gain(atom.core.XmlElement): + """Total gain for the portfolio or position""" + _qname = GF_TEMPLATE % 'gain' + money = [gdata.data.Money] + + +class MarketValue(atom.core.XmlElement): + """Market value for the portfolio or position""" + _qname = GF_TEMPLATE % 'marketValue' + money = [gdata.data.Money] + + +class PortfolioData(atom.core.XmlElement): + """Data for the portfolio""" + _qname = GF_TEMPLATE % 'portfolioData' + return_overall = 'returnOverall' + currency_code = 'currencyCode' + return3y = 'return3y' + return4w = 'return4w' + market_value = MarketValue + return_y_t_d = 'returnYTD' + cost_basis = CostBasis + gain_percentage = 'gainPercentage' + days_gain = DaysGain + return3m = 'return3m' + return5y = 'return5y' + return1w = 'return1w' + gain = Gain + return1y = 'return1y' + + +class PortfolioEntry(gdata.data.GDEntry): + """Describes an entry in a feed of Finance portfolios""" + portfolio_data = PortfolioData + + +class PortfolioFeed(gdata.data.GDFeed): + """Describes a Finance portfolio feed""" + entry = [PortfolioEntry] + + +class PositionData(atom.core.XmlElement): + """Data for the position""" + _qname = GF_TEMPLATE % 'positionData' + return_y_t_d = 'returnYTD' + return5y = 'return5y' + return_overall = 'returnOverall' + cost_basis = CostBasis + return3y = 'return3y' + return1y = 'return1y' + return4w = 'return4w' + shares = 'shares' + days_gain = DaysGain + gain_percentage = 'gainPercentage' + market_value = MarketValue + gain = Gain + return3m = 'return3m' + return1w = 'return1w' + + +class Price(atom.core.XmlElement): + """Price of the transaction""" + _qname = GF_TEMPLATE % 'price' + money = [gdata.data.Money] + + +class Symbol(atom.core.XmlElement): + """Stock symbol for the company""" + _qname = GF_TEMPLATE % 'symbol' + symbol = 'symbol' + exchange = 'exchange' + full_name = 'fullName' + + +class PositionEntry(gdata.data.GDEntry): + """Describes an entry in a feed of Finance positions""" + symbol = Symbol + position_data = PositionData + + +class PositionFeed(gdata.data.GDFeed): + """Describes a Finance position feed""" + entry = [PositionEntry] + + +class TransactionData(atom.core.XmlElement): + """Data for the transction""" + _qname = GF_TEMPLATE % 'transactionData' + shares = 'shares' + notes = 'notes' + date = 'date' + type = 'type' + commission = Commission + price = Price + + +class TransactionEntry(gdata.data.GDEntry): + """Describes an entry in a feed of Finance transactions""" + transaction_data = TransactionData + + +class TransactionFeed(gdata.data.GDFeed): + """Describes a Finance transaction feed""" + entry = [TransactionEntry] + + diff --git a/gam/gdata/finance/service.py b/gam/gdata/finance/service.py new file mode 100755 index 00000000000..6e3eb86d476 --- /dev/null +++ b/gam/gdata/finance/service.py @@ -0,0 
+1,243 @@ +#!/usr/bin/python +# +# Copyright (C) 2009 Tan Swee Heng +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +"""Classes to interact with the Google Finance server.""" + + +__author__ = 'thesweeheng@gmail.com' + + +import gdata.service +import gdata.finance +import atom + + +class PortfolioQuery(gdata.service.Query): + """A query object for the list of a user's portfolios.""" + + def returns(self): + return self.get('returns', False) + + def set_returns(self, value): + if value is 'true' or value is True: + self['returns'] = 'true' + + returns = property(returns, set_returns, doc="The returns query parameter") + + def positions(self): + return self.get('positions', False) + + def set_positions(self, value): + if value is 'true' or value is True: + self['positions'] = 'true' + + positions = property(positions, set_positions, + doc="The positions query parameter") + + +class PositionQuery(gdata.service.Query): + """A query object for the list of a user's positions in a portfolio.""" + + def returns(self): + return self.get('returns', False) + + def set_returns(self, value): + if value is 'true' or value is True: + self['returns'] = 'true' + + returns = property(returns, set_returns, + doc="The returns query parameter") + + def transactions(self): + return self.get('transactions', False) + + def set_transactions(self, value): + if value is 'true' or value is True: + self['transactions'] = 'true' + + transactions = property(transactions, set_transactions, + doc="The transactions query parameter") + + +class FinanceService(gdata.service.GDataService): + + def __init__(self, email=None, password=None, source=None, + server='finance.google.com', **kwargs): + """Creates a client for the Finance service. + + Args: + email: string (optional) The user's email address, used for + authentication. + password: string (optional) The user's password. + source: string (optional) The name of the user's application. + server: string (optional) The name of the server to which a connection + will be opened. Default value: 'finance.google.com'. + **kwargs: The other parameters to pass to gdata.service.GDataService + constructor. + """ + gdata.service.GDataService.__init__(self, + email=email, password=password, service='finance', server=server, + **kwargs) + + def GetPortfolioFeed(self, query=None): + uri = '/finance/feeds/default/portfolios' + if query: + uri = PortfolioQuery(feed=uri, params=query).ToUri() + return self.Get(uri, converter=gdata.finance.PortfolioFeedFromString) + + def GetPositionFeed(self, portfolio_entry=None, portfolio_id=None, + query=None): + """ + Args: + portfolio_entry: PortfolioEntry (optional; see Notes) + portfolio_id: string (optional; see Notes) This may be obtained + from a PortfolioEntry's portfolio_id attribute. + query: PortfolioQuery (optional) + + Notes: + Either a PortfolioEntry OR a portfolio ID must be provided. 
+ """ + if portfolio_entry: + uri = portfolio_entry.GetSelfLink().href + '/positions' + elif portfolio_id: + uri = '/finance/feeds/default/portfolios/%s/positions' % portfolio_id + if query: + uri = PositionQuery(feed=uri, params=query).ToUri() + return self.Get(uri, converter=gdata.finance.PositionFeedFromString) + + def GetTransactionFeed(self, position_entry=None, + portfolio_id=None, ticker_id=None): + """ + Args: + position_entry: PositionEntry (optional; see Notes) + portfolio_id: string (optional; see Notes) This may be obtained + from a PortfolioEntry's portfolio_id attribute. + ticker_id: string (optional; see Notes) This may be obtained from + a PositionEntry's ticker_id attribute. Alternatively it can + be constructed using the security's exchange and symbol, + e.g. 'NASDAQ:GOOG' + + Notes: + Either a PositionEntry OR (a portfolio ID AND ticker ID) must + be provided. + """ + if position_entry: + uri = position_entry.GetSelfLink().href + '/transactions' + elif portfolio_id and ticker_id: + uri = '/finance/feeds/default/portfolios/%s/positions/%s/transactions' \ + % (portfolio_id, ticker_id) + return self.Get(uri, converter=gdata.finance.TransactionFeedFromString) + + def GetPortfolio(self, portfolio_id=None, query=None): + uri = '/finance/feeds/default/portfolios/%s' % portfolio_id + if query: + uri = PortfolioQuery(feed=uri, params=query).ToUri() + return self.Get(uri, converter=gdata.finance.PortfolioEntryFromString) + + def AddPortfolio(self, portfolio_entry=None): + uri = '/finance/feeds/default/portfolios' + return self.Post(portfolio_entry, uri, + converter=gdata.finance.PortfolioEntryFromString) + + def UpdatePortfolio(self, portfolio_entry=None): + uri = portfolio_entry.GetEditLink().href + return self.Put(portfolio_entry, uri, + converter=gdata.finance.PortfolioEntryFromString) + + def DeletePortfolio(self, portfolio_entry=None): + uri = portfolio_entry.GetEditLink().href + return self.Delete(uri) + + def GetPosition(self, portfolio_id=None, ticker_id=None, query=None): + uri = '/finance/feeds/default/portfolios/%s/positions/%s' \ + % (portfolio_id, ticker_id) + if query: + uri = PositionQuery(feed=uri, params=query).ToUri() + return self.Get(uri, converter=gdata.finance.PositionEntryFromString) + + def DeletePosition(self, position_entry=None, + portfolio_id=None, ticker_id=None, transaction_feed=None): + """A position is deleted by deleting all its transactions. + + Args: + position_entry: PositionEntry (optional; see Notes) + portfolio_id: string (optional; see Notes) This may be obtained + from a PortfolioEntry's portfolio_id attribute. + ticker_id: string (optional; see Notes) This may be obtained from + a PositionEntry's ticker_id attribute. Alternatively it can + be constructed using the security's exchange and symbol, + e.g. 'NASDAQ:GOOG' + transaction_feed: TransactionFeed (optional; see Notes) + + Notes: + Either a PositionEntry OR (a portfolio ID AND ticker ID) OR + a TransactionFeed must be provided. 
+ """ + if transaction_feed: + feed = transaction_feed + else: + if position_entry: + feed = self.GetTransactionFeed(position_entry=position_entry) + elif portfolio_id and ticker_id: + feed = self.GetTransactionFeed( + portfolio_id=portfolio_id, ticker_id=ticker_id) + for txn in feed.entry: + self.DeleteTransaction(txn) + return True + + def GetTransaction(self, portfolio_id=None, ticker_id=None, + transaction_id=None): + uri = '/finance/feeds/default/portfolios/%s/positions/%s/transactions/%s' \ + % (portfolio_id, ticker_id, transaction_id) + return self.Get(uri, converter=gdata.finance.TransactionEntryFromString) + + def AddTransaction(self, transaction_entry=None, transaction_feed = None, + position_entry=None, portfolio_id=None, ticker_id=None): + """ + Args: + transaction_entry: TransactionEntry (required) + transaction_feed: TransactionFeed (optional; see Notes) + position_entry: PositionEntry (optional; see Notes) + portfolio_id: string (optional; see Notes) This may be obtained + from a PortfolioEntry's portfolio_id attribute. + ticker_id: string (optional; see Notes) This may be obtained from + a PositionEntry's ticker_id attribute. Alternatively it can + be constructed using the security's exchange and symbol, + e.g. 'NASDAQ:GOOG' + + Notes: + Either a TransactionFeed OR a PositionEntry OR (a portfolio ID AND + ticker ID) must be provided. + """ + if transaction_feed: + uri = transaction_feed.GetPostLink().href + elif position_entry: + uri = position_entry.GetSelfLink().href + '/transactions' + elif portfolio_id and ticker_id: + uri = '/finance/feeds/default/portfolios/%s/positions/%s/transactions' \ + % (portfolio_id, ticker_id) + return self.Post(transaction_entry, uri, + converter=gdata.finance.TransactionEntryFromString) + + def UpdateTransaction(self, transaction_entry=None): + uri = transaction_entry.GetEditLink().href + return self.Put(transaction_entry, uri, + converter=gdata.finance.TransactionEntryFromString) + + def DeleteTransaction(self, transaction_entry=None): + uri = transaction_entry.GetEditLink().href + return self.Delete(uri) diff --git a/gam/gdata/gauth.py b/gam/gdata/gauth.py new file mode 100755 index 00000000000..563656c07e8 --- /dev/null +++ b/gam/gdata/gauth.py @@ -0,0 +1,1306 @@ +#!/usr/bin/env python +# +# Copyright (C) 2009 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +# This module is used for version 2 of the Google Data APIs. + + +"""Provides auth related token classes and functions for Google Data APIs. + +Token classes represent a user's authorization of this app to access their +data. Usually these are not created directly but by a GDClient object. 
+ +ClientLoginToken +AuthSubToken +SecureAuthSubToken +OAuthHmacToken +OAuthRsaToken +TwoLeggedOAuthHmacToken +TwoLeggedOAuthRsaToken + +Functions which are often used in application code (as opposed to just within +the gdata-python-client library) are the following: + +generate_auth_sub_url +authorize_request_token + +The following are helper functions which are used to save and load auth token +objects in the App Engine datastore. These should only be used if you are using +this library within App Engine: + +ae_load +ae_save +""" + + +import time +import random +import urllib +import atom.http_core + + +__author__ = 'j.s@google.com (Jeff Scudder)' + + +PROGRAMMATIC_AUTH_LABEL = 'GoogleLogin auth=' +AUTHSUB_AUTH_LABEL = 'AuthSub token=' + + +# This dict provides the AuthSub and OAuth scopes for all services by service +# name. The service name (key) is used in ClientLogin requests. +AUTH_SCOPES = { + 'cl': ( # Google Calendar API + 'https://www.google.com/calendar/feeds/', + 'http://www.google.com/calendar/feeds/'), + 'gbase': ( # Google Base API + 'http://base.google.com/base/feeds/', + 'http://www.google.com/base/feeds/'), + 'blogger': ( # Blogger API + 'http://www.blogger.com/feeds/',), + 'codesearch': ( # Google Code Search API + 'http://www.google.com/codesearch/feeds/',), + 'cp': ( # Contacts API + 'https://www.google.com/m8/feeds/', + 'http://www.google.com/m8/feeds/'), + 'finance': ( # Google Finance API + 'http://finance.google.com/finance/feeds/',), + 'health': ( # Google Health API + 'https://www.google.com/health/feeds/',), + 'writely': ( # Documents List API + 'https://docs.google.com/feeds/', + 'http://docs.google.com/feeds/'), + 'lh2': ( # Picasa Web Albums API + 'http://picasaweb.google.com/data/',), + 'apps': ( # Google Apps Provisioning API + 'http://www.google.com/a/feeds/', + 'https://www.google.com/a/feeds/', + 'http://apps-apis.google.com/a/feeds/', + 'https://apps-apis.google.com/a/feeds/'), + 'weaver': ( # Health H9 Sandbox + 'https://www.google.com/h9/feeds/',), + 'wise': ( # Spreadsheets Data API + 'https://spreadsheets.google.com/feeds/', + 'http://spreadsheets.google.com/feeds/'), + 'sitemaps': ( # Google Webmaster Tools API + 'https://www.google.com/webmasters/tools/feeds/',), + 'youtube': ( # YouTube API + 'http://gdata.youtube.com/feeds/api/', + 'http://uploads.gdata.youtube.com/feeds/api', + 'http://gdata.youtube.com/action/GetUploadToken'), + 'books': ( # Google Books API + 'http://www.google.com/books/feeds/',), + 'analytics': ( # Google Analytics API + 'https://www.google.com/analytics/feeds/',), + 'jotspot': ( # Google Sites API + 'http://sites.google.com/feeds/', + 'https://sites.google.com/feeds/'), + 'local': ( # Google Maps Data API + 'http://maps.google.com/maps/feeds/',), + 'code': ( # Project Hosting Data API + 'http://code.google.com/feeds/issues',)} + + + +class Error(Exception): + pass + + +class UnsupportedTokenType(Error): + """Raised when token to or from blob is unable to convert the token.""" + pass + + +# ClientLogin functions and classes. +def generate_client_login_request_body(email, password, service, source, + account_type='HOSTED_OR_GOOGLE', captcha_token=None, + captcha_response=None): + """Creates the body of the autentication request + + See http://code.google.com/apis/accounts/AuthForInstalledApps.html#Request + for more details. 
+ + Args: + email: str + password: str + service: str + source: str + account_type: str (optional) Defaul is 'HOSTED_OR_GOOGLE', other valid + values are 'GOOGLE' and 'HOSTED' + captcha_token: str (optional) + captcha_response: str (optional) + + Returns: + The HTTP body to send in a request for a client login token. + """ + # Create a POST body containing the user's credentials. + request_fields = {'Email': email, + 'Passwd': password, + 'accountType': account_type, + 'service': service, + 'source': source} + if captcha_token and captcha_response: + # Send the captcha token and response as part of the POST body if the + # user is responding to a captch challenge. + request_fields['logintoken'] = captcha_token + request_fields['logincaptcha'] = captcha_response + return urllib.urlencode(request_fields) + + +GenerateClientLoginRequestBody = generate_client_login_request_body + + +def get_client_login_token_string(http_body): + """Returns the token value for a ClientLoginToken. + + Reads the token from the server's response to a Client Login request and + creates the token value string to use in requests. + + Args: + http_body: str The body of the server's HTTP response to a Client Login + request + + Returns: + The token value string for a ClientLoginToken. + """ + for response_line in http_body.splitlines(): + if response_line.startswith('Auth='): + # Strip off the leading Auth= and return the Authorization value. + return response_line[5:] + return None + + +GetClientLoginTokenString = get_client_login_token_string + + +def get_captcha_challenge(http_body, + captcha_base_url='http://www.google.com/accounts/'): + """Returns the URL and token for a CAPTCHA challenge issued by the server. + + Args: + http_body: str The body of the HTTP response from the server which + contains the CAPTCHA challenge. + captcha_base_url: str This function returns a full URL for viewing the + challenge image which is built from the server's response. This + base_url is used as the beginning of the URL because the server + only provides the end of the URL. For example the server provides + 'Captcha?ctoken=Hi...N' and the URL for the image is + 'http://www.google.com/accounts/Captcha?ctoken=Hi...N' + + Returns: + A dictionary containing the information needed to repond to the CAPTCHA + challenge, the image URL and the ID token of the challenge. The + dictionary is in the form: + {'token': string identifying the CAPTCHA image, + 'url': string containing the URL of the image} + Returns None if there was no CAPTCHA challenge in the response. + """ + contains_captcha_challenge = False + captcha_parameters = {} + for response_line in http_body.splitlines(): + if response_line.startswith('Error=CaptchaRequired'): + contains_captcha_challenge = True + elif response_line.startswith('CaptchaToken='): + # Strip off the leading CaptchaToken= + captcha_parameters['token'] = response_line[13:] + elif response_line.startswith('CaptchaUrl='): + captcha_parameters['url'] = '%s%s' % (captcha_base_url, + response_line[11:]) + if contains_captcha_challenge: + return captcha_parameters + else: + return None + + +GetCaptchaChallenge = get_captcha_challenge + + +class ClientLoginToken(object): + + def __init__(self, token_string): + self.token_string = token_string + + def modify_request(self, http_request): + http_request.headers['Authorization'] = '%s%s' % (PROGRAMMATIC_AUTH_LABEL, + self.token_string) + + ModifyRequest = modify_request + + +# AuthSub functions and classes. 
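Before the AuthSub helpers that follow, a sketch of how the ClientLogin pieces above chain together: build the request body, POST it to the accounts endpoint, then turn the response into a ClientLoginToken or surface a CAPTCHA challenge. This is an illustration with placeholder credentials using plain urllib2; in normal use a GDClient object drives these calls.

  import urllib2
  import gdata.gauth

  body = gdata.gauth.generate_client_login_request_body(
      email='user@example.com', password='secret',
      service='writely', source='example-agent')
  try:
    response = urllib2.urlopen(
        'https://www.google.com/accounts/ClientLogin', body).read()
  except urllib2.HTTPError, e:
    response = e.read()  # failure bodies carry Error=/CaptchaToken= details

  token_string = gdata.gauth.get_client_login_token_string(response)
  if token_string:
    token = gdata.gauth.ClientLoginToken(token_string)  # adds Authorization
  else:
    challenge = gdata.gauth.get_captcha_challenge(response)
    # challenge['url'] is the CAPTCHA image; its token is echoed back via the
    # captcha_token/captcha_response arguments on the next attempt.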
+def _to_uri(str_or_uri): + if isinstance(str_or_uri, (str, unicode)): + return atom.http_core.Uri.parse_uri(str_or_uri) + return str_or_uri + + +def generate_auth_sub_url(next, scopes, secure=False, session=True, + request_url=atom.http_core.parse_uri( + 'https://www.google.com/accounts/AuthSubRequest'), + domain='default', scopes_param_prefix='auth_sub_scopes'): + """Constructs a URI for requesting a multiscope AuthSub token. + + The generated token will contain a URL parameter to pass along the + requested scopes to the next URL. When the Google Accounts page + redirects the broswser to the 'next' URL, it appends the single use + AuthSub token value to the URL as a URL parameter with the key 'token'. + However, the information about which scopes were requested is not + included by Google Accounts. This method adds the scopes to the next + URL before making the request so that the redirect will be sent to + a page, and both the token value and the list of scopes for which the token + was requested. + + Args: + next: atom.http_core.Uri or string The URL user will be sent to after + authorizing this web application to access their data. + scopes: list containint strings or atom.http_core.Uri objects. The URLs + of the services to be accessed. Could also be a single string + or single atom.http_core.Uri for requesting just one scope. + secure: boolean (optional) Determines whether or not the issued token + is a secure token. + session: boolean (optional) Determines whether or not the issued token + can be upgraded to a session token. + request_url: atom.http_core.Uri or str The beginning of the request URL. + This is normally + 'http://www.google.com/accounts/AuthSubRequest' or + '/accounts/AuthSubRequest' + domain: The domain which the account is part of. This is used for Google + Apps accounts, the default value is 'default' which means that + the requested account is a Google Account (@gmail.com for + example) + scopes_param_prefix: str (optional) The requested scopes are added as a + URL parameter to the next URL so that the page at + the 'next' URL can extract the token value and the + valid scopes from the URL. The key for the URL + parameter defaults to 'auth_sub_scopes' + + Returns: + An atom.http_core.Uri which the user's browser should be directed to in + order to authorize this application to access their information. + """ + if isinstance(next, (str, unicode)): + next = atom.http_core.Uri.parse_uri(next) + # If the user passed in a string instead of a list for scopes, convert to + # a single item tuple. + if isinstance(scopes, (str, unicode, atom.http_core.Uri)): + scopes = (scopes,) + scopes_string = ' '.join([str(scope) for scope in scopes]) + next.query[scopes_param_prefix] = scopes_string + + if isinstance(request_url, (str, unicode)): + request_url = atom.http_core.Uri.parse_uri(request_url) + request_url.query['next'] = str(next) + request_url.query['scope'] = scopes_string + if session: + request_url.query['session'] = '1' + else: + request_url.query['session'] = '0' + if secure: + request_url.query['secure'] = '1' + else: + request_url.query['secure'] = '0' + request_url.query['hd'] = domain + return request_url + + +def auth_sub_string_from_url(url, scopes_param_prefix='auth_sub_scopes'): + """Finds the token string (and scopes) after the browser is redirected. + + After the Google Accounts AuthSub pages redirect the user's broswer back to + the web application (using the 'next' URL from the request) the web app must + extract the token from the current page's URL. 
The token is provided as a + URL parameter named 'token' and if generate_auth_sub_url was used to create + the request, the token's valid scopes are included in a URL parameter whose + name is specified in scopes_param_prefix. + + Args: + url: atom.url.Url or str representing the current URL. The token value + and valid scopes should be included as URL parameters. + scopes_param_prefix: str (optional) The URL parameter key which maps to + the list of valid scopes for the token. + + Returns: + A tuple containing the token value as a string, and a tuple of scopes + (as atom.http_core.Uri objects) which are URL prefixes under which this + token grants permission to read and write user data. + (token_string, (scope_uri, scope_uri, scope_uri, ...)) + If no scopes were included in the URL, the second value in the tuple is + None. If there was no token param in the url, the tuple returned is + (None, None) + """ + if isinstance(url, (str, unicode)): + url = atom.http_core.Uri.parse_uri(url) + if 'token' not in url.query: + return (None, None) + token = url.query['token'] + # TODO: decide whether no scopes should be None or (). + scopes = None # Default to None for no scopes. + if scopes_param_prefix in url.query: + scopes = tuple(url.query[scopes_param_prefix].split(' ')) + return (token, scopes) + + +AuthSubStringFromUrl = auth_sub_string_from_url + + +def auth_sub_string_from_body(http_body): + """Extracts the AuthSub token from an HTTP body string. + + Used to find the new session token after making a request to upgrade a + single use AuthSub token. + + Args: + http_body: str The repsonse from the server which contains the AuthSub + key. For example, this function would find the new session token + from the server's response to an upgrade token request. + + Returns: + The raw token value string to use in an AuthSubToken object. + """ + for response_line in http_body.splitlines(): + if response_line.startswith('Token='): + # Strip off Token= and return the token value string. + return response_line[6:] + return None + + +class AuthSubToken(object): + + def __init__(self, token_string, scopes=None): + self.token_string = token_string + self.scopes = scopes or [] + + def modify_request(self, http_request): + """Sets Authorization header, allows app to act on the user's behalf.""" + http_request.headers['Authorization'] = '%s%s' % (AUTHSUB_AUTH_LABEL, + self.token_string) + + ModifyRequest = modify_request + + def from_url(str_or_uri): + """Creates a new AuthSubToken using information in the URL. + + Uses auth_sub_string_from_url. + + Args: + str_or_uri: The current page's URL (as a str or atom.http_core.Uri) + which should contain a token query parameter since the + Google auth server redirected the user's browser to this + URL. + """ + token_and_scopes = auth_sub_string_from_url(str_or_uri) + return AuthSubToken(token_and_scopes[0], token_and_scopes[1]) + + from_url = staticmethod(from_url) + FromUrl = from_url + + def _upgrade_token(self, http_body): + """Replaces the token value with a session token from the auth server. + + Uses the response of a token upgrade request to modify this token. Uses + auth_sub_string_from_body. + """ + self.token_string = auth_sub_string_from_body(http_body) + + +# Functions and classes for Secure-mode AuthSub +def build_auth_sub_data(http_request, timestamp, nonce): + """Creates the data string which must be RSA-signed in secure requests. 
+ + For more details see the documenation on secure AuthSub requests: + http://code.google.com/apis/accounts/docs/AuthSub.html#signingrequests + + Args: + http_request: The request being made to the server. The Request's URL + must be complete before this signature is calculated as any changes + to the URL will invalidate the signature. + nonce: str Random 64-bit, unsigned number encoded as an ASCII string in + decimal format. The nonce/timestamp pair should always be unique to + prevent replay attacks. + timestamp: Integer representing the time the request is sent. The + timestamp should be expressed in number of seconds after January 1, + 1970 00:00:00 GMT. + """ + return '%s %s %s %s' % (http_request.method, str(http_request.uri), + str(timestamp), nonce) + + +def generate_signature(data, rsa_key): + """Signs the data string for a secure AuthSub request.""" + import base64 + try: + from tlslite.utils import keyfactory + except ImportError: + from gdata.tlslite.utils import keyfactory + private_key = keyfactory.parsePrivateKey(rsa_key) + signed = private_key.hashAndSign(data) + # Python2.3 and lower does not have the base64.b64encode function. + if hasattr(base64, 'b64encode'): + return base64.b64encode(signed) + else: + return base64.encodestring(signed).replace('\n', '') + + +class SecureAuthSubToken(AuthSubToken): + + def __init__(self, token_string, rsa_private_key, scopes=None): + self.token_string = token_string + self.scopes = scopes or [] + self.rsa_private_key = rsa_private_key + + def from_url(str_or_uri, rsa_private_key): + """Creates a new SecureAuthSubToken using information in the URL. + + Uses auth_sub_string_from_url. + + Args: + str_or_uri: The current page's URL (as a str or atom.http_core.Uri) + which should contain a token query parameter since the Google auth + server redirected the user's browser to this URL. + rsa_private_key: str the private RSA key cert used to sign all requests + made with this token. + """ + token_and_scopes = auth_sub_string_from_url(str_or_uri) + return SecureAuthSubToken(token_and_scopes[0], rsa_private_key, + token_and_scopes[1]) + + from_url = staticmethod(from_url) + FromUrl = from_url + + def modify_request(self, http_request): + """Sets the Authorization header and includes a digital signature. + + Calculates a digital signature using the private RSA key, a timestamp + (uses now at the time this method is called) and a random nonce. + + Args: + http_request: The atom.http_core.HttpRequest which contains all of the + information needed to send a request to the remote server. The + URL and the method of the request must be already set and cannot be + changed after this token signs the request, or the signature will + not be valid. + """ + timestamp = str(int(time.time())) + nonce = ''.join([str(random.randint(0, 9)) for i in xrange(15)]) + data = build_auth_sub_data(http_request, timestamp, nonce) + signature = generate_signature(data, self.rsa_private_key) + http_request.headers['Authorization'] = ( + '%s%s sigalg="rsa-sha1" data="%s" sig="%s"' % (AUTHSUB_AUTH_LABEL, + self.token_string, data, signature)) + + ModifyRequest = modify_request + + +# OAuth functions and classes. +RSA_SHA1 = 'RSA-SHA1' +HMAC_SHA1 = 'HMAC-SHA1' + + +def build_oauth_base_string(http_request, consumer_key, nonce, signaure_type, + timestamp, version, next='oob', token=None, + verifier=None): + """Generates the base string to be signed in the OAuth request. + + Args: + http_request: The request being made to the server. 
The Request's URL + must be complete before this signature is calculated as any changes + to the URL will invalidate the signature. + consumer_key: Domain identifying the third-party web application. This is + the domain used when registering the application with Google. It + identifies who is making the request on behalf of the user. + nonce: Random 64-bit, unsigned number encoded as an ASCII string in decimal + format. The nonce/timestamp pair should always be unique to prevent + replay attacks. + signaure_type: either RSA_SHA1 or HMAC_SHA1 + timestamp: Integer representing the time the request is sent. The + timestamp should be expressed in number of seconds after January 1, + 1970 00:00:00 GMT. + version: The OAuth version used by the requesting web application. This + value must be '1.0' or '1.0a'. If not provided, Google assumes version + 1.0 is in use. + next: The URL the user should be redirected to after granting access + to a Google service(s). It can include url-encoded query parameters. + The default value is 'oob'. (This is the oauth_callback.) + token: The string for the OAuth request token or OAuth access token. + verifier: str Sent as the oauth_verifier and required when upgrading a + request token to an access token. + """ + # First we must build the canonical base string for the request. + params = http_request.uri.query.copy() + params['oauth_consumer_key'] = consumer_key + params['oauth_nonce'] = nonce + params['oauth_signature_method'] = signaure_type + params['oauth_timestamp'] = str(timestamp) + if next is not None: + params['oauth_callback'] = str(next) + if token is not None: + params['oauth_token'] = token + if version is not None: + params['oauth_version'] = version + if verifier is not None: + params['oauth_verifier'] = verifier + # We need to get the key value pairs in lexigraphically sorted order. + sorted_keys = None + try: + sorted_keys = sorted(params.keys()) + # The sorted function is not available in Python2.3 and lower + except NameError: + sorted_keys = params.keys() + sorted_keys.sort() + pairs = [] + for key in sorted_keys: + pairs.append('%s=%s' % (urllib.quote(key, safe='~'), + urllib.quote(params[key], safe='~'))) + # We want to escape /'s too, so use safe='~' + all_parameters = urllib.quote('&'.join(pairs), safe='~') + normailzed_host = http_request.uri.host.lower() + normalized_scheme = (http_request.uri.scheme or 'http').lower() + non_default_port = None + if (http_request.uri.port is not None + and ((normalized_scheme == 'https' and http_request.uri.port != 443) + or (normalized_scheme == 'http' and http_request.uri.port != 80))): + non_default_port = http_request.uri.port + path = http_request.uri.path or '/' + request_path = None + if not path.startswith('/'): + path = '/%s' % path + if non_default_port is not None: + # Set the only safe char in url encoding to ~ since we want to escape / + # as well. + request_path = urllib.quote('%s://%s:%s%s' % ( + normalized_scheme, normailzed_host, non_default_port, path), safe='~') + else: + # Set the only safe char in url encoding to ~ since we want to escape / + # as well. + request_path = urllib.quote('%s://%s%s' % ( + normalized_scheme, normailzed_host, path), safe='~') + # TODO: ensure that token escaping logic is correct, not sure if the token + # value should be double escaped instead of single. + base_string = '&'.join((http_request.method.upper(), request_path, + all_parameters)) + # Now we have the base string, we can calculate the oauth_signature. 
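+  # As a rough illustration (placeholder values, not real output), the result
+  # has the form
+  #   GET&https%3A%2F%2Fwww.google.com%2Fm8%2Ffeeds&oauth_consumer_key%3D...
+  # i.e. the upper-cased method, the percent-encoded request URL and the
+  # percent-encoded parameter string, joined by literal '&' characters.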
+ return base_string + + +def generate_hmac_signature(http_request, consumer_key, consumer_secret, + timestamp, nonce, version, next='oob', + token=None, token_secret=None, verifier=None): + import hmac + import base64 + base_string = build_oauth_base_string( + http_request, consumer_key, nonce, HMAC_SHA1, timestamp, version, + next, token, verifier=verifier) + hash_key = None + hashed = None + if token_secret is not None: + hash_key = '%s&%s' % (urllib.quote(consumer_secret, safe='~'), + urllib.quote(token_secret, safe='~')) + else: + hash_key = '%s&' % urllib.quote(consumer_secret, safe='~') + try: + import hashlib + hashed = hmac.new(hash_key, base_string, hashlib.sha1) + except ImportError: + import sha + hashed = hmac.new(hash_key, base_string, sha) + # Python2.3 does not have base64.b64encode. + if hasattr(base64, 'b64encode'): + return base64.b64encode(hashed.digest()) + else: + return base64.encodestring(hashed.digest()).replace('\n', '') + + +def generate_rsa_signature(http_request, consumer_key, rsa_key, + timestamp, nonce, version, next='oob', + token=None, token_secret=None, verifier=None): + import base64 + try: + from tlslite.utils import keyfactory + except ImportError: + from gdata.tlslite.utils import keyfactory + base_string = build_oauth_base_string( + http_request, consumer_key, nonce, RSA_SHA1, timestamp, version, + next, token, verifier=verifier) + private_key = keyfactory.parsePrivateKey(rsa_key) + # Sign using the key + signed = private_key.hashAndSign(base_string) + # Python2.3 does not have base64.b64encode. + if hasattr(base64, 'b64encode'): + return base64.b64encode(signed) + else: + return base64.encodestring(signed).replace('\n', '') + + +def generate_auth_header(consumer_key, timestamp, nonce, signature_type, + signature, version='1.0', next=None, token=None, + verifier=None): + """Builds the Authorization header to be sent in the request. + + Args: + consumer_key: Identifies the application making the request (str). + timestamp: + nonce: + signature_type: One of either HMAC_SHA1 or RSA_SHA1 + signature: The HMAC or RSA signature for the request as a base64 + encoded string. + version: The version of the OAuth protocol that this request is using. + Default is '1.0' + next: The URL of the page that the user's browser should be sent to + after they authorize the token. (Optional) + token: str The OAuth token value to be used in the oauth_token parameter + of the header. + verifier: str The OAuth verifier which must be included when you are + upgrading a request token to an access token. + """ + params = { + 'oauth_consumer_key': consumer_key, + 'oauth_version': version, + 'oauth_nonce': nonce, + 'oauth_timestamp': str(timestamp), + 'oauth_signature_method': signature_type, + 'oauth_signature': signature} + if next is not None: + params['oauth_callback'] = str(next) + if token is not None: + params['oauth_token'] = token + if verifier is not None: + params['oauth_verifier'] = verifier + pairs = [ + '%s="%s"' % ( + k, urllib.quote(v, safe='~')) for k, v in params.iteritems()] + return 'OAuth %s' % (', '.join(pairs)) + + +REQUEST_TOKEN_URL = 'https://www.google.com/accounts/OAuthGetRequestToken' +ACCESS_TOKEN_URL = 'https://www.google.com/accounts/OAuthGetAccessToken' + + +def generate_request_for_request_token( + consumer_key, signature_type, scopes, rsa_key=None, consumer_secret=None, + auth_server_url=REQUEST_TOKEN_URL, next='oob', version='1.0'): + """Creates request to be sent to auth server to get an OAuth request token. 
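+
+  A minimal sketch of a call using HMAC signing (the key, secret and scope
+  below are placeholders, not values this library requires):
+
+    request = generate_request_for_request_token(
+        'example.com', HMAC_SHA1, ['https://www.google.com/calendar/feeds/'],
+        consumer_secret='example-consumer-secret')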
+ + Args: + consumer_key: + signature_type: either RSA_SHA1 or HMAC_SHA1. The rsa_key must be + provided if the signature type is RSA but if the signature method + is HMAC, the consumer_secret must be used. + scopes: List of URL prefixes for the data which we want to access. For + example, to request access to the user's Blogger and Google Calendar + data, we would request + ['http://www.blogger.com/feeds/', + 'https://www.google.com/calendar/feeds/', + 'http://www.google.com/calendar/feeds/'] + rsa_key: Only used if the signature method is RSA_SHA1. + consumer_secret: Only used if the signature method is HMAC_SHA1. + auth_server_url: The URL to which the token request should be directed. + Defaults to 'https://www.google.com/accounts/OAuthGetRequestToken'. + next: The URL of the page that the user's browser should be sent to + after they authorize the token. (Optional) + version: The OAuth version used by the requesting web application. + Defaults to '1.0a' + + Returns: + An atom.http_core.HttpRequest object with the URL, Authorization header + and body filled in. + """ + request = atom.http_core.HttpRequest(auth_server_url, 'POST') + # Add the requested auth scopes to the Auth request URL. + if scopes: + request.uri.query['scope'] = ' '.join(scopes) + + timestamp = str(int(time.time())) + nonce = ''.join([str(random.randint(0, 9)) for i in xrange(15)]) + signature = None + if signature_type == HMAC_SHA1: + signature = generate_hmac_signature( + request, consumer_key, consumer_secret, timestamp, nonce, version, + next=next) + elif signature_type == RSA_SHA1: + signature = generate_rsa_signature( + request, consumer_key, rsa_key, timestamp, nonce, version, next=next) + else: + return None + + request.headers['Authorization'] = generate_auth_header( + consumer_key, timestamp, nonce, signature_type, signature, version, + next) + request.headers['Content-Length'] = '0' + return request + + +def generate_request_for_access_token( + request_token, auth_server_url=ACCESS_TOKEN_URL): + """Creates a request to ask the OAuth server for an access token. + + Requires a request token which the user has authorized. See the + documentation on OAuth with Google Data for more details: + http://code.google.com/apis/accounts/docs/OAuth.html#AccessToken + + Args: + request_token: An OAuthHmacToken or OAuthRsaToken which the user has + approved using their browser. + auth_server_url: (optional) The URL at which the OAuth access token is + requested. Defaults to + https://www.google.com/accounts/OAuthGetAccessToken + + Returns: + A new HttpRequest object which can be sent to the OAuth server to + request an OAuth Access Token. + """ + http_request = atom.http_core.HttpRequest(auth_server_url, 'POST') + http_request.headers['Content-Length'] = '0' + return request_token.modify_request(http_request) + + +def oauth_token_info_from_body(http_body): + """Exracts an OAuth request token from the server's response. + + Returns: + A tuple of strings containing the OAuth token and token secret. 
If + neither of these are present in the body, returns (None, None) + """ + token = None + token_secret = None + for pair in http_body.split('&'): + if pair.startswith('oauth_token='): + token = urllib.unquote(pair[len('oauth_token='):]) + if pair.startswith('oauth_token_secret='): + token_secret = urllib.unquote(pair[len('oauth_token_secret='):]) + return (token, token_secret) + + +def hmac_token_from_body(http_body, consumer_key, consumer_secret, + auth_state): + token_value, token_secret = oauth_token_info_from_body(http_body) + token = OAuthHmacToken(consumer_key, consumer_secret, token_value, + token_secret, auth_state) + return token + + +def rsa_token_from_body(http_body, consumer_key, rsa_private_key, + auth_state): + token_value, token_secret = oauth_token_info_from_body(http_body) + token = OAuthRsaToken(consumer_key, rsa_private_key, token_value, + token_secret, auth_state) + return token + + +DEFAULT_DOMAIN = 'default' +OAUTH_AUTHORIZE_URL = 'https://www.google.com/accounts/OAuthAuthorizeToken' + + +def generate_oauth_authorization_url( + token, next=None, hd=DEFAULT_DOMAIN, hl=None, btmpl=None, + auth_server=OAUTH_AUTHORIZE_URL): + """Creates a URL for the page where the request token can be authorized. + + Args: + token: str The request token from the OAuth server. + next: str (optional) URL the user should be redirected to after granting + access to a Google service(s). It can include url-encoded query + parameters. + hd: str (optional) Identifies a particular hosted domain account to be + accessed (for example, 'mycollege.edu'). Uses 'default' to specify a + regular Google account ('username@gmail.com'). + hl: str (optional) An ISO 639 country code identifying what language the + approval page should be translated in (for example, 'hl=en' for + English). The default is the user's selected language. + btmpl: str (optional) Forces a mobile version of the approval page. The + only accepted value is 'mobile'. + auth_server: str (optional) The start of the token authorization web + page. Defaults to + 'https://www.google.com/accounts/OAuthAuthorizeToken' + + Returns: + An atom.http_core.Uri pointing to the token authorization page where the + user may allow or deny this app to access their Google data. + """ + uri = atom.http_core.Uri.parse_uri(auth_server) + uri.query['oauth_token'] = token + uri.query['hd'] = hd + if next is not None: + uri.query['oauth_callback'] = str(next) + if hl is not None: + uri.query['hl'] = hl + if btmpl is not None: + uri.query['btmpl'] = btmpl + return uri + + +def oauth_token_info_from_url(url): + """Exracts an OAuth access token from the redirected page's URL. + + Returns: + A tuple of strings containing the OAuth token and the OAuth verifier which + need to sent when upgrading a request token to an access token. + """ + if isinstance(url, (str, unicode)): + url = atom.http_core.Uri.parse_uri(url) + token = None + verifier = None + if 'oauth_token' in url.query: + token = urllib.unquote(url.query['oauth_token']) + if 'oauth_verifier' in url.query: + verifier = urllib.unquote(url.query['oauth_verifier']) + return (token, verifier) + + +def authorize_request_token(request_token, url): + """Adds information to request token to allow it to become an access token. + + Modifies the request_token object passed in by setting and unsetting the + necessary fields to allow this token to form a valid upgrade request. + + Args: + request_token: The OAuth request token which has been authorized by the + user. 
In order for this token to be upgraded to an access token, + certain fields must be extracted from the URL and added to the token + so that they can be passed in an upgrade-token request. + url: The URL of the current page which the user's browser was redirected + to after they authorized access for the app. This function extracts + information from the URL which is needed to upgraded the token from + a request token to an access token. + + Returns: + The same token object which was passed in. + """ + token, verifier = oauth_token_info_from_url(url) + request_token.token = token + request_token.verifier = verifier + request_token.auth_state = AUTHORIZED_REQUEST_TOKEN + return request_token + + +AuthorizeRequestToken = authorize_request_token + + +def upgrade_to_access_token(request_token, server_response_body): + """Extracts access token information from response to an upgrade request. + + Once the server has responded with the new token info for the OAuth + access token, this method modifies the request_token to set and unset + necessary fields to create valid OAuth authorization headers for requests. + + Args: + request_token: An OAuth token which this function modifies to allow it + to be used as an access token. + server_response_body: str The server's response to an OAuthAuthorizeToken + request. This should contain the new token and token_secret which + are used to generate the signature and parameters of the Authorization + header in subsequent requests to Google Data APIs. + + Returns: + The same token object which was passed in. + """ + token, token_secret = oauth_token_info_from_body(server_response_body) + request_token.token = token + request_token.token_secret = token_secret + request_token.auth_state = ACCESS_TOKEN + request_token.next = None + request_token.verifier = None + return request_token + + +UpgradeToAccessToken = upgrade_to_access_token + + +REQUEST_TOKEN = 1 +AUTHORIZED_REQUEST_TOKEN = 2 +ACCESS_TOKEN = 3 + + +class OAuthHmacToken(object): + SIGNATURE_METHOD = HMAC_SHA1 + + def __init__(self, consumer_key, consumer_secret, token, token_secret, + auth_state, next=None, verifier=None): + self.consumer_key = consumer_key + self.consumer_secret = consumer_secret + self.token = token + self.token_secret = token_secret + self.auth_state = auth_state + self.next = next + self.verifier = verifier # Used to convert request token to access token. + + def generate_authorization_url( + self, google_apps_domain=DEFAULT_DOMAIN, language=None, btmpl=None, + auth_server=OAUTH_AUTHORIZE_URL): + """Creates the URL at which the user can authorize this app to access. + + Args: + google_apps_domain: str (optional) If the user should be signing in + using an account under a known Google Apps domain, provide the + domain name ('example.com') here. If not provided, 'default' + will be used, and the user will be prompted to select an account + if they are signed in with a Google Account and Google Apps + accounts. + language: str (optional) An ISO 639 country code identifying what + language the approval page should be translated in (for example, + 'en' for English). The default is the user's selected language. + btmpl: str (optional) Forces a mobile version of the approval page. The + only accepted value is 'mobile'. + auth_server: str (optional) The start of the token authorization web + page. 
Defaults to + 'https://www.google.com/accounts/OAuthAuthorizeToken' + """ + return generate_oauth_authorization_url( + self.token, hd=google_apps_domain, hl=language, btmpl=btmpl, + auth_server=auth_server) + + GenerateAuthorizationUrl = generate_authorization_url + + def modify_request(self, http_request): + """Sets the Authorization header in the HTTP request using the token. + + Calculates an HMAC signature using the information in the token to + indicate that the request came from this application and that this + application has permission to access a particular user's data. + + Returns: + The same HTTP request object which was passed in. + """ + timestamp = str(int(time.time())) + nonce = ''.join([str(random.randint(0, 9)) for i in xrange(15)]) + signature = generate_hmac_signature( + http_request, self.consumer_key, self.consumer_secret, timestamp, + nonce, version='1.0', next=self.next, token=self.token, + token_secret=self.token_secret, verifier=self.verifier) + http_request.headers['Authorization'] = generate_auth_header( + self.consumer_key, timestamp, nonce, HMAC_SHA1, signature, + version='1.0', next=self.next, token=self.token, + verifier=self.verifier) + return http_request + + ModifyRequest = modify_request + + +class OAuthRsaToken(OAuthHmacToken): + SIGNATURE_METHOD = RSA_SHA1 + + def __init__(self, consumer_key, rsa_private_key, token, token_secret, + auth_state, next=None, verifier=None): + self.consumer_key = consumer_key + self.rsa_private_key = rsa_private_key + self.token = token + self.token_secret = token_secret + self.auth_state = auth_state + self.next = next + self.verifier = verifier # Used to convert request token to access token. + + def modify_request(self, http_request): + """Sets the Authorization header in the HTTP request using the token. + + Calculates an RSA signature using the information in the token to + indicate that the request came from this application and that this + application has permission to access a particular user's data. + + Returns: + The same HTTP request object which was passed in. + """ + timestamp = str(int(time.time())) + nonce = ''.join([str(random.randint(0, 9)) for i in xrange(15)]) + signature = generate_rsa_signature( + http_request, self.consumer_key, self.rsa_private_key, timestamp, + nonce, version='1.0', next=self.next, token=self.token, + token_secret=self.token_secret, verifier=self.verifier) + http_request.headers['Authorization'] = generate_auth_header( + self.consumer_key, timestamp, nonce, RSA_SHA1, signature, + version='1.0', next=self.next, token=self.token, + verifier=self.verifier) + return http_request + + ModifyRequest = modify_request + + +class TwoLeggedOAuthHmacToken(OAuthHmacToken): + + def __init__(self, consumer_key, consumer_secret, requestor_id): + self.requestor_id = requestor_id + OAuthHmacToken.__init__( + self, consumer_key, consumer_secret, None, None, ACCESS_TOKEN, + next=None, verifier=None) + + def modify_request(self, http_request): + """Sets the Authorization header in the HTTP request using the token. + + Calculates an HMAC signature using the information in the token to + indicate that the request came from this application and that this + application has permission to access a particular user's data using 2LO. + + Returns: + The same HTTP request object which was passed in. 
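+
+    A rough sketch of 2-legged use (the consumer key, secret, requestor id
+    and feed URL below are placeholders):
+
+      token = TwoLeggedOAuthHmacToken(
+          'example.com', 'example-consumer-secret', 'user@example.com')
+      token.modify_request(atom.http_core.HttpRequest(
+          'https://www.google.com/m8/feeds/contacts/default/full', 'GET'))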
+ """ + http_request.uri.query['xoauth_requestor_id'] = self.requestor_id + return OAuthHmacToken.modify_request(self, http_request) + + ModifyRequest = modify_request + + +class TwoLeggedOAuthRsaToken(OAuthRsaToken): + + def __init__(self, consumer_key, rsa_private_key, requestor_id): + self.requestor_id = requestor_id + OAuthRsaToken.__init__( + self, consumer_key, rsa_private_key, None, None, ACCESS_TOKEN, + next=None, verifier=None) + + def modify_request(self, http_request): + """Sets the Authorization header in the HTTP request using the token. + + Calculates an RSA signature using the information in the token to + indicate that the request came from this application and that this + application has permission to access a particular user's data using 2LO. + + Returns: + The same HTTP request object which was passed in. + """ + http_request.uri.query['xoauth_requestor_id'] = self.requestor_id + return OAuthRsaToken.modify_request(self, http_request) + + ModifyRequest = modify_request + + +def _join_token_parts(*args): + """"Escapes and combines all strings passed in. + + Used to convert a token object's members into a string instead of + using pickle. + + Note: A None value will be converted to an empty string. + + Returns: + A string in the form 1x|member1|member2|member3... + """ + return '|'.join([urllib.quote_plus(a or '') for a in args]) + + +def _split_token_parts(blob): + """Extracts and unescapes fields from the provided binary string. + + Reverses the packing performed by _join_token_parts. Used to extract + the members of a token object. + + Note: An empty string from the blob will be interpreted as None. + + Args: + blob: str A string of the form 1x|member1|member2|member3 as created + by _join_token_parts + + Returns: + A list of unescaped strings. + """ + return [urllib.unquote_plus(part) or None for part in blob.split('|')] + + +def token_to_blob(token): + """Serializes the token data as a string for storage in a datastore. + + Supported token classes: ClientLoginToken, AuthSubToken, SecureAuthSubToken, + OAuthRsaToken, and OAuthHmacToken, TwoLeggedOAuthRsaToken, + TwoLeggedOAuthHmacToken. + + Args: + token: A token object which must be of one of the supported token classes. + + Raises: + UnsupportedTokenType if the token is not one of the supported token + classes listed above. + + Returns: + A string represenging this token. The string can be converted back into + an equivalent token object using token_from_blob. Note that any members + which are set to '' will be set to None when the token is deserialized + by token_from_blob. + """ + if isinstance(token, ClientLoginToken): + return _join_token_parts('1c', token.token_string) + # Check for secure auth sub type first since it is a subclass of + # AuthSubToken. + elif isinstance(token, SecureAuthSubToken): + return _join_token_parts('1s', token.token_string, token.rsa_private_key, + *token.scopes) + elif isinstance(token, AuthSubToken): + return _join_token_parts('1a', token.token_string, *token.scopes) + elif isinstance(token, TwoLeggedOAuthRsaToken): + return _join_token_parts( + '1rtl', token.consumer_key, token.rsa_private_key, token.requestor_id) + elif isinstance(token, TwoLeggedOAuthHmacToken): + return _join_token_parts( + '1htl', token.consumer_key, token.consumer_secret, token.requestor_id) + # Check RSA OAuth token first since the OAuthRsaToken is a subclass of + # OAuthHmacToken. 
+ elif isinstance(token, OAuthRsaToken): + return _join_token_parts( + '1r', token.consumer_key, token.rsa_private_key, token.token, + token.token_secret, str(token.auth_state), token.next, + token.verifier) + elif isinstance(token, OAuthHmacToken): + return _join_token_parts( + '1h', token.consumer_key, token.consumer_secret, token.token, + token.token_secret, str(token.auth_state), token.next, + token.verifier) + else: + raise UnsupportedTokenType( + 'Unable to serialize token of type %s' % type(token)) + + +TokenToBlob = token_to_blob + + +def token_from_blob(blob): + """Deserializes a token string from the datastore back into a token object. + + Supported token classes: ClientLoginToken, AuthSubToken, SecureAuthSubToken, + OAuthRsaToken, and OAuthHmacToken, TwoLeggedOAuthRsaToken, + TwoLeggedOAuthHmacToken. + + Args: + blob: string created by token_to_blob. + + Raises: + UnsupportedTokenType if the token is not one of the supported token + classes listed above. + + Returns: + A new token object with members set to the values serialized in the + blob string. Note that any members which were set to '' in the original + token will now be None. + """ + parts = _split_token_parts(blob) + if parts[0] == '1c': + return ClientLoginToken(parts[1]) + elif parts[0] == '1a': + return AuthSubToken(parts[1], parts[2:]) + elif parts[0] == '1s': + return SecureAuthSubToken(parts[1], parts[2], parts[3:]) + elif parts[0] == '1rtl': + return TwoLeggedOAuthRsaToken(parts[1], parts[2], parts[3]) + elif parts[0] == '1htl': + return TwoLeggedOAuthHmacToken(parts[1], parts[2], parts[3]) + elif parts[0] == '1r': + auth_state = int(parts[5]) + return OAuthRsaToken(parts[1], parts[2], parts[3], parts[4], auth_state, + parts[6], parts[7]) + elif parts[0] == '1h': + auth_state = int(parts[5]) + return OAuthHmacToken(parts[1], parts[2], parts[3], parts[4], auth_state, + parts[6], parts[7]) + else: + raise UnsupportedTokenType( + 'Unable to deserialize token with type marker of %s' % parts[0]) + + +TokenFromBlob = token_from_blob + + +def dump_tokens(tokens): + return ','.join([token_to_blob(t) for t in tokens]) + + +def load_tokens(blob): + return [token_from_blob(s) for s in blob.split(',')] + + +def find_scopes_for_services(service_names=None): + """Creates a combined list of scope URLs for the desired services. + + This method searches the AUTH_SCOPES dictionary. + + Args: + service_names: list of strings (optional) Each name must be a key in the + AUTH_SCOPES dictionary. If no list is provided (None) then + the resulting list will contain all scope URLs in the + AUTH_SCOPES dict. + + Returns: + A list of URL strings which are the scopes needed to access these services + when requesting a token using AuthSub or OAuth. + """ + result_scopes = [] + if service_names is None: + for service_name, scopes in AUTH_SCOPES.iteritems(): + result_scopes.extend(scopes) + else: + for service_name in service_names: + result_scopes.extend(AUTH_SCOPES[service_name]) + return result_scopes + + +FindScopesForServices = find_scopes_for_services + + +def ae_save(token, token_key): + """Stores an auth token in the App Engine datastore. + + This is a convenience method for using the library with App Engine. + Recommended usage is to associate the auth token with the current_user. 
+ If a user is signed in to the app using the App Engine users API, you + can use + gdata.gauth.ae_save(some_token, users.get_current_user().user_id()) + If you are not using the Users API you are free to choose whatever + string you would like for a token_string. + + Args: + token: an auth token object. Must be one of ClientLoginToken, + AuthSubToken, SecureAuthSubToken, OAuthRsaToken, or OAuthHmacToken + (see token_to_blob). + token_key: str A unique identified to be used when you want to retrieve + the token. If the user is signed in to App Engine using the + users API, I recommend using the user ID for the token_key: + users.get_current_user().user_id() + """ + import gdata.alt.app_engine + key_name = ''.join(('gd_auth_token', token_key)) + return gdata.alt.app_engine.set_token(key_name, token_to_blob(token)) + + +AeSave = ae_save + + +def ae_load(token_key): + """Retrieves a token object from the App Engine datastore. + + This is a convenience method for using the library with App Engine. + See also ae_save. + + Args: + token_key: str The unique key associated with the desired token when it + was saved using ae_save. + + Returns: + A token object if there was a token associated with the token_key or None + if the key could not be found. + """ + import gdata.alt.app_engine + key_name = ''.join(('gd_auth_token', token_key)) + token_string = gdata.alt.app_engine.get_token(key_name) + if token_string is not None: + return token_from_blob(token_string) + else: + return None + + +AeLoad = ae_load + + +def ae_delete(token_key): + """Removes the token object from the App Engine datastore.""" + import gdata.alt.app_engine + key_name = ''.join(('gd_auth_token', token_key)) + gdata.alt.app_engine.delete_token(key_name) + + +AeDelete = ae_delete diff --git a/gam/gdata/gauth.pyc b/gam/gdata/gauth.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6508acccbf46fe89c3489b9c991a7ce56435e1e8 GIT binary patch literal 49693 zcmeHwdvILWdEZ$81PSmdz9ibRbVZv2Xpx{K%d|y{wm|S{QzU%>S_xu$z1Vv}EV;)@>*MBuzSbOl`O6I8BpwCT)|n&8wNDO_RodcslMY znI`G)_xsMd_wHgT%N7(T(@@%r`#ASJzw>>+?{Ur@{_6JegR4JvrSAN{QT%-tzw9T6 zolBgX$Fbs)A=j+9d7Rk!keeS0=fiG(IGm5T`Asgtol%$Ie6ve7x%n-wvBxF1T(ZR- zPj0(pEB<`MCEM}mqb}KrKX+WR8-MP)WH0`F%q9Er=i@FpfIr{vk~iYdCtMQY&nI1S z5Px2B$szoC*(DSB^BpdE41d1UB}eh+yIgV%e?H}sC-LXgE_n)nzS||o-SPP`m%PKJ z<8Benc&B^WxtE;#*7>b2dD^90b@xPmcbiMz#fJ0$XWYwZ)DD;KaPvD|a*~PWce!+z zo4?N`?{>+1+{@^n-7ekj=F!+^UAkAaw%?`qyYvB4PqP(9OTeCFk|}LoT@xa({20`@=4IpB_gpxu}yzT=JYt zA66|c<>?N(_{h@)e8hEI52d$oUFJllN?Y%jtn-ea%8s0_959sEk zOP+W0hh6e5ZvL(QX^Kmfg2dmdmObWDJepU_j=1F8RM(>}`JhgwTyjk($Jih9kGteU zZvF{omEs%);RKgeW%Z;>KCHxVb4g7nPr39dH~)5*Ea>vMOOLzxceteP=HKa(#LYkL zlGM$gaLHom!s|>aD!^bbx%6G?utt9O8JFA$cW>r*Pr9V((r1)*Ill`!Xz^nHJuYeM z3sYf6*mty2MaF=jJc*_1VeKaQ^pX+(0ldg2qENyk8 zTC|#_oe3+T-D!NJ#%!YYV$@w~;O@!_N{edsdYWapl{Px#39_Avu4b#XX7e`hWocCF zq*1Hgjq06LHB1_vwBE(zh1G7fa2v&(J3Z5Eu*CMl4Lm}X?D}H+dZVSuIAR-7t`z4)1)s|@BKXa_}RS2)|MLeCAFZv*hRmh zLz1Y`V%wXIy7foBoutuZEsNSKD{VeUr`%Z0y3tw#{nQ#qFGSbbF-KQ!cbD3&qjhU+ zv$4>rb#5P4_DrXuW1bQnHGj3jo6H= z+wP=O5sJj%W=rkWW)ihq9M9DE&ti1Dy{cNQW@rw20OaDE>wBe?DOy`gtE>b6vQp!X zKf}LE*_%M>H>R>@uZw1<>h0xd^1bQeVl-P{O_H>8IJ*xijG{#=YQ)KG2kv_6u=#K@ zdla{py4{uR#Ia**Yimonwn>nq;<1=cQexZ+u6iDb0>XfX{}RV3h#|0 zZE-h?{Hp`{X8E0=_7C4P|2&QL$4WL?BwcK@YOQ)tm-t(uE^LFpmA&QO6h;S#2#{C}+wkaI8CwJex~T z=EbikmRf($4d~uHO=vl?0c**Q)>axlEy)uG;$&v8n6f*98ZyNz4e!#^VHnt>YboYl zr)R2UkOj3QTT0VzPkq);l~+!w+N?D|)l+^j2J{9xdKugWRXEmdx0_k0+ZMc%VQ`jf zXw94b`)I$I>J(T_cXc5x%?otivFxoyy44jj7t~+UX<+)iS_8AKNq!zXmv*lzD@hd* 
zjnAyi0{Z86yL~fn2`BdgZ{*W;KsVG{wdU<^qn^JthPU#YWr|~`3>ua&SDE=e(x~<~ z+TCmgzY+suAD5|}*I^u3`(_*LvOEanBjp#2G8$P%@kAPA0&%r7old(`#~uBv;CKqZ zjH8x17$hlQ=te6;LHRcC*o0 zxP9z&d#$A*KX$#geDukwx4%7z-mFuPCZ9NLq=ToBr&BAp<2}e>6~Bx{k5;;S@#ktQ zTZKUH090wb=Weg0>uXO@W3^Avh9S0rEz72Hh-)DDBwAYnaFAdnG5`cy>_Cu#Z!bg; zi>s~L0`M4~)Y~mgzpg~Cl21*N%fzfqyblMBS+xot_ImWeew1(O|=UuQ>tjPy+XKMYaWhHr;D}KCaGnje>^6RL~UTSPV{1}*+^76 zD}*b|KS8hw8@LG}J^%iM64q-g-TG3^Fl2wBJt?3NtU!TSS2OlPoOV|`E#DEuq37r3 zuGkO}V**+UFhfBRVpUhxpr8ow1Je*>62wj+FXI#J%q|7e74~D&-HKD+Kb(Y`2EId; zEK^^KNhm*M*WHG3OD7_#)Nd<(1Ek!i+C)s5!vP`0jqM<-u z&}TqMrfZfn(~RH@g+jvDp#+gG{cHUbnvA2$KC@{-vMw{7yh5V&d)G>ni1X1GGjP6` zXy~aQ*-dDlnl~3_NJb${j&%||*SSj)5hDEz8fmu)B`vedZmrYJsDergUuw5fn*k*I zym_MEQ2#35E`;lAm#baQnd@n{+C!3RZ`DTPb4VZr;I{9n>>S!w$pUr(bm>DwKTXz1d{Uev`y5*)I>>2aQiwr8Lv`B0WM zrY_geV6#ymvxQZN00T-z(_x7Uq)PKcBg^v%)e)c~=7Keb?=7U@fi0FQqgATPETr`s zXMd4#o{!8JE5WYD1QcJzyHn9wv?RS%TV5e%!*k;VQFtfMX=28D>OFN0@yYXzsi~<; zCWQH3=oOwf)b>(yCOmGr*8~`x_d4b>9VHEEF0e8YU2=5&2!QorOqLBawP5!!vlVQA zZ8R9{k@fO4pf=@1c|?z_$~hGy@rBbS#zSxAFUV4)JW92}5PRbit6?DOqZ8_?3DFem z3F9V^-Qw*~=RlJx{D^h+1c+8K2;4r+HFn`%rc$Yw&^0 z3TV-#NwPM*br$MY8Xv}G{8rwhfSv2CrtxDqQL^|5?-Q)o`(|hN4ZP{6bu~QAN2W_G z5bmzjI?%PKe8)#|dqc$lyGvrg*DtTZ=kXw8CEb2%91sDOF^GZt@OQLw0KbtTHLM>l z)cFek5w@JdPezVZT4dtf%TmR_>v=NlQh0YpsF_i2NOI>c#0(XD+IF zg4-~*!QPsw_3_hqLmeaGB^3-GSI{6ut7=JH)IK-8S?6DhgJ=xXM1v=G>|_$jVh#zv0>O zAT(TPBs%1EB{AUZ?B}8>FT2cy85njW?;Z{x5W8yEDmdc z001$SD%5n467%cEW_Mle;q?C_I`>8FUR;cye&T732V*xx~r z6cW<0E-JT%>Xq=&&GdHU;R}KC`Sw~0jn!kN64a_Kz*_BLu5aub(MQn*Kt^zuB9BI^ z-drUv1N`r6wxQn$;(DnKteb?!XoiGVv(dDA2U7&;Li zIt(D3d2#}G+sLG*jy@ulWdZgkwOL}(cN4vcgpADw{GNbZzHKPhH*RhaMPL*j_L|(?ggGT9Gwd%Lyz|s z0}RD7_h8GzX}XTJ;e@p*N0wb*4Y+%uqYtR3T-cdVFRNK7DXfl1qtqH;4g?Lh+0j;k zG?|KK!Pir~4uK36PFyb9h3HH11VK!XeXrf&;$+u2E^b;^^w3}(I)oX4< zA1>39fWZTep_`E(UHBYDuat(tENX<#t$}9fnzx}21!WoRm>XFV)X*0DZFS{(2e?EE z3yW4k<+6Prn5H_QpV*1r4qe&Eu1{4AQv_pZ4JlfRGpe7|He1x95bMx0HcI)mlsM0A zV#6sD=;8r+^jfJ4F=+B%O!gSXCq*kbCHx`j*T>B4YVFSAr$HPf$ltRjsh_!g()m0!a(sf@`+FJ zFvG)X9?tM^mWOjVsE`}&MoWc2sp#CcSD)iUDgj1eam+Wa@FrDxPk>}SN@Vl1OHbnf z8jdgVJ!&uhp>g~T#p$Mn4+tyzBNqD>9Ndw8l?R3&s%)!FR<;k}92kB(^sQY(;RsIw z6;kRS{ISon&*1QN0dT--!sX5G=4RJ!jARAHYs#*e5E&3&v{0+xgk&P=Vz%^ykV(Z!=~0Pc~1Ds2Idq8Ceimsk_ zBXV|Qvkb*B5(<^+`%B6S0KBJ;K{99n)Ir_`YKGV8sEZJpH0W%hLySz+e#uM^d`%0e z`FC=xuO>1Ia)6ld@q~wcLpbDWq{KHS&T5MSAqW$12)E(^@Zh755X!g=;K}e)kQif< zM39QvGDSeKJyr?5>Fsq#qDl2TtrJIV2-cm#GJn|6#f3%MfNh{KN&86C3s)m%09~5O zt$VyJ+q%)$pXTMM2sz?4WaRD3)+$NiU;zhV`uU+zF!u@R+f;{1W^^Rtk(G>(f!)Rz zakwOF+@(vC6gRIzki}P-?lBz99L>^9aME)q&u#oTlQ5#giunM}HdF>ZfD8+)A!lrr z{EA}o-H_?#o<2a)v$HY+AvB7=4{xx0`sY#58DpCStq5=}+cnJV1O=mrV=%#?iF4Hl z_#$M{G~QOa)~k7Vk>Zu@8i>{69zeHVUlIH)EDycK_wg?&!M%=XaITEO82#=sKY_fa z+S4-43vvr^lO7)y2@gy)_uZ{Y|GanO@{DpJLsfRYZyu4-rkj~I7CK=)@0mmQcWP@s znaw6Ir`ciVq{m?RA)2F4B@Yt^PbL1+&65qDnm@d7mzbA?kVRgSjdyJvxEDcl%wS0L z-t8Uc-Imq7+>|U8$`iPl&6hFaQmon7MDX)6TpOb<-R#m) zq9C4+ay1UX6CaJy-$5H`eHe%En_&qL;$Ywk{tn=#mEAIN!ta1!j=~nLPNRP#78hU^ zvi!>_VE1rqqe56@!-v&182QJ(ewPQF?iPN+ew;R97rmhABcdd7d86YibT|rtA z9uX8nQ-*>i`TGgW+FD7S2h*)n*(@Ruii|wAr*r~CCsPC0Dzw)UeRv%g#6#wpv&14R^${-{7!TLr zFd1Qd2xsw!^UL!%lYdjStpZzC4SQ@P-6)hT$kNJhke3n6Wltia_ z15H|~T&^-ot@D}tIvOPZHQa4DDF(B1=C2iJqZ3myVMF_R2Xd|Y>RVjp-)4`qL^pa# zu|P#UuY^%X#Ng2-#-i%Kb}yc&@}sOPn=m@G zZDec{oSQqgRYr$$5t94d9z_}u1{HGD)=s348^Zzffm~S}ar-^#Y#lC`4F{cI-K!Hp zx#5B-n2X}s>7!ZWddur~W=-~VU_p9k!5Ye;xS%;rX;?t(tIKK2Vp_n`y(!v@+WyfKE`wU+!dNR%L|wYbwTqWKr(}@#AV(OS`ROUf6kbof>x9MXTQ??kf2%~@wwFuPM?%dXZEF6EzCPIcT za^n!6V9y+2nSB|Vc>F0@cDv0CdUiUs+QkBb82x01;tR`MWTI#?S%+ggm$PnKc5L9z!^wT7`y?Eph6+B$o3q$l?*aCJA?Sxf8 
zk!!OWEr54&0Uo12y4QK})Ug};mzzCK|>-req*k}Wc z(8e#aYe@i((RMbQS-*TXzXcbBfr?LpL7q|_t%2d4O~x5D394IsEBgs#*Q-ga9~tOY zhNJI5OxNg^?aaif(R*R0_Qx=V$t9N@8P#$ifkQu{ZdYY+qQB231=*sG=DSr zWj}}mXb_|ae;uM4UlK7=traES7^cwz91OIGo2RcmL?Oc$6j}$tVqyZ%7^(Fb6G5O* zf{26>1qBMGOpyd3k{zSCxC<75o8!W^TPaTv(?ukU*KT0b5CnK3^53sZn?S8)Xy6c$ z5%n7z%xhwSJOBxBhF))Aky!!Yyf!owFxsX2Dgz9!oTna;ztn*r6HX>cqlu_XFBm_DEf#Cd0205#9Nca+u)jj zN6V2uzR4Uw@P2yNcqWM>0~RNs+4(3tW7ZaP5Y>*M>bdw9uT^<49qAw&${J|n`dzXH zf)}^_fy#->?#gc1x-OZ|eJ}26Eo@15uJay*yo(okU=~uPi;C%N19W6^$A1}W2OW9S z*e>eFWBas(iK^@D`ROMzcF+0ere|~|7~gI$)OnZHXUk^r%YG2tgM0`7D{lKGAZH+0 zuqSL}R8e3j&lnO4Z#d2n4~pE#xhxG9D1kX8``pbj*Qqkb6qo)P14WTNeCqc27SEXp z7#JS_9@~TO7mvdOTYZb@Q~%&c6xNCdh#AEjxO;cp-QG_Ii+m8ijQWzPj=Seu6F~NG z)C1Xng~&dCj)!-~3DS|aLVORNeRr$7b&j`BUweJPTA%T1F8Ffg4j%1rH=%BPvEp;X)1B_-u*-f^Pq*tSQd(Jfx{FUcr8~%( zU2X~Pb1Q#bN$zuZ?^B_>-JSc~jol7+Q2FfPp+tA~<1ydKt1>&}o4UuLPIu4}RQEnf zrlQ5WUF)FlM(nla(;?BY+h0!Py9@Q%<8JI_&)h+w+nJ67iW2rf!f`!#_9bA#bABrt zYU)rcIuV^#6yCbk zO0d*)lB3Ytx^Uq42A$H}0P#|?9kb_r1fNiy_6+DCB$3-;1nWgPK1z_zOxAER7eQ!# z%~S&*0Fe_6m>3ZC3**>=oE#w@6=imMtUs^C@)ImRxjcLgmLC9LS$y)m23dY2hG7l! zi|11WWyAObc3dSprm!YA)`jxPO(=iyEk#(#k7YLspPkDmm%nBB-!BgJ6N#iUkl6AL zG;@70FHSr$^*BRMBc9YI6t6(=5+Z|+H`$(B}jtH1##yrnn~)*ex!u(Rd)gT>^*qC|m+nx4!KljI5FP_BaHBZLne4d%!` zr;j%e(iF)qTp!B{;f#h)`YEXjx2exz!CbS+xUDJkJ4tu4hItP)%|>ir56|;H3_P;i z)}XTHh4EtqQ^(Z1SsFB9xn)LHZg?)J%7Nv|LQH3PzRk6ue#*Nj@KDtc<={2ni$$R_ znD|zen3s%5c3bejOQvtvnpCtgSG@~@*wr58^GDkn_*{Kom2y;lWcjiro@`4mg@$TT z`=RXQA=@z~t1YI&oFCKiQ6XH)4w*c8k~RJbU|8b{s-z^xvSFMaJCq4G9^!H%90;ui z#{7*#*`-MaLB>lwFd9ZVW7WiM3jtMZ3{i8YZC~^hEIt$9y@UWXv(nKJ#1sP9wwff4 zm>Uf!FRVggF(}S-h+ebkRQQ~svn%uZQi?w1Q(FG)q}E;1`py^#$uh#`Gihg1fR{*I z=Df1t5gxaYQ9|*+{|vDc#ryt_8K2C+} zzzAF1wN{DBgr(^BM)MX~+$gg{mkS-uvTgKOD(BA#>S=3p5ACju>%U!yg587P_V9xeGF;h@NZ4(a z9f*nDUU_5X;Lt-*PaeS4?#e^>ZzujTUG8j;5Crph1T)2!3UD*t1;phMU=XO?Ne%cE z)dUP6mlwiN0{aLp;uU`f$-*6?XuRLjmImuz;IrG4pwU;QzSb|DOcIJ{q=Fkh=WRF8>BK0gOEWJG2hk@}mm10OY*GLvrBIS>6)VHI$B9 zHvosx1Q}{((coD*R4>UuAAgSh${Bbld-PBie;>}`zs`GP4Y((xxY_8Xj8AevH2IdW zZKRpKe~h<+`X<0&dS-b@wH+eQ-u_Ft@@N~d;E`hkoEpY`Fslw ztZ8n#=S7;tKfz=_i9?|uzjoDFlPfpkvqK9@{#EF?w%yI}HcXyjYV?>Uqd4DJd2r|e z?rf_(sEKv}*8M#;T_|qy>GIEUQJyZ|y`+7M>0a7;5BCxhn0pBcM0A))`P9M;DNik& zhp9!%ldFm95U3K0BCLelq#f-bfnX|lFVyEWEuiK?!z9g`4yy~A3kxq4sg+#Ws8!x1 zd09la>u6I}9*yM+-m!$FuDHX4$duz7Fm9ajqRT^0b|qS4Ulz0$-{xh52bpwwi7WmY zJ`h2veDEh1|0oY1;o+k=Y$)LnapZ(U`*#jJULf6Qa{$my@BkV0zzZZlkmfcZsEjM@ zGj@s??7zneA_6hmpqt|~cW_rSa764%hJFHf#c*K^!`@`zwJ?nwrvU~g6)5;Hsa%y4 zBsZye0|~gWJ#Gb)iWx}AgYCh01FKAefrvC?t>uMandYwa?lGH23CZF zUwx&S^JBi4;yla64Y(i z+fDR?jOoyGLLoSnN6WIJZgJYytWz11Ql+wOH7nH)szTm4t#xDhs$1$t0~by)E&%{O zZ$q$4V?6-16Kw;m5t1Z;0&_X+7pGTr*E9^4Z!OYIAG2=Mf#Ma?P-q{PM#zvrFE8KM z-l8b;E-WcBMKJ+mdUrrjHuD7%Qf2%W!Fm2A)h6#|tIJ@OCjKtWgZO)S_!JM{&BLd0 zkSsNbmlII@89x0U9{w5*896Z&4jR}?NP*nLItq#dCYVFmy*%Y5^?>LAUVLsAyPOI3 z3m6FTPl|1S7AXq$D(IZoLlYqW4yNd1yW+Ti9}lF^+|=>K_#fiNBz_wPf~imB>{eQ? 
z4d{20*+wjB->}~sZae;Bf4Bk#4-Hq|Rmq-08tGuKutS$R5!@r)*(+|(}v8z9Y~Tf$`rfU3h%%cfDU$@mcs=fec)jw@xU2Ivn?$wFxB7^?jza}Y&H~cU>sjb zcxW>B68nk3<^yN%vBz0B7{xwhPjJ`eEFoB-{4Px;rq69M$!d|QwvormG;*-KXzvfq zb3#%|4_jxG%R58Qp;+0|jImi1?uqE4-_C&C8@HoJ z+{34*c{qcEOi`uUO28MELqh}~XXIyc4 zEQfTa2!~XqvLxB=SNr@;WEDhPPy9$N!;0%1`g+ku=Dp57F0z%Gow`Hd1jZM_s#NdA zfDxsD-`KE)yG)}H>u<$?1zc@mM>99sJ34#e-AF(w-pD4 zC-kSHBdud{0mv17C0D4gJ$J(?ahSY5-vEMK&z7{_sWiF$ib7vUI+p0yHU(@w4SA(3 z&`A!HY(OHRR(4JNbD-&a@f+L2E@wCKzhGK21_iKKL^xSQ&tS$s&jfS_Z0Hn!1Q`yT z;xi~0y zCN*w#o(t9w*{lKFar0sPZ2L8s=a_VucRLUECe=+P1r~ph|36QQxpvy~TR{9W>T?F6 zP}adf?Vsv2Ee&XiVmW+0QERnc(ETRlvjMl1za z!~10l!AwIjcLaRM_R;NM0RH`Gm;79&}5|-Ye4JA3+^~%*{P@t^@XX7avGTOP^W*Un3!>95feo>`9 z!h86U5Jh!UIDy;%rGZL52sIG2hX78{g2;h77;I-CmracAgt`>=TBHPue~E7s3~20f z4k=5mR)@(tA4??DiDM!eJU$gZP~TVOQa8IPdu>DeX)iLAIo*oVIzIN$ee9tvc<6?9 zOTNA?Jwpc6BPsej=3__@|63mZ9S^b{Z+IV&w5dz%UnJYPo1Mt+!kML`#E75cL0Qsw7ykzwBx6g1B=vhk#cPC8a^!1xB(O=1FwI7X zC}${#-5Use833{$KL&{GG8jy`L0~e2U-kny7$5-%LC?UO#}J339>12ie!i>yhyiek znQ;Wzh0FkE-l9#LxHt2$Ks!NPSYi`a|CORC5yP`cSvR%k%1Y zMiM+0Wk%?r_+R5-tDqj|rO5MhyoaA1HXvf+0P-4)fI)NbBhBq*ahtgp5<&1zY>3aF zOedr^0$5Sp6F4Eo333nu<9|6oKqA<|fmlP!5rOh~0z@3vC1xK$In&%fxV$I_{o0EG z*wNw>*gYBN_<%+m>!{cDJqA#FuLsw0kLns!Z0`PS@&u+vtj7`aqPB$9<^BpR2U)dJ zx`4Qx+CG>ho}^lcOBtg>Z6JV_qnfR?$#)&{MfdqCdMaerU_&*%%{5B}Ej-ld;6i_d{)Ys zg5I*NOwoLbR`o?*tJ0p%*b4wd7qY&6^D&4Ij(I} z=U_9>D?`!=+b5PB*Jyw|d^v!H(4hc__{sO8@~tmSQDqwK=@1(*yjv_ME4n@OkNPmL zSHGE$_3Y;*NZexq5G3F>gw}Dve63Fev-x=^Q$(75=hrbc3;xl40GHf8=yH5v9$(dq z&%FQYnc2DO+~xP3xg<5p6!2f3Ci`W-!4FuAT$!B3_8PbT z&0P)L@5n}xh2&HyZh*lnR|8%Rs@1RM7sR=Hyi5%QSEI&jTj1?gXva zhXDJ&jb=~54Z?FVYhSDBpm)v@VC01czGJiAD;?gVCazYDQEp80R;?0VVBKgzQ{~Z) z0OpiK;SOdFg;p3MaYD`1?|H?n|7nZMRu?jQS)c)ifndWt7(NX!96q7`S~$feeS1)* zsFKxf8|KFJ%*>hD*%Es&;4GvpJVF={lUwMErGv(($ZNivsN<)9R2)v>SK9q99n!vca{DCMxB-{gtN|QEcd$DH zSjo%SvSEu$w{W{VAmW#?@7=gd$KCu^XcD$@15IT7v0$>Tj0c(q0fcZO7+uClv=`zL zkdi`036^B_sp3*xi%cxQC$Hvw7Fo(fSai18#EOAp6>L@Qt4NHr8-+5p-@^~1!)-){ zjH7$L&R*w_>|T9y2q&hxnAM&(E%cCV@U&i`Gr0J4#2eUu0r>`V6k&+4N!iguh#&+K z7#z(G;7UCjrr37*W2#YfU7N#Kku1a4Zf%eIJ~NZ;tJ-g6;)JI%T-tt!C{62S#MNZY zvF&DS(M>FqZ-v=B&28_kup?pl6PZZP7NTGj(oV#a6W;E_E$b%^V{<`#r-Hk^_O71F zjX$Mq!Y3=pvao=kjskR^`YJqv%WYYP3&|83OgvX+{YzO{*b8tKI3U{&1`f6*DVJN& zroBap1$oZJww7|i>}9_BLaAVxS!WZ0oOxANrc&M^FSQW^3SZ*aWoIg7HQ~)o(Q7a> zMbS6K&;%I;AqeS3OtP+9g)6_ArX~u}KGRG5AHfT3)52fFnK+w4+Na4RsG6>yrCM_k zMqCyg%La)zilOO~c0tH%i>iFj4ccmcgshI{e1ABVI=o2*Vri zm5Et~`0fZeH5bP)<`1h7CJ7!gh&4CMZBWTxdZ3Qq3YENq&-_P1EiNs0^BX?)+!;}5 zw#l5}p#rfBa8Gts40Xdh$Hyw$?#H$u`{Q51tvY`ChcpIqkKiN)R;@S> zasjaLe*n1zwzj!5Hgw)hB+EAy2L0!PWaZN>NKD?{dm`CaPRw?1R6mm_ekb&^>qW9d zc=rD*oBz=3#^xDZ{a0N6&xOl>5d>^pUK8q`T>cA8_s!z+U*_z4B`#0)RO0fg?0@Dx z`BylBUr$C%X8WI5MOH(Mcx-IH7N*XvwJ)X!tWRWa%cFeiNT+|8LB^RQoET>&HwF`y zM~)}`UlATbO8&OuH-lgHb{xE?Meq{X5>Sqo2C?=8Hx)I46Jz!-jSf|CVch+a0R_SX zq3rU?c!b(E5{VhxD_WiTTTICc^jj7FfDiE7P`*2gk}TQ?mYh*Yb=ic;bC)+fD; zB7{f(1?Cg6hL4=@Yw!k%6(-)$fh0?RXoGpf((A?>{xaj4*M~L4|BV!}iR2?fTH=}F zAVgA}r)C49(PzWs&c$cm^3CJ}e~4y1*vkhr*@Hg8h^36G5JVXy+jWV!)7ReUzpX+U zUvVo3DbW#f!)2kc#?Symv6u~mk`JEA>NR|~Ty|cViWl@*T|`s(J@k6K#xP7=r4tTT zUpT^GDumhB%rHKB0w#>*lzZ@h2i`pqV`O&AuEf6rR5nc4x$8D8OCs_K2YAqC8oS6npW+tvgn!C;ZhDp_x=4vKVQ@D8zv8j5kwvp@#3uxim;io)(C^6$cv!^)z z9B{X_P9Vrr+%iD`0JnmD795n9cCccBft4`6UE+7{dKVxuxmme$g7L8=UxQ$npZUiV zRNMXyYUF?wJ>kQLp`Y=IPg|v-c*QSc94{GOzJ}8{!a*3@FMtsJNlht;kgf2GA3EI) zr9)cJs~Nw#S0=<$IU(*8A(9yLHSaIrsw?(`c>y5Zd6=<CY*t6&LK>e4!^P_#~35*Pzbqr-ZcrnYvW1MSS3Rf`wB)8 zip+kZZhtcSQxj8rqSI~K$r4T|^l(Rimh>&?tF77sBJNRxydYmXSvg0G+bJ>*#6CUx z4xW95hj;RDmj@{wTG;$yUj88upXEVdlS-I{O$clj7UxrZ&}()!^9OBDsx!;CEwuR7 
zhVtg^EJGcMxE6|lIkM#^ao6qLS=lwrJ^scKh|4u;!wSj85qHhk!6*IZ{aEbY{F7Wu<4Kpi;s}S zM;T_`lSpYdBQ5xTNQ?M#r2Pl%%t2}OagUI8Olg0an+kr-wAcg?`A$@90TkSkJ`x>k z4F|r+PYq%}rwkVnKiT_G1QK_U2msgfO>#>u`{9zKz$v}QTfCQT4On|Hi}u0J`VO?u75v%zwGnmz zAyR89FB^JsZhm?L!IXxI{TCqP{{iw+09^dPct$oG|4*KgImG{)hyTOFAMqa5@dh;z zebKhDnGgOL2Qi_7Ete#KCsT`VC2L-;b#5Z`3Y?lunkXjzDi31PALg0(vlz12uzb*B z!!iJp(b@ut|Bf@Q>yfTxG0M2LQPyQdI0Saf*(HYi88Y0rvXDJoVY36jefT|C`t7P5 z)J78e8yGEnm3=pZpSGd^`?wL*x6{{ld!-$-AH21stMk1}(l(Env11N5;{S_-=>oJ7 zYJg28-B?wC{)1$7dBqfg>}PTvz=Q|uMpDSzv`!ga-z@5&YE%4Yc+;9BrRPucON}b^ z3)`1d0`#@J!nIth*{&tNF<%U+CdCCBLm4Q|`sNnbd5^nwks{FIa!y}6patEw zngw?u*r4g~%MTA@)7gXW*28@F^tJoB;swKs1b7+3gi@#S!dYzz;5`lp&?6yMvBs|| z(h!41h<<}7ZTQ3h#?tmqX{LR*!9BR^xP@SAg@O+S&(c~48yx%i8btM1XJ;;7IWvo| z{M5Mz2y}Vsp7_5A-1eDO8AAx*pBL{p6eulyJX5PLMG7?xKpO#z*dxo>kGM|Iko{i; zMy#)b1tnThz`MD%LE8!(CaNrAvP(IVe{A=SwE9*jexHtS;Zi;x}r`mmq z+LR~s#Lsf>DGGZhci}lu**mm7`0aY;;YlZ z`9)uz_UbTQnfO!YQ zy!IkKVSoj6#wGDtRTP#@VZ*&@xh|83r}zM_-^s)L{s8Lbcg&PpT=dFSzRr^K=ZhU~ zN(3A7QoD`G!FAvGhz6HwdmzhhhG4Xg*c(R2Pw zwO&zQXUjmb`9DAT<_IT`VDBU}IPJaHXVgH=dTkoz+$Qzq-!e1}JJIT%cC$r`qv`M^ z+5F=uvX6!(FX9*K62FBXOq-nsdaPU)n?M3kS18b%R~(^rbkai=W@lKJmB!r2|(_79LPUkC=RIa`^(_brP^@`+!H|!gxvcZd0UH6}NRkrpq(7j*;Myj=DcM61F z#lC2muQJzbgBaYG#lCo^z0v`Vjg$>Dk-I2dSP%1h3q&e^MG0eY#5p%qcPCW#Gwip> z|H!u8UT!m9C)i=lz@`jL5#$woPl}}HdrnKXwStjYhpR5?~_5ZS|mljpe-Bx z7ng}Ma?V*=H3yYLkVu1SMyW=5IfCXnS3;7C6kJCN@f+l0ZOhE)GD^e%afd8`_gJx`Ftt%sr@=@dJELcwa?Y zk(9)H>?hZ^qp804>v!o}TU(p5wGUkTbF43=<$Mrj`{dk;yquoberMI}ks?`NuBB8m zbY^erO$zG;cZyXF7O`hScFdAZ^d>9UvrUTuRTf{m)im-i4A=*D`JJJj58pG_JdG94 z0rc$#GOYp1m7DHwh4L|foNUea7y~TCY<7EV`%tO2*l-cBNlV2_-B9kR!6~e!rLKpk z9N*2xUM}U_$dHc-?PsCT_50q|`a5VqFXn09QuGZZXm+%=(&(vwp72nqk*}E2S`w9# z_fgT()1j|v5a^-z3?AHCf-iz#g-E&GMoeXTf8~@`hpBtl0sXoRYbikLv2MHF%tDRG zzp^*`_t3S5J%Gcys|#sqK+#RdWDfFlwYoyyh&pqGrUs_ZLUjgaiwyyxogc&YB)-Mp^sJ3QXkjB1J)0A1K)N_v&qXCi}-#XzMY3p@(|&ml^DI>{M9zO!h%)52CG_) zpWzEN9yXy|NzpU6xB(M-JKn?lG%K1sC7>g0MZYha| zY*t2-_&Ad=dkpAIZKaFv_|7cBIsx=*pTIRaX131@NEbiB13fz0=-0L-`ZivE2nVgC z_Bx4`a2AG1t|6&1$%lEk$pbWCZ$|wB&wiYTALHTkJbaOdFX5n|lb($pxMW3(U&_n7 zwT|T%_|m`P;g@*$WgdQ&hyTLE@A2?!Jp4KjzllS49}cXn_I2|er_PCgn~#aBKv=<%QO zCOv1m$qv))A<#iQ!oxTzf|WfBd&MlxM%meRXV#l-;yrv!v_75A0y@U~itCG*?D74) z-lFHH1tAKcptB9IT{8D)uuH;fGkrIyE@S_W;>$DoZ+LVV5#^hPhg@Z|ew&B-|2o(l z`83=QSK)emWdF#1Y-hRck zZ3JnrVky-d@!N|fMUUFO-EZG^|JZ$FTgJBV8nt`N*zU16;N3T>j#z-Se|rC;`ybf< az}OBvc?wS+!*674WZMD! element.""" + + _tag = 'Point' + _children = atom.AtomBase._children.copy() + _children['{%s}pos' % GML_NAMESPACE] = ('pos', Pos) + def __init__(self, pos=None, extension_elements=None, extension_attributes=None, text=None): + GeoBaseElement.__init__(self, extension_elements=extension_elements, + extension_attributes=extension_attributes, + text=text) + if pos is None: + pos = Pos() + self.pos=pos +def PointFromString(xml_string): + return atom.CreateClassFromXMLString(Point, xml_string) + +class Where(GeoBaseElement): + """(container) Specifies a geographical location or region. + A container element, containing a single element. + (Not to be confused with .) + + Note that the (only) child attribute, .Point, is title-cased. + This reflects the names of elements in the xml stream + (principle of least surprise). + + As a convenience, you can get a tuple of (lat, lon) with Where.location(), + and set the same data with Where.setLocation( (lat, lon) ). + + Similarly, there are methods to set and get only latitude and longitude. 
+ """ + + _tag = 'where' + _namespace = GEORSS_NAMESPACE + _children = atom.AtomBase._children.copy() + _children['{%s}Point' % GML_NAMESPACE] = ('Point', Point) + def __init__(self, point=None, extension_elements=None, extension_attributes=None, text=None): + GeoBaseElement.__init__(self, extension_elements=extension_elements, + extension_attributes=extension_attributes, + text=text) + if point is None: + point = Point() + self.Point=point + def location(self): + "(float, float) Return Where.Point.pos.text as a (lat,lon) tuple" + try: + return tuple([float(z) for z in self.Point.pos.text.split(' ')]) + except AttributeError: + return tuple() + def set_location(self, latlon): + """(bool) Set Where.Point.pos.text from a (lat,lon) tuple. + + Arguments: + lat (float): The latitude in degrees, from -90.0 to 90.0 + lon (float): The longitude in degrees, from -180.0 to 180.0 + + Returns True on success. + + """ + + assert(isinstance(latlon[0], float)) + assert(isinstance(latlon[1], float)) + try: + self.Point.pos.text = "%s %s" % (latlon[0], latlon[1]) + return True + except AttributeError: + return False + def latitude(self): + "(float) Get the latitude value of the geo-tag. See also .location()" + lat, lon = self.location() + return lat + + def longitude(self): + "(float) Get the longtitude value of the geo-tag. See also .location()" + lat, lon = self.location() + return lon + + longtitude = longitude + + def set_latitude(self, lat): + """(bool) Set the latitude value of the geo-tag. + + Args: + lat (float): The new latitude value + + See also .set_location() + """ + _lat, lon = self.location() + return self.set_location(lat, lon) + + def set_longitude(self, lon): + """(bool) Set the longtitude value of the geo-tag. + + Args: + lat (float): The new latitude value + + See also .set_location() + """ + lat, _lon = self.location() + return self.set_location(lat, lon) + + set_longtitude = set_longitude + +def WhereFromString(xml_string): + return atom.CreateClassFromXMLString(Where, xml_string) + diff --git a/gam/gdata/geo/data.py b/gam/gdata/geo/data.py new file mode 100755 index 00000000000..2aec9112bbd --- /dev/null +++ b/gam/gdata/geo/data.py @@ -0,0 +1,92 @@ +#!/usr/bin/python +# +# Copyright (C) 2009 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""Contains the data classes of the Geography Extension""" + + +__author__ = 'j.s@google.com (Jeff Scudder)' + + +import atom.core + + +GEORSS_TEMPLATE = '{http://www.georss.org/georss/}%s' +GML_TEMPLATE = '{http://www.opengis.net/gml/}%s' +GEO_TEMPLATE = '{http://www.w3.org/2003/01/geo/wgs84_pos#/}%s' + + +class GeoLat(atom.core.XmlElement): + """Describes a W3C latitude.""" + _qname = GEO_TEMPLATE % 'lat' + + +class GeoLong(atom.core.XmlElement): + """Describes a W3C longitude.""" + _qname = GEO_TEMPLATE % 'long' + + +class GeoRssBox(atom.core.XmlElement): + """Describes a geographical region.""" + _qname = GEORSS_TEMPLATE % 'box' + + +class GeoRssPoint(atom.core.XmlElement): + """Describes a geographical location.""" + _qname = GEORSS_TEMPLATE % 'point' + + +class GmlLowerCorner(atom.core.XmlElement): + """Describes a lower corner of a region.""" + _qname = GML_TEMPLATE % 'lowerCorner' + + +class GmlPos(atom.core.XmlElement): + """Describes a latitude and longitude.""" + _qname = GML_TEMPLATE % 'pos' + + +class GmlPoint(atom.core.XmlElement): + """Describes a particular geographical point.""" + _qname = GML_TEMPLATE % 'Point' + pos = GmlPos + + +class GmlUpperCorner(atom.core.XmlElement): + """Describes an upper corner of a region.""" + _qname = GML_TEMPLATE % 'upperCorner' + + +class GmlEnvelope(atom.core.XmlElement): + """Describes a Gml geographical region.""" + _qname = GML_TEMPLATE % 'Envelope' + lower_corner = GmlLowerCorner + upper_corner = GmlUpperCorner + + +class GeoRssWhere(atom.core.XmlElement): + """Describes a geographical location or region.""" + _qname = GEORSS_TEMPLATE % 'where' + Point = GmlPoint + Envelope = GmlEnvelope + + +class W3CPoint(atom.core.XmlElement): + """Describes a W3C geographical location.""" + _qname = GEO_TEMPLATE % 'Point' + long = GeoLong + lat = GeoLat + + diff --git a/gam/gdata/health/__init__.py b/gam/gdata/health/__init__.py new file mode 100755 index 00000000000..1904ecdea66 --- /dev/null +++ b/gam/gdata/health/__init__.py @@ -0,0 +1,229 @@ +#!/usr/bin/python +# +# Copyright 2009 Google Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Contains extensions to Atom objects used with Google Health.""" + +__author__ = 'api.eric@google.com (Eric Bidelman)' + +import atom +import gdata + + +CCR_NAMESPACE = 'urn:astm-org:CCR' +METADATA_NAMESPACE = 'http://schemas.google.com/health/metadata' + + +class Ccr(atom.AtomBase): + """Represents a Google Health .""" + + _tag = 'ContinuityOfCareRecord' + _namespace = CCR_NAMESPACE + _children = atom.AtomBase._children.copy() + + def __init__(self, extension_elements=None, + extension_attributes=None, text=None): + atom.AtomBase.__init__(self, extension_elements=extension_elements, + extension_attributes=extension_attributes, text=text) + + def GetAlerts(self): + """Helper for extracting Alert/Allergy data from the CCR. + + Returns: + A list of ExtensionElements (one for each allergy found) or None if + no allergies where found in this CCR. 
+ """ + try: + body = self.FindExtensions('Body')[0] + return body.FindChildren('Alerts')[0].FindChildren('Alert') + except: + return None + + def GetAllergies(self): + """Alias for GetAlerts().""" + return self.GetAlerts() + + def GetProblems(self): + """Helper for extracting Problem/Condition data from the CCR. + + Returns: + A list of ExtensionElements (one for each problem found) or None if + no problems where found in this CCR. + """ + try: + body = self.FindExtensions('Body')[0] + return body.FindChildren('Problems')[0].FindChildren('Problem') + except: + return None + + def GetConditions(self): + """Alias for GetProblems().""" + return self.GetProblems() + + def GetProcedures(self): + """Helper for extracting Procedure data from the CCR. + + Returns: + A list of ExtensionElements (one for each procedure found) or None if + no procedures where found in this CCR. + """ + try: + body = self.FindExtensions('Body')[0] + return body.FindChildren('Procedures')[0].FindChildren('Procedure') + except: + return None + + def GetImmunizations(self): + """Helper for extracting Immunization data from the CCR. + + Returns: + A list of ExtensionElements (one for each immunization found) or None if + no immunizations where found in this CCR. + """ + try: + body = self.FindExtensions('Body')[0] + return body.FindChildren('Immunizations')[0].FindChildren('Immunization') + except: + return None + + def GetMedications(self): + """Helper for extracting Medication data from the CCR. + + Returns: + A list of ExtensionElements (one for each medication found) or None if + no medications where found in this CCR. + """ + try: + body = self.FindExtensions('Body')[0] + return body.FindChildren('Medications')[0].FindChildren('Medication') + except: + return None + + def GetResults(self): + """Helper for extracting Results/Labresults data from the CCR. + + Returns: + A list of ExtensionElements (one for each result found) or None if + no results where found in this CCR. 
+ """ + try: + body = self.FindExtensions('Body')[0] + return body.FindChildren('Results')[0].FindChildren('Result') + except: + return None + + +class ProfileEntry(gdata.GDataEntry): + """The Google Health version of an Atom Entry.""" + + _tag = gdata.GDataEntry._tag + _namespace = atom.ATOM_NAMESPACE + _children = gdata.GDataEntry._children.copy() + _attributes = gdata.GDataEntry._attributes.copy() + _children['{%s}ContinuityOfCareRecord' % CCR_NAMESPACE] = ('ccr', Ccr) + + def __init__(self, ccr=None, author=None, category=None, content=None, + atom_id=None, link=None, published=None, title=None, + updated=None, text=None, extension_elements=None, + extension_attributes=None): + self.ccr = ccr + gdata.GDataEntry.__init__( + self, author=author, category=category, content=content, + atom_id=atom_id, link=link, published=published, title=title, + updated=updated, extension_elements=extension_elements, + extension_attributes=extension_attributes, text=text) + + +class ProfileFeed(gdata.GDataFeed): + """A feed containing a list of Google Health profile entries.""" + + _tag = gdata.GDataFeed._tag + _namespace = atom.ATOM_NAMESPACE + _children = gdata.GDataFeed._children.copy() + _attributes = gdata.GDataFeed._attributes.copy() + _children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry', [ProfileEntry]) + + +class ProfileListEntry(gdata.GDataEntry): + """The Atom Entry in the Google Health profile list feed.""" + + _tag = gdata.GDataEntry._tag + _namespace = atom.ATOM_NAMESPACE + _children = gdata.GDataEntry._children.copy() + _attributes = gdata.GDataEntry._attributes.copy() + + def GetProfileId(self): + return self.content.text + + def GetProfileName(self): + return self.title.text + + +class ProfileListFeed(gdata.GDataFeed): + """A feed containing a list of Google Health profile list entries.""" + + _tag = gdata.GDataFeed._tag + _namespace = atom.ATOM_NAMESPACE + _children = gdata.GDataFeed._children.copy() + _attributes = gdata.GDataFeed._attributes.copy() + _children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry', [ProfileListEntry]) + + +def ProfileEntryFromString(xml_string): + """Converts an XML string into a ProfileEntry object. + + Args: + xml_string: string The XML describing a Health profile feed entry. + + Returns: + A ProfileEntry object corresponding to the given XML. + """ + return atom.CreateClassFromXMLString(ProfileEntry, xml_string) + + +def ProfileListEntryFromString(xml_string): + """Converts an XML string into a ProfileListEntry object. + + Args: + xml_string: string The XML describing a Health profile list feed entry. + + Returns: + A ProfileListEntry object corresponding to the given XML. + """ + return atom.CreateClassFromXMLString(ProfileListEntry, xml_string) + + +def ProfileFeedFromString(xml_string): + """Converts an XML string into a ProfileFeed object. + + Args: + xml_string: string The XML describing a ProfileFeed feed. + + Returns: + A ProfileFeed object corresponding to the given XML. + """ + return atom.CreateClassFromXMLString(ProfileFeed, xml_string) + + +def ProfileListFeedFromString(xml_string): + """Converts an XML string into a ProfileListFeed object. + + Args: + xml_string: string The XML describing a ProfileListFeed feed. + + Returns: + A ProfileListFeed object corresponding to the given XML. 
+ """ + return atom.CreateClassFromXMLString(ProfileListFeed, xml_string) diff --git a/gam/gdata/health/service.py b/gam/gdata/health/service.py new file mode 100755 index 00000000000..3d38411ebe3 --- /dev/null +++ b/gam/gdata/health/service.py @@ -0,0 +1,263 @@ +#!/usr/bin/python +# +# Copyright 2009 Google Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""HealthService extends GDataService to streamline Google Health API access. + + HealthService: Provides methods to interact with the profile, profile list, + and register/notices feeds. Extends GDataService. + + HealthProfileQuery: Queries the Google Health Profile feed. + + HealthProfileListQuery: Queries the Google Health Profile list feed. +""" + +__author__ = 'api.eric@google.com (Eric Bidelman)' + + +import atom +import gdata.health +import gdata.service + + +class HealthService(gdata.service.GDataService): + + """Client extension for the Google Health service Document List feed.""" + + def __init__(self, email=None, password=None, source=None, + use_h9_sandbox=False, server='www.google.com', + additional_headers=None, **kwargs): + """Creates a client for the Google Health service. + + Args: + email: string (optional) The user's email address, used for + authentication. + password: string (optional) The user's password. + source: string (optional) The name of the user's application. + use_h9_sandbox: boolean (optional) True to issue requests against the + /h9 developer's sandbox. + server: string (optional) The name of the server to which a connection + will be opened. + additional_headers: dictionary (optional) Any additional headers which + should be included with CRUD operations. + kwargs: The other parameters to pass to gdata.service.GDataService + constructor. + """ + service = use_h9_sandbox and 'weaver' or 'health' + gdata.service.GDataService.__init__( + self, email=email, password=password, service=service, source=source, + server=server, additional_headers=additional_headers, **kwargs) + self.ssl = True + self.use_h9_sandbox = use_h9_sandbox + + def __get_service(self): + return self.use_h9_sandbox and 'h9' or 'health' + + def GetProfileFeed(self, query=None, profile_id=None): + """Fetches the users Google Health profile feed. + + Args: + query: HealthProfileQuery or string (optional) A query to use on the + profile feed. If None, a HealthProfileQuery is constructed. + profile_id: string (optional) The profile id to query the profile feed + with when using ClientLogin. Note: this parameter is ignored if + query is set. + + Returns: + A gdata.health.ProfileFeed object containing the user's Health profile. 
+ """ + if query is None: + projection = profile_id and 'ui' or 'default' + uri = HealthProfileQuery( + service=self.__get_service(), projection=projection, + profile_id=profile_id).ToUri() + elif isinstance(query, HealthProfileQuery): + uri = query.ToUri() + else: + uri = query + + return self.GetFeed(uri, converter=gdata.health.ProfileFeedFromString) + + def GetProfileListFeed(self, query=None): + """Fetches the users Google Health profile feed. + + Args: + query: HealthProfileListQuery or string (optional) A query to use + on the profile list feed. If None, a HealthProfileListQuery is + constructed to /health/feeds/profile/list or /h9/feeds/profile/list. + + Returns: + A gdata.health.ProfileListFeed object containing the user's list + of profiles. + """ + if not query: + uri = HealthProfileListQuery(service=self.__get_service()).ToUri() + elif isinstance(query, HealthProfileListQuery): + uri = query.ToUri() + else: + uri = query + + return self.GetFeed(uri, converter=gdata.health.ProfileListFeedFromString) + + def SendNotice(self, subject, body=None, content_type='html', + ccr=None, profile_id=None): + """Sends (posts) a notice to the user's Google Health profile. + + Args: + subject: A string representing the message's subject line. + body: string (optional) The message body. + content_type: string (optional) The content type of the notice message + body. This parameter is only honored when a message body is + specified. + ccr: string (optional) The CCR XML document to reconcile into the + user's profile. + profile_id: string (optional) The profile id to work with when using + ClientLogin. Note: this parameter is ignored if query is set. + + Returns: + A gdata.health.ProfileEntry object of the posted entry. + """ + if body: + content = atom.Content(content_type=content_type, text=body) + else: + content = body + + entry = gdata.GDataEntry( + title=atom.Title(text=subject), content=content, + extension_elements=[atom.ExtensionElementFromString(ccr)]) + + projection = profile_id and 'ui' or 'default' + query = HealthRegisterQuery(service=self.__get_service(), + projection=projection, profile_id=profile_id) + return self.Post(entry, query.ToUri(), + converter=gdata.health.ProfileEntryFromString) + + +class HealthProfileQuery(gdata.service.Query): + + """Object used to construct a URI to query the Google Health profile feed.""" + + def __init__(self, service='health', feed='feeds/profile', + projection='default', profile_id=None, text_query=None, + params=None, categories=None): + """Constructor for Health profile feed query. + + Args: + service: string (optional) The service to query. Either 'health' or 'h9'. + feed: string (optional) The path for the feed. The default value is + 'feeds/profile'. + projection: string (optional) The visibility of the data. Possible values + are 'default' for AuthSub and 'ui' for ClientLogin. If this value + is set to 'ui', the profile_id parameter should also be set. + profile_id: string (optional) The profile id to query. This should only + be used when using ClientLogin. + text_query: str (optional) The contents of the q query parameter. The + contents of the text_query are URL escaped upon conversion to a URI. + Note: this parameter can only be used on the register feed using + ClientLogin. + params: dict (optional) Parameter value string pairs which become URL + params when translated to a URI. These parameters are added to + the query's items. + categories: list (optional) List of category strings which should be + included as query categories. 
See gdata.service.Query for + additional documentation. + """ + self.service = service + self.profile_id = profile_id + self.projection = projection + gdata.service.Query.__init__(self, feed=feed, text_query=text_query, + params=params, categories=categories) + + def ToUri(self): + """Generates a URI from the query parameters set in the object. + + Returns: + A string containing the URI used to retrieve entries from the Health + profile feed. + """ + old_feed = self.feed + self.feed = '/'.join([self.service, old_feed, self.projection]) + + if self.profile_id: + self.feed += '/' + self.profile_id + self.feed = '/%s' % (self.feed,) + + new_feed = gdata.service.Query.ToUri(self) + self.feed = old_feed + return new_feed + + +class HealthProfileListQuery(gdata.service.Query): + + """Object used to construct a URI to query a Health profile list feed.""" + + def __init__(self, service='health', feed='feeds/profile/list'): + """Constructor for Health profile list feed query. + + Args: + service: string (optional) The service to query. Either 'health' or 'h9'. + feed: string (optional) The path for the feed. The default value is + 'feeds/profile/list'. + """ + gdata.service.Query.__init__(self, feed) + self.service = service + + def ToUri(self): + """Generates a URI from the query parameters set in the object. + + Returns: + A string containing the URI used to retrieve entries from the + profile list feed. + """ + return '/%s' % ('/'.join([self.service, self.feed]),) + + +class HealthRegisterQuery(gdata.service.Query): + + """Object used to construct a URI to query a Health register/notice feed.""" + + def __init__(self, service='health', feed='feeds/register', + projection='default', profile_id=None): + """Constructor for Health profile list feed query. + + Args: + service: string (optional) The service to query. Either 'health' or 'h9'. + feed: string (optional) The path for the feed. The default value is + 'feeds/register'. + projection: string (optional) The visibility of the data. Possible values + are 'default' for AuthSub and 'ui' for ClientLogin. If this value + is set to 'ui', the profile_id parameter should also be set. + profile_id: string (optional) The profile id to query. This should only + be used when using ClientLogin. + """ + gdata.service.Query.__init__(self, feed) + self.service = service + self.projection = projection + self.profile_id = profile_id + + def ToUri(self): + """Generates a URI from the query parameters set in the object. + + Returns: + A string containing the URI needed to interact with the register feed. + """ + old_feed = self.feed + self.feed = '/'.join([self.service, old_feed, self.projection]) + new_feed = gdata.service.Query.ToUri(self) + self.feed = old_feed + + if self.profile_id: + new_feed += '/' + self.profile_id + return '/%s' % (new_feed,) diff --git a/gam/gdata/maps/__init__.py b/gam/gdata/maps/__init__.py new file mode 100755 index 00000000000..e69de29bb2d diff --git a/gam/gdata/maps/client.py b/gam/gdata/maps/client.py new file mode 100755 index 00000000000..7c7d7e91aef --- /dev/null +++ b/gam/gdata/maps/client.py @@ -0,0 +1,179 @@ +#!/usr/bin/env python +# +# Copyright (C) 2009 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +"""Contains a client to communicate with the Maps Data servers. + +For documentation on the Maps Data API, see: +http://code.google.com/apis/maps/documentation/mapsdata/ +""" + + +__author__ = 'api.roman.public@google.com (Roman Nurik)' + + +import gdata.client +import gdata.maps.data +import atom.data +import atom.http_core +import gdata.gauth + + +# List user's maps, takes a user ID, or 'default'. +MAP_URL_TEMPLATE = 'http://maps.google.com/maps/feeds/maps/%s/full' + +# List map's features, takes a user ID (or 'default') and map ID. +MAP_FEATURE_URL_TEMPLATE = ('http://maps.google.com/maps' + '/feeds/features/%s/%s/full') + +# The KML mime type +KML_CONTENT_TYPE = 'application/vnd.google-earth.kml+xml' + + +class MapsClient(gdata.client.GDClient): + """Maps Data API GData client.""" + + api_version = '2' + auth_service = 'local' + auth_scopes = gdata.gauth.AUTH_SCOPES['local'] + + def get_maps(self, user_id='default', auth_token=None, + desired_class=gdata.maps.data.MapFeed, **kwargs): + """Retrieves a Map feed for the given user ID. + + Args: + user_id: An optional string representing the user ID; should be 'default'. + + Returns: + A gdata.maps.data.MapFeed. + """ + return self.get_feed(MAP_URL_TEMPLATE % user_id, auth_token=auth_token, + desired_class=desired_class, **kwargs) + + GetMaps = get_maps + + def get_features(self, map_id, user_id='default', auth_token=None, + desired_class=gdata.maps.data.FeatureFeed, query=None, + **kwargs): + """Retrieves a Feature feed for the given map ID/user ID combination. + + Args: + map_id: A string representing the ID of the map whose features should be + retrieved. + user_id: An optional string representing the user ID; should be 'default'. + + Returns: + A gdata.maps.data.FeatureFeed. + """ + return self.get_feed(MAP_FEATURE_URL_TEMPLATE % (user_id, map_id), + auth_token=auth_token, desired_class=desired_class, + query=query, **kwargs) + + GetFeatures = get_features + + def create_map(self, title, summary=None, unlisted=False, + auth_token=None, title_type='text', summary_type='text', + **kwargs): + """Creates a new map and posts it to the Maps Data servers. + + Args: + title: A string representing the title of the new map. + summary: An optional string representing the new map's description. + unlisted: An optional boolean identifying whether the map should be + unlisted (True) or public (False). Default False. + + Returns: + A gdata.maps.data.Map. + """ + new_entry = gdata.maps.data.Map( + title=atom.data.Title(text=title, type=title_type)) + if summary: + new_entry.summary = atom.data.Summary(text=summary, type=summary_type) + if unlisted: + new_entry.control = atom.data.Control(draft=atom.data.Draft(text='yes')) + return self.post(new_entry, MAP_URL_TEMPLATE % 'default', + auth_token=auth_token, **kwargs) + + CreateMap = create_map + + def add_feature(self, map_id, title, content, + auth_token=None, title_type='text', + content_type=KML_CONTENT_TYPE, **kwargs): + """Adds a new feature to the given map. + + Args: + map_id: A string representing the ID of the map to which the new feature + should be added. 
+ title: A string representing the name/title of the new feature. + content: A KML string or gdata.maps.data.KmlContent object representing + the new feature's KML contents, including its description. + + Returns: + A gdata.maps.data.Feature. + """ + if content_type == KML_CONTENT_TYPE: + if type(content) != gdata.maps.data.KmlContent: + content = gdata.maps.data.KmlContent(kml=content) + else: + content = atom.data.Content(content=content, type=content_type) + new_entry = gdata.maps.data.Feature( + title=atom.data.Title(text=title, type=title_type), + content=content) + return self.post(new_entry, MAP_FEATURE_URL_TEMPLATE % ('default', map_id), + auth_token=auth_token, **kwargs) + + AddFeature = add_feature + + def update(self, entry, auth_token=None, **kwargs): + """Sends changes to a given map or feature entry to the Maps Data servers. + + Args: + entry: A gdata.maps.data.Map or gdata.maps.data.Feature to be updated + server-side. + """ + # The Maps Data API does not currently support ETags, so for now remove + # the ETag before performing an update. + old_etag = entry.etag + entry.etag = None + response = gdata.client.GDClient.update(self, entry, + auth_token=auth_token, **kwargs) + entry.etag = old_etag + return response + + Update = update + + def delete(self, entry_or_uri, auth_token=None, **kwargs): + """Deletes the given entry or entry URI server-side. + + Args: + entry_or_uri: A gdata.maps.data.Map, gdata.maps.data.Feature, or URI + string representing the entry to delete. + """ + if isinstance(entry_or_uri, (str, unicode, atom.http_core.Uri)): + return gdata.client.GDClient.delete(self, entry_or_uri, + auth_token=auth_token, **kwargs) + # The Maps Data API does not currently support ETags, so for now remove + # the ETag before performing a delete. + old_etag = entry_or_uri.etag + entry_or_uri.etag = None + response = gdata.client.GDClient.delete(self, entry_or_uri, + auth_token=auth_token, **kwargs) + # TODO: if GDClient.delete raises and exception, the entry's etag may be + # left as None. Should revisit this logic. + entry_or_uri.etag = old_etag + return response + + Delete = delete diff --git a/gam/gdata/maps/data.py b/gam/gdata/maps/data.py new file mode 100755 index 00000000000..544611e2275 --- /dev/null +++ b/gam/gdata/maps/data.py @@ -0,0 +1,125 @@ +#!/usr/bin/env python +# +# Copyright (C) 2009 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
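# Editor's usage sketch (not part of the patch): combining the MapsClient calls
# defined in gam/gdata/maps/client.py above. client_login() is assumed to be the
# ClientLogin helper inherited from gdata.client.GDClient; the KML snippet and
# account details are placeholders.
import gdata.maps.client

client = gdata.maps.client.MapsClient(source='example-app')
client.client_login('user@example.com', 'secret', 'example-app')   # assumption: GDClient helper

new_map = client.create_map('Coffee shops', summary='Places to try', unlisted=True)
map_id = new_map.get_map_id()   # parsed from the atom id (see maps/data.py below)

client.add_feature(map_id, 'Blue Bottle',
                   '<Placemark><name>Blue Bottle</name></Placemark>')

for feature in client.get_features(map_id).entry:
  print feature.title.text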
+ + +"""Data model classes for parsing and generating XML for the Maps Data API.""" + + +__author__ = 'api.roman.public@google.com (Roman Nurik)' + + +import re +import atom.core +import gdata.data + + +MAP_ATOM_ID_PATTERN = re.compile('/maps/feeds/maps/' + '(?P\w+)/' + '(?P\w+)$') + +FEATURE_ATOM_ID_PATTERN = re.compile('/maps/feeds/features/' + '(?P\w+)/' + '(?P\w+)/' + '(?P\w+)$') + +# The KML mime type +KML_CONTENT_TYPE = 'application/vnd.google-earth.kml+xml' + +# The OGC KML 2.2 namespace +KML_NAMESPACE = 'http://www.opengis.net/kml/2.2' + +class MapsDataEntry(gdata.data.GDEntry): + """Adds convenience methods inherited by all Maps Data entries.""" + + def get_user_id(self): + """Extracts the user ID of this entry.""" + if self.id.text: + match = self.__class__.atom_id_pattern.search(self.id.text) + if match: + return match.group('user_id') + return None + + GetUserId = get_user_id + + def get_map_id(self): + """Extracts the map ID of this entry.""" + if self.id.text: + match = self.__class__.atom_id_pattern.search(self.id.text) + if match: + return match.group('map_id') + return None + + GetMapId = get_map_id + + +class Map(MapsDataEntry): + """Represents a map which belongs to the user.""" + atom_id_pattern = MAP_ATOM_ID_PATTERN + + +class MapFeed(gdata.data.GDFeed): + """Represents an atom feed of maps.""" + entry = [Map] + + +class KmlContent(atom.data.Content): + """Represents an atom content element that encapsulates KML content.""" + + def __init__(self, **kwargs): + super(KmlContent, self).__init__(type=KML_CONTENT_TYPE, **kwargs) + if 'kml' in kwargs: + self.kml = kwargs['kml'] + + def _get_kml(self): + if self.children: + return self.children[0] + else: + return '' + + def _set_kml(self, kml): + if not kml: + self.children = [] + return + + if type(kml) == str: + kml = atom.core.parse(kml) + if not kml.namespace: + kml.namespace = KML_NAMESPACE + + self.children = [kml] + + kml = property(_get_kml, _set_kml) + + +class Feature(MapsDataEntry): + """Represents a single feature in a map.""" + atom_id_pattern = FEATURE_ATOM_ID_PATTERN + content = KmlContent + + def get_feature_id(self): + """Extracts the feature ID of this feature.""" + if self.id.text: + match = self.__class__.atom_id_pattern.search(self.id.text) + if match: + return match.group('feature_id') + return None + + GetFeatureId = get_feature_id + + +class FeatureFeed(gdata.data.GDFeed): + """Represents an atom feed of features.""" + entry = [Feature] diff --git a/gam/gdata/media/__init__.py b/gam/gdata/media/__init__.py new file mode 100755 index 00000000000..e6af1ae52d1 --- /dev/null +++ b/gam/gdata/media/__init__.py @@ -0,0 +1,355 @@ +# -*-*- encoding: utf-8 -*-*- +# +# This is gdata.photos.media, implementing parts of the MediaRSS spec in gdata structures +# +# $Id: __init__.py 81 2007-10-03 14:41:42Z havard.gulldahl $ +# +# Copyright 2007 HÃ¥vard Gulldahl +# Portions copyright 2007 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ + +"""Essential attributes of photos in Google Photos/Picasa Web Albums are +expressed using elements from the `media' namespace, defined in the +MediaRSS specification[1]. + +Due to copyright issues, the elements herein are documented sparingly, please +consult with the Google Photos API Reference Guide[2], alternatively the +official MediaRSS specification[1] for details. +(If there is a version conflict between the two sources, stick to the +Google Photos API). + +[1]: http://search.yahoo.com/mrss (version 1.1.1) +[2]: http://code.google.com/apis/picasaweb/reference.html#media_reference + +Keep in mind that Google Photos only uses a subset of the MediaRSS elements +(and some of the attributes are trimmed down, too): + +media:content +media:credit +media:description +media:group +media:keywords +media:thumbnail +media:title +""" + +__author__ = u'havard@gulldahl.no'# (HÃ¥vard Gulldahl)' #BUG: api chokes on non-ascii chars in __author__ +__license__ = 'Apache License v2' + + +import atom +import gdata + +MEDIA_NAMESPACE = 'http://search.yahoo.com/mrss/' +YOUTUBE_NAMESPACE = 'http://gdata.youtube.com/schemas/2007' + + +class MediaBaseElement(atom.AtomBase): + """Base class for elements in the MEDIA_NAMESPACE. + To add new elements, you only need to add the element tag name to self._tag + """ + + _tag = '' + _namespace = MEDIA_NAMESPACE + _children = atom.AtomBase._children.copy() + _attributes = atom.AtomBase._attributes.copy() + + def __init__(self, name=None, extension_elements=None, + extension_attributes=None, text=None): + self.name = name + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + +class Content(MediaBaseElement): + """(attribute container) This element describes the original content, + e.g. an image or a video. There may be multiple Content elements + in a media:Group. + + For example, a video may have a + element that specifies a JPEG + representation of the video, and a + element that specifies the URL of the video itself. + + Attributes: + url: non-ambigous reference to online object + width: width of the object frame, in pixels + height: width of the object frame, in pixels + medium: one of `image' or `video', allowing the api user to quickly + determine the object's type + type: Internet media Type[1] (a.k.a. mime type) of the object -- a more + verbose way of determining the media type. To set the type member + in the contructor, use the content_type parameter. 
+ (optional) fileSize: the size of the object, in bytes + + [1]: http://en.wikipedia.org/wiki/Internet_media_type + """ + + _tag = 'content' + _attributes = atom.AtomBase._attributes.copy() + _attributes['url'] = 'url' + _attributes['width'] = 'width' + _attributes['height'] = 'height' + _attributes['medium'] = 'medium' + _attributes['type'] = 'type' + _attributes['fileSize'] = 'fileSize' + + def __init__(self, url=None, width=None, height=None, + medium=None, content_type=None, fileSize=None, format=None, + extension_elements=None, extension_attributes=None, text=None): + MediaBaseElement.__init__(self, extension_elements=extension_elements, + extension_attributes=extension_attributes, + text=text) + self.url = url + self.width = width + self.height = height + self.medium = medium + self.type = content_type + self.fileSize = fileSize + + +def ContentFromString(xml_string): + return atom.CreateClassFromXMLString(Content, xml_string) + + +class Credit(MediaBaseElement): + """(string) Contains the nickname of the user who created the content, + e.g. `Liz Bennet'. + + This is a user-specified value that should be used when referring to + the user by name. + + Note that none of the attributes from the MediaRSS spec are supported. + """ + + _tag = 'credit' + + +def CreditFromString(xml_string): + return atom.CreateClassFromXMLString(Credit, xml_string) + + +class Description(MediaBaseElement): + """(string) A description of the media object. + Either plain unicode text, or entity-encoded html (look at the `type' + attribute). + + E.g `A set of photographs I took while vacationing in Italy.' + + For `api' projections, the description is in plain text; + for `base' projections, the description is in HTML. + + Attributes: + type: either `text' or `html'. To set the type member in the contructor, + use the description_type parameter. + """ + + _tag = 'description' + _attributes = atom.AtomBase._attributes.copy() + _attributes['type'] = 'type' + def __init__(self, description_type=None, + extension_elements=None, extension_attributes=None, text=None): + MediaBaseElement.__init__(self, extension_elements=extension_elements, + extension_attributes=extension_attributes, + text=text) + + self.type = description_type + + +def DescriptionFromString(xml_string): + return atom.CreateClassFromXMLString(Description, xml_string) + + +class Keywords(MediaBaseElement): + """(string) Lists the tags associated with the entry, + e.g `italy, vacation, sunset'. + + Contains a comma-separated list of tags that have been added to the photo, or + all tags that have been added to photos in the album. + """ + + _tag = 'keywords' + + +def KeywordsFromString(xml_string): + return atom.CreateClassFromXMLString(Keywords, xml_string) + + +class Thumbnail(MediaBaseElement): + """(attributes) Contains the URL of a thumbnail of a photo or album cover. + + There can be multiple elements for a given ; + for example, a given item may have multiple thumbnails at different sizes. + Photos generally have two thumbnails at different sizes; + albums generally have one cropped thumbnail. + + If the thumbsize parameter is set to the initial query, this element points + to thumbnails of the requested sizes; otherwise the thumbnails are the + default thumbnail size. + + This element must not be confused with the element. + + Attributes: + url: The URL of the thumbnail image. + height: The height of the thumbnail image, in pixels. + width: The width of the thumbnail image, in pixels. 
+ """ + + _tag = 'thumbnail' + _attributes = atom.AtomBase._attributes.copy() + _attributes['url'] = 'url' + _attributes['width'] = 'width' + _attributes['height'] = 'height' + def __init__(self, url=None, width=None, height=None, + extension_attributes=None, text=None, extension_elements=None): + MediaBaseElement.__init__(self, extension_elements=extension_elements, + extension_attributes=extension_attributes, + text=text) + self.url = url + self.width = width + self.height = height + + +def ThumbnailFromString(xml_string): + return atom.CreateClassFromXMLString(Thumbnail, xml_string) + + +class Title(MediaBaseElement): + """(string) Contains the title of the entry's media content, in plain text. + + Attributes: + type: Always set to plain. To set the type member in the constructor, use + the title_type parameter. + """ + + _tag = 'title' + _attributes = atom.AtomBase._attributes.copy() + _attributes['type'] = 'type' + def __init__(self, title_type=None, + extension_attributes=None, text=None, extension_elements=None): + MediaBaseElement.__init__(self, extension_elements=extension_elements, + extension_attributes=extension_attributes, + text=text) + self.type = title_type + + +def TitleFromString(xml_string): + return atom.CreateClassFromXMLString(Title, xml_string) + + +class Player(MediaBaseElement): + """(string) Contains the embeddable player URL for the entry's media content + if the media is a video. + + Attributes: + url: Always set to plain + """ + + _tag = 'player' + _attributes = atom.AtomBase._attributes.copy() + _attributes['url'] = 'url' + + def __init__(self, player_url=None, + extension_attributes=None, extension_elements=None): + MediaBaseElement.__init__(self, extension_elements=extension_elements, + extension_attributes=extension_attributes) + self.url= player_url + + +class Private(atom.AtomBase): + """The YouTube Private element""" + _tag = 'private' + _namespace = YOUTUBE_NAMESPACE + + +class Duration(atom.AtomBase): + """The YouTube Duration element""" + _tag = 'duration' + _namespace = YOUTUBE_NAMESPACE + _attributes = atom.AtomBase._attributes.copy() + _attributes['seconds'] = 'seconds' + + +class Category(MediaBaseElement): + """The mediagroup:category element""" + + _tag = 'category' + _attributes = atom.AtomBase._attributes.copy() + _attributes['term'] = 'term' + _attributes['scheme'] = 'scheme' + _attributes['label'] = 'label' + + def __init__(self, term=None, scheme=None, label=None, text=None, + extension_elements=None, extension_attributes=None): + """Constructor for Category + + Args: + term: str + scheme: str + label: str + text: str The text data in the this element + extension_elements: list A list of ExtensionElement instances + extension_attributes: dict A dictionary of attribute value string pairs + """ + + self.term = term + self.scheme = scheme + self.label = label + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + +class Group(MediaBaseElement): + """Container element for all media elements. 
+ The element can appear as a child of an album, photo or + video entry.""" + + _tag = 'group' + _children = atom.AtomBase._children.copy() + _children['{%s}content' % MEDIA_NAMESPACE] = ('content', [Content,]) + _children['{%s}credit' % MEDIA_NAMESPACE] = ('credit', Credit) + _children['{%s}description' % MEDIA_NAMESPACE] = ('description', Description) + _children['{%s}keywords' % MEDIA_NAMESPACE] = ('keywords', Keywords) + _children['{%s}thumbnail' % MEDIA_NAMESPACE] = ('thumbnail', [Thumbnail,]) + _children['{%s}title' % MEDIA_NAMESPACE] = ('title', Title) + _children['{%s}category' % MEDIA_NAMESPACE] = ('category', [Category,]) + _children['{%s}duration' % YOUTUBE_NAMESPACE] = ('duration', Duration) + _children['{%s}private' % YOUTUBE_NAMESPACE] = ('private', Private) + _children['{%s}player' % MEDIA_NAMESPACE] = ('player', Player) + + def __init__(self, content=None, credit=None, description=None, keywords=None, + thumbnail=None, title=None, duration=None, private=None, + category=None, player=None, extension_elements=None, + extension_attributes=None, text=None): + + MediaBaseElement.__init__(self, extension_elements=extension_elements, + extension_attributes=extension_attributes, + text=text) + self.content=content + self.credit=credit + self.description=description + self.keywords=keywords + self.thumbnail=thumbnail or [] + self.title=title + self.duration=duration + self.private=private + self.category=category or [] + self.player=player + + +def GroupFromString(xml_string): + return atom.CreateClassFromXMLString(Group, xml_string) diff --git a/gam/gdata/media/data.py b/gam/gdata/media/data.py new file mode 100755 index 00000000000..bb5d2c80f85 --- /dev/null +++ b/gam/gdata/media/data.py @@ -0,0 +1,159 @@ +#!/usr/bin/python +# +# Copyright (C) 2009 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Contains the data classes of the Yahoo! 
Media RSS Extension""" + + +__author__ = 'j.s@google.com (Jeff Scudder)' + + +import atom.core + + +MEDIA_TEMPLATE = '{http://search.yahoo.com/mrss//}%s' + + +class MediaCategory(atom.core.XmlElement): + """Describes a media category.""" + _qname = MEDIA_TEMPLATE % 'category' + scheme = 'scheme' + label = 'label' + + +class MediaCopyright(atom.core.XmlElement): + """Describes a media copyright.""" + _qname = MEDIA_TEMPLATE % 'copyright' + url = 'url' + + +class MediaCredit(atom.core.XmlElement): + """Describes a media credit.""" + _qname = MEDIA_TEMPLATE % 'credit' + role = 'role' + scheme = 'scheme' + + +class MediaDescription(atom.core.XmlElement): + """Describes a media description.""" + _qname = MEDIA_TEMPLATE % 'description' + type = 'type' + + +class MediaHash(atom.core.XmlElement): + """Describes a media hash.""" + _qname = MEDIA_TEMPLATE % 'hash' + algo = 'algo' + + +class MediaKeywords(atom.core.XmlElement): + """Describes a media keywords.""" + _qname = MEDIA_TEMPLATE % 'keywords' + + +class MediaPlayer(atom.core.XmlElement): + """Describes a media player.""" + _qname = MEDIA_TEMPLATE % 'player' + height = 'height' + width = 'width' + url = 'url' + + +class MediaRating(atom.core.XmlElement): + """Describes a media rating.""" + _qname = MEDIA_TEMPLATE % 'rating' + scheme = 'scheme' + + +class MediaRestriction(atom.core.XmlElement): + """Describes a media restriction.""" + _qname = MEDIA_TEMPLATE % 'restriction' + relationship = 'relationship' + type = 'type' + + +class MediaText(atom.core.XmlElement): + """Describes a media text.""" + _qname = MEDIA_TEMPLATE % 'text' + end = 'end' + lang = 'lang' + type = 'type' + start = 'start' + + +class MediaThumbnail(atom.core.XmlElement): + """Describes a media thumbnail.""" + _qname = MEDIA_TEMPLATE % 'thumbnail' + time = 'time' + url = 'url' + width = 'width' + height = 'height' + + +class MediaTitle(atom.core.XmlElement): + """Describes a media title.""" + _qname = MEDIA_TEMPLATE % 'title' + type = 'type' + + +class MediaContent(atom.core.XmlElement): + """Describes a media content.""" + _qname = MEDIA_TEMPLATE % 'content' + bitrate = 'bitrate' + is_default = 'isDefault' + medium = 'medium' + height = 'height' + credit = [MediaCredit] + language = 'language' + hash = MediaHash + width = 'width' + player = MediaPlayer + url = 'url' + file_size = 'fileSize' + channels = 'channels' + expression = 'expression' + text = [MediaText] + samplingrate = 'samplingrate' + title = MediaTitle + category = [MediaCategory] + rating = [MediaRating] + type = 'type' + description = MediaDescription + framerate = 'framerate' + thumbnail = [MediaThumbnail] + duration = 'duration' + copyright = MediaCopyright + keywords = MediaKeywords + restriction = [MediaRestriction] + + +class MediaGroup(atom.core.XmlElement): + """Describes a media group.""" + _qname = MEDIA_TEMPLATE % 'group' + credit = [MediaCredit] + content = [MediaContent] + copyright = MediaCopyright + description = MediaDescription + category = [MediaCategory] + player = MediaPlayer + rating = [MediaRating] + hash = MediaHash + title = MediaTitle + keywords = MediaKeywords + restriction = [MediaRestriction] + thumbnail = [MediaThumbnail] + text = [MediaText] + + diff --git a/gam/gdata/notebook/__init__.py b/gam/gdata/notebook/__init__.py new file mode 100755 index 00000000000..22071f7a11e --- /dev/null +++ b/gam/gdata/notebook/__init__.py @@ -0,0 +1,15 @@ +#!/usr/bin/python +# +# Copyright (C) 2009 Google Inc. 
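# Editor's usage sketch (not part of the patch): building a media:group from the
# classes added in gam/gdata/media/__init__.py further above, the way Picasa Web
# Albums / Google Photos entries carry their media metadata. Values are
# placeholders.
import gdata.media

group = gdata.media.Group(
    title=gdata.media.Title(text='Sunset over Oslo'),
    description=gdata.media.Description(description_type='text',
                                        text='Taken from the harbour.'),
    keywords=gdata.media.Keywords(text='norway, sunset'),
    content=gdata.media.Content(url='http://example.com/sunset.jpg',
                                width='1600', height='1200', medium='image',
                                content_type='image/jpeg'))
print group.ToString()   # serialises to a media:group element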
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/gam/gdata/notebook/data.py b/gam/gdata/notebook/data.py new file mode 100755 index 00000000000..53405e01818 --- /dev/null +++ b/gam/gdata/notebook/data.py @@ -0,0 +1,55 @@ +#!/usr/bin/python +# +# Copyright (C) 2009 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Contains the data classes of the Google Notebook Data API""" + + +__author__ = 'j.s@google.com (Jeff Scudder)' + + +import atom.core +import atom.data +import gdata.data +import gdata.opensearch.data + + +NB_TEMPLATE = '{http://schemas.google.com/notes/2008/}%s' + + +class ComesAfter(atom.core.XmlElement): + """Preceding element.""" + _qname = NB_TEMPLATE % 'comesAfter' + id = 'id' + + +class NoteEntry(gdata.data.GDEntry): + """Describes a note entry in the feed of a user's notebook.""" + + +class NotebookFeed(gdata.data.GDFeed): + """Describes a notebook feed.""" + entry = [NoteEntry] + + +class NotebookListEntry(gdata.data.GDEntry): + """Describes a note list entry in the feed of a user's list of public notebooks.""" + + +class NotebookListFeed(gdata.data.GDFeed): + """Describes a notebook list feed.""" + entry = [NotebookListEntry] + + diff --git a/gam/gdata/oauth/CHANGES.txt b/gam/gdata/oauth/CHANGES.txt new file mode 100755 index 00000000000..7c2b92cd943 --- /dev/null +++ b/gam/gdata/oauth/CHANGES.txt @@ -0,0 +1,17 @@ +1. Moved oauth.py to __init__.py + +2. Refactored __init__.py for compatibility with python 2.2 (Issue 59) + +3. Refactored rsa.py for compatibility with python 2.2 (Issue 59) + +4. Refactored OAuthRequest.from_token_and_callback since the callback url was +getting double url-encoding the callback url in place of single. (Issue 43) + +5. Added build_signature_base_string method to rsa.py since it used the +implementation of this method from oauth.OAuthSignatureMethod_HMAC_SHA1 which +was incorrect since it enforced the presence of a consumer secret and a token +secret. Also, changed its super class from oauth.OAuthSignatureMethod_HMAC_SHA1 +to oauth.OAuthSignatureMethod (Issue 64) + +6. Refactored .to_header method since it returned non-oauth params +as well which was incorrect. (Issue 31) \ No newline at end of file diff --git a/gam/gdata/oauth/__init__.py b/gam/gdata/oauth/__init__.py new file mode 100755 index 00000000000..baf543ed4a8 --- /dev/null +++ b/gam/gdata/oauth/__init__.py @@ -0,0 +1,524 @@ +import cgi +import urllib +import time +import random +import urlparse +import hmac +import binascii + +VERSION = '1.0' # Hi Blaine! 
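# Editor's usage sketch (not part of the patch): the request-signing flow built
# from the OAuthConsumer, OAuthRequest and OAuthSignatureMethod_HMAC_SHA1
# classes defined below in this module. Key, secret and URL are placeholders.
import gdata.oauth as oauth

consumer = oauth.OAuthConsumer('example.com', 'consumer-secret')
request = oauth.OAuthRequest.from_consumer_and_token(
    consumer, token=None, http_method='GET',
    http_url='http://example.com/feeds/resource', parameters={'q': 'test'})
request.sign_request(oauth.OAuthSignatureMethod_HMAC_SHA1(), consumer, None)

print request.to_header()['Authorization']   # OAuth realm="", oauth_..., oauth_signature="..."
print request.to_url()                       # the same signed parameters as a GET url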
+HTTP_METHOD = 'GET' +SIGNATURE_METHOD = 'PLAINTEXT' + +# Generic exception class +class OAuthError(RuntimeError): + def __init__(self, message='OAuth error occured.'): + self.message = message + +# optional WWW-Authenticate header (401 error) +def build_authenticate_header(realm=''): + return {'WWW-Authenticate': 'OAuth realm="%s"' % realm} + +# url escape +def escape(s): + # escape '/' too + return urllib.quote(s, safe='~') + +# util function: current timestamp +# seconds since epoch (UTC) +def generate_timestamp(): + return int(time.time()) + +# util function: nonce +# pseudorandom number +def generate_nonce(length=8): + return ''.join([str(random.randint(0, 9)) for i in range(length)]) + +# OAuthConsumer is a data type that represents the identity of the Consumer +# via its shared secret with the Service Provider. +class OAuthConsumer(object): + key = None + secret = None + + def __init__(self, key, secret): + self.key = key + self.secret = secret + +# OAuthToken is a data type that represents an End User via either an access +# or request token. +class OAuthToken(object): + # access tokens and request tokens + key = None + secret = None + + ''' + key = the token + secret = the token secret + ''' + def __init__(self, key, secret): + self.key = key + self.secret = secret + + def to_string(self): + return urllib.urlencode({'oauth_token': self.key, 'oauth_token_secret': self.secret}) + + # return a token from something like: + # oauth_token_secret=digg&oauth_token=digg + def from_string(s): + params = cgi.parse_qs(s, keep_blank_values=False) + key = params['oauth_token'][0] + secret = params['oauth_token_secret'][0] + return OAuthToken(key, secret) + from_string = staticmethod(from_string) + + def __str__(self): + return self.to_string() + +# OAuthRequest represents the request and can be serialized +class OAuthRequest(object): + ''' + OAuth parameters: + - oauth_consumer_key + - oauth_token + - oauth_signature_method + - oauth_signature + - oauth_timestamp + - oauth_nonce + - oauth_version + ... any additional parameters, as defined by the Service Provider. 
+ ''' + parameters = None # oauth parameters + http_method = HTTP_METHOD + http_url = None + version = VERSION + + def __init__(self, http_method=HTTP_METHOD, http_url=None, parameters=None): + self.http_method = http_method + self.http_url = http_url + self.parameters = parameters or {} + + def set_parameter(self, parameter, value): + self.parameters[parameter] = value + + def get_parameter(self, parameter): + try: + return self.parameters[parameter] + except: + raise OAuthError('Parameter not found: %s' % parameter) + + def _get_timestamp_nonce(self): + return self.get_parameter('oauth_timestamp'), self.get_parameter('oauth_nonce') + + # get any non-oauth parameters + def get_nonoauth_parameters(self): + parameters = {} + for k, v in self.parameters.iteritems(): + # ignore oauth parameters + if k.find('oauth_') < 0: + parameters[k] = v + return parameters + + # serialize as a header for an HTTPAuth request + def to_header(self, realm=''): + auth_header = 'OAuth realm="%s"' % realm + # add the oauth parameters + if self.parameters: + for k, v in self.parameters.iteritems(): + if k[:6] == 'oauth_': + auth_header += ', %s="%s"' % (k, escape(str(v))) + return {'Authorization': auth_header} + + # serialize as post data for a POST request + def to_postdata(self): + return '&'.join(['%s=%s' % (escape(str(k)), escape(str(v))) for k, v in self.parameters.iteritems()]) + + # serialize as a url for a GET request + def to_url(self): + return '%s?%s' % (self.get_normalized_http_url(), self.to_postdata()) + + # return a string that consists of all the parameters that need to be signed + def get_normalized_parameters(self): + params = self.parameters + try: + # exclude the signature if it exists + del params['oauth_signature'] + except: + pass + key_values = params.items() + # sort lexicographically, first after key, then after value + key_values.sort() + # combine key value pairs in string and escape + return '&'.join(['%s=%s' % (escape(str(k)), escape(str(v))) for k, v in key_values]) + + # just uppercases the http method + def get_normalized_http_method(self): + return self.http_method.upper() + + # parses the url and rebuilds it to be scheme://host/path + def get_normalized_http_url(self): + parts = urlparse.urlparse(self.http_url) + url_string = '%s://%s%s' % (parts[0], parts[1], parts[2]) # scheme, netloc, path + return url_string + + # set the signature parameter to the result of build_signature + def sign_request(self, signature_method, consumer, token): + # set the signature method + self.set_parameter('oauth_signature_method', signature_method.get_name()) + # set the signature + self.set_parameter('oauth_signature', self.build_signature(signature_method, consumer, token)) + + def build_signature(self, signature_method, consumer, token): + # call the build signature method within the signature method + return signature_method.build_signature(self, consumer, token) + + def from_request(http_method, http_url, headers=None, parameters=None, query_string=None): + # combine multiple parameter sources + if parameters is None: + parameters = {} + + # headers + if headers and 'Authorization' in headers: + auth_header = headers['Authorization'] + # check that the authorization header is OAuth + if auth_header.index('OAuth') > -1: + try: + # get the parameters from the header + header_params = OAuthRequest._split_header(auth_header) + parameters.update(header_params) + except: + raise OAuthError('Unable to parse OAuth parameters from Authorization header.') + + # GET or POST query string + if 
query_string: + query_params = OAuthRequest._split_url_string(query_string) + parameters.update(query_params) + + # URL parameters + param_str = urlparse.urlparse(http_url)[4] # query + url_params = OAuthRequest._split_url_string(param_str) + parameters.update(url_params) + + if parameters: + return OAuthRequest(http_method, http_url, parameters) + + return None + from_request = staticmethod(from_request) + + def from_consumer_and_token(oauth_consumer, token=None, http_method=HTTP_METHOD, http_url=None, parameters=None): + if not parameters: + parameters = {} + + defaults = { + 'oauth_consumer_key': oauth_consumer.key, + 'oauth_timestamp': generate_timestamp(), + 'oauth_nonce': generate_nonce(), + 'oauth_version': OAuthRequest.version, + } + + defaults.update(parameters) + parameters = defaults + + if token: + parameters['oauth_token'] = token.key + + return OAuthRequest(http_method, http_url, parameters) + from_consumer_and_token = staticmethod(from_consumer_and_token) + + def from_token_and_callback(token, callback=None, http_method=HTTP_METHOD, http_url=None, parameters=None): + if not parameters: + parameters = {} + + parameters['oauth_token'] = token.key + + if callback: + parameters['oauth_callback'] = callback + + return OAuthRequest(http_method, http_url, parameters) + from_token_and_callback = staticmethod(from_token_and_callback) + + # util function: turn Authorization: header into parameters, has to do some unescaping + def _split_header(header): + params = {} + parts = header.split(',') + for param in parts: + # ignore realm parameter + if param.find('OAuth realm') > -1: + continue + # remove whitespace + param = param.strip() + # split key-value + param_parts = param.split('=', 1) + # remove quotes and unescape the value + params[param_parts[0]] = urllib.unquote(param_parts[1].strip('\"')) + return params + _split_header = staticmethod(_split_header) + + # util function: turn url string into parameters, has to do some unescaping + def _split_url_string(param_str): + parameters = cgi.parse_qs(param_str, keep_blank_values=False) + for k, v in parameters.iteritems(): + parameters[k] = urllib.unquote(v[0]) + return parameters + _split_url_string = staticmethod(_split_url_string) + +# OAuthServer is a worker to check a requests validity against a data store +class OAuthServer(object): + timestamp_threshold = 300 # in seconds, five minutes + version = VERSION + signature_methods = None + data_store = None + + def __init__(self, data_store=None, signature_methods=None): + self.data_store = data_store + self.signature_methods = signature_methods or {} + + def set_data_store(self, oauth_data_store): + self.data_store = data_store + + def get_data_store(self): + return self.data_store + + def add_signature_method(self, signature_method): + self.signature_methods[signature_method.get_name()] = signature_method + return self.signature_methods + + # process a request_token request + # returns the request token on success + def fetch_request_token(self, oauth_request): + try: + # get the request token for authorization + token = self._get_token(oauth_request, 'request') + except OAuthError: + # no token required for the initial token request + version = self._get_version(oauth_request) + consumer = self._get_consumer(oauth_request) + self._check_signature(oauth_request, consumer, None) + # fetch a new token + token = self.data_store.fetch_request_token(consumer) + return token + + # process an access_token request + # returns the access token on success + def fetch_access_token(self, 
oauth_request): + version = self._get_version(oauth_request) + consumer = self._get_consumer(oauth_request) + # get the request token + token = self._get_token(oauth_request, 'request') + self._check_signature(oauth_request, consumer, token) + new_token = self.data_store.fetch_access_token(consumer, token) + return new_token + + # verify an api call, checks all the parameters + def verify_request(self, oauth_request): + # -> consumer and token + version = self._get_version(oauth_request) + consumer = self._get_consumer(oauth_request) + # get the access token + token = self._get_token(oauth_request, 'access') + self._check_signature(oauth_request, consumer, token) + parameters = oauth_request.get_nonoauth_parameters() + return consumer, token, parameters + + # authorize a request token + def authorize_token(self, token, user): + return self.data_store.authorize_request_token(token, user) + + # get the callback url + def get_callback(self, oauth_request): + return oauth_request.get_parameter('oauth_callback') + + # optional support for the authenticate header + def build_authenticate_header(self, realm=''): + return {'WWW-Authenticate': 'OAuth realm="%s"' % realm} + + # verify the correct version request for this server + def _get_version(self, oauth_request): + try: + version = oauth_request.get_parameter('oauth_version') + except: + version = VERSION + if version and version != self.version: + raise OAuthError('OAuth version %s not supported.' % str(version)) + return version + + # figure out the signature with some defaults + def _get_signature_method(self, oauth_request): + try: + signature_method = oauth_request.get_parameter('oauth_signature_method') + except: + signature_method = SIGNATURE_METHOD + try: + # get the signature method object + signature_method = self.signature_methods[signature_method] + except: + signature_method_names = ', '.join(self.signature_methods.keys()) + raise OAuthError('Signature method %s not supported try one of the following: %s' % (signature_method, signature_method_names)) + + return signature_method + + def _get_consumer(self, oauth_request): + consumer_key = oauth_request.get_parameter('oauth_consumer_key') + if not consumer_key: + raise OAuthError('Invalid consumer key.') + consumer = self.data_store.lookup_consumer(consumer_key) + if not consumer: + raise OAuthError('Invalid consumer.') + return consumer + + # try to find the token for the provided request token key + def _get_token(self, oauth_request, token_type='access'): + token_field = oauth_request.get_parameter('oauth_token') + token = self.data_store.lookup_token(token_type, token_field) + if not token: + raise OAuthError('Invalid %s token: %s' % (token_type, token_field)) + return token + + def _check_signature(self, oauth_request, consumer, token): + timestamp, nonce = oauth_request._get_timestamp_nonce() + self._check_timestamp(timestamp) + self._check_nonce(consumer, token, nonce) + signature_method = self._get_signature_method(oauth_request) + try: + signature = oauth_request.get_parameter('oauth_signature') + except: + raise OAuthError('Missing signature.') + # validate the signature + valid_sig = signature_method.check_signature(oauth_request, consumer, token, signature) + if not valid_sig: + key, base = signature_method.build_signature_base_string(oauth_request, consumer, token) + raise OAuthError('Invalid signature. 
Expected signature base string: %s' % base) + built = signature_method.build_signature(oauth_request, consumer, token) + + def _check_timestamp(self, timestamp): + # verify that timestamp is recentish + timestamp = int(timestamp) + now = int(time.time()) + lapsed = now - timestamp + if lapsed > self.timestamp_threshold: + raise OAuthError('Expired timestamp: given %d and now %s has a greater difference than threshold %d' % (timestamp, now, self.timestamp_threshold)) + + def _check_nonce(self, consumer, token, nonce): + # verify that the nonce is uniqueish + nonce = self.data_store.lookup_nonce(consumer, token, nonce) + if nonce: + raise OAuthError('Nonce already used: %s' % str(nonce)) + +# OAuthClient is a worker to attempt to execute a request +class OAuthClient(object): + consumer = None + token = None + + def __init__(self, oauth_consumer, oauth_token): + self.consumer = oauth_consumer + self.token = oauth_token + + def get_consumer(self): + return self.consumer + + def get_token(self): + return self.token + + def fetch_request_token(self, oauth_request): + # -> OAuthToken + raise NotImplementedError + + def fetch_access_token(self, oauth_request): + # -> OAuthToken + raise NotImplementedError + + def access_resource(self, oauth_request): + # -> some protected resource + raise NotImplementedError + +# OAuthDataStore is a database abstraction used to lookup consumers and tokens +class OAuthDataStore(object): + + def lookup_consumer(self, key): + # -> OAuthConsumer + raise NotImplementedError + + def lookup_token(self, oauth_consumer, token_type, token_token): + # -> OAuthToken + raise NotImplementedError + + def lookup_nonce(self, oauth_consumer, oauth_token, nonce, timestamp): + # -> OAuthToken + raise NotImplementedError + + def fetch_request_token(self, oauth_consumer): + # -> OAuthToken + raise NotImplementedError + + def fetch_access_token(self, oauth_consumer, oauth_token): + # -> OAuthToken + raise NotImplementedError + + def authorize_request_token(self, oauth_token, user): + # -> OAuthToken + raise NotImplementedError + +# OAuthSignatureMethod is a strategy class that implements a signature method +class OAuthSignatureMethod(object): + def get_name(self): + # -> str + raise NotImplementedError + + def build_signature_base_string(self, oauth_request, oauth_consumer, oauth_token): + # -> str key, str raw + raise NotImplementedError + + def build_signature(self, oauth_request, oauth_consumer, oauth_token): + # -> str + raise NotImplementedError + + def check_signature(self, oauth_request, consumer, token, signature): + built = self.build_signature(oauth_request, consumer, token) + return built == signature + +class OAuthSignatureMethod_HMAC_SHA1(OAuthSignatureMethod): + + def get_name(self): + return 'HMAC-SHA1' + + def build_signature_base_string(self, oauth_request, consumer, token): + sig = ( + escape(oauth_request.get_normalized_http_method()), + escape(oauth_request.get_normalized_http_url()), + escape(oauth_request.get_normalized_parameters()), + ) + + key = '%s&' % escape(consumer.secret) + if token: + key += escape(token.secret) + raw = '&'.join(sig) + return key, raw + + def build_signature(self, oauth_request, consumer, token): + # build the base signature string + key, raw = self.build_signature_base_string(oauth_request, consumer, token) + + # hmac object + try: + import hashlib # 2.5 + hashed = hmac.new(key, raw, hashlib.sha1) + except: + import sha # deprecated + hashed = hmac.new(key, raw, sha) + + # calculate the digest base 64 + return 
binascii.b2a_base64(hashed.digest())[:-1] + +class OAuthSignatureMethod_PLAINTEXT(OAuthSignatureMethod): + + def get_name(self): + return 'PLAINTEXT' + + def build_signature_base_string(self, oauth_request, consumer, token): + # concatenate the consumer key and secret + sig = escape(consumer.secret) + '&' + if token: + sig = sig + escape(token.secret) + return sig + + def build_signature(self, oauth_request, consumer, token): + return self.build_signature_base_string(oauth_request, consumer, token) diff --git a/gam/gdata/oauth/__init__.pyc b/gam/gdata/oauth/__init__.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2591df8169be4d1b23353ac6c9aacb79ab355f11 GIT binary patch literal 21845 zcmdU1+ix7#eLl0x+ft+`i6SLRvb~lghjAq;N^Ch**oy77Jd`6_aJpFT49OLj zyVTB55?w2CB(-tdpbc8IX%ip;a%s}ENrALz3bYS>Xo0@80s7E~J`@G=+J^xB1JdvN zesfuoQX{y#ZcAwn&zw0kbI$Mbo!{jQi~l)3I=k_s3w4+L6!H5LIHGmWxd8t?*L3c3 zR`=XxFKsWl%Z0RFbeD^1eZXBFNb7^{@?csYa+il(fV+lWfc_&cK+lp323PPha=jHA_U1&UXhLk=P%5%hpy`Ga>(1u?RtHq69#j2UQLgA_8dp? zw&&hT4 zPFnHc;6zT2xB;GsqSH`?lM5PoCEA56#*$80Ypy+W_(*hEjh8N6Iz~o^t+-LI#i7lC zPmMVx_0H$j!zslxc>LlL#S?an$5T9)do^s;aX~+%v^aDYm4rjEB*3B6@-Pm8 zVXVmJU}e~aBQ6}KRGSIH#u6Dx`F)R!%qjH#^6wOeobgW@Pr)6)A&q|&6@%c0B=8D! z+jA$O_*XzDEIc4W3dlRqS1WBbCY@uVOxJle5l=qbr{2gG0a}A4?Vw9U1d0;izKE?} z8@H=~mqu&(;}}Ty0>sf}v**R`1qcv_1qfINy~N~a0CO`pK^L42Bxgl}AhlHpXd+RG zcjG~;VYpshY}Q(<)oZopMi}`duCUNQjM^+;E8wdyH)MmX*E&&Hy&4G@KUr%=phz07y{OyHrs~ zyTfjEIG_#()Zu^&Se)swPMG?{JSNrsmo9OR`3`J?k2V-DtP6 z_PM#aa;>#lt_49O#)Vq5oX_uQxfYd!aH-J>gYx2L83uZJA?#dh)Whr>t5jWKa#C63nsT{+_&4~n}F_oExORas*^E_D7KnAo-Lm_W@uI0Jc zHBA~!m55Hx1+F<%kaDXXmzUZbt>9GoNF=*lERrK@r<56=sMfzp!(qrL4cEj)BPg6# z^d@j*OH(#=8i}?vVWOC1QkTcFRLBe(F0qLLlPaO)*V?3gwv0WAj{RQH_Tn8?PBin^ zsv5x>+V5+?yo8HTMOZ91o=s+H&ga?B%(*uU4#vw(nJZ@3ffQH@R^f6@(}1CJ7u@Nq z(+>YH;ViZEXj>Z*WWJciO39bJHy{@{)*_kzfR><3lhuowvMQ(TT7QIsGD=#3LN?A! zC8Ddi6wwNBa1IU?ro7#roKh(WF@ToCJdOyKU#bW+@?^?%8t%rz7D_%VN*3MC0TK-a zEMpm6Ex68OBwwIbbB^C0ptRvXglzh0JoM^g{C}whE{WDyZ4t^LdG8&{X~?yr8y=NV z9EHv)#3R*bc&FWIyajuu-7=Ds+Ws%H*u{b_rJNK)RX#!j&A?0&knw+s9Z$0(4l8rg=FPWs2hC5%~^M0>N1GscQyfTWH5&8SVN^CN7ikQP$g0 z%n0TkoOUZYN6oQ_JO@xAV5hdWQ3&Lrlv${@lFabg1f%0zK{{)-X5+0esHUs1Ph+y* zXhu24{IKZ1i(2>8$bgLVm`XA)K-lIEvKBLa=2B~z3l$e8v>2jMMGJ{sc#sA7r<9gL zCL~f&XeJ~C3o-pO$w_kBt(g>(GbSWlyee_N5M-8|e}?1E^i1 z13!emvU*@lre0y6C5z!$gT{>WIXHfilj1;W{}c;5uzC)7V};Q|of4NGk~(_uCUQpq z)XJlg;L~1{wX?Cl9(MXIOjLj^Q|dA?zk;jX0!Fo6Wg^xt1jvqe~9f`CU0?5ICQo*8>lx2uHXbktACTt$x5ruFe zjwA!%MFK^Qkk`Pdk?8YCThz6 z>I?ZS7yw7E+A#;8rVxcX6=4)drVu~H39T3yXd|vOCF`;1V4z?Mom=j3c47$NM1qk)#!uinzj@PzN^ zcHE`QH;2>P6?pY9B4h-vpj9Xi!)fN`NP5SJURX-+DCvcGd*?sc1AX4b;^$-iew$C4G&sL+fuIFA>Zl$Wz;B#(m0x(w-Ct z({WUhsk1!5k8vs5kAe4g5j@(SfKI&b@K1%8q_8ITc2*^22RvecSE88eO zq{~st2Q5V+YB!Z?&E{gQzG{&P(V5gWtDmSVQq_}QTgccS+0Q7arAuT>-~ST~C~=QS zISTu@Sz@GC2H9W02}}>BC1lBfLd4sGyRmBaZ(UA4#Cr=yKjW?*658UD)PS)7)S(ZO zZ!GJOd(#V29vI6wf$$q|Ac&%M*v*{Ab1XhUsH@1cKN``8v3RO>7YI;w=^5Eb5!b-O zvYAAGfuq3CKZPrHiX6?lC5xy48!gM|bLxUhLo%OcwEuE?laNY&t!l)%K8d~mI^M|@ zGh(ib@=ZvZf?SUk_7rB#m8Dc?lC43?(gd%-D8i32N;ggjTCgb5t$i#H&cHvpB6kI7 zHIq=CL7Btd5gGpoUSgz_BSP%-KgfbYYY~8P*4ik{EMq5AT%M9|jED%{ zD|lJNu+Gp9<*b#r-ZNM#mY4u4jg6ulDDA{C#Fjmej1{K#;20e%mUiM86s4qWjNW%` zLm&)t^3mU;2$28*2B0js5SbrnFY-cZb|^{9ASr~sb=XyBNg-^kE4n+l1!*En3L#Bo zNgW~^jAEb#aHH7VSw%HCjBHW0i5OO<66IoISX(CGsAx&gSA?1PsWP8vJ znIdF$@HAwKER%zzkY#W7rx}~ZQApJG@cbc6lCY#mKJ3%=J=6-GYTBvwP47&G4Pz^? 
za@yE7v71ENlBbDsU(LUc>;4TCxhV8f3Mjr4)RF;I=zS|5$$|2qsA>}(d{vuVuqE%n z;pOpmc}r#(5#u?lGEU*T^@QzlTCukju}9^QfBaysN3%_0gWX(oB2qm zyGELlnhGOwGpb;+gu$X>p780r2hY1C75)X(at6qm>kq;A;zh{iRksT33@2mKGilO8 znc|`8Tt#IQVSUja)1j511i+=%MK^$Td3Drvnsmw7MgBdl2v`I-MQKKb8PPI^x%MNs zU$n@;kbE(7)e1@(LH&~8qrf^b?d1$f_bi#gog(JyFb=G?#LPAChzv_@N=bFKz7p0~ z^V!>KLGh1iGGjkreI<3#%>d0K-d$FAG8{9PF%R>+`@%8*79P(Ga}rrA7N%h(Ou$x{ zL``~rm-F%_x@T-MOOB^9xN??=LkSa&D`HaSiHMDApBjZE6^MN(QLM)3R5PWD(|WKb{-#cC{V zrpocLjDVCy5pe7t8E-0;Nux<1)Ec6cg17Kbva*j9NHS1i`i?{hinwNJ6Zu%2{sy*j z_h+UIBjy8lGqZ~ba`JtZznjPnr8+3GChA3?|3nvl32l+WNQ?S z@y*G*xB)lzQfpk1Lix1H`5+YeDqUO3C8CiC?7d1i71L+>k1kcVcLBW368e4}&yVO` z7-5oy21!(HV~CKfK(+=SFqjUL?fwP}A)F}0&~}$mcssq?AA%7@ULnC~x^O$gB#u$w z+>FUF6^VyLVI7qRAl~NgglwP-H!oglJw@t*c3kB}P(wOfYT$;${wJ-A$|s^V*%BhU z^6eG>6_nDiqveQE)P>ZJF57;mZ4@rYoy{`#1((}P+H$niZZ_N3u^m&pNVL9?`daW8 z&Ocu{bMZC*T?bIW<}UCwfuc5KAy- zp&M5!8pp5a1Jfn(g-L#(HL;btN;% z0>yRH$1DE1fY$z~$~$=Sa;_>9ipA_v>Hiw8$yX%MkP0I4^QhcKViSL?1Q2$~JJvbI zeGx~VPvB*n9^erJAZ-!Pswmhf9;XBZuxR~}R7*TXLy?bIpO8;|OolnaT!v*QTU;>S zTo2{s+v!pxYzBSI6VVX7N^%mjX<;F#MbzOWIPwg#e~c5OA~^o02u2gh>{Yn~sXrl- zG8dG13^G>sIjWM;_{ zLJ9kd8c@mI9Az|-FgDt~E}%E=OrM2e-9%)AekmteQL0jLy_XwN1ZXN}Dm@oHg&rxB z^XBro8|z^m3rOCzyjVlV$hMkG3QD+B!KrR%Cfm{_jY&Kl?e6k_oBcD*Q;u|t0~9k! zuqAI=PMH7x|@yA-YEAmaY0 z_vbN?8V3nTOY8Kz292#1x36bA1b`1rA$@$yu8xp6IQm5&72ejC8TxXxv8 z1^aDll{d;o#;;!3TBq7z0d?Ph?x3`~i~KK(Fxx7ow1SXoX<4_)zG&Ef3);VY;?6;>G35WJG zou+l`o^+=5RpBRC-(*f}7qFTnYsr$~rcNrj5Sj1)71a7eoZrWj zzeM8Do^g`V$}G3vbbWvDb)Igp2w2cIwQv&G2#aaI#oDj100EtX3H}|{zQE!)So|gn zBDGJ8(J=u_p!)$7UpZqBL6C!ASPl0BSo+ zqhrIRU8Rvcc0J#0;JcwZ?$!?;WwGl|p%Nk>4BH1Ef5Xs6spu2v!0*DheV~W(-pH0M&%jArYm7a5YF{$E<9N8t1^-p|0kjY~UoEJM{_kOlQ^6O+UEDFqD2dfXecJ7&1Q)Gu%|*ge`zn zOZ5R#E!BtApr!hTiJM%=(~9GKjZ_oEsewjpQeXT z1@tM*KE6c=pP~^Cja8SXt4bdX)N+(eZpHuyMAz&0pn3r zAc_wNjDSI+MV+^&$2{r^l-$#5FosCiv9DE^jv|zhsXaThcki-HaSXtcR_WPSnvuKcwh$IHJc;{9HAL_7>4^yY`YAFo%-dpFZp0yN>Z)Hhr;< z-EwO5k}o9tzmpJLOW`{4O@vndp2ra}0swr0gxN+E#G#T?1W5IFc#>0uPjrf46{6Re z6x)#_P%w7d^4Fb3=C64{o1XEkr*WYL8lWTOR(-i+AN3*TebTHVsdprLTwgz#p^>X?D=7wLdn|0103Vb^5qZ+7UGEQNDa$`0WjRu(eHT;MJU--2r;#eU zh8dlI6(`%3`OP%4y*W#g28i7&a*e1J(`MQZ!0SC>+pthB8u8IJIfJDDQ%m7En4B9xZ;>s)@pSN_l4Kh5Utcsr~wu2 zW?pQxYEiw>P%>fhBQ?c_Pd=&GV9;2`pOUc0^J%uBBL^dz?Ed#z`~Zb*xy8%F!2co6 z`@=87<4<#b9V`~TF?<@za;9jR+54eYJ|NY4sh@v>QT-c7#dpKfD3-?W4^!D6*sG(n zesE{+Van%IZO*Lq*}U+8Ox@_jWsr(Hk0bghiUenr!hr8n z4O-K!7H4p`1t^3jgpYjF)>fJPQ(nbkMK5_wq!{2VNBH=HVMJz1We72ei`x<6t|gvN z+L8?IiX8IkJr_`_&5oQ!|7(i!BaGu`5XW3mhG!3;&3~MQHlZ_};qPPd5{o}$@#icQ z2#2UR|BEOTTJ$&Y2MgCi#o3hVusFBmgpAN9G6JUd;uR#{{6lOkvpB|rF=_vsEWXQv z_J&Vg_Bqa6jC#!uA@6DXtC1h4az=9~#(-hjKmK_iZ>Tsuh-ml_{>0DV^yu_M(|g-bs+!% literal 0 HcmV?d00001 diff --git a/gam/gdata/oauth/rsa.py b/gam/gdata/oauth/rsa.py new file mode 100755 index 00000000000..f8d9b8503f7 --- /dev/null +++ b/gam/gdata/oauth/rsa.py @@ -0,0 +1,120 @@ +#!/usr/bin/python + +""" +requires tlslite - http://trevp.net/tlslite/ + +""" + +import binascii + +from gdata.tlslite.utils import keyfactory +from gdata.tlslite.utils import cryptomath + +# XXX andy: ugly local import due to module name, oauth.oauth +import gdata.oauth as oauth + +class OAuthSignatureMethod_RSA_SHA1(oauth.OAuthSignatureMethod): + def get_name(self): + return "RSA-SHA1" + + def _fetch_public_cert(self, oauth_request): + # not implemented yet, ideas are: + # (1) do a lookup in a table of trusted certs keyed off of consumer + # (2) fetch via http using a url provided by the requester + # (3) some sort of specific discovery code based on request + # + # either way should return a string 
representation of the certificate
+        raise NotImplementedError
+
+    def _fetch_private_cert(self, oauth_request):
+        # not implemented yet, ideas are:
+        # (1) do a lookup in a table of trusted certs keyed off of consumer
+        #
+        # either way should return a string representation of the certificate
+        raise NotImplementedError
+
+    def build_signature_base_string(self, oauth_request, consumer, token):
+        sig = (
+            oauth.escape(oauth_request.get_normalized_http_method()),
+            oauth.escape(oauth_request.get_normalized_http_url()),
+            oauth.escape(oauth_request.get_normalized_parameters()),
+        )
+        key = ''
+        raw = '&'.join(sig)
+        return key, raw
+
+    def build_signature(self, oauth_request, consumer, token):
+        key, base_string = self.build_signature_base_string(oauth_request,
+                                                            consumer,
+                                                            token)
+
+        # Fetch the private key cert based on the request
+        cert = self._fetch_private_cert(oauth_request)
+
+        # Pull the private key from the certificate
+        privatekey = keyfactory.parsePrivateKey(cert)
+
+        # Convert base_string to bytes
+        #base_string_bytes = cryptomath.createByteArraySequence(base_string)
+
+        # Sign using the key
+        signed = privatekey.hashAndSign(base_string)
+
+        return binascii.b2a_base64(signed)[:-1]
+
+    def check_signature(self, oauth_request, consumer, token, signature):
+        # base64 is not imported at module level, so import it here before use
+        import base64
+        decoded_sig = base64.b64decode(signature)
+
+        key, base_string = self.build_signature_base_string(oauth_request,
+                                                            consumer,
+                                                            token)
+
+        # Fetch the public key cert based on the request
+        cert = self._fetch_public_cert(oauth_request)
+
+        # Pull the public key from the certificate
+        publickey = keyfactory.parsePEMKey(cert, public=True)
+
+        # Check the signature
+        ok = publickey.hashAndVerify(decoded_sig, base_string)
+
+        return ok
+
+
+class TestOAuthSignatureMethod_RSA_SHA1(OAuthSignatureMethod_RSA_SHA1):
+    def _fetch_public_cert(self, oauth_request):
+        cert = """
+-----BEGIN CERTIFICATE-----
+MIIBpjCCAQ+gAwIBAgIBATANBgkqhkiG9w0BAQUFADAZMRcwFQYDVQQDDA5UZXN0
+IFByaW5jaXBhbDAeFw03MDAxMDEwODAwMDBaFw0zODEyMzEwODAwMDBaMBkxFzAV
+BgNVBAMMDlRlc3QgUHJpbmNpcGFsMIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKB
+gQC0YjCwIfYoprq/FQO6lb3asXrxLlJFuCvtinTF5p0GxvQGu5O3gYytUvtC2JlY
+zypSRjVxwxrsuRcP3e641SdASwfrmzyvIgP08N4S0IFzEURkV1wp/IpH7kH41Etb
+mUmrXSwfNZsnQRE5SYSOhh+LcK2wyQkdgcMv11l4KoBkcwIDAQABMA0GCSqGSIb3
+DQEBBQUAA4GBAGZLPEuJ5SiJ2ryq+CmEGOXfvlTtEL2nuGtr9PewxkgnOjZpUy+d
+4TvuXJbNQc8f4AMWL/tO9w0Fk80rWKp9ea8/df4qMq5qlFWlx6yOLQxumNOmECKb
+WpkUQDIDJEoFUzKMVuJf4KO/FJ345+BNLGgbJ6WujreoM1X/gYfdnJ/J
+-----END CERTIFICATE-----
+"""
+        return cert
+
+    def _fetch_private_cert(self, oauth_request):
+        cert = """
+-----BEGIN PRIVATE KEY-----
+MIICdgIBADANBgkqhkiG9w0BAQEFAASCAmAwggJcAgEAAoGBALRiMLAh9iimur8V
+A7qVvdqxevEuUkW4K+2KdMXmnQbG9Aa7k7eBjK1S+0LYmVjPKlJGNXHDGuy5Fw/d
+7rjVJ0BLB+ubPK8iA/Tw3hLQgXMRRGRXXCn8ikfuQfjUS1uZSatdLB81mydBETlJ
+hI6GH4twrbDJCR2Bwy/XWXgqgGRzAgMBAAECgYBYWVtleUzavkbrPjy0T5FMou8H
+X9u2AC2ry8vD/l7cqedtwMPp9k7TubgNFo+NGvKsl2ynyprOZR1xjQ7WgrgVB+mm
+uScOM/5HVceFuGRDhYTCObE+y1kxRloNYXnx3ei1zbeYLPCHdhxRYW7T0qcynNmw
+rn05/KO2RLjgQNalsQJBANeA3Q4Nugqy4QBUCEC09SqylT2K9FrrItqL2QKc9v0Z
+zO2uwllCbg0dwpVuYPYXYvikNHHg+aCWF+VXsb9rpPsCQQDWR9TT4ORdzoj+Nccn
+qkMsDmzt0EfNaAOwHOmVJ2RVBspPcxt5iN4HI7HNeG6U5YsFBb+/GZbgfBT3kpNG
+WPTpAkBI+gFhjfJvRw38n3g/+UeAkwMI2TJQS4n8+hid0uus3/zOjDySH3XHCUno
+cn1xOJAyZODBo47E+67R4jV1/gzbAkEAklJaspRPXP877NssM5nAZMU0/O/NGCZ+
+3jPgDUno6WbJn5cqm8MqWhW1xGkImgRk+fkDBquiq4gPiT898jusgQJAd5Zrr6Q8
+AO/0isr/3aa6O6NLQxISLKcPDk2NOccAfS/xOtfOz4sJYM3+Bs4Io9+dZGSDCA54
+Lw03eHTNQghS0A==
+-----END PRIVATE KEY-----
+"""
+        return cert
diff --git 
a/gam/gdata/oauth/rsa.pyc b/gam/gdata/oauth/rsa.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2e4b61c5e90d5363ff99384d120100784420c026 GIT binary patch literal 4734 zcmcgwd2i#!6{jSxY_mysyXlo4s0$Q@TXmN@EvaaN4i8ZmNs-ju0s=2l6c156biw(j z=~w7y=?CivXlF)Vdy`F31aK;OhBJ?&dB^X)H*cK#*T>{-`pAt z%weA)%q-z$h&ddHtOv);!eSijI3?yL{@y0c(Las*3FxnRg#2%q^Kma zcH!H=N~nid6iGV&h9B`Z1Am#2uyd{BQZtfGbbO*2(J@RmzLu<#U&194NRyT7*nyU$ zktJBkYGW=G6)e;?C_S14?MTBGe-1kfngr@XAgSkBWDhjRA<@y`0+|O5ULd=m0X6~} zToQ_Un}=H%Xv24nK^tGd4(_DhX5m1LuV4?Qc!Q+4g=6BZ09qAW-ttUB^ccX78pT!= zQ?3)~M~dC-18<|5F=cE1oATRQ?M--uY6WlN$hf&J{f@FtEzzir1~sRmSbGa!BYw7( zECS89et@<`Vh|YvJy1ikVmXmnpO)+SNXf>9zJ^4O9UDJk(Bo&M?Vla4pF=)+>Uib?ayzGtg|5ZcB2FSFuz!QZ_y~4H z_I2iU=BL?w=4A#0h%(wEfHOF`hx)-DVwffFA!0e=uUP~RLOo*R|GTy%(M4bXbOS7c}O5Q6P}ItW5`WG@vXo{BrjnH4INv9v%mGAxxNQ! zD=l7uz(lDKTUmDkEjAnnCsMtvVLk7vI5$55qM&4ETj89iZV^Cuj;wcA zO_0OrcU99eHm0Ru3~?wx^4ZE~3^@VehvXkdsf9D_1CV1Ya+b$iq~15dg&90LF~;r8 z3%fQ3Q$gOY`~opV(wqfF;GqLy<9;W=87s?hpMnb=Zt*MF5t*0SchLPP_d0vF7c$xc zpIM6q_OQjm-V8&?4D%Dqvtt96=WAAEKULW%ysVi`ug&k zy#6HlHKY#Vf%h$|4U}FjST}rAwE-&$#19{6DE)oqghnYIIxWiPXptBn;DdaFJ~YR7 z#2bcFhMiW20YeT`NukrB!!^SIxnP5RhXyumLm<+&yIr{s-`p2zAdgyr1#>|$_APnF zj%Ar;L2Wdjibb;1qL137w$`TfN@s!9+7~MV3l0Ly?IjlM1(wzz?If|>Q8+!(1lm2c zPGC&Zwm7DNAh4d|85P+cwY#D2i=n}{Vxhw?1yJM}b<3+Ay~4^Ik_Lb(F9MdK9l1x7 zw#*b~Yi8eB&NgARrCPFd=jjzKo<@hh*KMWDE^&fks}>gd!%pVYnpCl8$7Hlim~z*f zk*8y*D(mrKe~98#F$NVFh$Yno>V6sdr{k_;4~h-3sunw~6E{-aacLh?ov>YZ+tm`6 z=%hdLqbXc1&f`ET5h8lO?qL|(i)VxtZs#EZR@or-r?>j_2onAi;~YVJ)N9{?nq`kY?s@# zMyH-IPP@x$Poi4gO0`y?#XfKA-MdM;j;xJPno{;`X$D=Ydv-2PWIuGRbFd*FeJ>HA zwDJ^%&lG?t+()qCYMS^sf39EucXa#%E_pvX21;iPBjcvW%^stJF;PUYUylf`1%S!` zAM9F zMX!4$wNUKO{PB9w^SZn^ZL@rOthM%(N!FwFxLc(AbRpFTy@ms*;l8r!%l1@I6keH5 znV{jg%T!*jM`|gZt6F0A=|;&vnlv}`y5y>J$G5A=KGNAPqm=3Wk(y4X_SWW=6R-su z0FJR|^lUOtJZp5)cCH=`*2m(o))F>pqfJhmX&Erk6B|30^6JLcGLyY92%BzwnCiCJ z+7v{7*NeUKF*t^iG*?Q8wOpUrkv*mhzE7sAAqiBiJvOXX$}8+@He@87D;!JiLGd=? zY#JOYmQy-eIHuTCw?}VS?loIc)JeAevfMM8yW*TYN#%6!d5mrs&3!mdXM@>vwsTyu-L?xF zGiepZ(^zjt;UH#UZks60;jk(x=4rDoh=vi6TUUr#|CAKDrKkaE-l^ zz0fHSyRuph8ikc(7SlAYP$y~49@Tbb+Ga+`>wayjp>PmGzE>ZSL z2QCL(udBtOGMEk;^|~0xLM?y^dsL((O5~Y&fvl_tHVb#Uo9Nx3W^Db2u$`ARmb<{L&7aX3|O}; zN`?WJDs_;OrF5#s-I-7+&~df1X%@^mud)oNRmna)8LakDlM6 zxPoZ|9<>jt<8?lBiwlSXBq@JD-D4^7l(q2anNrsq$Yc19Kdc>I-Qt@z;<{Lv8v|A} z=*EHafjell.' + + You'll find this element in searches -- that is, feeds that combine the + `kind=photo' and `q=yoursearch' parameters in the request. + + See also gphoto:truncated and gphoto:snippettype. + + """ + + _tag = 'snippet' +def SnippetFromString(xml_string): + return atom.CreateClassFromXMLString(Snippet, xml_string) + +class Snippettype(PhotosBaseElement): + """The Google Photo `Snippettype' element + + When searching, this element will tell you the type of element that matches. + + You'll find this element in searches -- that is, feeds that combine the + `kind=photo' and `q=yoursearch' parameters in the request. + + See also gphoto:snippet and gphoto:truncated. + + Possible values and their interpretation: + o ALBUM_TITLE - The album title matches + o PHOTO_TAGS - The match is a tag/keyword + o PHOTO_DESCRIPTION - The match is in the photo's description + + If you discover a value not listed here, please submit a patch to update this docstring. 
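+
+  Illustrative check (not part of the original docstring; assumes `photo` is a
+  PhotoEntry taken from a kind=photo search feed):
+    if photo.snippettype is not None and photo.snippettype.text == 'PHOTO_TAGS':
+      print photo.snippet.text  # the matching keyword text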
+ + """ + + _tag = 'snippettype' +def SnippettypeFromString(xml_string): + return atom.CreateClassFromXMLString(Snippettype, xml_string) + +class Thumbnail(PhotosBaseElement): + """The Google Photo `Thumbnail' element + + Used to display user's photo thumbnail (hackergotchi). + + (Not to be confused with the element, which gives you + small versions of the photo object.)""" + + _tag = 'thumbnail' +def ThumbnailFromString(xml_string): + return atom.CreateClassFromXMLString(Thumbnail, xml_string) + +class Timestamp(PhotosBaseElement): + """The Google Photo `Timestamp' element + Represented as the number of milliseconds since January 1st, 1970. + + + Take a look at the convenience methods .isoformat() and .datetime(): + + photo_epoch = Time.text # 1180294337000 + photo_isostring = Time.isoformat() # '2007-05-27T19:32:17.000Z' + + Alternatively: + photo_datetime = Time.datetime() # (requires python >= 2.3) + """ + + _tag = 'timestamp' + def isoformat(self): + """(string) Return the timestamp as a ISO 8601 formatted string, + e.g. '2007-05-27T19:32:17.000Z' + """ + import time + epoch = float(self.text)/1000 + return time.strftime('%Y-%m-%dT%H:%M:%S.000Z', time.gmtime(epoch)) + + def datetime(self): + """(datetime.datetime) Return the timestamp as a datetime.datetime object + + Requires python 2.3 + """ + import datetime + epoch = float(self.text)/1000 + return datetime.datetime.fromtimestamp(epoch) +def TimestampFromString(xml_string): + return atom.CreateClassFromXMLString(Timestamp, xml_string) + +class Truncated(PhotosBaseElement): + """The Google Photo `Truncated' element + + You'll find this element in searches -- that is, feeds that combine the + `kind=photo' and `q=yoursearch' parameters in the request. + + See also gphoto:snippet and gphoto:snippettype. + + Possible values and their interpretation: + 0 -- unknown + """ + + _tag = 'Truncated' +def TruncatedFromString(xml_string): + return atom.CreateClassFromXMLString(Truncated, xml_string) + +class User(PhotosBaseElement): + "The Google Photo `User' element" + + _tag = 'user' +def UserFromString(xml_string): + return atom.CreateClassFromXMLString(User, xml_string) + +class Version(PhotosBaseElement): + "The Google Photo `Version' element" + + _tag = 'version' +def VersionFromString(xml_string): + return atom.CreateClassFromXMLString(Version, xml_string) + +class Width(PhotosBaseElement): + "The Google Photo `Width' element" + + _tag = 'width' +def WidthFromString(xml_string): + return atom.CreateClassFromXMLString(Width, xml_string) + +class Weight(PhotosBaseElement): + """The Google Photo `Weight' element. + + The weight of the tag is the number of times the tag + appears in the collection of tags currently being viewed. 
+ The default weight is 1, in which case this tags is omitted.""" + _tag = 'weight' +def WeightFromString(xml_string): + return atom.CreateClassFromXMLString(Weight, xml_string) + +class CommentAuthor(atom.Author): + """The Atom `Author' element in CommentEntry entries is augmented to + contain elements from the PHOTOS_NAMESPACE + + http://groups.google.com/group/Google-Picasa-Data-API/msg/819b0025b5ff5e38 + """ + _children = atom.Author._children.copy() + _children['{%s}user' % PHOTOS_NAMESPACE] = ('user', User) + _children['{%s}nickname' % PHOTOS_NAMESPACE] = ('nickname', Nickname) + _children['{%s}thumbnail' % PHOTOS_NAMESPACE] = ('thumbnail', Thumbnail) +def CommentAuthorFromString(xml_string): + return atom.CreateClassFromXMLString(CommentAuthor, xml_string) + +########################## ################################ + +class AlbumData(object): + _children = {} + _children['{%s}id' % PHOTOS_NAMESPACE] = ('gphoto_id', Id) + _children['{%s}name' % PHOTOS_NAMESPACE] = ('name', Name) + _children['{%s}location' % PHOTOS_NAMESPACE] = ('location', Location) + _children['{%s}access' % PHOTOS_NAMESPACE] = ('access', Access) + _children['{%s}bytesUsed' % PHOTOS_NAMESPACE] = ('bytesUsed', BytesUsed) + _children['{%s}timestamp' % PHOTOS_NAMESPACE] = ('timestamp', Timestamp) + _children['{%s}numphotos' % PHOTOS_NAMESPACE] = ('numphotos', Numphotos) + _children['{%s}numphotosremaining' % PHOTOS_NAMESPACE] = \ + ('numphotosremaining', Numphotosremaining) + _children['{%s}user' % PHOTOS_NAMESPACE] = ('user', User) + _children['{%s}nickname' % PHOTOS_NAMESPACE] = ('nickname', Nickname) + _children['{%s}commentingEnabled' % PHOTOS_NAMESPACE] = \ + ('commentingEnabled', CommentingEnabled) + _children['{%s}commentCount' % PHOTOS_NAMESPACE] = \ + ('commentCount', CommentCount) + ## NOTE: storing media:group as self.media, to create a self-explaining api + gphoto_id = None + name = None + location = None + access = None + bytesUsed = None + timestamp = None + numphotos = None + numphotosremaining = None + user = None + nickname = None + commentingEnabled = None + commentCount = None + +class AlbumEntry(GPhotosBaseEntry, AlbumData): + """All metadata for a Google Photos Album + + Take a look at AlbumData for metadata accessible as attributes to this object. 
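+
+  Illustrative example (not part of the original docstring; assumes `album` is
+  an AlbumEntry fetched through an authenticated PhotosService):
+    print album.title.text, album.numphotos.text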
+ + Notes: + To avoid name clashes, and to create a more sensible api, some + objects have names that differ from the original elements: + + o media:group -> self.media, + o geo:where -> self.geo, + o photo:id -> self.gphoto_id + """ + + _kind = 'album' + _children = GPhotosBaseEntry._children.copy() + _children.update(AlbumData._children.copy()) + # child tags only for Album entries, not feeds + _children['{%s}where' % GEORSS_NAMESPACE] = ('geo', Geo.Where) + _children['{%s}group' % MEDIA_NAMESPACE] = ('media', Media.Group) + media = Media.Group() + geo = Geo.Where() + + def __init__(self, author=None, category=None, content=None, + atom_id=None, link=None, published=None, + title=None, updated=None, + #GPHOTO NAMESPACE: + gphoto_id=None, name=None, location=None, access=None, + timestamp=None, numphotos=None, user=None, nickname=None, + commentingEnabled=None, commentCount=None, thumbnail=None, + # MEDIA NAMESPACE: + media=None, + # GEORSS NAMESPACE: + geo=None, + extended_property=None, + extension_elements=None, extension_attributes=None, text=None): + GPhotosBaseEntry.__init__(self, author=author, category=category, + content=content, atom_id=atom_id, link=link, + published=published, title=title, + updated=updated, text=text, + extension_elements=extension_elements, + extension_attributes=extension_attributes) + + ## NOTE: storing photo:id as self.gphoto_id, to avoid name clash with atom:id + self.gphoto_id = gphoto_id + self.name = name + self.location = location + self.access = access + self.timestamp = timestamp + self.numphotos = numphotos + self.user = user + self.nickname = nickname + self.commentingEnabled = commentingEnabled + self.commentCount = commentCount + self.thumbnail = thumbnail + self.extended_property = extended_property or [] + self.text = text + ## NOTE: storing media:group as self.media, and geo:where as geo, + ## to create a self-explaining api + self.media = media or Media.Group() + self.geo = geo or Geo.Where() + + def GetAlbumId(self): + "Return the id of this album" + + return self.GetFeedLink().href.split('/')[-1] + + def GetPhotosUri(self): + "(string) Return the uri to this albums feed of the PhotoEntry kind" + return self._feedUri('photo') + + def GetCommentsUri(self): + "(string) Return the uri to this albums feed of the CommentEntry kind" + return self._feedUri('comment') + + def GetTagsUri(self): + "(string) Return the uri to this albums feed of the TagEntry kind" + return self._feedUri('tag') + +def AlbumEntryFromString(xml_string): + return atom.CreateClassFromXMLString(AlbumEntry, xml_string) + +class AlbumFeed(GPhotosBaseFeed, AlbumData): + """All metadata for a Google Photos Album, including its sub-elements + + This feed represents an album as the container for other objects. + + A Album feed contains entries of + PhotoEntry, CommentEntry or TagEntry, + depending on the `kind' parameter in the original query. + + Take a look at AlbumData for accessible attributes. 
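+
+  Illustrative example (not part of the original docstring; assumes an
+  authenticated PhotosService `pws` and an AlbumEntry `album`):
+    album_feed = pws.GetFeed(album.GetPhotosUri())
+    print [ photo.title.text for photo in album_feed.entry ]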
+ + """ + + _children = GPhotosBaseFeed._children.copy() + _children.update(AlbumData._children.copy()) + + def GetPhotosUri(self): + "(string) Return the uri to the same feed, but of the PhotoEntry kind" + + return self._feedUri('photo') + + def GetTagsUri(self): + "(string) Return the uri to the same feed, but of the TagEntry kind" + + return self._feedUri('tag') + + def GetCommentsUri(self): + "(string) Return the uri to the same feed, but of the CommentEntry kind" + + return self._feedUri('comment') + +def AlbumFeedFromString(xml_string): + return atom.CreateClassFromXMLString(AlbumFeed, xml_string) + + +class PhotoData(object): + _children = {} + ## NOTE: storing photo:id as self.gphoto_id, to avoid name clash with atom:id + _children['{%s}id' % PHOTOS_NAMESPACE] = ('gphoto_id', Id) + _children['{%s}albumid' % PHOTOS_NAMESPACE] = ('albumid', Albumid) + _children['{%s}checksum' % PHOTOS_NAMESPACE] = ('checksum', Checksum) + _children['{%s}client' % PHOTOS_NAMESPACE] = ('client', Client) + _children['{%s}height' % PHOTOS_NAMESPACE] = ('height', Height) + _children['{%s}position' % PHOTOS_NAMESPACE] = ('position', Position) + _children['{%s}rotation' % PHOTOS_NAMESPACE] = ('rotation', Rotation) + _children['{%s}size' % PHOTOS_NAMESPACE] = ('size', Size) + _children['{%s}timestamp' % PHOTOS_NAMESPACE] = ('timestamp', Timestamp) + _children['{%s}version' % PHOTOS_NAMESPACE] = ('version', Version) + _children['{%s}width' % PHOTOS_NAMESPACE] = ('width', Width) + _children['{%s}commentingEnabled' % PHOTOS_NAMESPACE] = \ + ('commentingEnabled', CommentingEnabled) + _children['{%s}commentCount' % PHOTOS_NAMESPACE] = \ + ('commentCount', CommentCount) + ## NOTE: storing media:group as self.media, exif:tags as self.exif, and + ## geo:where as self.geo, to create a self-explaining api + _children['{%s}tags' % EXIF_NAMESPACE] = ('exif', Exif.Tags) + _children['{%s}where' % GEORSS_NAMESPACE] = ('geo', Geo.Where) + _children['{%s}group' % MEDIA_NAMESPACE] = ('media', Media.Group) + # These elements show up in search feeds + _children['{%s}snippet' % PHOTOS_NAMESPACE] = ('snippet', Snippet) + _children['{%s}snippettype' % PHOTOS_NAMESPACE] = ('snippettype', Snippettype) + _children['{%s}truncated' % PHOTOS_NAMESPACE] = ('truncated', Truncated) + gphoto_id = None + albumid = None + checksum = None + client = None + height = None + position = None + rotation = None + size = None + timestamp = None + version = None + width = None + commentingEnabled = None + commentCount = None + snippet=None + snippettype=None + truncated=None + media = Media.Group() + geo = Geo.Where() + tags = Exif.Tags() + +class PhotoEntry(GPhotosBaseEntry, PhotoData): + """All metadata for a Google Photos Photo + + Take a look at PhotoData for metadata accessible as attributes to this object. 
+ + Notes: + To avoid name clashes, and to create a more sensible api, some + objects have names that differ from the original elements: + + o media:group -> self.media, + o exif:tags -> self.exif, + o geo:where -> self.geo, + o photo:id -> self.gphoto_id + """ + + _kind = 'photo' + _children = GPhotosBaseEntry._children.copy() + _children.update(PhotoData._children.copy()) + + def __init__(self, author=None, category=None, content=None, + atom_id=None, link=None, published=None, + title=None, updated=None, text=None, + # GPHOTO NAMESPACE: + gphoto_id=None, albumid=None, checksum=None, client=None, height=None, + position=None, rotation=None, size=None, timestamp=None, version=None, + width=None, commentCount=None, commentingEnabled=None, + # MEDIARSS NAMESPACE: + media=None, + # EXIF_NAMESPACE: + exif=None, + # GEORSS NAMESPACE: + geo=None, + extension_elements=None, extension_attributes=None): + GPhotosBaseEntry.__init__(self, author=author, category=category, + content=content, + atom_id=atom_id, link=link, published=published, + title=title, updated=updated, text=text, + extension_elements=extension_elements, + extension_attributes=extension_attributes) + + + ## NOTE: storing photo:id as self.gphoto_id, to avoid name clash with atom:id + self.gphoto_id = gphoto_id + self.albumid = albumid + self.checksum = checksum + self.client = client + self.height = height + self.position = position + self.rotation = rotation + self.size = size + self.timestamp = timestamp + self.version = version + self.width = width + self.commentingEnabled = commentingEnabled + self.commentCount = commentCount + ## NOTE: storing media:group as self.media, to create a self-explaining api + self.media = media or Media.Group() + self.exif = exif or Exif.Tags() + self.geo = geo or Geo.Where() + + def GetPostLink(self): + "Return the uri to this photo's `POST' link (use it for updates of the object)" + + return self.GetFeedLink() + + def GetCommentsUri(self): + "Return the uri to this photo's feed of CommentEntry comments" + return self._feedUri('comment') + + def GetTagsUri(self): + "Return the uri to this photo's feed of TagEntry tags" + return self._feedUri('tag') + + def GetAlbumUri(self): + """Return the uri to the AlbumEntry containing this photo""" + + href = self.GetSelfLink().href + return href[:href.find('/photoid')] + +def PhotoEntryFromString(xml_string): + return atom.CreateClassFromXMLString(PhotoEntry, xml_string) + +class PhotoFeed(GPhotosBaseFeed, PhotoData): + """All metadata for a Google Photos Photo, including its sub-elements + + This feed represents a photo as the container for other objects. + + A Photo feed contains entries of + CommentEntry or TagEntry, + depending on the `kind' parameter in the original query. + + Take a look at PhotoData for metadata accessible as attributes to this object. 
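+
+  Illustrative example (not part of the original docstring; assumes an
+  authenticated PhotosService `pws` and a PhotoEntry `photo`):
+    tag_feed = pws.GetFeed(photo.GetTagsUri())
+    print [ tag.title.text for tag in tag_feed.entry ]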
+ + """ + _children = GPhotosBaseFeed._children.copy() + _children.update(PhotoData._children.copy()) + + def GetTagsUri(self): + "(string) Return the uri to the same feed, but of the TagEntry kind" + + return self._feedUri('tag') + + def GetCommentsUri(self): + "(string) Return the uri to the same feed, but of the CommentEntry kind" + + return self._feedUri('comment') + +def PhotoFeedFromString(xml_string): + return atom.CreateClassFromXMLString(PhotoFeed, xml_string) + +class TagData(GPhotosBaseData): + _children = {} + _children['{%s}weight' % PHOTOS_NAMESPACE] = ('weight', Weight) + weight=None + +class TagEntry(GPhotosBaseEntry, TagData): + """All metadata for a Google Photos Tag + + The actual tag is stored in the .title.text attribute + + """ + + _kind = 'tag' + _children = GPhotosBaseEntry._children.copy() + _children.update(TagData._children.copy()) + + def __init__(self, author=None, category=None, content=None, + atom_id=None, link=None, published=None, + title=None, updated=None, + # GPHOTO NAMESPACE: + weight=None, + extended_property=None, + extension_elements=None, extension_attributes=None, text=None): + GPhotosBaseEntry.__init__(self, author=author, category=category, + content=content, + atom_id=atom_id, link=link, published=published, + title=title, updated=updated, text=text, + extension_elements=extension_elements, + extension_attributes=extension_attributes) + + self.weight = weight + + def GetAlbumUri(self): + """Return the uri to the AlbumEntry containing this tag""" + + href = self.GetSelfLink().href + pos = href.find('/photoid') + if pos == -1: + return None + return href[:pos] + + def GetPhotoUri(self): + """Return the uri to the PhotoEntry containing this tag""" + + href = self.GetSelfLink().href + pos = href.find('/tag') + if pos == -1: + return None + return href[:pos] + +def TagEntryFromString(xml_string): + return atom.CreateClassFromXMLString(TagEntry, xml_string) + + +class TagFeed(GPhotosBaseFeed, TagData): + """All metadata for a Google Photos Tag, including its sub-elements""" + + _children = GPhotosBaseFeed._children.copy() + _children.update(TagData._children.copy()) + +def TagFeedFromString(xml_string): + return atom.CreateClassFromXMLString(TagFeed, xml_string) + +class CommentData(GPhotosBaseData): + _children = {} + ## NOTE: storing photo:id as self.gphoto_id, to avoid name clash with atom:id + _children['{%s}id' % PHOTOS_NAMESPACE] = ('gphoto_id', Id) + _children['{%s}albumid' % PHOTOS_NAMESPACE] = ('albumid', Albumid) + _children['{%s}photoid' % PHOTOS_NAMESPACE] = ('photoid', Photoid) + _children['{%s}author' % atom.ATOM_NAMESPACE] = ('author', [CommentAuthor,]) + gphoto_id=None + albumid=None + photoid=None + author=None + +class CommentEntry(GPhotosBaseEntry, CommentData): + """All metadata for a Google Photos Comment + + The comment is stored in the .content.text attribute, + with a content type in .content.type. 
+ + + """ + + _kind = 'comment' + _children = GPhotosBaseEntry._children.copy() + _children.update(CommentData._children.copy()) + def __init__(self, author=None, category=None, content=None, + atom_id=None, link=None, published=None, + title=None, updated=None, + # GPHOTO NAMESPACE: + gphoto_id=None, albumid=None, photoid=None, + extended_property=None, + extension_elements=None, extension_attributes=None, text=None): + + GPhotosBaseEntry.__init__(self, author=author, category=category, + content=content, + atom_id=atom_id, link=link, published=published, + title=title, updated=updated, + extension_elements=extension_elements, + extension_attributes=extension_attributes, + text=text) + + self.gphoto_id = gphoto_id + self.albumid = albumid + self.photoid = photoid + + def GetCommentId(self): + """Return the globally unique id of this comment""" + return self.GetSelfLink().href.split('/')[-1] + + def GetAlbumUri(self): + """Return the uri to the AlbumEntry containing this comment""" + + href = self.GetSelfLink().href + return href[:href.find('/photoid')] + + def GetPhotoUri(self): + """Return the uri to the PhotoEntry containing this comment""" + + href = self.GetSelfLink().href + return href[:href.find('/commentid')] + +def CommentEntryFromString(xml_string): + return atom.CreateClassFromXMLString(CommentEntry, xml_string) + +class CommentFeed(GPhotosBaseFeed, CommentData): + """All metadata for a Google Photos Comment, including its sub-elements""" + + _children = GPhotosBaseFeed._children.copy() + _children.update(CommentData._children.copy()) + +def CommentFeedFromString(xml_string): + return atom.CreateClassFromXMLString(CommentFeed, xml_string) + +class UserData(GPhotosBaseData): + _children = {} + _children['{%s}maxPhotosPerAlbum' % PHOTOS_NAMESPACE] = ('maxPhotosPerAlbum', MaxPhotosPerAlbum) + _children['{%s}nickname' % PHOTOS_NAMESPACE] = ('nickname', Nickname) + _children['{%s}quotalimit' % PHOTOS_NAMESPACE] = ('quotalimit', Quotalimit) + _children['{%s}quotacurrent' % PHOTOS_NAMESPACE] = ('quotacurrent', Quotacurrent) + _children['{%s}thumbnail' % PHOTOS_NAMESPACE] = ('thumbnail', Thumbnail) + _children['{%s}user' % PHOTOS_NAMESPACE] = ('user', User) + _children['{%s}id' % PHOTOS_NAMESPACE] = ('gphoto_id', Id) + + maxPhotosPerAlbum=None + nickname=None + quotalimit=None + quotacurrent=None + thumbnail=None + user=None + gphoto_id=None + + +class UserEntry(GPhotosBaseEntry, UserData): + """All metadata for a Google Photos User + + This entry represents an album owner and all appropriate metadata. + + Take a look at at the attributes of the UserData for metadata available. 
+ """ + _children = GPhotosBaseEntry._children.copy() + _children.update(UserData._children.copy()) + _kind = 'user' + + def __init__(self, author=None, category=None, content=None, + atom_id=None, link=None, published=None, + title=None, updated=None, + # GPHOTO NAMESPACE: + gphoto_id=None, maxPhotosPerAlbum=None, nickname=None, quotalimit=None, + quotacurrent=None, thumbnail=None, user=None, + extended_property=None, + extension_elements=None, extension_attributes=None, text=None): + + GPhotosBaseEntry.__init__(self, author=author, category=category, + content=content, + atom_id=atom_id, link=link, published=published, + title=title, updated=updated, + extension_elements=extension_elements, + extension_attributes=extension_attributes, + text=text) + + + self.gphoto_id=gphoto_id + self.maxPhotosPerAlbum=maxPhotosPerAlbum + self.nickname=nickname + self.quotalimit=quotalimit + self.quotacurrent=quotacurrent + self.thumbnail=thumbnail + self.user=user + + def GetAlbumsUri(self): + "(string) Return the uri to this user's feed of the AlbumEntry kind" + return self._feedUri('album') + + def GetPhotosUri(self): + "(string) Return the uri to this user's feed of the PhotoEntry kind" + return self._feedUri('photo') + + def GetCommentsUri(self): + "(string) Return the uri to this user's feed of the CommentEntry kind" + return self._feedUri('comment') + + def GetTagsUri(self): + "(string) Return the uri to this user's feed of the TagEntry kind" + return self._feedUri('tag') + +def UserEntryFromString(xml_string): + return atom.CreateClassFromXMLString(UserEntry, xml_string) + +class UserFeed(GPhotosBaseFeed, UserData): + """Feed for a User in the google photos api. + + This feed represents a user as the container for other objects. + + A User feed contains entries of + AlbumEntry, PhotoEntry, CommentEntry, UserEntry or TagEntry, + depending on the `kind' parameter in the original query. + + The user feed itself also contains all of the metadata available + as part of a UserData object.""" + _children = GPhotosBaseFeed._children.copy() + _children.update(UserData._children.copy()) + + def GetAlbumsUri(self): + """Get the uri to this feed, but with entries of the AlbumEntry kind.""" + return self._feedUri('album') + + def GetTagsUri(self): + """Get the uri to this feed, but with entries of the TagEntry kind.""" + return self._feedUri('tag') + + def GetPhotosUri(self): + """Get the uri to this feed, but with entries of the PhotosEntry kind.""" + return self._feedUri('photo') + + def GetCommentsUri(self): + """Get the uri to this feed, but with entries of the CommentsEntry kind.""" + return self._feedUri('comment') + +def UserFeedFromString(xml_string): + return atom.CreateClassFromXMLString(UserFeed, xml_string) + + + +def AnyFeedFromString(xml_string): + """Creates an instance of the appropriate feed class from the + xml string contents. + + Args: + xml_string: str A string which contains valid XML. The root element + of the XML string should match the tag and namespace of the desired + class. + + Returns: + An instance of the target class with members assigned according to the + contents of the XML - or a basic gdata.GDataFeed instance if it is + impossible to determine the appropriate class (look for extra elements + in GDataFeed's .FindExtensions() and extension_elements[] ). + """ + tree = ElementTree.fromstring(xml_string) + category = tree.find('{%s}category' % atom.ATOM_NAMESPACE) + if category is None: + # TODO: is this the best way to handle this? 
+ return atom._CreateClassFromElementTree(GPhotosBaseFeed, tree) + namespace, kind = category.get('term').split('#') + if namespace != PHOTOS_NAMESPACE: + # TODO: is this the best way to handle this? + return atom._CreateClassFromElementTree(GPhotosBaseFeed, tree) + ## TODO: is getattr safe this way? + feed_class = getattr(gdata.photos, '%sFeed' % kind.title()) + return atom._CreateClassFromElementTree(feed_class, tree) + +def AnyEntryFromString(xml_string): + """Creates an instance of the appropriate entry class from the + xml string contents. + + Args: + xml_string: str A string which contains valid XML. The root element + of the XML string should match the tag and namespace of the desired + class. + + Returns: + An instance of the target class with members assigned according to the + contents of the XML - or a basic gdata.GDataEndry instance if it is + impossible to determine the appropriate class (look for extra elements + in GDataEntry's .FindExtensions() and extension_elements[] ). + """ + tree = ElementTree.fromstring(xml_string) + category = tree.find('{%s}category' % atom.ATOM_NAMESPACE) + if category is None: + # TODO: is this the best way to handle this? + return atom._CreateClassFromElementTree(GPhotosBaseEntry, tree) + namespace, kind = category.get('term').split('#') + if namespace != PHOTOS_NAMESPACE: + # TODO: is this the best way to handle this? + return atom._CreateClassFromElementTree(GPhotosBaseEntry, tree) + ## TODO: is getattr safe this way? + feed_class = getattr(gdata.photos, '%sEntry' % kind.title()) + return atom._CreateClassFromElementTree(feed_class, tree) + diff --git a/gam/gdata/photos/service.py b/gam/gdata/photos/service.py new file mode 100755 index 00000000000..7170379400f --- /dev/null +++ b/gam/gdata/photos/service.py @@ -0,0 +1,680 @@ +#!/usr/bin/env python +# -*-*- encoding: utf-8 -*-*- +# +# This is the service file for the Google Photo python client. +# It is used for higher level operations. +# +# $Id: service.py 144 2007-10-25 21:03:34Z havard.gulldahl $ +# +# Copyright 2007 HÃ¥vard Gulldahl +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Google PhotoService provides a human-friendly interface to +Google Photo (a.k.a Picasa Web) services[1]. + +It extends gdata.service.GDataService and as such hides all the +nasty details about authenticating, parsing and communicating with +Google Photos. 
+
+[1]: http://code.google.com/apis/picasaweb/gdata.html
+
+Example:
+  import gdata.photos, gdata.photos.service
+  pws = gdata.photos.service.PhotosService()
+  pws.ClientLogin(username, password)
+  # Get all albums
+  albums = pws.GetUserFeed().entry
+  # Get all photos in second album
+  photos = pws.GetFeed(albums[1].GetPhotosUri()).entry
+  # Get all tags for photos in second album and print them
+  tags = pws.GetFeed(albums[1].GetTagsUri()).entry
+  print [ tag.summary.text for tag in tags ]
+  # Get all comments for the first photos in list and print them
+  comments = pws.GetCommentFeed(photos[0].GetCommentsUri()).entry
+  print [ c.summary.text for c in comments ]
+
+  # Get a photo to work with
+  photo = photos[0]
+  # Update metadata
+
+  # Attributes from the atom namespace
+  photo.summary.text = u'A nice view from my veranda'
+  photo.title.text = u'Verandaview.jpg'
+
+  # Attributes from the media namespace
+  photo.media.keywords.text = u'Home, Long-exposure, Sunset' # Comma-separated
+
+  # Adding attributes to media object
+
+  # Rotate 90 degrees clockwise
+  photo.rotation = gdata.photos.Rotation(text='90')
+
+  # Submit modified photo object
+  photo = pws.UpdatePhotoMetadata(photo)
+
+  # Make sure you only modify the newly returned object, else you'll get
+  # versioning errors. See Optimistic-concurrency
+
+  # Add comment to a picture
+  comment = pws.InsertComment(photo, u'I wish the water always was this warm')
+
+  # Remove comment because it was silly
+  print "*blush*"
+  pws.Delete(comment.GetEditLink().href)
+
+"""
+
+__author__ = u'havard@gulldahl.no'# (Håvard Gulldahl)' #BUG: pydoc chokes on non-ascii chars in __author__
+__license__ = 'Apache License v2'
+__version__ = '$Revision: 176 $'[11:-2]
+
+
+import sys, os.path, StringIO
+import time
+import gdata.service
+import gdata
+import atom.service
+import atom
+import gdata.photos
+
+SUPPORTED_UPLOAD_TYPES = ('bmp', 'jpeg', 'jpg', 'gif', 'png')
+
+UNKOWN_ERROR=1000
+GPHOTOS_BAD_REQUEST=400
+GPHOTOS_CONFLICT=409
+GPHOTOS_INTERNAL_SERVER_ERROR=500
+GPHOTOS_INVALID_ARGUMENT=601
+GPHOTOS_INVALID_CONTENT_TYPE=602
+GPHOTOS_NOT_AN_IMAGE=603
+GPHOTOS_INVALID_KIND=604
+
+class GooglePhotosException(Exception):
+  def __init__(self, response):
+
+    self.error_code = response['status']
+    self.reason = response['reason'].strip()
+    if '<html>' in str(response['body']): #general html message, discard it
+      response['body'] = ""
+    self.body = response['body'].strip()
+    self.message = "(%(status)s) %(body)s -- %(reason)s" % response
+
+    #return explicit error codes
+    error_map = { '(12) Not an image':GPHOTOS_NOT_AN_IMAGE,
+                  'kind: That is not one of the acceptable values':
+                    GPHOTOS_INVALID_KIND,
+
+                }
+    for msg, code in error_map.iteritems():
+      if self.body == msg:
+        self.error_code = code
+        break
+    self.args = [self.error_code, self.reason, self.body]
+
+class PhotosService(gdata.service.GDataService):
+  userUri = '/data/feed/api/user/%s'
+
+  def __init__(self, email=None, password=None, source=None,
+               server='picasaweb.google.com', additional_headers=None,
+               **kwargs):
+    """Creates a client for the Google Photos service.
+
+    Args:
+      email: string (optional) The user's email address, used for
+          authentication.
+      password: string (optional) The user's password.
+      source: string (optional) The name of the user's application.
+      server: string (optional) The name of the server to which a connection
+          will be opened. Default value: 'picasaweb.google.com'.
+      **kwargs: The other parameters to pass to gdata.service.GDataService
+          constructor.
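+
+    Example (illustrative, not part of the original docstring; the credentials
+    shown are placeholders):
+      pws = PhotosService(source='example-agent')
+      pws.ClientLogin('user@gmail.com', 'password')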
+ """ + self.email = email + self.client = source + gdata.service.GDataService.__init__( + self, email=email, password=password, service='lh2', source=source, + server=server, additional_headers=additional_headers, **kwargs) + + def GetFeed(self, uri, limit=None, start_index=None): + """Get a feed. + + The results are ordered by the values of their `updated' elements, + with the most recently updated entry appearing first in the feed. + + Arguments: + uri: the uri to fetch + limit (optional): the maximum number of entries to return. Defaults to what + the server returns. + + Returns: + one of gdata.photos.AlbumFeed, + gdata.photos.UserFeed, + gdata.photos.PhotoFeed, + gdata.photos.CommentFeed, + gdata.photos.TagFeed, + depending on the results of the query. + Raises: + GooglePhotosException + + See: + http://code.google.com/apis/picasaweb/gdata.html#Get_Album_Feed_Manual + """ + if limit is not None: + uri += '&max-results=%s' % limit + if start_index is not None: + uri += '&start-index=%s' % start_index + try: + return self.Get(uri, converter=gdata.photos.AnyFeedFromString) + except gdata.service.RequestError, e: + raise GooglePhotosException(e.args[0]) + + def GetEntry(self, uri, limit=None, start_index=None): + """Get an Entry. + + Arguments: + uri: the uri to the entry + limit (optional): the maximum number of entries to return. Defaults to what + the server returns. + + Returns: + one of gdata.photos.AlbumEntry, + gdata.photos.UserEntry, + gdata.photos.PhotoEntry, + gdata.photos.CommentEntry, + gdata.photos.TagEntry, + depending on the results of the query. + Raises: + GooglePhotosException + """ + if limit is not None: + uri += '&max-results=%s' % limit + if start_index is not None: + uri += '&start-index=%s' % start_index + try: + return self.Get(uri, converter=gdata.photos.AnyEntryFromString) + except gdata.service.RequestError, e: + raise GooglePhotosException(e.args[0]) + + def GetUserFeed(self, kind='album', user='default', limit=None): + """Get user-based feed, containing albums, photos, comments or tags; + defaults to albums. + + The entries are ordered by the values of their `updated' elements, + with the most recently updated entry appearing first in the feed. + + Arguments: + kind: the kind of entries to get, either `album', `photo', + `comment' or `tag', or a python list of these. Defaults to `album'. + user (optional): whose albums we're querying. Defaults to current user. + limit (optional): the maximum number of entries to return. + Defaults to everything the server returns. + + + Returns: + gdata.photos.UserFeed, containing appropriate Entry elements + + See: + http://code.google.com/apis/picasaweb/gdata.html#Get_Album_Feed_Manual + http://googledataapis.blogspot.com/2007/07/picasa-web-albums-adds-new-api-features.html + """ + if isinstance(kind, (list, tuple) ): + kind = ",".join(kind) + + uri = '/data/feed/api/user/%s?kind=%s' % (user, kind) + return self.GetFeed(uri, limit=limit) + + def GetTaggedPhotos(self, tag, user='default', limit=None): + """Get all photos belonging to a specific user, tagged by the given keyword + + Arguments: + tag: The tag you're looking for, e.g. `dog' + user (optional): Whose images/videos you want to search, defaults + to current user + limit (optional): the maximum number of entries to return. + Defaults to everything the server returns. 
+ + Returns: + gdata.photos.UserFeed containing PhotoEntry elements + """ + # Lower-casing because of + # http://code.google.com/p/gdata-issues/issues/detail?id=194 + uri = '/data/feed/api/user/%s?kind=photo&tag=%s' % (user, tag.lower()) + return self.GetFeed(uri, limit) + + def SearchUserPhotos(self, query, user='default', limit=100): + """Search through all photos for a specific user and return a feed. + This will look for matches in file names and image tags (a.k.a. keywords) + + Arguments: + query: The string you're looking for, e.g. `vacation' + user (optional): The username of whose photos you want to search, defaults + to current user. + limit (optional): Don't return more than `limit' hits, defaults to 100 + + Only public photos are searched, unless you are authenticated and + searching through your own photos. + + Returns: + gdata.photos.UserFeed with PhotoEntry elements + """ + uri = '/data/feed/api/user/%s?kind=photo&q=%s' % (user, query) + return self.GetFeed(uri, limit=limit) + + def SearchCommunityPhotos(self, query, limit=100): + """Search through all public photos and return a feed. + This will look for matches in file names and image tags (a.k.a. keywords) + + Arguments: + query: The string you're looking for, e.g. `vacation' + limit (optional): Don't return more than `limit' hits, defaults to 100 + + Returns: + gdata.GDataFeed with PhotoEntry elements + """ + uri='/data/feed/api/all?q=%s' % query + return self.GetFeed(uri, limit=limit) + + def GetContacts(self, user='default', limit=None): + """Retrieve a feed that contains a list of your contacts + + Arguments: + user: Username of the user whose contacts you want + + Returns + gdata.photos.UserFeed, with UserEntry entries + + See: + http://groups.google.com/group/Google-Picasa-Data-API/msg/819b0025b5ff5e38 + """ + uri = '/data/feed/api/user/%s/contacts?kind=user' % user + return self.GetFeed(uri, limit=limit) + + def SearchContactsPhotos(self, user='default', search=None, limit=None): + """Search over your contacts' photos and return a feed + + Arguments: + user: Username of the user whose contacts you want + search (optional): What to search for (photo title, description and keywords) + + Returns + gdata.photos.UserFeed, with PhotoEntry elements + + See: + http://groups.google.com/group/Google-Picasa-Data-API/msg/819b0025b5ff5e38 + """ + + uri = '/data/feed/api/user/%s/contacts?kind=photo&q=%s' % (user, search) + return self.GetFeed(uri, limit=limit) + + def InsertAlbum(self, title, summary, location=None, access='public', + commenting_enabled='true', timestamp=None): + """Add an album. + + Needs authentication, see self.ClientLogin() + + Arguments: + title: Album title + summary: Album summary / description + access (optional): `private' or `public'. Public albums are searchable + by everyone on the internet. Defaults to `public' + commenting_enabled (optional): `true' or `false'. Defaults to `true'. + timestamp (optional): A date and time for the album, in milliseconds since + Unix epoch[1] UTC. Defaults to now. 
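+
+    Example (illustrative; the title and summary are placeholders):
+      album = pws.InsertAlbum(title='Trip 2007', summary='Hiking photos',
+                              access='private')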
+ + Returns: + The newly created gdata.photos.AlbumEntry + + See: + http://code.google.com/apis/picasaweb/gdata.html#Add_Album_Manual_Installed + + [1]: http://en.wikipedia.org/wiki/Unix_epoch + """ + album = gdata.photos.AlbumEntry() + album.title = atom.Title(text=title, title_type='text') + album.summary = atom.Summary(text=summary, summary_type='text') + if location is not None: + album.location = gdata.photos.Location(text=location) + album.access = gdata.photos.Access(text=access) + if commenting_enabled in ('true', 'false'): + album.commentingEnabled = gdata.photos.CommentingEnabled(text=commenting_enabled) + if timestamp is None: + timestamp = '%i' % int(time.time() * 1000) + album.timestamp = gdata.photos.Timestamp(text=timestamp) + try: + return self.Post(album, uri=self.userUri % self.email, + converter=gdata.photos.AlbumEntryFromString) + except gdata.service.RequestError, e: + raise GooglePhotosException(e.args[0]) + + def InsertPhoto(self, album_or_uri, photo, filename_or_handle, + content_type='image/jpeg'): + """Add a PhotoEntry + + Needs authentication, see self.ClientLogin() + + Arguments: + album_or_uri: AlbumFeed or uri of the album where the photo should go + photo: PhotoEntry to add + filename_or_handle: A file-like object or file name where the image/video + will be read from + content_type (optional): Internet media type (a.k.a. mime type) of + media object. Currently Google Photos supports these types: + o image/bmp + o image/gif + o image/jpeg + o image/png + + Images will be converted to jpeg on upload. Defaults to `image/jpeg' + + """ + + try: + assert(isinstance(photo, gdata.photos.PhotoEntry)) + except AssertionError: + raise GooglePhotosException({'status':GPHOTOS_INVALID_ARGUMENT, + 'body':'`photo` must be a gdata.photos.PhotoEntry instance', + 'reason':'Found %s, not PhotoEntry' % type(photo) + }) + try: + majtype, mintype = content_type.split('/') + assert(mintype in SUPPORTED_UPLOAD_TYPES) + except (ValueError, AssertionError): + raise GooglePhotosException({'status':GPHOTOS_INVALID_CONTENT_TYPE, + 'body':'This is not a valid content type: %s' % content_type, + 'reason':'Accepted content types: %s' % \ + ['image/'+t for t in SUPPORTED_UPLOAD_TYPES] + }) + if isinstance(filename_or_handle, (str, unicode)) and \ + os.path.exists(filename_or_handle): # it's a file name + mediasource = gdata.MediaSource() + mediasource.setFile(filename_or_handle, content_type) + elif hasattr(filename_or_handle, 'read'):# it's a file-like resource + if hasattr(filename_or_handle, 'seek'): + filename_or_handle.seek(0) # rewind pointer to the start of the file + # gdata.MediaSource needs the content length, so read the whole image + file_handle = StringIO.StringIO(filename_or_handle.read()) + name = 'image' + if hasattr(filename_or_handle, 'name'): + name = filename_or_handle.name + mediasource = gdata.MediaSource(file_handle, content_type, + content_length=file_handle.len, file_name=name) + else: #filename_or_handle is not valid + raise GooglePhotosException({'status':GPHOTOS_INVALID_ARGUMENT, + 'body':'`filename_or_handle` must be a path name or a file-like object', + 'reason':'Found %s, not path name or object with a .read() method' % \ + type(filename_or_handle) + }) + + if isinstance(album_or_uri, (str, unicode)): # it's a uri + feed_uri = album_or_uri + elif hasattr(album_or_uri, 'GetFeedLink'): # it's a AlbumFeed object + feed_uri = album_or_uri.GetFeedLink().href + + try: + return self.Post(photo, uri=feed_uri, media_source=mediasource, + 
converter=gdata.photos.PhotoEntryFromString) + except gdata.service.RequestError, e: + raise GooglePhotosException(e.args[0]) + + def InsertPhotoSimple(self, album_or_uri, title, summary, filename_or_handle, + content_type='image/jpeg', keywords=None): + """Add a photo without constructing a PhotoEntry. + + Needs authentication, see self.ClientLogin() + + Arguments: + album_or_uri: AlbumFeed or uri of the album where the photo should go + title: Photo title + summary: Photo summary / description + filename_or_handle: A file-like object or file name where the image/video + will be read from + content_type (optional): Internet media type (a.k.a. mime type) of + media object. Currently Google Photos supports these types: + o image/bmp + o image/gif + o image/jpeg + o image/png + + Images will be converted to jpeg on upload. Defaults to `image/jpeg' + keywords (optional): a 1) comma separated string or 2) a python list() of + keywords (a.k.a. tags) to add to the image. + E.g. 1) `dog, vacation, happy' 2) ['dog', 'happy', 'vacation'] + + Returns: + The newly created gdata.photos.PhotoEntry or GooglePhotosException on errors + + See: + http://code.google.com/apis/picasaweb/gdata.html#Add_Album_Manual_Installed + [1]: http://en.wikipedia.org/wiki/Unix_epoch + """ + + metadata = gdata.photos.PhotoEntry() + metadata.title=atom.Title(text=title) + metadata.summary = atom.Summary(text=summary, summary_type='text') + if keywords is not None: + if isinstance(keywords, list): + keywords = ','.join(keywords) + metadata.media.keywords = gdata.media.Keywords(text=keywords) + return self.InsertPhoto(album_or_uri, metadata, filename_or_handle, + content_type) + + def UpdatePhotoMetadata(self, photo): + """Update a photo's metadata. + + Needs authentication, see self.ClientLogin() + + You can update any or all of the following metadata properties: + * + * <media:description> + * <gphoto:checksum> + * <gphoto:client> + * <gphoto:rotation> + * <gphoto:timestamp> + * <gphoto:commentingEnabled> + + Arguments: + photo: a gdata.photos.PhotoEntry object with updated elements + + Returns: + The modified gdata.photos.PhotoEntry + + Example: + p = GetFeed(uri).entry[0] + p.title.text = u'My new text' + p.commentingEnabled.text = 'false' + p = UpdatePhotoMetadata(p) + + It is important that you don't keep the old object around, once + it has been updated. See + http://code.google.com/apis/gdata/reference.html#Optimistic-concurrency + """ + try: + return self.Put(data=photo, uri=photo.GetEditLink().href, + converter=gdata.photos.PhotoEntryFromString) + except gdata.service.RequestError, e: + raise GooglePhotosException(e.args[0]) + + + def UpdatePhotoBlob(self, photo_or_uri, filename_or_handle, + content_type = 'image/jpeg'): + """Update a photo's binary data. + + Needs authentication, see self.ClientLogin() + + Arguments: + photo_or_uri: a gdata.photos.PhotoEntry that will be updated, or a + `edit-media' uri pointing to it + filename_or_handle: A file-like object or file name where the image/video + will be read from + content_type (optional): Internet media type (a.k.a. mime type) of + media object. Currently Google Photos supports these types: + o image/bmp + o image/gif + o image/jpeg + o image/png + Images will be converted to jpeg on upload. Defaults to `image/jpeg' + + Returns: + The modified gdata.photos.PhotoEntry + + Example: + p = GetFeed(PhotoUri) + p = UpdatePhotoBlob(p, '/tmp/newPic.jpg') + + It is important that you don't keep the old object around, once + it has been updated. 
See + http://code.google.com/apis/gdata/reference.html#Optimistic-concurrency + """ + + try: + majtype, mintype = content_type.split('/') + assert(mintype in SUPPORTED_UPLOAD_TYPES) + except (ValueError, AssertionError): + raise GooglePhotosException({'status':GPHOTOS_INVALID_CONTENT_TYPE, + 'body':'This is not a valid content type: %s' % content_type, + 'reason':'Accepted content types: %s' % \ + ['image/'+t for t in SUPPORTED_UPLOAD_TYPES] + }) + + if isinstance(filename_or_handle, (str, unicode)) and \ + os.path.exists(filename_or_handle): # it's a file name + photoblob = gdata.MediaSource() + photoblob.setFile(filename_or_handle, content_type) + elif hasattr(filename_or_handle, 'read'):# it's a file-like resource + if hasattr(filename_or_handle, 'seek'): + filename_or_handle.seek(0) # rewind pointer to the start of the file + # gdata.MediaSource needs the content length, so read the whole image + file_handle = StringIO.StringIO(filename_or_handle.read()) + name = 'image' + if hasattr(filename_or_handle, 'name'): + name = filename_or_handle.name + mediasource = gdata.MediaSource(file_handle, content_type, + content_length=file_handle.len, file_name=name) + else: #filename_or_handle is not valid + raise GooglePhotosException({'status':GPHOTOS_INVALID_ARGUMENT, + 'body':'`filename_or_handle` must be a path name or a file-like object', + 'reason':'Found %s, not path name or an object with .read() method' % \ + type(filename_or_handle) + }) + + if isinstance(photo_or_uri, (str, unicode)): + entry_uri = photo_or_uri # it's a uri + elif hasattr(photo_or_uri, 'GetEditMediaLink'): + entry_uri = photo_or_uri.GetEditMediaLink().href + try: + return self.Put(photoblob, entry_uri, + converter=gdata.photos.PhotoEntryFromString) + except gdata.service.RequestError, e: + raise GooglePhotosException(e.args[0]) + + def InsertTag(self, photo_or_uri, tag): + """Add a tag (a.k.a. keyword) to a photo. + + Needs authentication, see self.ClientLogin() + + Arguments: + photo_or_uri: a gdata.photos.PhotoEntry that will be tagged, or a + `post' uri pointing to it + (string) tag: The tag/keyword + + Returns: + The new gdata.photos.TagEntry + + Example: + p = GetFeed(PhotoUri) + tag = InsertTag(p, 'Beautiful sunsets') + + """ + tag = gdata.photos.TagEntry(title=atom.Title(text=tag)) + if isinstance(photo_or_uri, (str, unicode)): + post_uri = photo_or_uri # it's a uri + elif hasattr(photo_or_uri, 'GetEditMediaLink'): + post_uri = photo_or_uri.GetPostLink().href + try: + return self.Post(data=tag, uri=post_uri, + converter=gdata.photos.TagEntryFromString) + except gdata.service.RequestError, e: + raise GooglePhotosException(e.args[0]) + + + def InsertComment(self, photo_or_uri, comment): + """Add a comment to a photo. + + Needs authentication, see self.ClientLogin() + + Arguments: + photo_or_uri: a gdata.photos.PhotoEntry that is about to be commented + , or a `post' uri pointing to it + (string) comment: The actual comment + + Returns: + The new gdata.photos.CommentEntry + + Example: + p = GetFeed(PhotoUri) + tag = InsertComment(p, 'OOOH! I would have loved to be there. 
+ Who's that in the back?') + + """ + comment = gdata.photos.CommentEntry(content=atom.Content(text=comment)) + if isinstance(photo_or_uri, (str, unicode)): + post_uri = photo_or_uri # it's a uri + elif hasattr(photo_or_uri, 'GetEditMediaLink'): + post_uri = photo_or_uri.GetPostLink().href + try: + return self.Post(data=comment, uri=post_uri, + converter=gdata.photos.CommentEntryFromString) + except gdata.service.RequestError, e: + raise GooglePhotosException(e.args[0]) + + def Delete(self, object_or_uri, *args, **kwargs): + """Delete an object. + + Re-implementing the GDataService.Delete method, to add some + convenience. + + Arguments: + object_or_uri: Any object that has a GetEditLink() method that + returns a link, or a uri to that object. + + Returns: + ? or GooglePhotosException on errors + """ + try: + uri = object_or_uri.GetEditLink().href + except AttributeError: + uri = object_or_uri + try: + return gdata.service.GDataService.Delete(self, uri, *args, **kwargs) + except gdata.service.RequestError, e: + raise GooglePhotosException(e.args[0]) + +def GetSmallestThumbnail(media_thumbnail_list): + """Helper function to get the smallest thumbnail of a list of + gdata.media.Thumbnail. + Returns gdata.media.Thumbnail """ + r = {} + for thumb in media_thumbnail_list: + r[int(thumb.width)*int(thumb.height)] = thumb + keys = r.keys() + keys.sort() + return r[keys[0]] + +def ConvertAtomTimestampToEpoch(timestamp): + """Helper function to convert a timestamp string, for instance + from atom:updated or atom:published, to milliseconds since Unix epoch + (a.k.a. POSIX time). + + `2007-07-22T00:45:10.000Z' -> """ + return time.mktime(time.strptime(timestamp, '%Y-%m-%dT%H:%M:%S.000Z')) + ## TODO: Timezone aware diff --git a/gam/gdata/projecthosting/__init__.py b/gam/gdata/projecthosting/__init__.py new file mode 100755 index 00000000000..8b137891791 --- /dev/null +++ b/gam/gdata/projecthosting/__init__.py @@ -0,0 +1 @@ + diff --git a/gam/gdata/projecthosting/client.py b/gam/gdata/projecthosting/client.py new file mode 100755 index 00000000000..8e36a7c3c1b --- /dev/null +++ b/gam/gdata/projecthosting/client.py @@ -0,0 +1,200 @@ +#!/usr/bin/env python +# +# Copyright 2009 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import atom.data +import gdata.client +import gdata.gauth +import gdata.projecthosting.data + + +class ProjectHostingClient(gdata.client.GDClient): + """Client to interact with the Project Hosting GData API.""" + api_version = '1.0' + auth_service = 'code' + auth_scopes = gdata.gauth.AUTH_SCOPES['code'] + host = 'code.google.com' + + def get_issues(self, project_name, + desired_class=gdata.projecthosting.data.IssuesFeed, **kwargs): + """Get a feed of issues for a particular project. + + Args: + project_name str The name of the project. + query Query Set returned issues parameters. 
+ + Returns: + data.IssuesFeed + """ + return self.get_feed(gdata.projecthosting.data.ISSUES_FULL_FEED % + project_name, desired_class=desired_class, **kwargs) + + def add_issue(self, project_name, title, content, author, + status=None, owner=None, labels=None, ccs=None, **kwargs): + """Create a new issue for the project. + + Args: + project_name str The name of the project. + title str The title of the new issue. + content str The summary of the new issue. + author str The authenticated user's username. + status str The status of the new issue, Accepted, etc. + owner str The username of new issue's owner. + labels [str] Labels to associate with the new issue. + ccs [str] usernames to Cc on the new issue. + Returns: + data.IssueEntry + """ + new_entry = gdata.projecthosting.data.IssueEntry( + title=atom.data.Title(text=title), + content=atom.data.Content(text=content), + author=[atom.data.Author(name=atom.data.Name(text=author))]) + + if status: + new_entry.status = gdata.projecthosting.data.Status(text=status) + + if owner: + owner = [gdata.projecthosting.data.Owner( + username=gdata.projecthosting.data.Username(text=owner))] + + if labels: + new_entry.label = [gdata.projecthosting.data.Label(text=label) + for label in labels] + if ccs: + new_entry.cc = [ + gdata.projecthosting.data.Cc( + username=gdata.projecthosting.data.Username(text=cc)) + for cc in ccs] + + return self.post( + new_entry, + gdata.projecthosting.data.ISSUES_FULL_FEED % project_name, + **kwargs) + + def update_issue(self, project_name, issue_id, author, comment=None, + summary=None, status=None, owner=None, labels=None, ccs=None, + **kwargs): + """Update or comment on one issue for the project. + + Args: + project_name str The name of the issue's project. + issue_id str The issue number needing updated. + author str The authenticated user's username. + comment str A comment to append to the issue + summary str Rewrite the summary of the issue. + status str A new status for the issue. + owner str The username of the new owner. + labels [str] Labels to set on the issue (prepend issue with - to remove a + label). + ccs [str] Ccs to set on th enew issue (prepend cc with - to remove a cc). + + Returns: + data.CommentEntry + """ + updates = gdata.projecthosting.data.Updates() + + if summary: + updates.summary = gdata.projecthosting.data.Summary(text=summary) + + if status: + updates.status = gdata.projecthosting.data.Status(text=status) + + if owner: + updates.ownerUpdate = gdata.projecthosting.data.OwnerUpdate(text=owner) + + if labels: + updates.label = [gdata.projecthosting.data.Label(text=label) + for label in labels] + if ccs: + updates.ccUpdate = [gdata.projecthosting.data.CcUpdate(text=cc) + for cc in ccs] + + update_entry = gdata.projecthosting.data.CommentEntry( + content=atom.data.Content(text=comment), + author=[atom.data.Author(name=atom.data.Name(text=author))], + updates=updates) + + return self.post( + update_entry, + gdata.projecthosting.data.COMMENTS_FULL_FEED % (project_name, issue_id), + **kwargs) + + def get_comments(self, project_name, issue_id, + desired_class=gdata.projecthosting.data.CommentsFeed, + **kwargs): + """Get a feed of all updates to an issue. + + Args: + project_name str The name of the issue's project. + issue_id str The issue number needing updated. 
+ + Returns: + data.CommentsFeed + """ + return self.get_feed( + gdata.projecthosting.data.COMMENTS_FULL_FEED % (project_name, issue_id), + desired_class=desired_class, **kwargs) + + def update(self, entry, auth_token=None, force=False, **kwargs): + """Unsupported GData update method. + + Use update_*() instead. + """ + raise NotImplementedError( + 'GData Update operation unsupported, try update_*') + + def delete(self, entry_or_uri, auth_token=None, force=False, **kwargs): + """Unsupported GData delete method. + + Use update_issue(status='Closed') instead. + """ + raise NotImplementedError( + 'GData Delete API unsupported, try closing the issue instead.') + + +class Query(gdata.client.Query): + + def __init__(self, issue_id=None, label=None, canned_query=None, owner=None, + status=None, **kwargs): + """Constructs a Google Data Query to filter feed contents serverside. + Args: + issue_id: int or str The issue to return based on the issue id. + label: str A label returned issues must have. + canned_query: str Return issues based on a canned query identifier + owner: str Return issues based on the owner of the issue. For Gmail users, + this will be the part of the email preceding the '@' sign. + status: str Return issues based on the status of the issue. + """ + super(Query, self).__init__(**kwargs) + self.label = label + self.issue_id = issue_id + self.canned_query = canned_query + self.owner = owner + self.status = status + + def modify_request(self, http_request): + if self.issue_id: + gdata.client._add_query_param('id', self.issue_id, http_request) + if self.label: + gdata.client._add_query_param('label', self.label, http_request) + if self.canned_query: + gdata.client._add_query_param('can', self.canned_query, http_request) + if self.owner: + gdata.client._add_query_param('owner', self.owner, http_request) + if self.status: + gdata.client._add_query_param('status', self.status, http_request) + super(Query, self).modify_request(http_request) + + ModifyRequest = modify_request diff --git a/gam/gdata/projecthosting/data.py b/gam/gdata/projecthosting/data.py new file mode 100755 index 00000000000..b0af2f5f71e --- /dev/null +++ b/gam/gdata/projecthosting/data.py @@ -0,0 +1,134 @@ +#!/usr/bin/env python +# +# Copyright 2009 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +# This module is used for version 2 of the Google Data APIs. + + +"""Provides classes and constants for XML in the Google Project Hosting API. 
+ +Canonical documentation for the raw XML which these classes represent can be +found here: http://code.google.com/p/support/wiki/IssueTrackerAPI +""" + + +__author__ = 'jlapenna@google.com (Joe LaPenna)' + +import atom.core +import gdata.data + + +ISSUES_TEMPLATE = '{http://schemas.google.com/projecthosting/issues/2009}%s' + + +ISSUES_FULL_FEED = '/feeds/issues/p/%s/issues/full' +COMMENTS_FULL_FEED = '/feeds/issues/p/%s/issues/%s/comments/full' + + +class Uri(atom.core.XmlElement): + """The issues:uri element.""" + _qname = ISSUES_TEMPLATE % 'uri' + + +class Username(atom.core.XmlElement): + """The issues:username element.""" + _qname = ISSUES_TEMPLATE % 'username' + + +class Cc(atom.core.XmlElement): + """The issues:cc element.""" + _qname = ISSUES_TEMPLATE % 'cc' + uri = Uri + username = Username + + +class Label(atom.core.XmlElement): + """The issues:label element.""" + _qname = ISSUES_TEMPLATE % 'label' + + +class Owner(atom.core.XmlElement): + """The issues:owner element.""" + _qname = ISSUES_TEMPLATE % 'owner' + uri = Uri + username = Username + + +class Stars(atom.core.XmlElement): + """The issues:stars element.""" + _qname = ISSUES_TEMPLATE % 'stars' + + +class State(atom.core.XmlElement): + """The issues:state element.""" + _qname = ISSUES_TEMPLATE % 'state' + + +class Status(atom.core.XmlElement): + """The issues:status element.""" + _qname = ISSUES_TEMPLATE % 'status' + + +class Summary(atom.core.XmlElement): + """The issues:summary element.""" + _qname = ISSUES_TEMPLATE % 'summary' + + +class OwnerUpdate(atom.core.XmlElement): + """The issues:ownerUpdate element.""" + _qname = ISSUES_TEMPLATE % 'ownerUpdate' + + +class CcUpdate(atom.core.XmlElement): + """The issues:ccUpdate element.""" + _qname = ISSUES_TEMPLATE % 'ccUpdate' + + +class Updates(atom.core.XmlElement): + """The issues:updates element.""" + _qname = ISSUES_TEMPLATE % 'updates' + summary = Summary + status = Status + ownerUpdate = OwnerUpdate + label = [Label] + ccUpdate = [CcUpdate] + + +class IssueEntry(gdata.data.GDEntry): + """Represents the information of one issue.""" + _qname = atom.data.ATOM_TEMPLATE % 'entry' + owner = Owner + cc = [Cc] + label = [Label] + stars = Stars + state = State + status = Status + + +class IssuesFeed(gdata.data.GDFeed): + """An Atom feed listing a project's issues.""" + entry = [IssueEntry] + + +class CommentEntry(gdata.data.GDEntry): + """An entry detailing one comment on an issue.""" + _qname = atom.data.ATOM_TEMPLATE % 'entry' + updates = Updates + + +class CommentsFeed(gdata.data.GDFeed): + """An Atom feed listing a project's issue's comments.""" + entry = [CommentEntry] diff --git a/gam/gdata/sample_util.py b/gam/gdata/sample_util.py new file mode 100755 index 00000000000..aae866e2afc --- /dev/null +++ b/gam/gdata/sample_util.py @@ -0,0 +1,269 @@ +#!/usr/bin/env python +# +# Copyright (C) 2009 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
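+
+# Illustrative sketch (not part of the upstream gdata library): a minimal
+# example of driving the ProjectHostingClient and Query classes added above.
+# The project name, credentials and issue id are hypothetical placeholders,
+# and passing a Query object through the `query` keyword follows the
+# get_issues() docstring in gdata/projecthosting/client.py.
+def _example_project_hosting_usage():
+  import gdata.projecthosting.client
+
+  client = gdata.projecthosting.client.ProjectHostingClient()
+  client.client_login('user@example.com', 'password', source='example-app')
+
+  # Fetch the open issues owned by a given user in a hypothetical project.
+  query = gdata.projecthosting.client.Query(canned_query='open', owner='user')
+  issues = client.get_issues('example-project', query=query)
+
+  # File a new issue, then resolve it with a comment.
+  client.add_issue('example-project', 'Crash on startup',
+                   'Steps to reproduce: ...', 'user', labels=['Type-Defect'])
+  client.update_issue('example-project', '1', 'user',
+                      comment='Fixed in r123', status='Fixed')
+  return issues
+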
+ + +"""Provides utility functions used with command line samples.""" + +# This module is used for version 2 of the Google Data APIs. + +import sys +import getpass +import urllib +import gdata.gauth + +__author__ = 'j.s@google.com (Jeff Scudder)' + + +CLIENT_LOGIN = 1 +AUTHSUB = 2 +OAUTH = 3 + +HMAC = 1 +RSA = 2 + + +class SettingsUtil(object): + """Gather's user preferences from flags or command prompts. + + An instance of this object stores the choices made by the user. At some + point it might be useful to save the user's preferences so that they do + not need to always set flags or answer preference prompts. + """ + + def __init__(self, prefs=None): + self.prefs = prefs or {} + + def get_param(self, name, prompt='', secret=False, ask=True, reuse=False): + # First, check in this objects stored preferences. + if name in self.prefs: + return self.prefs[name] + # Second, check for a command line parameter. + value = None + for i in xrange(len(sys.argv)): + if sys.argv[i].startswith('--%s=' % name): + value = sys.argv[i].split('=')[1] + elif sys.argv[i] == '--%s' % name: + value = sys.argv[i + 1] + # Third, if it was not on the command line, ask the user to input the + # value. + if value is None and ask: + prompt = '%s: ' % prompt + if secret: + value = getpass.getpass(prompt) + else: + value = raw_input(prompt) + # If we want to save the preference for reuse in future requests, add it + # to this object's prefs. + if value is not None and reuse: + self.prefs[name] = value + return value + + def authorize_client(self, client, auth_type=None, service=None, + source=None, scopes=None, oauth_type=None, + consumer_key=None, consumer_secret=None): + """Uses command line arguments, or prompts user for token values.""" + if 'client_auth_token' in self.prefs: + return + if auth_type is None: + auth_type = int(self.get_param( + 'auth_type', 'Please choose the authorization mechanism you want' + ' to use.\n' + '1. to use your email address and password (ClientLogin)\n' + '2. to use a web browser to visit an auth web page (AuthSub)\n' + '3. if you have registed to use OAuth\n', reuse=True)) + + # Get the scopes for the services we want to access. + if auth_type == AUTHSUB or auth_type == OAUTH: + if scopes is None: + scopes = self.get_param( + 'scopes', 'Enter the URL prefixes (scopes) for the resources you ' + 'would like to access.\nFor multiple scope URLs, place a comma ' + 'between each URL.\n' + 'Example: http://www.google.com/calendar/feeds/,' + 'http://www.google.com/m8/feeds/\n', reuse=True).split(',') + elif isinstance(scopes, (str, unicode)): + scopes = scopes.split(',') + + if auth_type == CLIENT_LOGIN: + email = self.get_param('email', 'Please enter your username', + reuse=False) + password = self.get_param('password', 'Password', True, reuse=False) + if service is None: + service = self.get_param( + 'service', 'What is the name of the service you wish to access?' 
+ '\n(See list:' + ' http://code.google.com/apis/gdata/faq.html#clientlogin)', + reuse=True) + if source is None: + source = self.get_param('source', ask=False, reuse=True) + client.client_login(email, password, source=source, service=service) + elif auth_type == AUTHSUB: + auth_sub_token = self.get_param('auth_sub_token', ask=False, reuse=True) + session_token = self.get_param('session_token', ask=False, reuse=True) + private_key = None + auth_url = None + single_use_token = None + rsa_private_key = self.get_param( + 'rsa_private_key', + 'If you want to use secure mode AuthSub, please provide the\n' + ' location of your RSA private key which corresponds to the\n' + ' certificate you have uploaded for your domain. If you do not\n' + ' have an RSA key, simply press enter', reuse=True) + + if rsa_private_key: + try: + private_key_file = open(rsa_private_key, 'rb') + private_key = private_key_file.read() + private_key_file.close() + except IOError: + print 'Unable to read private key from file' + + if private_key is not None: + if client.auth_token is None: + if session_token: + client.auth_token = gdata.gauth.SecureAuthSubToken( + session_token, private_key, scopes) + self.prefs['client_auth_token'] = gdata.gauth.token_to_blob( + client.auth_token) + return + elif auth_sub_token: + client.auth_token = gdata.gauth.SecureAuthSubToken( + auth_sub_token, private_key, scopes) + client.upgrade_token() + self.prefs['client_auth_token'] = gdata.gauth.token_to_blob( + client.auth_token) + return + + auth_url = gdata.gauth.generate_auth_sub_url( + 'http://gauthmachine.appspot.com/authsub', scopes, True) + print 'with a private key, get ready for this URL', auth_url + + else: + if client.auth_token is None: + if session_token: + client.auth_token = gdata.gauth.AuthSubToken(session_token, + scopes) + self.prefs['client_auth_token'] = gdata.gauth.token_to_blob( + client.auth_token) + return + elif auth_sub_token: + client.auth_token = gdata.gauth.AuthSubToken(auth_sub_token, + scopes) + client.upgrade_token() + self.prefs['client_auth_token'] = gdata.gauth.token_to_blob( + client.auth_token) + return + + auth_url = gdata.gauth.generate_auth_sub_url( + 'http://gauthmachine.appspot.com/authsub', scopes) + + print 'Visit the following URL in your browser to authorize this app:' + print str(auth_url) + print 'After agreeing to authorize the app, copy the token value from' + print ' the URL. Example: "www.google.com/?token=ab12" token value is' + print ' ab12' + token_value = raw_input('Please enter the token value: ') + if private_key is not None: + single_use_token = gdata.gauth.SecureAuthSubToken( + token_value, private_key, scopes) + else: + single_use_token = gdata.gauth.AuthSubToken(token_value, scopes) + client.auth_token = single_use_token + client.upgrade_token() + + elif auth_type == OAUTH: + if oauth_type is None: + oauth_type = int(self.get_param( + 'oauth_type', 'Please choose the authorization mechanism you want' + ' to use.\n' + '1. use an HMAC signature using your consumer key and secret\n' + '2. use RSA with your private key to sign requests\n', + reuse=True)) + + consumer_key = self.get_param( + 'consumer_key', 'Please enter your OAuth conumer key ' + 'which identifies your app', reuse=True) + + if oauth_type == HMAC: + consumer_secret = self.get_param( + 'consumer_secret', 'Please enter your OAuth conumer secret ' + 'which you share with the OAuth provider', True, reuse=False) + # Swap out this code once the client supports requesting an oauth + # token. + # Get a request token. 
+ request_token = client.get_oauth_token( + scopes, 'http://gauthmachine.appspot.com/oauth', consumer_key, + consumer_secret=consumer_secret) + elif oauth_type == RSA: + rsa_private_key = self.get_param( + 'rsa_private_key', + 'Please provide the location of your RSA private key which\n' + ' corresponds to the certificate you have uploaded for your' + ' domain.', + reuse=True) + try: + private_key_file = open(rsa_private_key, 'rb') + private_key = private_key_file.read() + private_key_file.close() + except IOError: + print 'Unable to read private key from file' + + request_token = client.get_oauth_token( + scopes, 'http://gauthmachine.appspot.com/oauth', consumer_key, + rsa_private_key=private_key) + else: + print 'Invalid OAuth signature type' + return None + + # Authorize the request token in the browser. + print 'Visit the following URL in your browser to authorize this app:' + print str(request_token.generate_authorization_url()) + print 'After agreeing to authorize the app, copy URL from the browser\'s' + print ' address bar.' + url = raw_input('Please enter the url: ') + gdata.gauth.authorize_request_token(request_token, url) + # Exchange for an access token. + client.auth_token = client.get_access_token(request_token) + else: + print 'Invalid authorization type.' + return None + if client.auth_token: + self.prefs['client_auth_token'] = gdata.gauth.token_to_blob( + client.auth_token) + + +def get_param(name, prompt='', secret=False, ask=True): + settings = SettingsUtil() + return settings.get_param(name=name, prompt=prompt, secret=secret, ask=ask) + + +def authorize_client(client, auth_type=None, service=None, source=None, + scopes=None, oauth_type=None, consumer_key=None, + consumer_secret=None): + """Uses command line arguments, or prompts user for token values.""" + settings = SettingsUtil() + return settings.authorize_client(client=client, auth_type=auth_type, + service=service, source=source, + scopes=scopes, oauth_type=oauth_type, + consumer_key=consumer_key, + consumer_secret=consumer_secret) + + +def print_options(): + """Displays usage information, available command line params.""" + # TODO: fill in the usage description for authorizing the client. + print '' + diff --git a/gam/gdata/service.py b/gam/gdata/service.py new file mode 100755 index 00000000000..24c75d28c3c --- /dev/null +++ b/gam/gdata/service.py @@ -0,0 +1,1717 @@ +#!/usr/bin/python +# +# Copyright (C) 2006,2008 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +"""GDataService provides CRUD ops. and programmatic login for GData services. + + Error: A base exception class for all exceptions in the gdata_client + module. + + CaptchaRequired: This exception is thrown when a login attempt results in a + captcha challenge from the ClientLogin service. When this + exception is thrown, the captcha_token and captcha_url are + set to the values provided in the server's response. + + BadAuthentication: Raised when a login attempt is made with an incorrect + username or password. 
+ + NotAuthenticated: Raised if an operation requiring authentication is called + before a user has authenticated. + + NonAuthSubToken: Raised if a method to modify an AuthSub token is used when + the user is either not authenticated or is authenticated + through another authentication mechanism. + + NonOAuthToken: Raised if a method to modify an OAuth token is used when the + user is either not authenticated or is authenticated through + another authentication mechanism. + + RequestError: Raised if a CRUD request returned a non-success code. + + UnexpectedReturnType: Raised if the response from the server was not of the + desired type. For example, this would be raised if the + server sent a feed when the client requested an entry. + + GDataService: Encapsulates user credentials needed to perform insert, update + and delete operations with the GData API. An instance can + perform user authentication, query, insertion, deletion, and + update. + + Query: Eases query URI creation by allowing URI parameters to be set as + dictionary attributes. For example a query with a feed of + '/base/feeds/snippets' and ['bq'] set to 'digital camera' will + produce '/base/feeds/snippets?bq=digital+camera' when .ToUri() is + called on it. +""" + + +__author__ = 'api.jscudder (Jeffrey Scudder)' + +import re +import urllib +import urlparse +try: + from xml.etree import cElementTree as ElementTree +except ImportError: + try: + import cElementTree as ElementTree + except ImportError: + try: + from xml.etree import ElementTree + except ImportError: + from elementtree import ElementTree +import atom.service +import gdata +import atom +import atom.http_interface +import atom.token_store +import gdata.auth +import gdata.gauth + + +AUTH_SERVER_HOST = 'https://www.google.com' + + +# When requesting an AuthSub token, it is often helpful to track the scope +# which is being requested. One way to accomplish this is to add a URL +# parameter to the 'next' URL which contains the requested scope. This +# constant is the default name (AKA key) for the URL parameter. +SCOPE_URL_PARAM_NAME = 'authsub_token_scope' +# When requesting an OAuth access token or authorization of an existing OAuth +# request token, it is often helpful to track the scope(s) which is/are being +# requested. One way to accomplish this is to add a URL parameter to the +# 'callback' URL which contains the requested scope. This constant is the +# default name (AKA key) for the URL parameter. +OAUTH_SCOPE_URL_PARAM_NAME = 'oauth_token_scope' +# Maps the service names used in ClientLogin to scope URLs. +CLIENT_LOGIN_SCOPES = gdata.gauth.AUTH_SCOPES +# Default parameters for GDataService.GetWithRetries method +DEFAULT_NUM_RETRIES = 3 +DEFAULT_DELAY = 1 +DEFAULT_BACKOFF = 2 + + +def lookup_scopes(service_name): + """Finds the scope URLs for the desired service. + + In some cases, an unknown service may be used, and in those cases this + function will return None. + """ + if service_name in CLIENT_LOGIN_SCOPES: + return CLIENT_LOGIN_SCOPES[service_name] + return None + + +# Module level variable specifies which module should be used by GDataService +# objects to make HttpRequests. This setting can be overridden on each +# instance of GDataService. +# This module level variable is deprecated. Reassign the http_client member +# of a GDataService object instead. 
+http_request_handler = atom.service + + +class Error(Exception): + pass + + +class CaptchaRequired(Error): + pass + + +class BadAuthentication(Error): + pass + + +class NotAuthenticated(Error): + pass + + +class NonAuthSubToken(Error): + pass + + +class NonOAuthToken(Error): + pass + + +class RequestError(Error): + pass + + +class UnexpectedReturnType(Error): + pass + + +class BadAuthenticationServiceURL(Error): + pass + + +class FetchingOAuthRequestTokenFailed(RequestError): + pass + + +class TokenUpgradeFailed(RequestError): + pass + + +class RevokingOAuthTokenFailed(RequestError): + pass + + +class AuthorizationRequired(Error): + pass + + +class TokenHadNoScope(Error): + pass + + +class RanOutOfTries(Error): + pass + + +class GDataService(atom.service.AtomService): + """Contains elements needed for GData login and CRUD request headers. + + Maintains additional headers (tokens for example) needed for the GData + services to allow a user to perform inserts, updates, and deletes. + """ + # The hander member is deprecated, use http_client instead. + handler = None + # The auth_token member is deprecated, use the token_store instead. + auth_token = None + # The tokens dict is deprecated in favor of the token_store. + tokens = None + + def __init__(self, email=None, password=None, account_type='HOSTED_OR_GOOGLE', + service=None, auth_service_url=None, source=None, server=None, + additional_headers=None, handler=None, tokens=None, + http_client=None, token_store=None): + """Creates an object of type GDataService. + + Args: + email: string (optional) The user's email address, used for + authentication. + password: string (optional) The user's password. + account_type: string (optional) The type of account to use. Use + 'GOOGLE' for regular Google accounts or 'HOSTED' for Google + Apps accounts, or 'HOSTED_OR_GOOGLE' to try finding a HOSTED + account first and, if it doesn't exist, try finding a regular + GOOGLE account. Default value: 'HOSTED_OR_GOOGLE'. + service: string (optional) The desired service for which credentials + will be obtained. + auth_service_url: string (optional) User-defined auth token request URL + allows users to explicitly specify where to send auth token requests. + source: string (optional) The name of the user's application. + server: string (optional) The name of the server to which a connection + will be opened. Default value: 'base.google.com'. + additional_headers: dictionary (optional) Any additional headers which + should be included with CRUD operations. + handler: module (optional) This parameter is deprecated and has been + replaced by http_client. + tokens: This parameter is deprecated, calls should be made to + token_store instead. + http_client: An object responsible for making HTTP requests using a + request method. If none is provided, a new instance of + atom.http.ProxiedHttpClient will be used. + token_store: Keeps a collection of authorization tokens which can be + applied to requests for a specific URLs. Critical methods are + find_token based on a URL (atom.url.Url or a string), add_token, + and remove_token. 
+ """ + atom.service.AtomService.__init__(self, http_client=http_client, + token_store=token_store) + self.email = email + self.password = password + self.account_type = account_type + self.service = service + self.auth_service_url = auth_service_url + self.server = server + self.additional_headers = additional_headers or {} + self._oauth_input_params = None + self.__SetSource(source) + self.__captcha_token = None + self.__captcha_url = None + self.__gsessionid = None + + if http_request_handler.__name__ == 'gdata.urlfetch': + import gdata.alt.appengine + self.http_client = gdata.alt.appengine.AppEngineHttpClient() + + def _SetSessionId(self, session_id): + """Used in unit tests to simulate a 302 which sets a gsessionid.""" + self.__gsessionid = session_id + + # Define properties for GDataService + def _SetAuthSubToken(self, auth_token, scopes=None): + """Deprecated, use SetAuthSubToken instead.""" + self.SetAuthSubToken(auth_token, scopes=scopes) + + def __SetAuthSubToken(self, auth_token, scopes=None): + """Deprecated, use SetAuthSubToken instead.""" + self._SetAuthSubToken(auth_token, scopes=scopes) + + def _GetAuthToken(self): + """Returns the auth token used for authenticating requests. + + Returns: + string + """ + current_scopes = lookup_scopes(self.service) + if current_scopes: + token = self.token_store.find_token(current_scopes[0]) + if hasattr(token, 'auth_header'): + return token.auth_header + return None + + def _GetCaptchaToken(self): + """Returns a captcha token if the most recent login attempt generated one. + + The captcha token is only set if the Programmatic Login attempt failed + because the Google service issued a captcha challenge. + + Returns: + string + """ + return self.__captcha_token + + def __GetCaptchaToken(self): + return self._GetCaptchaToken() + + captcha_token = property(__GetCaptchaToken, + doc="""Get the captcha token for a login request.""") + + def _GetCaptchaURL(self): + """Returns the URL of the captcha image if a login attempt generated one. + + The captcha URL is only set if the Programmatic Login attempt failed + because the Google service issued a captcha challenge. + + Returns: + string + """ + return self.__captcha_url + + def __GetCaptchaURL(self): + return self._GetCaptchaURL() + + captcha_url = property(__GetCaptchaURL, + doc="""Get the captcha URL for a login request.""") + + def GetGeneratorFromLinkFinder(self, link_finder, func, + num_retries=DEFAULT_NUM_RETRIES, + delay=DEFAULT_DELAY, + backoff=DEFAULT_BACKOFF): + """returns a generator for pagination""" + yield link_finder + next = link_finder.GetNextLink() + while next is not None: + next_feed = func(str(self.GetWithRetries( + next.href, num_retries=num_retries, delay=delay, backoff=backoff))) + yield next_feed + next = next_feed.GetNextLink() + + def _GetElementGeneratorFromLinkFinder(self, link_finder, func, + num_retries=DEFAULT_NUM_RETRIES, + delay=DEFAULT_DELAY, + backoff=DEFAULT_BACKOFF): + for element in self.GetGeneratorFromLinkFinder(link_finder, func, + num_retries=num_retries, + delay=delay, + backoff=backoff).entry: + yield element + + def GetOAuthInputParameters(self): + return self._oauth_input_params + + def SetOAuthInputParameters(self, signature_method, consumer_key, + consumer_secret=None, rsa_key=None, + two_legged_oauth=False, requestor_id=None): + """Sets parameters required for using OAuth authentication mechanism. + + NOTE: Though consumer_secret and rsa_key are optional, either of the two + is required depending on the value of the signature_method. 
+ + Args: + signature_method: class which provides implementation for strategy class + oauth.oauth.OAuthSignatureMethod. Signature method to be used for + signing each request. Valid implementations are provided as the + constants defined by gdata.auth.OAuthSignatureMethod. Currently + they are gdata.auth.OAuthSignatureMethod.RSA_SHA1 and + gdata.auth.OAuthSignatureMethod.HMAC_SHA1 + consumer_key: string Domain identifying third_party web application. + consumer_secret: string (optional) Secret generated during registration. + Required only for HMAC_SHA1 signature method. + rsa_key: string (optional) Private key required for RSA_SHA1 signature + method. + two_legged_oauth: boolean (optional) Enables two-legged OAuth process. + requestor_id: string (optional) User email adress to make requests on + their behalf. This parameter should only be set when two_legged_oauth + is True. + """ + self._oauth_input_params = gdata.auth.OAuthInputParams( + signature_method, consumer_key, consumer_secret=consumer_secret, + rsa_key=rsa_key, requestor_id=requestor_id) + if two_legged_oauth: + oauth_token = gdata.auth.OAuthToken( + oauth_input_params=self._oauth_input_params) + self.SetOAuthToken(oauth_token) + + def FetchOAuthRequestToken(self, scopes=None, extra_parameters=None, + request_url='%s/accounts/OAuthGetRequestToken' % \ + AUTH_SERVER_HOST, oauth_callback=None): + """Fetches and sets the OAuth request token and returns it. + + Args: + scopes: string or list of string base URL(s) of the service(s) to be + accessed. If None, then this method tries to determine the + scope(s) from the current service. + extra_parameters: dict (optional) key-value pairs as any additional + parameters to be included in the URL and signature while making a + request for fetching an OAuth request token. All the OAuth parameters + are added by default. But if provided through this argument, any + default parameters will be overwritten. For e.g. a default parameter + oauth_version 1.0 can be overwritten if + extra_parameters = {'oauth_version': '2.0'} + request_url: Request token URL. The default is + 'https://www.google.com/accounts/OAuthGetRequestToken'. + oauth_callback: str (optional) If set, it is assume the client is using + the OAuth v1.0a protocol where the callback url is sent in the + request token step. If the oauth_callback is also set in + extra_params, this value will override that one. + + Returns: + The fetched request token as a gdata.auth.OAuthToken object. + + Raises: + FetchingOAuthRequestTokenFailed if the server responded to the request + with an error. 
+ """ + if scopes is None: + scopes = lookup_scopes(self.service) + if not isinstance(scopes, (list, tuple)): + scopes = [scopes,] + if oauth_callback: + if extra_parameters is not None: + extra_parameters['oauth_callback'] = oauth_callback + else: + extra_parameters = {'oauth_callback': oauth_callback} + request_token_url = gdata.auth.GenerateOAuthRequestTokenUrl( + self._oauth_input_params, scopes, + request_token_url=request_url, + extra_parameters=extra_parameters) + response = self.http_client.request('GET', str(request_token_url)) + if response.status == 200: + token = gdata.auth.OAuthToken() + token.set_token_string(response.read()) + token.scopes = scopes + token.oauth_input_params = self._oauth_input_params + self.SetOAuthToken(token) + return token + error = { + 'status': response.status, + 'reason': 'Non 200 response on fetch request token', + 'body': response.read() + } + raise FetchingOAuthRequestTokenFailed(error) + + def SetOAuthToken(self, oauth_token): + """Attempts to set the current token and add it to the token store. + + The oauth_token can be any OAuth token i.e. unauthorized request token, + authorized request token or access token. + This method also attempts to add the token to the token store. + Use this method any time you want the current token to point to the + oauth_token passed. For e.g. call this method with the request token + you receive from FetchOAuthRequestToken. + + Args: + request_token: gdata.auth.OAuthToken OAuth request token. + """ + if self.auto_set_current_token: + self.current_token = oauth_token + if self.auto_store_tokens: + self.token_store.add_token(oauth_token) + + def GenerateOAuthAuthorizationURL( + self, request_token=None, callback_url=None, extra_params=None, + include_scopes_in_callback=False, + scopes_param_prefix=OAUTH_SCOPE_URL_PARAM_NAME, + request_url='%s/accounts/OAuthAuthorizeToken' % AUTH_SERVER_HOST): + """Generates URL at which user will login to authorize the request token. + + Args: + request_token: gdata.auth.OAuthToken (optional) OAuth request token. + If not specified, then the current token will be used if it is of + type <gdata.auth.OAuthToken>, else it is found by looking in the + token_store by looking for a token for the current scope. + callback_url: string (optional) The URL user will be sent to after + logging in and granting access. + extra_params: dict (optional) Additional parameters to be sent. + include_scopes_in_callback: Boolean (default=False) if set to True, and + if 'callback_url' is present, the 'callback_url' will be modified to + include the scope(s) from the request token as a URL parameter. The + key for the 'callback' URL's scope parameter will be + OAUTH_SCOPE_URL_PARAM_NAME. The benefit of including the scope URL as + a parameter to the 'callback' URL, is that the page which receives + the OAuth token will be able to tell which URLs the token grants + access to. + scopes_param_prefix: string (default='oauth_token_scope') The URL + parameter key which maps to the list of valid scopes for the token. + This URL parameter will be included in the callback URL along with + the scopes of the token as value if include_scopes_in_callback=True. + request_url: Authorization URL. The default is + 'https://www.google.com/accounts/OAuthAuthorizeToken'. + Returns: + A string URL at which the user is required to login. + + Raises: + NonOAuthToken if the user's request token is not an OAuth token or if a + request token was not available. 
+ """ + if request_token and not isinstance(request_token, gdata.auth.OAuthToken): + raise NonOAuthToken + if not request_token: + if isinstance(self.current_token, gdata.auth.OAuthToken): + request_token = self.current_token + else: + current_scopes = lookup_scopes(self.service) + if current_scopes: + token = self.token_store.find_token(current_scopes[0]) + if isinstance(token, gdata.auth.OAuthToken): + request_token = token + if not request_token: + raise NonOAuthToken + return str(gdata.auth.GenerateOAuthAuthorizationUrl( + request_token, + authorization_url=request_url, + callback_url=callback_url, extra_params=extra_params, + include_scopes_in_callback=include_scopes_in_callback, + scopes_param_prefix=scopes_param_prefix)) + + def UpgradeToOAuthAccessToken(self, authorized_request_token=None, + request_url='%s/accounts/OAuthGetAccessToken' \ + % AUTH_SERVER_HOST, oauth_version='1.0', + oauth_verifier=None): + """Upgrades the authorized request token to an access token and returns it + + Args: + authorized_request_token: gdata.auth.OAuthToken (optional) OAuth request + token. If not specified, then the current token will be used if it is + of type <gdata.auth.OAuthToken>, else it is found by looking in the + token_store by looking for a token for the current scope. + request_url: Access token URL. The default is + 'https://www.google.com/accounts/OAuthGetAccessToken'. + oauth_version: str (default='1.0') oauth_version parameter. All other + 'oauth_' parameters are added by default. This parameter too, is + added by default but here you can override it's value. + oauth_verifier: str (optional) If present, it is assumed that the client + will use the OAuth v1.0a protocol which includes passing the + oauth_verifier (as returned by the SP) in the access token step. + + Returns: + Access token + + Raises: + NonOAuthToken if the user's authorized request token is not an OAuth + token or if an authorized request token was not available. + TokenUpgradeFailed if the server responded to the request with an + error. + """ + if (authorized_request_token and + not isinstance(authorized_request_token, gdata.auth.OAuthToken)): + raise NonOAuthToken + if not authorized_request_token: + if isinstance(self.current_token, gdata.auth.OAuthToken): + authorized_request_token = self.current_token + else: + current_scopes = lookup_scopes(self.service) + if current_scopes: + token = self.token_store.find_token(current_scopes[0]) + if isinstance(token, gdata.auth.OAuthToken): + authorized_request_token = token + if not authorized_request_token: + raise NonOAuthToken + access_token_url = gdata.auth.GenerateOAuthAccessTokenUrl( + authorized_request_token, + self._oauth_input_params, + access_token_url=request_url, + oauth_version=oauth_version, + oauth_verifier=oauth_verifier) + response = self.http_client.request('GET', str(access_token_url)) + if response.status == 200: + token = gdata.auth.OAuthTokenFromHttpBody(response.read()) + token.scopes = authorized_request_token.scopes + token.oauth_input_params = authorized_request_token.oauth_input_params + self.SetOAuthToken(token) + return token + else: + raise TokenUpgradeFailed({'status': response.status, + 'reason': 'Non 200 response on upgrade', + 'body': response.read()}) + + def RevokeOAuthToken(self, request_url='%s/accounts/AuthSubRevokeToken' % \ + AUTH_SERVER_HOST): + """Revokes an existing OAuth token. + + request_url: Token revoke URL. The default is + 'https://www.google.com/accounts/AuthSubRevokeToken'. 
+ Raises: + NonOAuthToken if the user's auth token is not an OAuth token. + RevokingOAuthTokenFailed if request for revoking an OAuth token failed. + """ + scopes = lookup_scopes(self.service) + token = self.token_store.find_token(scopes[0]) + if not isinstance(token, gdata.auth.OAuthToken): + raise NonOAuthToken + + response = token.perform_request(self.http_client, 'GET', request_url, + headers={'Content-Type':'application/x-www-form-urlencoded'}) + if response.status == 200: + self.token_store.remove_token(token) + else: + raise RevokingOAuthTokenFailed + + def GetAuthSubToken(self): + """Returns the AuthSub token as a string. + + If the token is an gdta.auth.AuthSubToken, the Authorization Label + ("AuthSub token") is removed. + + This method examines the current_token to see if it is an AuthSubToken + or SecureAuthSubToken. If not, it searches the token_store for a token + which matches the current scope. + + The current scope is determined by the service name string member. + + Returns: + If the current_token is set to an AuthSubToken/SecureAuthSubToken, + return the token string. If there is no current_token, a token string + for a token which matches the service object's default scope is returned. + If there are no tokens valid for the scope, returns None. + """ + if isinstance(self.current_token, gdata.auth.AuthSubToken): + return self.current_token.get_token_string() + current_scopes = lookup_scopes(self.service) + if current_scopes: + token = self.token_store.find_token(current_scopes[0]) + if isinstance(token, gdata.auth.AuthSubToken): + return token.get_token_string() + else: + token = self.token_store.find_token(atom.token_store.SCOPE_ALL) + if isinstance(token, gdata.auth.ClientLoginToken): + return token.get_token_string() + return None + + def SetAuthSubToken(self, token, scopes=None, rsa_key=None): + """Sets the token sent in requests to an AuthSub token. + + Sets the current_token and attempts to add the token to the token_store. + + Only use this method if you have received a token from the AuthSub + service. The auth token is set automatically when UpgradeToSessionToken() + is used. See documentation for Google AuthSub here: + http://code.google.com/apis/accounts/AuthForWebApps.html + + Args: + token: gdata.auth.AuthSubToken or gdata.auth.SecureAuthSubToken or string + The token returned by the AuthSub service. If the token is an + AuthSubToken or SecureAuthSubToken, the scope information stored in + the token is used. If the token is a string, the scopes parameter is + used to determine the valid scopes. + scopes: list of URLs for which the token is valid. This is only used + if the token parameter is a string. + rsa_key: string (optional) Private key required for RSA_SHA1 signature + method. This parameter is necessary if the token is a string + representing a secure token. + """ + if not isinstance(token, gdata.auth.AuthSubToken): + token_string = token + if rsa_key: + token = gdata.auth.SecureAuthSubToken(rsa_key) + else: + token = gdata.auth.AuthSubToken() + + token.set_token_string(token_string) + + # If no scopes were set for the token, use the scopes passed in, or + # try to determine the scopes based on the current service name. If + # all else fails, set the token to match all requests. 
+ if not token.scopes: + if scopes is None: + scopes = lookup_scopes(self.service) + if scopes is None: + scopes = [atom.token_store.SCOPE_ALL] + token.scopes = scopes + if self.auto_set_current_token: + self.current_token = token + if self.auto_store_tokens: + self.token_store.add_token(token) + + def GetClientLoginToken(self): + """Returns the token string for the current token or a token matching the + service scope. + + If the current_token is a ClientLoginToken, the token string for + the current token is returned. If the current_token is not set, this method + searches for a token in the token_store which is valid for the service + object's current scope. + + The current scope is determined by the service name string member. + The token string is the end of the Authorization header, it doesn not + include the ClientLogin label. + """ + if isinstance(self.current_token, gdata.auth.ClientLoginToken): + return self.current_token.get_token_string() + current_scopes = lookup_scopes(self.service) + if current_scopes: + token = self.token_store.find_token(current_scopes[0]) + if isinstance(token, gdata.auth.ClientLoginToken): + return token.get_token_string() + else: + token = self.token_store.find_token(atom.token_store.SCOPE_ALL) + if isinstance(token, gdata.auth.ClientLoginToken): + return token.get_token_string() + return None + + def SetClientLoginToken(self, token, scopes=None): + """Sets the token sent in requests to a ClientLogin token. + + This method sets the current_token to a new ClientLoginToken and it + also attempts to add the ClientLoginToken to the token_store. + + Only use this method if you have received a token from the ClientLogin + service. The auth_token is set automatically when ProgrammaticLogin() + is used. See documentation for Google ClientLogin here: + http://code.google.com/apis/accounts/docs/AuthForInstalledApps.html + + Args: + token: string or instance of a ClientLoginToken. + """ + if not isinstance(token, gdata.auth.ClientLoginToken): + token_string = token + token = gdata.auth.ClientLoginToken() + token.set_token_string(token_string) + + if not token.scopes: + if scopes is None: + scopes = lookup_scopes(self.service) + if scopes is None: + scopes = [atom.token_store.SCOPE_ALL] + token.scopes = scopes + if self.auto_set_current_token: + self.current_token = token + if self.auto_store_tokens: + self.token_store.add_token(token) + + # Private methods to create the source property. + def __GetSource(self): + return self.__source + + def __SetSource(self, new_source): + self.__source = new_source + # Update the UserAgent header to include the new application name. + self.additional_headers['User-Agent'] = atom.http_interface.USER_AGENT % ( + self.__source,) + + source = property(__GetSource, __SetSource, + doc="""The source is the name of the application making the request. + It should be in the form company_id-app_name-app_version""") + + # Authentication operations + + def ProgrammaticLogin(self, captcha_token=None, captcha_response=None): + """Authenticates the user and sets the GData Auth token. + + Login retreives a temporary auth token which must be used with all + requests to GData services. The auth token is stored in the GData client + object. + + Login is also used to respond to a captcha challenge. If the user's login + attempt failed with a CaptchaRequired error, the user can respond by + calling Login with the captcha token and the answer to the challenge. 
+ + Args: + captcha_token: string (optional) The identifier for the captcha challenge + which was presented to the user. + captcha_response: string (optional) The user's answer to the captch + challenge. + + Raises: + CaptchaRequired if the login service will require a captcha response + BadAuthentication if the login service rejected the username or password + Error if the login service responded with a 403 different from the above + """ + request_body = gdata.auth.generate_client_login_request_body(self.email, + self.password, self.service, self.source, self.account_type, + captcha_token, captcha_response) + + # If the user has defined their own authentication service URL, + # send the ClientLogin requests to this URL: + if not self.auth_service_url: + auth_request_url = AUTH_SERVER_HOST + '/accounts/ClientLogin' + else: + auth_request_url = self.auth_service_url + + auth_response = self.http_client.request('POST', auth_request_url, + data=request_body, + headers={'Content-Type':'application/x-www-form-urlencoded'}) + response_body = auth_response.read() + + if auth_response.status == 200: + # TODO: insert the token into the token_store directly. + self.SetClientLoginToken( + gdata.auth.get_client_login_token(response_body)) + self.__captcha_token = None + self.__captcha_url = None + + elif auth_response.status == 403: + # Examine each line to find the error type and the captcha token and + # captch URL if they are present. + captcha_parameters = gdata.auth.get_captcha_challenge(response_body, + captcha_base_url='%s/accounts/' % AUTH_SERVER_HOST) + if captcha_parameters: + self.__captcha_token = captcha_parameters['token'] + self.__captcha_url = captcha_parameters['url'] + raise CaptchaRequired, 'Captcha Required' + elif response_body.splitlines()[0] == 'Error=BadAuthentication': + self.__captcha_token = None + self.__captcha_url = None + raise BadAuthentication, 'Incorrect username or password' + else: + self.__captcha_token = None + self.__captcha_url = None + raise Error, 'Server responded with a 403 code' + elif auth_response.status == 302: + self.__captcha_token = None + self.__captcha_url = None + # Google tries to redirect all bad URLs back to + # http://www.google.<locale>. If a redirect + # attempt is made, assume the user has supplied an incorrect authentication URL + raise BadAuthenticationServiceURL, 'Server responded with a 302 code.' + + def ClientLogin(self, username, password, account_type=None, service=None, + auth_service_url=None, source=None, captcha_token=None, + captcha_response=None): + """Convenience method for authenticating using ProgrammaticLogin. + + Sets values for email, password, and other optional members. + + Args: + username: + password: + account_type: string (optional) + service: string (optional) + auth_service_url: string (optional) + captcha_token: string (optional) + captcha_response: string (optional) + """ + self.email = username + self.password = password + + if account_type: + self.account_type = account_type + if service: + self.service = service + if source: + self.source = source + if auth_service_url: + self.auth_service_url = auth_service_url + + self.ProgrammaticLogin(captcha_token, captcha_response) + + def GenerateAuthSubURL(self, next, scope, secure=False, session=True, + domain='default'): + """Generate a URL at which the user will login and be redirected back. + + Users enter their credentials on a Google login page and a token is sent + to the URL specified in next. 
See documentation for AuthSub login at: + http://code.google.com/apis/accounts/docs/AuthSub.html + + Args: + next: string The URL user will be sent to after logging in. + scope: string or list of strings. The URLs of the services to be + accessed. + secure: boolean (optional) Determines whether or not the issued token + is a secure token. + session: boolean (optional) Determines whether or not the issued token + can be upgraded to a session token. + """ + if not isinstance(scope, (list, tuple)): + scope = (scope,) + return gdata.auth.generate_auth_sub_url(next, scope, secure=secure, + session=session, + request_url='%s/accounts/AuthSubRequest' % AUTH_SERVER_HOST, + domain=domain) + + def UpgradeToSessionToken(self, token=None): + """Upgrades a single use AuthSub token to a session token. + + Args: + token: A gdata.auth.AuthSubToken or gdata.auth.SecureAuthSubToken + (optional) which is good for a single use but can be upgraded + to a session token. If no token is passed in, the token + is found by looking in the token_store by looking for a token + for the current scope. + + Raises: + NonAuthSubToken if the user's auth token is not an AuthSub token + TokenUpgradeFailed if the server responded to the request with an + error. + """ + if token is None: + scopes = lookup_scopes(self.service) + if scopes: + token = self.token_store.find_token(scopes[0]) + else: + token = self.token_store.find_token(atom.token_store.SCOPE_ALL) + if not isinstance(token, gdata.auth.AuthSubToken): + raise NonAuthSubToken + + self.SetAuthSubToken(self.upgrade_to_session_token(token)) + + def upgrade_to_session_token(self, token): + """Upgrades a single use AuthSub token to a session token. + + Args: + token: A gdata.auth.AuthSubToken or gdata.auth.SecureAuthSubToken + which is good for a single use but can be upgraded to a + session token. + + Returns: + The upgraded token as a gdata.auth.AuthSubToken object. + + Raises: + TokenUpgradeFailed if the server responded to the request with an + error. + """ + response = token.perform_request(self.http_client, 'GET', + AUTH_SERVER_HOST + '/accounts/AuthSubSessionToken', + headers={'Content-Type':'application/x-www-form-urlencoded'}) + response_body = response.read() + if response.status == 200: + token.set_token_string( + gdata.auth.token_from_http_body(response_body)) + return token + else: + raise TokenUpgradeFailed({'status': response.status, + 'reason': 'Non 200 response on upgrade', + 'body': response_body}) + + def RevokeAuthSubToken(self): + """Revokes an existing AuthSub token. + + Raises: + NonAuthSubToken if the user's auth token is not an AuthSub token + """ + scopes = lookup_scopes(self.service) + token = self.token_store.find_token(scopes[0]) + if not isinstance(token, gdata.auth.AuthSubToken): + raise NonAuthSubToken + + response = token.perform_request(self.http_client, 'GET', + AUTH_SERVER_HOST + '/accounts/AuthSubRevokeToken', + headers={'Content-Type':'application/x-www-form-urlencoded'}) + if response.status == 200: + self.token_store.remove_token(token) + + def AuthSubTokenInfo(self): + """Fetches the AuthSub token's metadata from the server. 
+ + Raises: + NonAuthSubToken if the user's auth token is not an AuthSub token + """ + scopes = lookup_scopes(self.service) + token = self.token_store.find_token(scopes[0]) + if not isinstance(token, gdata.auth.AuthSubToken): + raise NonAuthSubToken + + response = token.perform_request(self.http_client, 'GET', + AUTH_SERVER_HOST + '/accounts/AuthSubTokenInfo', + headers={'Content-Type':'application/x-www-form-urlencoded'}) + result_body = response.read() + if response.status == 200: + return result_body + else: + raise RequestError, {'status': response.status, + 'body': result_body} + + def GetWithRetries(self, uri, extra_headers=None, redirects_remaining=4, + encoding='UTF-8', converter=None, num_retries=DEFAULT_NUM_RETRIES, + delay=DEFAULT_DELAY, backoff=DEFAULT_BACKOFF, logger=None): + """This is a wrapper method for Get with retrying capability. + + To avoid various errors while retrieving bulk entities by retrying + specified times. + + Note this method relies on the time module and so may not be usable + by default in Python2.2. + + Args: + num_retries: Integer; the retry count. + delay: Integer; the initial delay for retrying. + backoff: Integer; how much the delay should lengthen after each failure. + logger: An object which has a debug(str) method to receive logging + messages. Recommended that you pass in the logging module. + Raises: + ValueError if any of the parameters has an invalid value. + RanOutOfTries on failure after number of retries. + """ + # Moved import for time module inside this method since time is not a + # default module in Python2.2. This method will not be usable in + # Python2.2. + import time + if backoff <= 1: + raise ValueError("backoff must be greater than 1") + num_retries = int(num_retries) + + if num_retries < 0: + raise ValueError("num_retries must be 0 or greater") + + if delay <= 0: + raise ValueError("delay must be greater than 0") + + # Let's start + mtries, mdelay = num_retries, delay + while mtries > 0: + if mtries != num_retries: + if logger: + logger.debug("Retrying: %s" % uri) + try: + rv = self.Get(uri, extra_headers=extra_headers, + redirects_remaining=redirects_remaining, + encoding=encoding, converter=converter) + except SystemExit: + # Allow this error + raise + except RequestError, e: + # Error 500 is 'internal server error' and warrants a retry + # Error 503 is 'service unavailable' and warrants a retry + if e[0]['status'] not in [500, 503]: + raise e + # Else, fall through to the retry code... + except Exception, e: + if logger: + logger.debug(e) + # Fall through to the retry code... + else: + # This is the right path. + return rv + mtries -= 1 + time.sleep(mdelay) + mdelay *= backoff + raise RanOutOfTries('Ran out of tries.') + + # CRUD operations + def Get(self, uri, extra_headers=None, redirects_remaining=4, + encoding='UTF-8', converter=None): + """Query the GData API with the given URI + + The uri is the portion of the URI after the server value + (ex: www.google.com). + + To perform a query against Google Base, set the server to + 'base.google.com' and set the uri to '/base/feeds/...', where ... is + your query. For example, to find snippets for all digital cameras uri + should be set to: '/base/feeds/snippets?bq=digital+camera' + + Args: + uri: string The query in the form of a URI. Example: + '/base/feeds/snippets?bq=digital+camera'. + extra_headers: dictionary (optional) Extra HTTP headers to be included + in the GET request. These headers are in addition to + those stored in the client's additional_headers property. 
+                     The client automatically sets the Content-Type and
+                     Authorization headers.
+      redirects_remaining: int (optional) Tracks the number of additional
+          redirects this method will allow. If the service object receives
+          a redirect and remaining is 0, it will not follow the redirect.
+          This was added to avoid infinite redirect loops.
+      encoding: string (optional) The character encoding for the server's
+          response. Default is UTF-8.
+      converter: func (optional) A function which will transform
+          the server's results before they are returned. Example: use
+          GDataFeedFromString to parse the server response as if it
+          were a GDataFeed.
+
+    Returns:
+      If there is no converter specified in the call, a GDataFeed
+      or GDataEntry depending on which is sent from the server. If the
+      response is neither a feed nor an entry and there is no converter,
+      the response body is returned as a string. If there is a converter,
+      the returned value will be that of the converter function.
+    """
+
+    if extra_headers is None:
+      extra_headers = {}
+
+    if self.__gsessionid is not None:
+      if uri.find('gsessionid=') < 0:
+        if uri.find('?') > -1:
+          uri += '&gsessionid=%s' % (self.__gsessionid,)
+        else:
+          uri += '?gsessionid=%s' % (self.__gsessionid,)
+
+    server_response = self.request('GET', uri,
+        headers=extra_headers)
+    result_body = server_response.read()
+
+    if server_response.status == 200:
+      if converter:
+        return converter(result_body)
+      # There was no converter specified, so try to convert the
+      # server's response into a GDataFeed.
+      feed = gdata.GDataFeedFromString(result_body)
+      if not feed:
+        # If conversion to a GDataFeed failed, try to convert the server's
+        # response to a GDataEntry.
+        entry = gdata.GDataEntryFromString(result_body)
+        if not entry:
+          # The server's response wasn't a feed or an entry, so return the
+          # response body as a string.
+          return result_body
+        return entry
+      return feed
+    elif server_response.status == 302:
+      if redirects_remaining > 0:
+        location = (server_response.getheader('Location')
+                    or server_response.getheader('location'))
+        if location is not None:
+          m = re.compile('[\?\&]gsessionid=(\w*)').search(location)
+          if m is not None:
+            self.__gsessionid = m.group(1)
+          return GDataService.Get(self, location, extra_headers, redirects_remaining - 1,
+                                  encoding=encoding, converter=converter)
+        else:
+          raise RequestError, {'status': server_response.status,
+              'reason': '302 received without Location header',
+              'body': result_body}
+      else:
+        raise RequestError, {'status': server_response.status,
+            'reason': 'Redirect received, but redirects_remaining <= 0',
+            'body': result_body}
+    else:
+      raise RequestError, {'status': server_response.status,
+          'reason': server_response.reason, 'body': result_body}
+
+  def GetMedia(self, uri, extra_headers=None):
+    """Returns a MediaSource containing media and its metadata from the given
+    URI string.
+    """
+    response_handle = self.request('GET', uri,
+        headers=extra_headers)
+    return gdata.MediaSource(response_handle, response_handle.getheader(
+            'Content-Type'),
+        response_handle.getheader('Content-Length'))
+
+  def GetEntry(self, uri, extra_headers=None):
+    """Query the GData API with the given URI and receive an Entry.
+
+    See also documentation for gdata.service.Get
+
+    Args:
+      uri: string The query in the form of a URI. Example:
+           '/base/feeds/snippets?bq=digital+camera'.
+      extra_headers: dictionary (optional) Extra HTTP headers to be included
+                     in the GET request.
These headers are in addition to + those stored in the client's additional_headers property. + The client automatically sets the Content-Type and + Authorization headers. + + Returns: + A GDataEntry built from the XML in the server's response. + """ + + result = GDataService.Get(self, uri, extra_headers, + converter=atom.EntryFromString) + if isinstance(result, atom.Entry): + return result + else: + raise UnexpectedReturnType, 'Server did not send an entry' + + def GetFeed(self, uri, extra_headers=None, + converter=gdata.GDataFeedFromString): + """Query the GData API with the given URI and receive a Feed. + + See also documentation for gdata.service.Get + + Args: + uri: string The query in the form of a URI. Example: + '/base/feeds/snippets?bq=digital+camera'. + extra_headers: dictionary (optional) Extra HTTP headers to be included + in the GET request. These headers are in addition to + those stored in the client's additional_headers property. + The client automatically sets the Content-Type and + Authorization headers. + + Returns: + A GDataFeed built from the XML in the server's response. + """ + + result = GDataService.Get(self, uri, extra_headers, converter=converter) + if isinstance(result, atom.Feed): + return result + else: + raise UnexpectedReturnType, 'Server did not send a feed' + + def GetNext(self, feed): + """Requests the next 'page' of results in the feed. + + This method uses the feed's next link to request an additional feed + and uses the class of the feed to convert the results of the GET request. + + Args: + feed: atom.Feed or a subclass. The feed should contain a next link and + the type of the feed will be applied to the results from the + server. The new feed which is returned will be of the same class + as this feed which was passed in. + + Returns: + A new feed representing the next set of results in the server's feed. + The type of this feed will match that of the feed argument. + """ + next_link = feed.GetNextLink() + # Create a closure which will convert an XML string to the class of + # the feed object passed in. + def ConvertToFeedClass(xml_string): + return atom.CreateClassFromXMLString(feed.__class__, xml_string) + # Make a GET request on the next link and use the above closure for the + # converted which processes the XML string from the server. + if next_link and next_link.href: + return GDataService.Get(self, next_link.href, + converter=ConvertToFeedClass) + else: + return None + + def Post(self, data, uri, extra_headers=None, url_params=None, + escape_params=True, redirects_remaining=4, media_source=None, + converter=None): + """Insert or update data into a GData service at the given URI. + + Args: + data: string, ElementTree._Element, atom.Entry, or gdata.GDataEntry The + XML to be sent to the uri. + uri: string The location (feed) to which the data should be inserted. + Example: '/base/feeds/items'. + extra_headers: dict (optional) HTTP headers which are to be included. + The client automatically sets the Content-Type, + Authorization, and Content-Length headers. + url_params: dict (optional) Additional URL parameters to be included + in the URI. These are translated into query arguments + in the form '&dict_key=value&...'. + Example: {'max-results': '250'} becomes &max-results=250 + escape_params: boolean (optional) If false, the calling code has already + ensured that the query will form a valid URL (all + reserved characters have been escaped). If true, this + method will escape the query and any URL parameters + provided. 
+ media_source: MediaSource (optional) Container for the media to be sent + along with the entry, if provided. + converter: func (optional) A function which will be executed on the + server's response. Often this is a function like + GDataEntryFromString which will parse the body of the server's + response and return a GDataEntry. + + Returns: + If the post succeeded, this method will return a GDataFeed, GDataEntry, + or the results of running converter on the server's result body (if + converter was specified). + """ + return GDataService.PostOrPut(self, 'POST', data, uri, + extra_headers=extra_headers, url_params=url_params, + escape_params=escape_params, redirects_remaining=redirects_remaining, + media_source=media_source, converter=converter) + + def PostOrPut(self, verb, data, uri, extra_headers=None, url_params=None, + escape_params=True, redirects_remaining=4, media_source=None, + converter=None): + """Insert data into a GData service at the given URI. + + Args: + verb: string, either 'POST' or 'PUT' + data: string, ElementTree._Element, atom.Entry, or gdata.GDataEntry The + XML to be sent to the uri. + uri: string The location (feed) to which the data should be inserted. + Example: '/base/feeds/items'. + extra_headers: dict (optional) HTTP headers which are to be included. + The client automatically sets the Content-Type, + Authorization, and Content-Length headers. + url_params: dict (optional) Additional URL parameters to be included + in the URI. These are translated into query arguments + in the form '&dict_key=value&...'. + Example: {'max-results': '250'} becomes &max-results=250 + escape_params: boolean (optional) If false, the calling code has already + ensured that the query will form a valid URL (all + reserved characters have been escaped). If true, this + method will escape the query and any URL parameters + provided. + media_source: MediaSource (optional) Container for the media to be sent + along with the entry, if provided. + converter: func (optional) A function which will be executed on the + server's response. Often this is a function like + GDataEntryFromString which will parse the body of the server's + response and return a GDataEntry. + + Returns: + If the post succeeded, this method will return a GDataFeed, GDataEntry, + or the results of running converter on the server's result body (if + converter was specified). 
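+
+    A minimal sketch of a plain (non-media) insert; the entry fields and the
+    feed path are illustrative, and client stands for a GDataService instance:
+
+      new_entry = gdata.GDataEntry()
+      new_entry.title = atom.Title(text='An example entry')
+      created = client.PostOrPut('POST', new_entry, '/base/feeds/items',
+                                 converter=gdata.GDataEntryFromString)
+
+    Post and Put are thin wrappers which call this method with the verb
+    already filled in.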
+ """ + if extra_headers is None: + extra_headers = {} + + if self.__gsessionid is not None: + if uri.find('gsessionid=') < 0: + if url_params is None: + url_params = {} + url_params['gsessionid'] = self.__gsessionid + + if data and media_source: + if ElementTree.iselement(data): + data_str = ElementTree.tostring(data) + else: + data_str = str(data) + + multipart = [] + multipart.append('Media multipart posting\r\n--END_OF_PART\r\n' + \ + 'Content-Type: application/atom+xml\r\n\r\n') + multipart.append('\r\n--END_OF_PART\r\nContent-Type: ' + \ + media_source.content_type+'\r\n\r\n') + multipart.append('\r\n--END_OF_PART--\r\n') + + extra_headers['MIME-version'] = '1.0' + extra_headers['Content-Length'] = str(len(multipart[0]) + + len(multipart[1]) + len(multipart[2]) + + len(data_str) + media_source.content_length) + + extra_headers['Content-Type'] = 'multipart/related; boundary=END_OF_PART' + server_response = self.request(verb, uri, + data=[multipart[0], data_str, multipart[1], media_source.file_handle, + multipart[2]], headers=extra_headers, url_params=url_params) + result_body = server_response.read() + + elif media_source or isinstance(data, gdata.MediaSource): + if isinstance(data, gdata.MediaSource): + media_source = data + extra_headers['Content-Length'] = str(media_source.content_length) + extra_headers['Content-Type'] = media_source.content_type + server_response = self.request(verb, uri, + data=media_source.file_handle, headers=extra_headers, + url_params=url_params) + result_body = server_response.read() + + else: + http_data = data + content_type = 'application/atom+xml' + extra_headers['Content-Type'] = content_type + server_response = self.request(verb, uri, data=http_data, + headers=extra_headers, url_params=url_params) + result_body = server_response.read() + + # Server returns 201 for most post requests, but when performing a batch + # request the server responds with a 200 on success. + if server_response.status == 201 or server_response.status == 200: + if converter: + return converter(result_body) + feed = gdata.GDataFeedFromString(result_body) + if not feed: + entry = gdata.GDataEntryFromString(result_body) + if not entry: + return result_body + return entry + return feed + elif server_response.status == 302: + if redirects_remaining > 0: + location = (server_response.getheader('Location') + or server_response.getheader('location')) + if location is not None: + m = re.compile('[\?\&]gsessionid=(\w*)').search(location) + if m is not None: + self.__gsessionid = m.group(1) + return GDataService.PostOrPut(self, verb, data, location, + extra_headers, url_params, escape_params, + redirects_remaining - 1, media_source, converter=converter) + else: + raise RequestError, {'status': server_response.status, + 'reason': '302 received without Location header', + 'body': result_body} + else: + raise RequestError, {'status': server_response.status, + 'reason': 'Redirect received, but redirects_remaining <= 0', + 'body': result_body} + else: + raise RequestError, {'status': server_response.status, + 'reason': server_response.reason, 'body': result_body} + + def Put(self, data, uri, extra_headers=None, url_params=None, + escape_params=True, redirects_remaining=3, media_source=None, + converter=None): + """Updates an entry at the given URI. + + Args: + data: string, ElementTree._Element, or xml_wrapper.ElementWrapper The + XML containing the updated data. + uri: string A URI indicating entry to which the update will be applied. 
+ Example: '/base/feeds/items/ITEM-ID' + extra_headers: dict (optional) HTTP headers which are to be included. + The client automatically sets the Content-Type, + Authorization, and Content-Length headers. + url_params: dict (optional) Additional URL parameters to be included + in the URI. These are translated into query arguments + in the form '&dict_key=value&...'. + Example: {'max-results': '250'} becomes &max-results=250 + escape_params: boolean (optional) If false, the calling code has already + ensured that the query will form a valid URL (all + reserved characters have been escaped). If true, this + method will escape the query and any URL parameters + provided. + converter: func (optional) A function which will be executed on the + server's response. Often this is a function like + GDataEntryFromString which will parse the body of the server's + response and return a GDataEntry. + + Returns: + If the put succeeded, this method will return a GDataFeed, GDataEntry, + or the results of running converter on the server's result body (if + converter was specified). + """ + return GDataService.PostOrPut(self, 'PUT', data, uri, + extra_headers=extra_headers, url_params=url_params, + escape_params=escape_params, redirects_remaining=redirects_remaining, + media_source=media_source, converter=converter) + + def Delete(self, uri, extra_headers=None, url_params=None, + escape_params=True, redirects_remaining=4): + """Deletes the entry at the given URI. + + Args: + uri: string The URI of the entry to be deleted. Example: + '/base/feeds/items/ITEM-ID' + extra_headers: dict (optional) HTTP headers which are to be included. + The client automatically sets the Content-Type and + Authorization headers. + url_params: dict (optional) Additional URL parameters to be included + in the URI. These are translated into query arguments + in the form '&dict_key=value&...'. + Example: {'max-results': '250'} becomes &max-results=250 + escape_params: boolean (optional) If false, the calling code has already + ensured that the query will form a valid URL (all + reserved characters have been escaped). If true, this + method will escape the query and any URL parameters + provided. + + Returns: + True if the entry was deleted. 
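+
+    A minimal sketch, using the illustrative entry URI from above (client
+    stands for a GDataService instance):
+
+      if client.Delete('/base/feeds/items/ITEM-ID'):
+        print 'Entry deleted.'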
+ """ + if extra_headers is None: + extra_headers = {} + + if self.__gsessionid is not None: + if uri.find('gsessionid=') < 0: + if url_params is None: + url_params = {} + url_params['gsessionid'] = self.__gsessionid + + server_response = self.request('DELETE', uri, + headers=extra_headers, url_params=url_params) + result_body = server_response.read() + + if server_response.status == 200: + return True + elif server_response.status == 302: + if redirects_remaining > 0: + location = (server_response.getheader('Location') + or server_response.getheader('location')) + if location is not None: + m = re.compile('[\?\&]gsessionid=(\w*)').search(location) + if m is not None: + self.__gsessionid = m.group(1) + return GDataService.Delete(self, location, extra_headers, + url_params, escape_params, redirects_remaining - 1) + else: + raise RequestError, {'status': server_response.status, + 'reason': '302 received without Location header', + 'body': result_body} + else: + raise RequestError, {'status': server_response.status, + 'reason': 'Redirect received, but redirects_remaining <= 0', + 'body': result_body} + else: + raise RequestError, {'status': server_response.status, + 'reason': server_response.reason, 'body': result_body} + + +def ExtractToken(url, scopes_included_in_next=True): + """Gets the AuthSub token from the current page's URL. + + Designed to be used on the URL that the browser is sent to after the user + authorizes this application at the page given by GenerateAuthSubRequestUrl. + + Args: + url: The current page's URL. It should contain the token as a URL + parameter. Example: 'http://example.com/?...&token=abcd435' + scopes_included_in_next: If True, this function looks for a scope value + associated with the token. The scope is a URL parameter with the + key set to SCOPE_URL_PARAM_NAME. This parameter should be present + if the AuthSub request URL was generated using + GenerateAuthSubRequestUrl with include_scope_in_next set to True. + + Returns: + A tuple containing the token string and a list of scope strings for which + this token should be valid. If the scope was not included in the URL, the + tuple will contain (token, None). + """ + parsed = urlparse.urlparse(url) + token = gdata.auth.AuthSubTokenFromUrl(parsed[4]) + scopes = '' + if scopes_included_in_next: + for pair in parsed[4].split('&'): + if pair.startswith('%s=' % SCOPE_URL_PARAM_NAME): + scopes = urllib.unquote_plus(pair.split('=')[1]) + return (token, scopes.split(' ')) + + +def GenerateAuthSubRequestUrl(next, scopes, hd='default', secure=False, + session=True, request_url='https://www.google.com/accounts/AuthSubRequest', + include_scopes_in_next=True): + """Creates a URL to request an AuthSub token to access Google services. + + For more details on AuthSub, see the documentation here: + http://code.google.com/apis/accounts/docs/AuthSub.html + + Args: + next: The URL where the browser should be sent after the user authorizes + the application. This page is responsible for receiving the token + which is embeded in the URL as a parameter. + scopes: The base URL to which access will be granted. Example: + 'http://www.google.com/calendar/feeds' will grant access to all + URLs in the Google Calendar data API. If you would like a token for + multiple scopes, pass in a list of URL strings. + hd: The domain to which the user's account belongs. This is set to the + domain name if you are using Google Apps. Example: 'example.org' + Defaults to 'default' + secure: If set to True, all requests should be signed. 
The default is + False. + session: If set to True, the token received by the 'next' URL can be + upgraded to a multiuse session token. If session is set to False, the + token may only be used once and cannot be upgraded. Default is True. + request_url: The base of the URL to which the user will be sent to + authorize this application to access their data. The default is + 'https://www.google.com/accounts/AuthSubRequest'. + include_scopes_in_next: Boolean if set to true, the 'next' parameter will + be modified to include the requested scope as a URL parameter. The + key for the next's scope parameter will be SCOPE_URL_PARAM_NAME. The + benefit of including the scope URL as a parameter to the next URL, is + that the page which receives the AuthSub token will be able to tell + which URLs the token grants access to. + + Returns: + A URL string to which the browser should be sent. + """ + if isinstance(scopes, list): + scope = ' '.join(scopes) + else: + scope = scopes + if include_scopes_in_next: + if next.find('?') > -1: + next += '&%s' % urllib.urlencode({SCOPE_URL_PARAM_NAME:scope}) + else: + next += '?%s' % urllib.urlencode({SCOPE_URL_PARAM_NAME:scope}) + return gdata.auth.GenerateAuthSubUrl(next=next, scope=scope, secure=secure, + session=session, request_url=request_url, domain=hd) + + +class Query(dict): + """Constructs a query URL to be used in GET requests + + Url parameters are created by adding key-value pairs to this object as a + dict. For example, to add &max-results=25 to the URL do + my_query['max-results'] = 25 + + Category queries are created by adding category strings to the categories + member. All items in the categories list will be concatenated with the / + symbol (symbolizing a category x AND y restriction). If you would like to OR + 2 categories, append them as one string with a | between the categories. + For example, do query.categories.append('Fritz|Laurie') to create a query + like this feed/-/Fritz%7CLaurie . This query will look for results in both + categories. + """ + + def __init__(self, feed=None, text_query=None, params=None, + categories=None): + """Constructor for Query + + Args: + feed: str (optional) The path for the feed (Examples: + '/base/feeds/snippets' or 'calendar/feeds/jo@gmail.com/private/full' + text_query: str (optional) The contents of the q query parameter. The + contents of the text_query are URL escaped upon conversion to a URI. + params: dict (optional) Parameter value string pairs which become URL + params when translated to a URI. These parameters are added to the + query's items (key-value pairs). + categories: list (optional) List of category strings which should be + included as query categories. See + http://code.google.com/apis/gdata/reference.html#Queries for + details. If you want to get results from category A or B (both + categories), specify a single list item 'A|B'. 
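+
+    A minimal sketch (the feed path, query text and parameter values are
+    illustrative):
+
+      query = Query(feed='/base/feeds/snippets',
+                    text_query='digital camera',
+                    params={'max-results': '25'})
+      query.categories.append('Fritz|Laurie')
+      uri = query.ToUri()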
+ """ + + self.feed = feed + self.categories = [] + if text_query: + self.text_query = text_query + if isinstance(params, dict): + for param in params: + self[param] = params[param] + if isinstance(categories, list): + for category in categories: + self.categories.append(category) + + def _GetTextQuery(self): + if 'q' in self.keys(): + return self['q'] + else: + return None + + def _SetTextQuery(self, query): + self['q'] = query + + text_query = property(_GetTextQuery, _SetTextQuery, + doc="""The feed query's q parameter""") + + def _GetAuthor(self): + if 'author' in self.keys(): + return self['author'] + else: + return None + + def _SetAuthor(self, query): + self['author'] = query + + author = property(_GetAuthor, _SetAuthor, + doc="""The feed query's author parameter""") + + def _GetAlt(self): + if 'alt' in self.keys(): + return self['alt'] + else: + return None + + def _SetAlt(self, query): + self['alt'] = query + + alt = property(_GetAlt, _SetAlt, + doc="""The feed query's alt parameter""") + + def _GetUpdatedMin(self): + if 'updated-min' in self.keys(): + return self['updated-min'] + else: + return None + + def _SetUpdatedMin(self, query): + self['updated-min'] = query + + updated_min = property(_GetUpdatedMin, _SetUpdatedMin, + doc="""The feed query's updated-min parameter""") + + def _GetUpdatedMax(self): + if 'updated-max' in self.keys(): + return self['updated-max'] + else: + return None + + def _SetUpdatedMax(self, query): + self['updated-max'] = query + + updated_max = property(_GetUpdatedMax, _SetUpdatedMax, + doc="""The feed query's updated-max parameter""") + + def _GetPublishedMin(self): + if 'published-min' in self.keys(): + return self['published-min'] + else: + return None + + def _SetPublishedMin(self, query): + self['published-min'] = query + + published_min = property(_GetPublishedMin, _SetPublishedMin, + doc="""The feed query's published-min parameter""") + + def _GetPublishedMax(self): + if 'published-max' in self.keys(): + return self['published-max'] + else: + return None + + def _SetPublishedMax(self, query): + self['published-max'] = query + + published_max = property(_GetPublishedMax, _SetPublishedMax, + doc="""The feed query's published-max parameter""") + + def _GetStartIndex(self): + if 'start-index' in self.keys(): + return self['start-index'] + else: + return None + + def _SetStartIndex(self, query): + if not isinstance(query, str): + query = str(query) + self['start-index'] = query + + start_index = property(_GetStartIndex, _SetStartIndex, + doc="""The feed query's start-index parameter""") + + def _GetMaxResults(self): + if 'max-results' in self.keys(): + return self['max-results'] + else: + return None + + def _SetMaxResults(self, query): + if not isinstance(query, str): + query = str(query) + self['max-results'] = query + + max_results = property(_GetMaxResults, _SetMaxResults, + doc="""The feed query's max-results parameter""") + + def _GetOrderBy(self): + if 'orderby' in self.keys(): + return self['orderby'] + else: + return None + + def _SetOrderBy(self, query): + self['orderby'] = query + + orderby = property(_GetOrderBy, _SetOrderBy, + doc="""The feed query's orderby parameter""") + + def ToUri(self): + q_feed = self.feed or '' + category_string = '/'.join( + [urllib.quote_plus(c) for c in self.categories]) + # Add categories to the feed if there are any. 
+ if len(self.categories) > 0: + q_feed = q_feed + '/-/' + category_string + return atom.service.BuildUri(q_feed, self) + + def __str__(self): + return self.ToUri() diff --git a/gam/gdata/service.pyc b/gam/gdata/service.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2ae546c1774531c5d1176eec90ecb31e4fd63f37 GIT binary patch literal 67714 zcmeIbdyrgNdKY-Js#H>`OV8G`Tit#8r7BHJRsEhGO>25erBb(Mq>|=VX?m>bagvpJ ztCUh!W+`u$9(FfFj2eRt3^0HV!!i)w#S56R85`RK26osD7B*sIy?|{N*o}Sv;qWeo zSnskMM8N)jzwexTPd=)jrFI*7rt9kFz2~0uo!581^ZOoW`2X6m{jv35x>Swa|3>le z=kb^Qi=im0MbQ;pOVO25R2zzF!%<@>x`G?KAC9gJ`}>jT%1Cr)pMH)+jV;mYXmn+3 zRC>L+8jYmeqU+nE_RmM9Qt8fa|A`X3-aMDT|5?4aJhmv0tuBv8^E5xrG%Ozp#-deA z&@4q?@qdoC?a`I{d_m*Um2rQ6U+{cKaKAIS-xZ|at$UEVC#s>ay-{sjbmjgi9gnW< zi>~dDt{jMJW6_m^Q4J*?ifYLJa8w(Q-bT|s5T*A;*B*?n9EoZ>qV%C?8O^wpukJ?C z4_tXTs_lx>hxK%K{`8Tkw#Pr+n?H@C8Va?v`|_ucMz#I^>4E&|V^Qs(e|ji?`gl}3 z?4Lf6KYb#qJ?Nhv$)7$M)gJOsAI_g1jcSiX=@X(i&Yw<1wMYHa$MUC>QSEX6^ojiG zv8eW>&-ZBl^ixr7!atqNpFS1Uj`^pb%AX#OYEQXVy>>#B9M9iPMd@@@I}yE&@%nUB z!_Q~*WLnp!qqnz2SDuY(c=(*Yd^Z2`c@FJ`$>%uke`9MDy?Un7sVt=JoAqiMueDn@ z>$NnCXOqik;?`O=6<3-yK3-{8R#z*XdNppeR_e`oxz&#KX`I=&+0^#!aXi;<x7sJ; znRuy^rEz+@nyz)~t!7+pRI*GTRT_=rc@`t9&eb$tsey`ewNX!-9c1JFx7w<$H&SIh zTUqN=uU3-u#(KS-)=tKYSL<1kQH&p*tL@gUW_;^v+KelX%u1(|uC8_BcABj>Ix4PG z<nPOg<EmvJqliW$ZLXy8a=W!E(q=`}dDYt0bt-<Hg>*o|pv;PD=Tse6R<4wCr*)k) zqGkP)^>!n!w9`SkWoakwwDkSWN@E?J<9ey)8;?|_?TL)FTx&HmYq8Un+RQrIwuw$d z$F-U#<D^p0(%Rq_0;#K&S{mP~cdmjo<XvsG+i5lEj-t8ZcpZf{E30XYaj=HIz13>h z)KT-TPEag+%9UDQX11-hv@L2tEW5JaT!|}PWU?!(?1kE()`*u<480T`ObX(wl`Q-` z^)xg|!@~N~B3rtPgm^XWTy53Zq!?cH<vXOxrN%>c3<yBx)=q<H;DA->NSLCzKu5FH z>86A2g4aRS2G@s!c725{-cm-v2b?#nDMn<oo~^pJZC)g)@3B@&JD^S+Oa1lwAT>fx zBELVk_f#Lw<}~X#rV)%2<3AiBc#V<LS#LMdAr-W9^F+2@t)}26)mF_$`sHSNdkrHs zttCpicxNpQ`<(sdr$#ZaZPvuMFoM{7t>wYZ;s!mKBKc0-LEcmGS@dptyRy30NIA37 z(YIRbjT#1L+_pJ}(LS)1U3RYU46_rpT2B3}B&o&)H9t^$R=XJ^UHgv4K*+C8#&b<@ z5U{pN2Yf+2SOt}w^OXkbg?veKE5@wDv{}WhM#-Jy@%kDVZvP-4ORA-fw1au=Co1Yj zmO#pk;m=$;Hx<v2Lu8#w6RSqG((D@mzCh8`J*bbz=+^d~<Bk{%^W#>i?npvkZbq)^ z|0RBlier7rETi~xaxO--jjb$ULN^+%TVyPJu~xy#g$lK?K(yFF<frKKqKsO-O7>W3 z<I7H`U0+&9V|Mj1x=18D=4kze0aN5LG0g>UnjM)<XU+QBTH47b&~vr;s}oB%CcdU( zLBK?<zEba08fdgtaKZ^>i3PF92)w<vjy4^b;VVlwPPy-&%D?AIG_}~e+^$a^Bg-lB zGA<f}jnq3++p~wU<X6_}Q`fTUdaVYgHTi{fdAXh5i5Kj_vHHKm|K=x4NZ1+2r8?J0 zS236t+i7Y~@59sJVRit&uXZ|X*~#hYTeogat+ZMzSh1_E)z1BR#g5O`mu$f+XH^WU z&K^8(@kRKwK8&Yz7F>rc9#whAKmM4-Uv>zWH$heuT_26w`*?$&hZi8QFHAC3_7k|A ztv72F1klSY3j^T1NgaITr!eOpgdL5hbImwwVHK-#I34FqTW?-(QjBn^uqfW)sKGd{ z@x5DgTaa$_jb=ZZe0ja8QDpsT94nr0HBn3)&x=B}$n5!ZbMuSk^A}$|H(y?uy?AME zp)&%?nY3tlS9qB#>?Gbsx@525Zu&A_X4BW}?WH@@XFzGARl%5BsjQxOcIt%}PORO* z@&+c^uEvwk9GjM4HSMd6mphP0qt&{;zGnTAUB`orTcK!lytJ*<*@8Q9<SL)>f9#0U z_{+B8l0w)oMahDZ#g^~zaapvM%bo4|VUk<9Y^m^Q^J<W=xZjJvjD?SuI=k@CY`1v$ zUP({J@NGhp?**ZpEZ@MNCzK<<Pb&L(kjmNqBLS(qao;P+eLN`T`2DefQuaP355*!c z?|nQ7-3h|J84yasL1-bseLToy`+X}QlOrLN%RU}day<TcK;=H%4-)%69@KK8e><S| zLEQIE`~_Qvuta@4h~5h#zY`Gs2=33O*mOgZ(x7#tLYt4Xl{z+P9}mJQX#8YAI4dF| zFRuZ$sHGn#S~+fiDxj5OO_JVhUH6Urv1{G^pzyB*6!SNkZL3}XAxYdE7Tg+rJO%>= z(w_;)C3{nwzE-Ktw-zWBemn?02*Q3YAe75PQfXdX?_6A7Y}eE5<3T0|=r0Cja(M{1 zpgEPK|1=Z+D~M$#Boa~wdi@gNa$dLSI?5~m13+vy1Fu8T+rYB760Ib@yiJKm1bB|= zev9t6N3~HU00O>MH-Nvl=>{<L&M4hM%o>38-BRhwE@IZq4Y<6))_|Xp26#8p>`<D$ zQ3~{VUzF|_ehu9JK=5Q&^fsOV0T+l(5M0Ekz`=2|Cwd#Yfd|~f8nSvYI=~nA>jj{3 z;n`On@+n@5@Fk#bLfcFVU>oo|U~zl{T$_&$>BnJZ0Aw6EJD)zN$^#P@kbMO(IKIGt z4@GZ7YXSUQQ1%r--gx+k5(DTKl6?i(_M>VAAknD&(I^G3jru<p-5n}j0h(=DJ+7>d z`JeXCu1|=}Pl<S7*T@+NH`)b=H4;pyxD!!L0QZ$Cg4=9#0M-Jx30NZ;K2-Xmx$zK) 
zHgpcpbS0=wsq%ockziVxKc{=Z+<?CUXrmclh_1azyqnekbW{`GeFf?DUy8EupZ$DJ z#D8YctLOFVWbo<*|J93n_1WMRGW@g%dzrhT&v5Af8ZNV~W~TyZBnFJay>pJj6o|PY z=-f*RX7@1H)f9_W+i>&?$ilLz)M|CX6OGs<h$lOQDQ#oxP^e=b>tQJcRD->z+xiLp z5HJ!u=(G<N!v5EA7Hxz*5CnSSxI_8twTlajb7#sIlk%$<FTQ$yuF5G-DVLkaS^Q=H zH7?kgUn@n|hN5f3+@WKZzj+@OQg4o9dJUD{#Bb~tN1`|J2sc}zH<hkD8oh~!c!C6Y zh?}j^oA?hm+oCtOMQcA8)wSm@W2jn!`}p?Sc=YBdvW`mbDJHzV-sx|4MWFv0Ci!r5 zeJE=GgQy061fAO6=&c>mn>(Uw_d5mGT`S-~)CA#M04%U(Sp+S~Ea4>pP5=m6OV@xk z3NHi}XqcFj<pe3j@l1OqJBe1~|I$@RN+;tCxDo)V$(Gb1mBumXFAPKmY_BviIvOA) zJAv3U`qAaUTnLvC)H5KTJ;MKa-2Gj$_sRTcm1?!M-t3eKWp4B--?54=Euk}zH5CJV z2*{eSfiNM(PCH!z4hgN0ARzx`Mj(8`hK{AP_dzZ*Yinp&BMQ|o(icN*f&>5ojh6xN zupuk4Wf^4SNnNhDff-|V0bpDP@)g%wY1W+R0I;oRfIhpkaa0HSSe-mUQ}LN}xdPo$ zEal5dUzdP3->z<^ZPcVa@RQo?*429TDzQrs*#>k77zFavS|TH$X2W`2(3vZ%4875y z@`w?gsHJEv!p9gqXg_K;JgdQ`BPdC1!Qf-D383kXdbQqZ+=(-YOVp7-sRpGBIsu6F zpd7^|NMF`kZ&x=QR;Do$G%mCngTJ!I!X!!h8nSXJw1|TzO_+zJBwf~$6(|LoO-xVV zxj_Z<rd&(ekbPZB>^6WdCp;kq?=3stdom8t;$Sw<Gyxv>b84e1pd`EM^#stbHP(sF z3te~mR;L@wTNApUT7yktcv?6A0M?s>+XQ}V=^FHPU{ll>)TA0)O4G0%+UZ)OQpJ0K z<V0<q!qr#TmKCR3-5~4ZfIS;fEff`z)(|}q)KVFhvknz?SPDB>*nAm5u}~A|+|;RO z>Pro1UcmuYE8G#quPrWK@`IKGR8v1F$d6~!=}pDwmZ>2^p@otZpe@$MTZOiwwH#zn z>9kg-NYm7%cI$RMt-XeO)A$;a>!4pxeo(WM@fXvSoC{+P+Db!z$uh&uk<B}(P;8=O z`B(}Ws0Kg!1I_2E;*{iW;z8X`2zx4?Z9@n^%RB8QO6^h>lPx-}GRJa_n16+z#*?Zc z*u>OjsF=~R6*sMpp<!z7qvK(VVCb~d)z;0_GPIg^h-C(Sr{6(iWRwGhHBX_eWx)of zOyE982sa7oU@46p?@|^sc(`Cdh=Y;q&3XqgGEr!)i+b#rFli;i^-F?DFGuWX*%lM& z5DX|Lrk}<Mvlkv;HdREuxwhUZYs_aI?s>}LCZ}9pNIMJSY@H+hFTE09?a_H);{<4$ z&f)&oE6_b;;8^uqhxmY+*$s!X;{XDxMX)t;rO}zf;DMzASbS#~U*SoIXm;_GxX8@f z+MLo9<2oT4HOc-`w`6H!Ie8SnlE-*?oEK_}k|%JnmQ=-)C;9LwAF>l2T}q&OXUaEW z^QiMgXMw73hVCMXk@?@pUq(`+Ck~aimd1kr@522K`#rR)G+wG=Cn^mR|DDBOMv2u) zpb|};>gW_6W66|mXC31&hNuU@oy%Q)RmxRNo9CW>)=gt*5IOCOZksZlS@IO}k*X(o z26s}0xUMbNu^ZxBUN)mGq{q>)w6Sxp_HUUpYZ?tzhjHcK2>xdAmvI(SSXT8ZlMivT zOXISINhZul*qM9*kq{bV(9eLGd00JX<u)n^rDRT7NR*`~wX!YRL330CC3y*nH@Pvm zJz-;n<PJk^_0hD4YUGB%XIVM6sH&cv<lQ}}FcD^PrLe|ZgBnw3M>6;~i@)qOT(CFe zASiLW^cDs|DQch56Wgr;kSRrn`EH230b>N?<1ThgNQ4yF5OH5b>oJb2>?^pK!G)<@ z*~PZr<y)ApjSJRuzsR|blKa~4l}z5#T7<10$mOD=fn(Cw{3JO~qKjCRm<_de#ro%% zQ$|gG!g(!LU-U?ajIX*5%O>oAq$JM7t~4|^c~p|ylwY+`xk&l@K6Vfhv}kl!X|!~p zw5zneRApEBKXnx=Xp?n8D&g$6@W8h;#0_VwCX%_w;eLO$B}17is?}rqwgQ<KCRY*) zOz9nK^VyRPHfl`@{IHI2SwbQztp3dY&70FL3;s8eZwWFd_J)Q!*xt{r@DRhYb(C9> zoSWCZ>i|&2lVhm3-I1Yga`%X7pnII|SUq%zJtQVN*55B2Y1S`y!R|ztoAvCrdq$_t z^&UbW&j~GQ`LDIA*{47SlBp-UdzT`$H;Hs}aH_w4edF#ok)a=V9Ap$I%}y#Gg!R?R z3gm5wz3+3_F)xVe?|aaRJ>1JN$5y;&eM#;^mf*%58sS*pk?(87oxPV-ayc(|llgp3 zW=XO_?>J+34jg5a#kb*)lc$QmjI13i&zA`WUXywFTcgqKFA>E9E_Zz_YS*~jbc7EQ zL>!H-0k6YyB?Ek*gjj+A5#B8g-?f!YKq2zmTI6?QU-UY1TbQJvFLALg(?H2>#t<<j zNbp((BDCNKEjP{e)iRP&0i$&R)^L?OT2z)Q)$6V0<#}bPv}l9*^mgZby?Na>6vHg6 z)7Bb;m#@Qa4%0_@amBpaPM0Uy7A6*Pb!*_;GFLBH<uM7(h?3Ws<Qy+wz(vnt;b`JL zZ^|??+hk_QT?<Hl)wj)Am_M^(XGPmd_CCjMv}FqfYYNv-l(v+r>|An<G5o1>DgNAk z-f>Av+K?godv0hLGRT>)-4~(f(Y;?T-Izpok*Bf^=6f2zB<cmcv0WieAx#qCqL5tV z<q|JF?MA{J2*1gV^%!#I^l+AN8#LOhY%~u0Xk;%|`_a<w_psKx1!V$X-7_Y{%>byE zQvVz`zL#<v$uAYtyvp|5hCg=hEdH`LaLM8Q!JP{verOnLWLZ<R{S}`clNJIcWY6K~ zZd7_pxIP3*Oj*p~62;0KZvP&}07Ur!+@fK->6kTR4F)pfW=e+zFy3P>4a2*Oi*v-Y zWMU0yFk6SwWjRZMxC$bU+gYW29Tt#)Q(*CGdY0qfsM{@dom(xFNdr27wE+kv90jC> zOQX<Xzc^;~6|j``c3O786kQ{_^#Z*~PC6F}1B#r5Kl~MFVoWuX8Ml&Qfx$qUTe)K& zC}U<G$3ilu?5fsT@MK(&$@^5CKM0KA9l*tw4e}y+tX*0GKJ6uj_{)_>9h>AXngm4^ zE(x%#MRNo>vDINFjQ;d^G{8p(VxaS;22(m~5_;oKkQ4H?j(WGRlZBb`!fP|n5C#pt zf49$GyD&4W@7y=}c)<Y4!RRwBD0E;@E5LsF4%-O!-|ZT)qYiNLbZHQxoo8i3FV8JN zkQZ!F7SeicT`D6eD4@15p9zmbvbPgIB_u&{VC3`zwQ%Fl^|SxZjrKvsUuxHHVpzqT 
z3|+G=Z_GT?ph^AkCPB~=-fEQ_>B>r4GgR$lywqwnQh=U8J?EMgfV>zbw^}DGacol= zW1eRHIjv^=-)aLo8ALEu9J%I<iZjABfQ@PDFSh~8mI0|4J@q!wS2*}APjLs-1)n=O zTWyIM!CjF2h<d9OWQgw<+v~FLMb#-X_4Kl&2G`^QE*)X2l7Pv6>~UTy$K(ZaTCRoU zZ^Ag$Mg_CZ60Q(JnDj=Q7hytRoyC(t+Td;{??T6y%*flbMRa|lJ^c?bYo_n5eR72l zDYqEq+JSEJ+RDYr*P*Dd4EPswDdg}NWDqW@<M@9T<C7<|X|F<@R{gQSaf(2UQ`=N2 z2y!Lks;1TKCFF)h45lI1w?*xTqYmU0EKmf-2##P)!o~^-&<~J1mW6}>xx<qcp8&FG zyD1q5nLu#;LFuq1^1OVbNqcrmtEK2g04mH6k}PCYsIyVdm<orG3#k_6F#Dmmq`1De zJ-P;!If{j9n{qBO7?YH)Nx4lW_DIR)wi63##03>O?~<^*n@_l`a*5s=mG}7aaKBgg z_xptxuNPo?PZ1kAk47)Th6dI%FY<x1fnCUpZ*7Z!OG5>ZJinm^TbSe+$WEe7Wg#fV zc#X5hkRWw{pN9?-qDsok;B-gOObRvS2FB!59Sp4j#>XYqa3OOk#6Dv(JC-lBraIsg z%>HiGikyU~?K`&|)4E<Rc;>X1FMkxx`DKmcYZXv>&*CMTn0xLG=%6Fi)j1Iu>w{F@ zu9*HnL)G+zA>YI7I$=V>S_K+vx}a3Ll!gIo$#ric4*V!r7CLuH?zY&V`Gh7i4b_=b zn}sfpj_F8pGQA9zEU@GOeFU!__@Ypnv6lrI0d16&pilv<gLNEBI(6ey@#%HJMow;l zLd&_e$v3LfUV+@zgzg<R4zhAz7g|iZ8i4{-80x3?E$F;DY14V)Os&8d)fZTN5@csQ z2_I6I5<fHbG(ZaK+rrF%R0Lo2_F8-@{^5zPY$l-ae|GBWiMQNWt~E{X?)Zxv;OLVn z0c*ZmUCyBs)avwm$-Fbu$`#Q@6E&~gg=&t3V+$i3a}V|o=md@|in;1X=Sqf$5}Jdm z7rlHFEmffa*J(kK$z=~c%-6oS7-}o#D`zzufI&und}%GfR87~gx`H6)&`q{zYh<(` z0G;959P|SWKs07-s%SWHEVLnT;Y9`ffG#yEA6sj<1?HAEizrY797D64C{DFLkyERJ zGv@Ib(seJ-Pn-?%o#uk;YJFt(#2zIjBY-4g3T)?X$GSTbTuXVcTGXV~HW5cCL3azu zH_CnJyrZZx<Wd|okr<3XzgOoL>;C{RO(BCl!g`ibE&#iWpMCmiM-;GG{8rO=s%*&W zGZ;2yFj=FpvDB*FnV+PAui%UVXqTK(hxXkZ{*;)^*^u*fY*8e!ByW(GBe=L7aoX2* zm)i|VE>aenfMknwa-PN3dHF%+B5@^!krHupm=aJHR=J?JCXGt+RVFU;LQUr+7k5*- zP~VcQ;384Qw<lUMN%>}(Z@95_Mh9s!!zUCF)IFOl(9j}TW2vt()+x_Ci`UV^<B(?d zLzbaLvtwvCl$g99D?JD~XIJTd{oYX;DGftmu(h;%s9&{7k&$u*s<oe@1@2)=2dRS( zAuv3nn_*&UcuVnsyYAe3ZXZDJ8FQA`qQf~VCSV&e$K$x>fccZrVAbS%KkZ@aVZ&Mj zo^#QLw!L?Zm<ygQE%vxR1(UM%CWTy@llM+sGfUX$t&ADW6Ii)f@sjIpTLM2&FdL_` zP^CzELG(s7z|I1x!0>!Fpt@5BN4?Wphr2KoEd33Gyw+MU*>ugKMhF{#1`L!%&O)q# zsnL{}YgZ|`ldT)C*0Rco<-)Qjt>1KE2?h_Rp#4L+%8!3F(#Z|@p!G^u(UzTqJ2{Ba z5iMXRow8S-DMxKOx}Q*CVTox0WK%dZ%K%PN^nyJrROLw(&o_78i8K5xXBek?w1YEh ztd!BKrEjk=;|Jg(xs8A?61o$Av-r#YXIwC8+D}S@iY<XRaD`w#&Zi?3j5*gzQT83L zX5G@2u6<Dxa0~YqH;@L%Cnl!wFO%Ne-_et8+8RI%cdA$j-^dr&pv?B4<r8M~XPK8P z_`3xiVAw7_Y-Ay_W19hVUE2)eaM*HSr-7{rh~Nfe3)sPeAl2-j0n+u{JCni(Yp#J( zQ1Z1+Kr;hojQM<SYDx?kJ{rI8do%`c!G;6V<ZcKWblx{JMVN2pLO#ToF3#lzOH=~v zOLn1{;a#?1vUz>E7bVh=_#8072H1iLSj#Y<mH10PS|UJwLXl>MGk}Ab?9&mYsW_I& z`;d=ls{zp|_J(v`{MyLIlRTFDfTCLjTo@0kEMr5X#<zAtk3r2LO3A~a*M&7PHv(dy zHq~LQT<l`9{vFIrVc%Hn49M_Yxjq-SS#jF+a=lshRXG`-_JEKRl24rllTP8cE(w;I z5mVttAw=%*cEabu=9r);31de}r0VhB*S@zE;RC}083&Ew2vqa;@9?@tv9@Vy=Df-h z@`K!nnEB4kshr?9ux?fM#csv#Z-UP+&Rkx6%{;-&_@;bmCYiZVo}aleXS<0dXk?Z# z?ywIwy4h_UIF>a>KqZVq15w_i`D{+6?4}Ca1T2OWrTGr(FT{M2b)s8h!pxW=T(Et+ z62epNDYk3&H3(t0ULw#43$wD3hL8L(5&fC%nak+R<@)V>^FQf_hue+0`9zbW2{WBX zaX}*%Rc3D(L8}Oi;ak8X3D{Ktz_ya}PE+QdDoz*45Ny~YTVL_sjkq6`8X}cO3w^_d zJRprt;tKTo1es!EQ(}7@G9$AE9x!806%b?Wg?D2Je$6t)0I2=Yn@^u(DwgN=o;{na z84t#=dGeKkbrc)I$l~;|NGUC|2(Xq_izvcNzQO8phW6VjW`lXtJqeL5xqOaVsK+S( zq3t}@(fx&w>QcE0OBIe*qk{X3NV_b*a;f>)qK1XK9d`a8<cPBD+_<Y`ojG7oPtxEg zY4AcBU7IBBaS~$2$!%T;N}AQ>5#D`{7jB3;58>jMV@*WgNunFOsbbqBLCm#+$yW&^ zj3{}Fm*30F*Llf!`8+S&^=z)p^={-Q=l?DRvL{e*^u!T}W22=*rE%D5(iUnzHagt$ zaO1;m(C7xj8i3syty&tU*)Z|~f|299RJclLTSHOzQfPw&wI%8j+Th>lgtk!$ZMgOb zZKLlfv{~Zqq?)p!xlI90hTH~;4RYHEwX05filBS~g8?I$7L_2eJf#2wL$D)Ae7fWa z*iCs69#D4$3=>O6S-^rvg$Uywh%oMPMY+Iw&7lhaEOhoq1c>o9a@`7m7>=00g!AkG zpTFpE;rk#%a;#^+3NXR>UfYu3CtL3ZkL($fI$s*-qC)Ycz=_2+bP0vXB0tz_2;;W_ zztQeyBY84p(`3rdf#3tnlR<vu_4J|o9hN5@MZT55=iqe_Y?`@ledvXwdq(zvmJW|~ zm}S1MW6EMBw=J@VZ1d&TfM^9o3!2o)0u#lu3|wy@8m$F9&kdujMWH9i3R_T&#R9;N z5%US|mB@^HZK4%9=vePyInqk)MTDH_`f}a5DfNJ{xo8>!vJgVO&}Fb}PLG5OEPN85 
zOx)@NW$7p8)*f0$z+b?`z&w*~s^Is*MbyPjLq)knFI<A4ZRQGHQ%<0*GSa1Y$q$B0 zUYgsby!QpYn6mG@w!0F#R(LQ4zd`SXb~jk(A;WP#cLjj(62`j#wvyk-l^i7jVr$X^ zkx6SlV>*%*XzH8y0QC<6x{Kkhk&;|RBH^{dIWziU3_@zxEo?;sC@)H`GtngBm-jD1 zbXY>S^iV9EkR1vnHd$i<lsH6MhvjJ?su^huM*or1z=xAI6A580@K<`R1cJyR{49E` zn?;-a@isI0{k(jh7fK=t)}|-{*0I?#2_0_SKv-;<3{|wk|3`U*M*7i1ltBhdBx8VG z`NjjXkXi75z_9>ox0V!FErgV2Fj(O;p|Bn%OAD<fzDy`e!pWb+4RIYTw*q6KAm+W< z;Y)!mJN#inI#~PxPG6Jq7>j&18v(s`coXnkD3b9d;vgUkKt01$J409i!t>(Y;nG_Y z8z@VdNbqC69O#yZ4F-movO|hpD1&7%kiCdjG4jD-5qyx2v*b6%n>06_%A{*y(O|*! zMmQ5e6CY+_>2E~8A?Z@mywe_nLlC^5LdC!hgy-iQ#a|)AaP`w(Q56$t;<cxixf#O; z8w^c!b3`zg{_Ih7w|xD~(RKRv2?WbK0n}ji1eRn#jtFI0lV`vzSO-vz6N?ZY#4G0I zsx@$ju+g4E#wacTqn~}|okb4jOi>CCzl0heS+$z_L^}k{aLvnmFeS-CZG06>=0zwV zGJ5Mp+jnU<FgCOw7L!nZ3{^24yPa9ShX?uefu%6a3Zd7)<Nz3Ja|*_VU?>Y+Fe}7r zfVgd?FqjoHrSN#cbd=d~k=b|o#O2idZ1El7RakLgQbF>iuT1zQ!W@8%zZ`r?CoBG$ zqt!n}J>3ecMcEB>H&CUjG;Nm~o?3>^nLafi%$SrbwN7P<Lww5RpxI|=7@y?vdE)bx zrL>`JCLil6?J>oRG-po>oa?X;r}$uO3D`uK4BrLuQ*h5x+5I~aCy${kGE^G&=P+u6 z6=X=D7qO+R&eBS|des%6$~nw507{jWM|2R4s+$~xR;;O%auITlM}D;#=iJZDJ0$Z7 zkumd)SkC0mzGO9BT}qvDR(bUZLar0K8v?mYRi=$hSkdYJx_dWUlgrfm)}yY&o!T5y zsQ8tyhlv@$MBNy%)S?h^Cj&^izm+KKFs`@7tbicvZ4>gQF<Oe~0#->%)%a@yKP3N@ z01V*F!<_p1xRWn4S=R*Y&v7hqC!rfT>><_@Hj>o5D?Mu7fxM1Di2Pd>GqXH%{=B|5 zS8X{bY2fvEdlllvCIcOWZn*8SrCvp!Cj+D46zx3(;lP6$JBAJc+JKuEHSClDhVdsE z;Gg4$tfO0>lwIZ%F5AOV`$dl+=qI@i)JzCxlEVrx{SsFLlOj4`T3vh(X(9V5@*wax znzwzMG6Ol|nB!SpuViqBKq@qB8;WjxTN)*a69~&f?GirR5w(Arl;OU-(?8r5wf~9U zBKAKp28lA=dgU*m+6&$;%#C)eZ`_BtwN)T37@BKK8*xs&$iM8KbW%eH%Ib~vy2eie z{&0~_ZnoFLBy)@o3qALJS1VAkIux8<YesHv`sE1J#B&qNB%Df@ph0$kC$JNi$3BXa zC21~#E||lv#?0g~<pxSX64b*B;HmKBlC_ipyPR?^qKa=I?o-WWk~T#MysJRiMdaOS z1o;iDA^F+aR{QmI3C`ddf*q_P-mr=dAQe@$3qEnBiA+MRfnKbivoS#zTQaQ^nXxsZ zXEWS$lmGg<<n7qM!Z`+KIrUQ0PlBy{j;}S*Y;1qny(kI=J-fT|BMnAb-u8VIxEci+ z2R;eic!J{LQ_EB=HL7N)Z?Hk=bO9%G<3<257W2WlcNby$oMQ|`;y+m@bD(rp#qVpa zItA*14=Sz-uINoS|7PFV6zaFAM)3&5g0arH+L&Nn74w93R|Ip<At#cuv9MBluzs>C z%umv`pm!(yO)^r7j{Tz|xxw@jX32YlrG*QMY!Z?`#=Bd*Xtlb-yC1?O5Q!xSD;L>D z6XjHMJHw>SU0jmSAbY^KQ5NGSI<Q|`&;3KYhIVfuJWlT`+6tLP!|y8L*ktsLh2#5s zgku}iPCoO}hZpbcWg!1x#w%K>#+rtfmw&_-+?wL%dH0H<$<ixK*v#*vK{<0ccN2mv zJeQM@8>BD^&ZIhk%QV#t42&G5P84P>{nF#WIzQAHPRF<e$}-mC+r(E?dF9fcCoru3 zVnDd#mLEhwW;R_d1Cm%{hKF)`PV($x7h*2_68ja9iyA#q>rhR79rQp9wO!h<X)oYy zev%-Y&HZk%GWp|t@ke<Xq)5|D*CZzkC_(|;GvScth!75g$}Ccf{?oaDxMQGzIE>FI zB=X-`{AJ(AWq^QqQUW4Cp?)dQi;0xMC<ShbvOg3^fnR6r77BB+<tOHp3y63%MidE~ zS|<g5J&*#oc!6&`YX1c<1@7<<cSh~MrEKvzTo49GfyYqwT$XdgW{?yZ2W{s0z6Obw zKqswX{%o)%wMb7v9%^@~l(0=zMjP4$_I>4yCq(ULijhG<q9_-EIHf9>GX=NUK$N@` z1ZL3I=Y5HhLHSa7ybn>5K?bHdt<SdF=V%PfQ+jf#vQMbA9?Xq8z}UP6F<C?<>}++^ zly%Yk<ZcZ7-bG$>{KKr~oLUBO6|4E5#Iwy^yk!M|p&qod>h^P-{6|n+w3F-kF4*vH zu$B|-;-6Rm`57lHrcV%W0!mprAO|ORmCF`*@1FS#)8R)U0W1vGKV)_A2T}!Y^ET&O zpTZBfQv?I^T*|6}W8n_)PJU4@*H|xpJ1<A+%a{u%W>&x%O!oQ-yh*h4--$c!c3@2g z!?tAvv6D^u<%PMVJo73})KFxq!k|=d5%$eD3qH4w-jvu&))?qzYm8rIW6%{oDvhC6 zv$t`_$!uN1WuGf!7`HIMg1yfUleitRn+LY@!Sx(uh9GXBa>K&EiokQLYp|(?hvW(5 zs1rDJ;}oh214Wg?82&Vz{u~AtRul`+%5H#aU!B1#AOu{!T`D^ePH9`Cw=B)JC_BeB z0)WCnpc73~JHQGRYYsls%0U$bI@a4OP!R!}2)LCw-!^D50FMH*kliWm7q-ey;lEH8 zJ3VP~V*{2!I0F%rkv)9a+|Ayo`Rg>i;Kw*=5m0NR<Z2`v1_m;Q@#|Q00P(GyVeJD9 z4N<!<I&3-d$GH92OV2h0e*Okfk3Um_y~DNrqz?xd9<J?D?(+jEfO-t^pY5A|`Z|aX zy@zj`Cp@8hmDQgal+_^yhh}1wx(}0qGdue^OeL~eOaz9;bdV=@eh>{IU*!v+JC6w| zyB15W2^2=#G#TeMkcZ&4+B)GVPwYg=)pa=8!Ge?f%Glgg8ip2!_>7h0mMX*}<nZQN z<zzq%@qC}H;c4ls#d&@vlR6RH2*gKPja4#MTjxT9EJ3l4UsFU3@=ir;1|XRY9>`}) zx;x{~1tqdN#4f^0;?2>d$2Eyw4M`=xFLBAd(rC=bou`qP8{~UZBnhYkI9Dsp>=ydN zNeKm+{i~q+-j?-zH4JQ0*qF|t90mnB%IeF_^HyzhuHbKMM?gim+dRwMRlUH9y1l<| 
zQ0Rl~jKGZuan0LPJ%ya|Dsf+O@o!F|>)>v?fZncfXn=YUgV`Wz89{K!!FVv}e^0Kf zd*3M<9$M(mterAOGaCX;%NiN;EsiR5+0D@%YojdKPBas71M2ywpF{B1<z=96z%O%D zu(AYG5oMQg?iLPxfdnAOrAr8(ts5S7nfw^%`J=c5OTqh53)cSxSvdtIKfD<DleP#_ zBZm5Db8iAmiTIYExDJW^<k0>~F#^>fr}|o1I_`7+M96_HDzIXq*BLHoJED~JDNwCy zZ<LZz7KLZdPf~GTm`rva!%v6?ie=^k*Ot}mUd36a|5^~rQX%4=WhRgF@&qnwFf+KG zOTG-*;Wc!MsYchBrp*gmBl&TrCYn#}gz+&~n%ICj$Q@%!N;?wBl+t3emGcRaP*#vM zk|EA<2mB??65>Y*7v^LyE`gIiceuVkRZdqA`?=sWxMujqN-LGN4vuH%HYx6*O?hV_ zYCqIkLd|@EIdqt9vPirKwd&im{7)2`PUB5<U>B`GM-aPnXl#g1{kv%sIy7o-Z$rC= zcR`&pioYEIy=bEW!5LzfiO=|Vq)PdQe;nhp_{*@G77{4L7%u#fXP{Aef>-|3k_%}C zIu=~{WLW!IoG$ko4IudhVh-|wmFE>+`6S;6BLTM3*hddRM#;VCZee&sgMTw^;vie- z-Q12<m`<^tbB(beHhrz)L_hHp$d}IY%!T@ADEZ?u{^Dt?a*~Mc)KDL~1_e@wi69!v zAprREIqQQh`jEQ*H%yw|DJr~Un2uWySUg+;4LFyIqj2Mte%NW8zk4!2_%}-Ff<uy} zCS(;sBwO(SPXFY)xCDkuoPvIJ=>16P_2p0T%m0*L;%}385y2vH6r<Plhr2PTG5pON z)crdodlIL<RopFZILKTy;4k5ZP!Ws~piUg};VK^G6KuHv^<X5!XBhh~%K4J*F>Emx za+w`AL$H=OK+iDPP7IB>(+~bS=Bc+Ki!0{m;61|p>WU>2ejv64Q$U3{iIDbK$h8L3 zW!(+P%NQ9r?|^$aF3kwY62!izmvY4`yDT$#r%2lu2%7j@gUyLv^hC)0zo=Jah_sDV zX)~CGu+8$cc0LIBf$yb7>z`vRIUk>t>PQ<0m^DzZe-^25URhpB6C#^u0olZRt{4XH zByZ`A9LnnE8uplS#B$TniDqV}SZMc6qcSiq&lw*AgTcxXFA7AXc)DG%mt7=J&DHKA zxV^0mP}T6{5k<A$+|nS&DyD-ol9bMk#j7YAB-WO4Znyes>bNAbKg<~IE)<d6u%JAI zfHC^Cra(^?XHKRJtN142!V<_Nl=yO)#8@9rI|bWIeiCmimd`W%<V(27!(1Jle2>q5 zj+dXuWs~^|WePuIUBxh9&J2USQ1FQMP<eC+D=Lf<FuApA;-xW?KY|;u2XF<N4O}KW z!Mf+7;s`Kq1AoR7^CT}!72%8u%LV^}28&1`KLXk;p!QfU!J1@u1+)O`W|q-G-x)IE zLs%r@iW6D1^VjA-x0-qhggOnnh8vbpc%Wfw<(gy-O-_Qo(V+1fE`@HznF)2;uAvrX zE{K8uj_U6284&E`a_tPX@n$XqY6@CuW!^J>yR#ZVKDWKz4u^BiJjuc50_{|HCP=h_ zhpNpFXJa+kFSR=>uU;CYNb26>!4R{n3Edu#%nzG&P7N!ZRdBvsF1<Z><*NN%N58K) z8*Z1jq1m=GO6pMpAHirhZ>vH{q{;pQU%bJ~H+k8~3)QkZ8^G}%Z0O+*FUDhg`#`lc z!w;x1-sHJWhU1!Nv9(}@tIgiv^yc0!+C>fVXzB3KgU}L>S)_;&Jg2ZTi@)qgalsq| zrp);!6G%u5nDGaJ+;tldYr<n9-vwp|u^GmYfcMOhxS~9^1VOE2PRwbG%ol*b(QnAU z2>ga1a18Jm`ty*RKd#gJ{m*G}q3(&)vq0%((jULZjV!KeG+UN6XuyfOW>^=Z);$^T z#VnFhbH-^>Kpc*qa2$SUVB>xJMr;R3$sgi{?haabgMIuBKFepb&eu@emI>gLrBV=V zun*}JeKt@LL7<M3pXLkvZJywt{1S4z$@wa4!Cedjk1ele?fuTFpo$40WpF-gzl8^s z@HJ&I%V_t9d)Yo?!a<ntirl}U#{fPEJO;1;W6~dl>>q&j@EX9>#qy1#6b+#NrY{St zvjXJF$Zs3)YHqu;AI8P(xxwlKe}OIzKM)$+lx3ri;v*ZMM{w6SK8-X@iVr<J-@7?} zv2gSZPLiAaVO)aoCfgz{8XR<+JKhBHSQ~^4<Xaqplofdx0o`xM;E2nSxXZ#6bE(Wn z-(SX$kAC!V*^x;@p(=to1HyX4yQ+@02}4wBpckoif>$c^C|F%wbV9bj0_8aMN!MV6 zOag4CLMXO?a=DcvU~b&G*UuzuI}aJZKk+6Qr`&+_jZ=-#bFf|f(4*8a%<Cv*eo|iM zgY0596m)vVYa7{EhLkS?tGMn#3s$0#T8IZIY;ltZG0P6SfbH}ja|j>8i{2qjW=ij@ zIt8DMBl+^;*%L2SxmK_}`0p(Kvfl?j3)TtY2d(iIT(9BYj8h`vCNjaT5BMTjt!+^t zBbi^ch2jKGg7$6CE1C8=ZYKlnB3>bfx?U;5k9#H1aOf?>Js*#5|7#tLiKM_FTO}Nl ziQKOPq5PTXO@tEF6_OW}oZ#*UC|2G08SbO*JSAR?X#w)U$u;DO;~)df0v9rL1Sea* z-n<@-U>&%=J!=0}gwuY$5k<{6@CfDL(ZkS>?u4+WV=@mz33?c6%j;XC_80jK6yNw_ z2`bDHY}}aqtV<5skN~zqSRxR6lyo88mgp-uNOOTleGW%2p@wflAq>)a=qJhmRokM^ zpw>6;M6XkcjNqbbS2X;>B$-h5YuMp=L)S`ts|~#fAcz1!g+-MK`BZEw@G=m@%2FMu z`W-iC7qR=h*{auI9M`V5)`2upzoXOFuv_GJ_4Fpu*QNEwb;3|MR1Mq#J0YK^rhb7Y zG3=KIQ|DU<j~tlo;TxzoX_YI4L=nQ=F?~|fG8*w6?RBJGWe^JGj?%mr2EeLIcVHyf ze0J(tSA}q&8k|LNGCl{;Y$a`fR)vXN=t*#lr$>%-P(e>Jo~2gDp>cXAy`rzD|H>U| z5PWyFbt_(7hqp0dAN$Z5AHYv-1rH&XGPsb12B*#yHo87L0$Oy5p2-jPGwJ!NB-vWJ zw7xP4B<q;(SL}CvU^9n>sfh~3BAP#|te_8|tAe!yqKGQQA|0t|vCqvgAwSaU!8Gdw zqo`Xf{&J<Up5`h)2BUKrTA}%X4}e6NdeiJHarGgd5pp{`o?9^$t@f@#&|SdZnWv33 z7s|S>bMPQuVUSZL<(eFgY>M|49W6^d6r$5JS|hR;>4M(R(>^UreEUGwMAktAvwB)+ zPvR!a$@s}k`X7sE;*PVFwUUQwGbqv++#C}`;SuGQ$RA^%2C=QNHlp9P64ZYi)v5n$ zT(UiQiH3<=z&6;N45>RML!2VfB>Am$pPVJqh82CGNR}|`5j>_LVzDjLjuI~1fqVMu z-0gZN;SwNX=5AM01?$&0Sp(-VCR_$=t$T*aurfg1Sx^2p{!RW4FaH@Y-{<8uUe59I 
z1zd#o*}zTyuK&Gir*;ryUsW4}`-9ux#F~I8k(;};B^QwA-{4aAGH#+nbS{RC_Yki9 z8>TN2bpZ$AhD`O?{ZN#_f^2x73!SoWWCU{Cj?zv$k7FEp<m=lw$mSApKyV&nd0>?@ z&6jQc@f-~O>)OjBnUFVPf2eEzQ2TnUgj}dnEj!YM0Xd*C*Qc=&x$&kROKBE9HWN~m zgyl)_A6GL*x-PkJ)>vLa5MIIZ29`o&vXl9LrE>evrE=5iNiWW8NMynCkOgd!ep|o; z?(HEK!m%j(m;AYay9Fd!aG4@BH>`yIw-VP#)C`sQzASyP3dw<UZOZ@DVnrbU&eYa0 z|4|8J@ER61o((UAk_X90T;K4}(%j_QO*vGnA@*bzb`v5hjbYLJ$WXE>5*&h65m#yW zN<qgmGPD5At&6{n>hD2#mA$kFdJ)xtH4x?58{3BZ(%@wJt~AXjsMzkxhjoX|R97<4 zyD)hWqa&l4^q1CCIA)M>IxC_}=WKc9qZQZ<HKDyYXR`1j9$_*OyW?A!63Mm3aZe<3 z9FQ+fh<359#LXvD1onczb8y1$v0}k?e#&tr2KEk$Dl1??;Ec}c=`^ewjw4f3L%7Vb zY$)dm+B^i=PI&Vukx7b?a$=gPrg>&gHa#^pHF2B@4j7r>o;6ZVSWMbRfQ!)y*H?tK z1;*OanbUFBtV1AyLRPw5XuE3ll{ySyf#>2#5iA-kO<5JDdZN082+|=1KBHHbZk%#? zJY{)I^edE*X|4=nCs-4?1y3e`=z|J7YAT*HlD$4xYv_9|*{|PSi^E1|9nUdJpdHQY zmJzlRgdAtd-{YR-7js87Vise4l%7RqWPtE}E_69XDtPnh#Cztd7WFyg|3YNKXWbTO zW}`u@IS*6lPkV)76avTLcn)99JR`ed%i8BXj(fSGUFemsF*cYk?oWC>9)bfcOd}dB zvnNsAP8d68u?=0RGZ)RJiK0S5srjOfp^ic9<Yeh#Va3+EmAjyD$`mfZUfk*mhJ%Mj znyQuy0h}C9%V9&=6188(Nym^6InVt!UB;>s;Ax5mn#~N&<nekFXI#}GKl`sBt+n8c z=qltTjeM*JDiw9kRfIHzE*U~6LnTmx_nHag2oqs_yrn>K&NnKe{huo?mt{`IJZ~%P zfEhgzcXM`i)NEtCH#1JRpaa+jzHn44!?qVYi4>_Yh~eKFdIR~XDTP&#hdA$9FeXM= zSx}5?4tj{AE*OyGcfLjhgrQieoBrDdFN_dVuz)-(4_|bH%TOYLV3J1l3xsLWi{WPV zLdzDL9yd{G4N0f-JZC={p&iq+IUdA?EkbY-QjV1?R(x(FXVu7uq-%k^R-~zJDrcZE zXbwub*qw1TfHlE86W-%gU-2%FUC#8C0W~)L8ANpikw=k|^nDdI;K9)>;=DO84rnuu z9*z|o#~A^T;!xmI66GZ@zM?&V4p2K9BzQ8@qgT2f3C9!wr0^fIn3FhuzGcBf^rPW^ z2txkq8?U@^^lL#blW*Mm)G_Ti0;!keDD)mp>~b$>8t74U_qjV}Tm&D=rtw|k8HE4t zxFTlx*&ML6_~lda)AQQWn~mI4_*|ktPr0)=Lj|BjU|xHWe2^70L0Ek;(1I6Z_zK2$ zwrrSSo=V40dw3j!A&K5%OK5|z(r&G<B{C=cd#nui5vrhtX5@Y$`LB2xuvt(n6&t^~ z>1^`LNNjb&U`-bJ=|AKnZVpyAd2>LD|IIME?mTXy3AlX%rrnSK9~^oBceI70x^FwI z<VN*3f-vzT+A=V7JT?7v7d@=7@>_$eyaR}j5gwbxUv>%?2&~rxi4k6c*lN<ccQ%6z zP0$sRv^E9qULe`sVMv$ED*Gxf*mm-42C$C{7&;a6lK^0WgUqzS#Day#n%;Hr&3-Ia z5~vsj#Y?k(v1wLJkiLL9nofSLJ}<b!F!nW)q`}5H8jzIy8h!^jd_D}+@18@sQ`k4R zXEl!jYKfXJqa)l|f$UTEXx=~EgsOjLa03t?J1lN1aMg|t6965;P*3P3q2+u+6RxO@ zlNix0m=EYmg5&HXL89NrzVN;60Gv!<(dV(x7uUC*Q1>uofz0^(;})3al9LAl$P}vj ziGbTH;0D4#Ae8=&%eRBc+CaQjVmIOa?qent5ti2Ljlg35D;Lgt%+44qgmypE+!X&l zM!N8=1v4nG0n?@Y39$l!IiO*raNsOH3%*X}wC1hGXt7|(l~5~zUL<>vZs45fOamnl z<?hW?SMk3mpXz(}f;I1jq9TtClTY=}u_}F29h2EYx?zR`MlgF$&T|~@L9G}tgr0f+ zCqCdjk13TpQSy@+?x*Gl*}@K<O8?%>aE=qmf*)vtQwch7f@>4tjzTP^xAc{o<6Od6 zu+3x&9cHzWuil39{x5j3$rG7+R4imH{0`^wHvEq6f6!(z=9+9aIEx{%McEf|`GXRu z?h1GP132IrVpS>n5&V>ORC|`II97Dr*h0>wT~8=|VZINQ7PxE|OE=~_#47<P4k-X? z6Hm}odcu?swik7Sby*XcBUmF*J9LK@KznUDFb$Y6&6i<t+`P_BsKaQ;Z3_TG9ix1> zG0VTNHmHYja3C{d25y(_zyhPmB?&R0ewlz7oJ4T>*6P$*=7r5*CC=8DREoJCstgCT zIiV713i?4^Fz>ikS1{&~Lchh6|CLe9bjJPx^I=CW(t~_#wK59eTWnj^e9N2fBz?}V zv<v`Y-&wub$P2eoMI6$AXSs3b;9dL-s_2UC3Y9~`Re2(gDArfP4S{@>Wofp;GSFYx zDA<A<DTF%|N2P1;t9KQ5jGyID<z|O}9O;aBk?~?{7RNkBFM^Fzy~{-NW*e#5OHIwJ zbXc<-H(aM75*Yj;^-dWMS9nn_>r-igZm%|ctQ}^T{BvA1wKv6|n+j-b8>rw~Y@J0T ziLk%O3C=kkmBdf%D5QjwI+^FCU`=E}fkZ*-R1<benKLT+4W_`KJJp*hHidM@kmga) zw@5FwKhf^-;awv<Aa(0d?i)p+XO^XdIkz>C~QLfi&ihUy9IA|OgY%Y%)BY!;}! zfv?lZs0$GL=fUC-00QmB*}1+3JDN0(WlaM_lQ!7exC<a7_~;HB$<+#Oc!$bve(dqT z<FPwgeX*UUQ)Tx9U=(?Rgs9_Egz}rI0wUm;4yJ1yQ_=)Af!_sZ3P5!~33~B$539tJ z9Pv6-)FvmtQ}qgTA6g13MDqzHAt6Wlx{!AupQ~9O7ad)NvqBx{-G9u^Ym1AQa!A}t zuK>8SdU%N4SHMdq?q5#o_l{(H=&UD`E-~M{%)0l~o;pti{2lBVmv=rih|Vy9roFpD zUHxVRIqoIp1}X+fRP8{_vVrl&m~P0!R3e@?g*dRUmR|@NF=66ptzPZ$$oEqM{Eku~ z<m=%-;J<lC{_w<V<@O23QFR>X3pfrGV+vpsmd~TX+f&cJ@U*K(n!z|F&0A%_IoLpC zmwEP)!I0kdoUX5CgV5kXhv8|d0d<JT38M=lqi$kZBWU4~R;c2_L$S#xf#VKnU1Wz! 
zaeGyZ;jL=;dWNzQ192&Z_Rp$NJ7%!0PG#bfH8`tK|6m_#@pPS|P1_xLLlpyHa7Iwo z+=MTDKC6TixhNhdyTGJCN5J6;TnQ+)R|^=tRLuRLu;P>+k(hET2`$GmwM1P-L$END zhxe)`0G~;3BW4}Prdb6A#pa7JS{;t%&|AhAVMjyT6)e8^574)RlOcm$4;fj(;fg)2 zi{79vFZ2a8W%hEIFEF6N7ws?|LUf*0i>nTnI!t!}tuM5)**apUr|2IJTM!-gy9q!( zLoTDo0~EkB8>|;(rfxz>T3K(>=EMt(G@&tz9$y2O7^lgjc9Z!DzIp$|)Hvn(3+R?P zRgyo4bxw$`1c-u1nH5YpKV=^xr|NLDY0bBYGF<4jo?k*6Tx?%j?-Wj?$#+;zvGe#p z`1AkdMOG*Of_LA?#TI|uY|gsREfA^s*?brB7~Q(YOR{m?hqpwzXIb1o8pf6at9l&R zfS6~q00usX4M;`EZjTKJ9zqbX59eh(k~=^{aKNkK=;a#~c}W9>_(lmr0<MNj5IV-b z=9_{kAWP^ctSg%1G~LL5u3#%9bBb%Qc>tALqJ#KMHWhe)gzy;K1`GAAUC$N3tVH`q ze6l@y89AbZXp}J|J9zg4F<+3`LH7$mU;HnbaU5jc+m$b~*%7@A6Aj+&ie4rT4zf^7 zfZZ_Hu$*bjfjfqh!Kbil#1G`Uf|kH3>-Z99>JWJyhN&Zr9$^)77u7?cv|o|Y%C}3f z3PH^QQt^0oG}(Pay-$9jgakOq2d18ECQ%5g9f8&w8_+0{-i45}EFX5CFb#PNwkT$f zGN8Ul@s2fO?G%&;#x+@M$o<?eTbo}Pq6O>Md4*9Zh+klZ(KPz;7Fu(6lznfA4-vD+ z9-^>4QTE*-S&{C6@y>4iSs5_1K}-FWVH(Al0nd*N^A#fZ9Axfmzp3K!^ALZ&g@Ry3 zW21U$7@--iAzsg6zPfghub2kq-tW^qH;nsh2*e|EAauMl;5i&!d!Rtt@gbTWy$hk3 zl-w5`LTA*F0wI;hN((q_gBCbO@`JGCITF<#f+Y_OgB~_Zp8saJFU=!8X=usQlMi$3 z%?g!Un0%CT`@bv%@ed_^W1(9rr0*0;L9pQohAaW5z`M(f6K)~-Xa#kzk=+zY-Df6I zDwpsEiRwP(rpW3(GZkB0J`vUf(Zn~)C&KzC!g??)K1yLdoTj1Z`Cg>;@Y@e3u7^4O zha|5Lww|FOb79z`z(mrQ!y>-Xsbk~Q(Y^%RhaKBboS2(GQ@(hXN9Zo@*q$Aw02~~r z159}hF__GXW|~;iQ-BqBY)4X_3f`AN_qQ1(ZY3tp*^~Ih31k8>n<r5Ug1d0;!rTcg zsyqrzhB$Cn!*ab77oV-!Cu9?NlvidNJ|%=UK8w8$&X`8PzEeS+`s^KS@9|^EA>p<o zqu4*Fj&*KN3Ui|5zvUOSSa}EYA#GY@?EjG&4G|qKjE0hb#GgcaA-;qBEN!Il|LDkW zqJynZ8@+C)q(TT{y`db|E)E+*o}pAG<}XJ8c&<RqmJ@?s;5R&3WQ-)QwPhF$IU5gi z9hRy|Uc>(bJ~QXHf6TjI<%KpQ$*=SBPk8z7c=@Ni2oU})-W4WB>=S9HWH3ZI>%Hz6 zRZ&V?M84Gq`8Y}b8FT$*T=ZOS%uJd5-+WGRxv)w~{(Jn~+}&rwI{rF$o*aA8<K8HV zG3oE8&q8VM&^UY)#!A~@$g&qkN83xoLuYW^1uupp@M}1Pr$`I01sW;A;ANP{t8Is& z5=@px9@agLVE8xKpo!n_(*bNd@+$;zbTUT^|27rzei&oyvT$x7HO>HG0IV%BM(fx6 z#kL`3Q_0~R-=-Yq{krpmqpTP#iw13ToI7(WLMjHs*Mw7hNHY|hXJ{u5&=d$Lj>Z7s zf>}WqfR@9OiacVk!g9(npPOp;4s!GKxy88)C(fPmTZK)Lp1Z2;#YCyEK0&(k=CGSj zgy-MI!ZT*`hlQI%7V+<c1Pi<5-#vh{Bn{vsW<|Uq3`{w&5EOpLab&KTz_TxI!D7A* z_fhO%WXe>OzPXal;xGHF5XFEU0^($3uzpuUJU19w0pGh^2>~MmMYRtNMz&a+!k_u7 zfRXVCzs<W{pfMp}n!yYWb{VwzB;O%c>K*<pEcn|c;?sr~{vLVw0kqTUXKTMIe?$B< zXFfR;k~)z8atTrw0;Cug2yhq}AMrx@7rHb@<o+(?7q1Gu5I`Nmqo9%G<Ore-F9bk* z=q^uEN6ZR5Gf&4I00;rG?lM=u-3Ac;CBnnqX#lZyr6EArlZH9sZ1%2XpmbqUMj5|> z4n70P*#{A~HUB;6<3q`T(g$S+XVH?&4N!Q&ITm^2=(1ca2-oX=XzgPwB2Xt5l3pyW zUHb4(bl;!ozKfxuyHE$ZNxE+h#2%CCMX2C?+!>pJRD1gQr!NL#D9OJ9_Xc|eADBBo zw>T%?BH;z^6A>_nqB_@tCvf>F+=#S|#?llebR#lEV2yXV3z2r!Y?^o8m0SJAtAf2b z*SB8F(Vm4n-)IXjxfgrs64q53r<?cx!S21ZTzKH%)9=u;tF$NI_nKSJ{NyRthYNPX zn#m=qT!vppxooh?b9_bnujF}NVqQMY%V%-vJd8`vp)w2IN1P_7nV8bRpv16?owe}i z3S=bs!h7B(KgBQ4^2;N*^b}Kk34;|OGO(V(S#A}@G3PpVX723F<@1Z>`O6o|$=qUc zZf-$(eE%Fnd*%mZ`|qEeo|*mP#j|IV1s42eUS7vVUI1rlcazlk>?e7-$xE4UAHe0J z#fP80cxkQ-a)>6*Tqw`aT$oF~!Gu4`3&W%)-{R#ny!<I%ejFFYhwQJN`9CE@VUlNg z`7SR%%L{ENlV9ZB-{9q!c=?;W{4HJ>8Y207y!?;6{I9$Sm-@%N``>x_O<w*bFG8OF zE$=v{3dl!yn_+Dzxs}9a{|PRmTSkZM{|v<r4Cnc=E#o5y<i2C<zOiA(uy+3r4<mBL z_<g!=9UB3v^T;Tk?0#wN{xSTEyXSe`S{gq&T-vdJ?2%n3#vT}d1bK`i(EG@)vqX}{ zk028E!SM&ij*RUY+s`L^XOPpC@$KV>hfBK-jXk7PPwIM0*T>;RMRA+F4`7vvu|I*E z4x~)TX|PX)Xs3|bE{>ke9~4IqLMZg)l0(~XN4GCY^hJz42Ec?kT8hqm?WO1~NT>*O z3psOy9QFVnx92$T82Sh=kQUYeu=pOe;Cv&|jXD1JI+Xh+A2Nv|SS-_0mI8RjxNMjZ zW|>XdgYl{rv#>Bq_VHL{^bP0c<h)FG&ZJp=h0d&I_yqfGZ-^w@tK3{{solDT{UNQ6 z<fx0}7Sl%%5Z+)lsW@MDnTXRosld)X?xpD5X48qFMm-^ior(J8HqP)vb_E;;OPrGi z->kQFltxgA;HV=9Zpm24YLC^9p$^V#spHC<+mZ#+=Y%^GH??R8Q?SyTeg#H;N0rB^ z%2KuV{BtiDK&4pB8IF{yHr8R%gqt!Ab`cC|Ip&h$Y@7<StX2!o`7|-B*fFl=G@R1| zo8?xuF4M&P@E8?qCgb^Oi>gIIJ<v8r#|>;(VV;PNLCA|%Y{y-9^C4!39?7d8oDSuV 
zfXPXMSsVM*4<m2XkNjjb<zrXVmLG^<{@I3oX|%c)E2~Dn**wK;IdyrL{4x`F))7!6 zbO9w{*0YA?%POr>L_Dt|XoK_ov{siftZ0<&nfo?B84CuTb!<eOxtg^$MI<_o`9AiJ zXJ`N!URrLVOqi!1b(6~cxQ;fLL75`Sfp36$fVW0RrR{k#JEdQzBpT^9){b6l9|pQC z^MdfBT*_QuEHc6??fEc6v?}R*5ZS8XV7+!H<EW4i^q?Uic?JbE>Pu32tT%70BR*Mq zt+Ae2*zvzWQV-&?!6;V_qOYa{Vic`d($rd|-Yyo8%~&@?;vCJMt1z%fy}w6!hr(U7 zkFh6-{XYP0z|m6j*GTacE}X|14!7<O9Mcyw(Xp8-?cm8ici?Xpf7vhKg8qU97y5;! zR*%y0hHeQ8a)h8mO&|;4|B5~Y9*^aK(1Xm>z~ZluMC~dclDF&B1avYCf6#6(f~6zw zQ54<KSK#p*B><nd)qs!!ibHOSW5hT}D9f9#5vhQ&8H$czVLwjtUqgk=Ny2eGG6O$- zp@8r++9m-RI1Dr4gyX{I5dDMUy~VCO3y!tQxD+*nysS4Avd(?a&^nlfn2Fu)yNne< zUrxtObt2D(CyeLELWU<J6b#>Z>LSVz+ff(_mgrZZ!lYh{QTIv|tV20>;>W>ql(WMX zg|~1@^E%?~iC0_50_r>Xo!TqU$;|7)OL=SxBbGP>orG$+$$6})Kx{7A^2Mk^wyb&u zit7TeJXwLIdKWa37wZ?Pp0N!haHudVZHrYjVOkjF;(M3<99VOM%sl*^>-brhhj}Z^ zTtZ+VFhs`6yVY86)MQ$W<;#W*ma?2C(=DRStd4fh4BMmN$ZQp3)h=?&RrqPKU+t^} zj?lte!iPq+V`X4c-e@&fbbbYz8~?fW4SkZ6;Ih$KU0AqCG%fz^s2o?n8J=NZfK3yA zoo}^Qyw)j>&!o#0#bW_Q6E4`ITk>_JxK?;afGzZBRX@8xTE3*1IAzOs9<;GAf!9XD zjSP#$-k)<m=#amh=;1DDaknNoF(x#G>`;KbA%ZaAXnyq!niEYHo^ru{^-qfS6iLO> zMls3yn!zY_fIL7uCm7N(2d4QboJ%C1uVJ*&_};f5JIdB`X5`q99qYq>*A#G|AO}a^ z0LT@IPRz_}Sn<*&CMC)UW-~K!6A4X+6Jn^>-mnKWlivF>;aoks<QEEnPVPJ1YBf?Y zR}4NsJe?Sr#;&(=hM=nD^+Dqy?qq$rZo;=KKpe-l8eW)4;>FlPepBu8tSJ0>HwKLm z3`mJW-9@v%21y3pnJwYKv}J@NLMItbo)21<{d|ti&mqf_IW*xJQH=L|10l)Z=DHiz zgSIQ45i<0t@=XKYWnZHN`_cxfm>q_Lt!16oAUTpWJAtulybWGXI6IR)+++U=mQ{<x zGmN{V2)@IAUwJaq;VcQ|h2UJ4j6Z)K7sakr1c77~KP4euYt@?uMjb@XGQof>rr1<z za)5<tkNyVJ@5QA@oQA|_XTE(2?@aFEHuY+)r+l41H+Zay+)WnJXuZ-ysgtfi@UKu} zV_l*rY287IZGY*}p+`#(mn6M@iNX7-e9!+OadaAg*)O9p(kQxuJut4=UOyiESBl;q ziLPJ=JQTf+w-ADcbu${JTcdPKl*0WGZ)s3px&mA4QN4vEwN*FUm250Z5v~!*cotjf z3e+-VdW+cg+jX-;$;P7;me@!JBX1;w?qytWp*h*1o83ybD@u1O8G<<?nba?&4F=`A zmF#{c+Z&}YA4hI`lnkcld-Yb48JYfolI@RDn4TlqJ|)A!u=|w^w&n+Pb6Cj^Md@KB zgE>12eSn7S%;Z6ut@GxHl06uuFa$?#4=C9~QSCv!eK>-xIg>r2WDiFv%*v7MAtj4x zf6lj%e;?7!V@mdDls=|pv67)*9@X0?qS|A+c``~CCZAv@{vt-_EL@g4?e!{f2_LDE z6A?4euO|={0>5rD4g80ZQ}8)_isc+9erb!0RgjM5kjudFPFVOYZau&c$bzsc*a;UV zh#@a}?)67!L>|3Pc%Fxmz*-AmuHGrj6YZ;AzI<PcPsPu^KyYFf1g*4S3&T{5={%TD zn9W!+Cj|L&+;e=$vUCF9R6GNTLVllKrYus~7T0q}Y&rR=sYdZoKs=4CvOB9wtwua) z|JHwqZB@Z??WnjN&&;2R8M~VFC=i)B>-9)`XwHiXvctie_9+O3Tc_;h$m>_x!mXy? zYhz&p98rniM9+3^(G!sCLGh^q{&7p;M6cs$WAnnC3KCiNlM`p#_0A8ydA<U5Vmcv* zTI+v5ia-~=-K@jubUS@wTHieR=~??K=8A^VV7`QEH3Sd5?qvHs*1-}^LPTZ(3H(Jy z#I4~7{$@Fr|1d7}>H!7~%TnCS-2+=Mo(w6Ubvqdn+I;NL!0owpyd2$rgg?)GZJ#hd zyxXD!r18(oH%<!A;|C+em-)dTjBY>5pJ%>yK!;sR(aoc^6)3)R<GMb<e$-As&%QDo z%rG=9*MD(T)WHRADE3paT=nIx5r!^YL@%vX&~;vTU~9&cPA?1BT;;|5;=vI~&C{cM z6Ec0R_4yT^J1HP-tzEx~AvC?b-e`Dfy8}VNX4gQ92&h0?l}CrT;rcNbX1e7SbXm^_ zMgE$VoYT%T72+x!1(Dv_*>XA(T07^>)l|j+pX5vMc7@Uo0H|>tCrudhF}cLtdno_~ zt!DW#dMxHd;f9<S%e@f9^t@`uNAggZm_|Wn8qO2g3eh(_p6ua)&WSM`&nJy-1a7bA z>+r~gWaq{_FhkkID&9Q;xaZrLpj0PKAc&HmP{D+W8R;iT{7##H#rA@^vW`PWbg!$O z)yCr-vSgnefk8eFb|`kD*cY?2RseB&KTyTn$SX2Kr=!#HBqw#4Q$g%8AQx+CwZ4p1 z#2>Myvl7{!@x;uVrzc!wQWqwQMzKQIiahkQ7+)49ikyNgjikNgxADu)0DJ~_W?B0r zWj^W@Y&;}S@HZKaehd2Z71>+~Es0Uc9+WIY?+D71F^FL_x7{Jb*&TLp3&@EmPm13? zh8yr1usLWv?ncpdoNp_yHatAMP(uJ7{>jaCUI69|-Al~Hn9QU&R2&W|N;kJcQsI@M z>{tX75x<*e?bx;v41~AY;bpALF#c2@nsFQdMg?9jXiq+cY8!P-Zx+|=6wR}HLH0sG z_6sDN<swwb9IlF8MusdN?5FyTf;CjxNguqU*h032HV5UWkk!5IDJqoNQ^XBh?azMT zZkq1zrsPEa425itd$h<!4;i^LMfd3N@xGq1w%n}lNr+#6R2@U<Mjg}Wy!wHAMFg|) zjcW0_d(bPQmA%sFTnM^^h6nv!f{=mvO8ViZOLiX$aMtlBR_o1=s%yxr*fr)iP`glX zE_~1)>Z<~>zQ>&<>d{#R^*<2w*-@0=*JnW+-<!Ux-2R>FyUOkI=JXw^@X_~O<u<Ss zze33|ZsWeI+}^alqcU!7eF<{e)wK3e&OE9XIrA?0!ZG(C`Ft?ZBI~clz3)NQ0JATw z!Hu8~If(lDu&b@_RWE*YbFjY`E4OcLRxf@e^RT}cE4N|Cp3lU0>%~o+i(CU`Sa1R! 
zVChHBM;B<*aE>Lz7XF>*;c-wJYVL7r7Ir31ZLhP$2W<efLiSa#fxP4+dT#|Azg8Bt z{L!M8wBa1ga|3cWZML$XgP3F=^?G@sayzlZCH{EP0)w}5AEKZ8D)<rib5YBmC~CQJ zKL_M)RzJ(Qx7Dt}pZLy4)yEuKTvac!*wc3y1oDIJ)jywa`0~B_z1RebUaX;x-oHqC zZIhlohN|=}pRPH3x`==fzBKrOrox7$tU<v9>mew%6s^~wWSAG7`PzQjl;W5KSdYq@ z=m&9(*CVt8WrTj1i~@`4u`)s~Bhf)1%m<NSQ}EI?vmzCA14!A<R+vwAqUnUwOw$lQ zk`5D)x%rXF@%tg95GIvKImw-HVH=kGKlqu*(Vn|5fmNpwy}5R|T`vNd3f;O<CU~K; zs<N!EI>mzz#ES0?aoH3TF=91~Es*ik?51&MF7NNjq5Y*j1+az6Sx>NDIjZG@%<^q= z4%d5zSZNE(7ATkhdNH2nCy%n^-1I&fVkfXl!t9dc{5hp(ybwW5p62BlUWkb#&+_sd zFH`*NdEUK%i_GEO#;T0XYw{xBe7f%q`0h)5^BKPBl34ZCNxu8+fOq)rWxhMbce#)N zn+Pn`$>;dy75@fojAx+GvL>0~o722-GONv#FW{~~+H|)WLnC<{`Igw&SI{z}J4c6L z4gS#B=-4oQB|gBvTikd4U(07~Qc8RFoPgnS*-#-kPy9N2gtIBx%eU-i`07vLXw5WD z@WX|vVC$V*<>`1jWK^vQmlFl8KuTFbDuk`G&a-*QQgsH7UJ-Lg<3l;IT&bow)(=1* zqu$Cs0^<&5e4@_I)*yu{#(u8wxA`>43Speoaun)?5*E%43kf+}vW?Xm<7GQ99N!69 zL&D`b`TI)33oDm!S|%J^2_>Fn7cV4Hf!Gt82PF6NvX7Vjyd2<#W~e%#Ghv_=^L~}z z!x0vFA1{yba+DYL_B|LyVs$Uu;jo`O2{JP9*=Tfl6!9wf|KZX;!07Nr(3ST`^xWR! oIy|~{bQGy&`!G5@wtH;&z{orO|H!|K?En8}{9)Qlj6ePV1C!y%hX4Qo literal 0 HcmV?d00001 diff --git a/gam/gdata/sites/__init__.py b/gam/gdata/sites/__init__.py new file mode 100755 index 00000000000..e69de29bb2d diff --git a/gam/gdata/sites/client.py b/gam/gdata/sites/client.py new file mode 100755 index 00000000000..4c38722f87a --- /dev/null +++ b/gam/gdata/sites/client.py @@ -0,0 +1,461 @@ +#!/usr/bin/python +# +# Copyright 2009 Google Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""SitesClient extends gdata.client.GDClient to streamline Sites API calls.""" + + +__author__ = 'e.bidelman (Eric Bidelman)' + +import atom.data +import gdata.client +import gdata.sites.data +import gdata.gauth + + +# Feed URI templates +CONTENT_FEED_TEMPLATE = '/feeds/content/%s/%s/' +REVISION_FEED_TEMPLATE = '/feeds/revision/%s/%s/' +ACTIVITY_FEED_TEMPLATE = '/feeds/activity/%s/%s/' +SITE_FEED_TEMPLATE = '/feeds/site/%s/' +ACL_FEED_TEMPLATE = '/feeds/acl/site/%s/%s/' + + +class SitesClient(gdata.client.GDClient): + + """Client extension for the Google Sites API service.""" + + host = 'sites.google.com' # default server for the API + domain = 'site' # default site domain name + api_version = '1.1' # default major version for the service. + auth_service = 'jotspot' + auth_scopes = gdata.gauth.AUTH_SCOPES['jotspot'] + + def __init__(self, site=None, domain=None, auth_token=None, **kwargs): + """Constructs a new client for the Sites API. + + Args: + site: string (optional) Name (webspace) of the Google Site + domain: string (optional) Domain of the (Google Apps hosted) Site. + If no domain is given, the Site is assumed to be a consumer Google + Site, in which case the value 'site' is used. + auth_token: (optional) gdata.gauth.ClientLoginToken, AuthSubToken, or + OAuthToken which authorizes this client to edit the user's data. + kwargs: The other parameters to pass to gdata.client.GDClient + constructor. 
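+
+    A minimal sketch (the site name and domain below are illustrative):
+
+      client = SitesClient(site='my-site-name', domain='example.com')
+      # Authorize the client (for example by setting an auth token) before
+      # requesting feeds, e.g. client.GetContentFeed().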
+ """ + gdata.client.GDClient.__init__(self, auth_token=auth_token, **kwargs) + self.site = site + if domain is not None: + self.domain = domain + + def __make_kind_category(self, label): + if label is None: + return None + return atom.data.Category( + scheme=gdata.sites.data.SITES_KIND_SCHEME, + term='%s#%s' % (gdata.sites.data.SITES_NAMESPACE, label), label=label) + + __MakeKindCategory = __make_kind_category + + def __upload(self, entry, media_source, auth_token=None, **kwargs): + """Uploads an attachment file to the Sites API. + + Args: + entry: gdata.sites.data.ContentEntry The Atom XML to include. + media_source: gdata.data.MediaSource The file payload to be uploaded. + auth_token: (optional) gdata.gauth.ClientLoginToken, AuthSubToken, or + OAuthToken which authorizes this client to edit the user's data. + kwargs: Other parameters to pass to gdata.client.post(). + + Returns: + The created entry. + """ + uri = self.make_content_feed_uri() + return self.post(entry, uri, media_source=media_source, + auth_token=auth_token, **kwargs) + + def _get_file_content(self, uri): + """Fetches the file content from the specified URI. + + Args: + uri: string The full URL to fetch the file contents from. + + Returns: + The binary file content. + + Raises: + gdata.client.RequestError: on error response from server. + """ + server_response = self.request('GET', uri) + if server_response.status != 200: + raise gdata.client.RequestError, {'status': server_response.status, + 'reason': server_response.reason, + 'body': server_response.read()} + return server_response.read() + + _GetFileContent = _get_file_content + + def make_content_feed_uri(self): + return CONTENT_FEED_TEMPLATE % (self.domain, self.site) + + MakeContentFeedUri = make_content_feed_uri + + def make_revision_feed_uri(self): + return REVISION_FEED_TEMPLATE % (self.domain, self.site) + + MakeRevisionFeedUri = make_revision_feed_uri + + def make_activity_feed_uri(self): + return ACTIVITY_FEED_TEMPLATE % (self.domain, self.site) + + MakeActivityFeedUri = make_activity_feed_uri + + def make_site_feed_uri(self, site_name=None): + if site_name is not None: + return (SITE_FEED_TEMPLATE % self.domain) + site_name + else: + return SITE_FEED_TEMPLATE % self.domain + + MakeSiteFeedUri = make_site_feed_uri + + def make_acl_feed_uri(self): + return ACL_FEED_TEMPLATE % (self.domain, self.site) + + MakeAclFeedUri = make_acl_feed_uri + + def get_content_feed(self, uri=None, auth_token=None, **kwargs): + """Retrieves the content feed containing the current state of site. + + Args: + uri: string (optional) A full URI to query the Content feed with. + auth_token: (optional) gdata.gauth.ClientLoginToken, AuthSubToken, or + OAuthToken which authorizes this client to edit the user's data. + kwargs: Other parameters to pass to self.get_feed(). + + Returns: + gdata.sites.data.ContentFeed + """ + if uri is None: + uri = self.make_content_feed_uri() + return self.get_feed(uri, desired_class=gdata.sites.data.ContentFeed, + auth_token=auth_token, **kwargs) + + GetContentFeed = get_content_feed + + def get_revision_feed(self, entry_or_uri_or_id, auth_token=None, **kwargs): + """Retrieves the revision feed containing the revision history for a node. + + Args: + entry_or_uri_or_id: string or gdata.sites.data.ContentEntry A full URI, + content entry node ID, or a content entry object of the entry to + retrieve revision information for. + auth_token: (optional) gdata.gauth.ClientLoginToken, AuthSubToken, or + OAuthToken which authorizes this client to edit the user's data. 
+ kwargs: Other parameters to pass to self.get_feed(). + + Returns: + gdata.sites.data.RevisionFeed + """ + uri = self.make_revision_feed_uri() + if isinstance(entry_or_uri_or_id, gdata.sites.data.ContentEntry): + uri = entry_or_uri_or_id.FindRevisionLink() + elif entry_or_uri_or_id.find('/') == -1: + uri += entry_or_uri_or_id + else: + uri = entry_or_uri_or_id + return self.get_feed(uri, desired_class=gdata.sites.data.RevisionFeed, + auth_token=auth_token, **kwargs) + + GetRevisionFeed = get_revision_feed + + def get_activity_feed(self, uri=None, auth_token=None, **kwargs): + """Retrieves the activity feed containing recent Site activity. + + Args: + uri: string (optional) A full URI to query the Activity feed. + auth_token: (optional) gdata.gauth.ClientLoginToken, AuthSubToken, or + OAuthToken which authorizes this client to edit the user's data. + kwargs: Other parameters to pass to self.get_feed(). + + Returns: + gdata.sites.data.ActivityFeed + """ + if uri is None: + uri = self.make_activity_feed_uri() + return self.get_feed(uri, desired_class=gdata.sites.data.ActivityFeed, + auth_token=auth_token, **kwargs) + + GetActivityFeed = get_activity_feed + + def get_site_feed(self, uri=None, auth_token=None, **kwargs): + """Retrieves the site feed containing a list of sites a user has access to. + + Args: + uri: string (optional) A full URI to query the site feed. + auth_token: (optional) gdata.gauth.ClientLoginToken, AuthSubToken, or + OAuthToken which authorizes this client to edit the user's data. + kwargs: Other parameters to pass to self.get_feed(). + + Returns: + gdata.sites.data.SiteFeed + """ + if uri is None: + uri = self.make_site_feed_uri() + return self.get_feed(uri, desired_class=gdata.sites.data.SiteFeed, + auth_token=auth_token, **kwargs) + + GetSiteFeed = get_site_feed + + def get_acl_feed(self, uri=None, auth_token=None, **kwargs): + """Retrieves the acl feed containing a site's sharing permissions. + + Args: + uri: string (optional) A full URI to query the acl feed. + auth_token: (optional) gdata.gauth.ClientLoginToken, AuthSubToken, or + OAuthToken which authorizes this client to edit the user's data. + kwargs: Other parameters to pass to self.get_feed(). + + Returns: + gdata.sites.data.AclFeed + """ + if uri is None: + uri = self.make_acl_feed_uri() + return self.get_feed(uri, desired_class=gdata.sites.data.AclFeed, + auth_token=auth_token, **kwargs) + + GetAclFeed = get_acl_feed + + def create_site(self, title, description=None, source_site=None, + theme=None, uri=None, auth_token=None, **kwargs): + """Creates a new Google Site. + + Note: This feature is only available to Google Apps domains. + + Args: + title: string Title for the site. + description: string (optional) A description/summary for the site. + source_site: string (optional) The site feed URI of the site to copy. + This parameter should only be specified when copying a site. + theme: string (optional) The name of the theme to create the site with. + uri: string (optional) A full site feed URI to override where the site + is created/copied. By default, the site will be created under + the currently set domain (e.g. self.domain). + auth_token: (optional) gdata.gauth.ClientLoginToken, AuthSubToken, or + OAuthToken which authorizes this client to edit the user's data. + kwargs: Other parameters to pass to gdata.client.post(). + + Returns: + gdata.sites.data.SiteEntry of the created site. 
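+
+      Example (editorial sketch; site creation only works for Google Apps
+      domains, and the domain, title and theme below are placeholders):
+
+        client = gdata.sites.client.SitesClient(domain='example.com',
+                                                auth_token=token)
+        site_entry = client.CreateSite('Team Wiki',
+                                       description='Internal notes',
+                                       theme='slate')
+        print site_entry.title.text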
+ """ + new_entry = gdata.sites.data.SiteEntry(title=atom.data.Title(text=title)) + + if description is not None: + new_entry.summary = gdata.sites.data.Summary(text=description) + + # Add the source link if we're making a copy of a site. + if source_site is not None: + source_link = atom.data.Link(rel=gdata.sites.data.SITES_SOURCE_LINK_REL, + type='application/atom+xml', + href=source_site) + new_entry.link.append(source_link) + + if theme is not None: + new_entry.theme = gdata.sites.data.Theme(text=theme) + + if uri is None: + uri = self.make_site_feed_uri() + + return self.post(new_entry, uri, auth_token=auth_token, **kwargs) + + CreateSite = create_site + + def create_page(self, kind, title, html='', page_name=None, parent=None, + auth_token=None, **kwargs): + """Creates a new page (specified by kind) on a Google Site. + + Args: + kind: string The type of page/item to create. For example, webpage, + listpage, comment, announcementspage, filecabinet, etc. The full list + of supported kinds can be found in gdata.sites.gdata.SUPPORT_KINDS. + title: string Title for the page. + html: string (optional) XHTML for the page's content body. + page_name: string (optional) The URL page name to set. If not set, the + title will be normalized and used as the page's URL path. + parent: string or gdata.sites.data.ContentEntry (optional) The parent + entry or parent link url to create the page under. + auth_token: (optional) gdata.gauth.ClientLoginToken, AuthSubToken, or + OAuthToken which authorizes this client to edit the user's data. + kwargs: Other parameters to pass to gdata.client.post(). + + Returns: + gdata.sites.data.ContentEntry of the created page. + """ + new_entry = gdata.sites.data.ContentEntry( + title=atom.data.Title(text=title), kind=kind, + content=gdata.sites.data.Content(text=html)) + + if page_name is not None: + new_entry.page_name = gdata.sites.data.PageName(text=page_name) + + # Add parent link to entry if it should be uploaded as a subpage. + if isinstance(parent, gdata.sites.data.ContentEntry): + parent_link = atom.data.Link(rel=gdata.sites.data.SITES_PARENT_LINK_REL, + type='application/atom+xml', + href=parent.GetSelfLink().href) + new_entry.link.append(parent_link) + elif parent is not None: + parent_link = atom.data.Link(rel=gdata.sites.data.SITES_PARENT_LINK_REL, + type='application/atom+xml', + href=parent) + new_entry.link.append(parent_link) + + return self.post(new_entry, self.make_content_feed_uri(), + auth_token=auth_token, **kwargs) + + CreatePage = create_page + + def create_webattachment(self, src, content_type, title, parent, + description=None, auth_token=None, **kwargs): + """Creates a new webattachment within a filecabinet. + + Args: + src: string The url of the web attachment. + content_type: string The MIME type of the web attachment. + title: string The title to name the web attachment. + parent: string or gdata.sites.data.ContentEntry (optional) The + parent entry or url of the filecabinet to create the attachment under. + description: string (optional) A summary/description for the attachment. + auth_token: (optional) gdata.gauth.ClientLoginToken, AuthSubToken, or + OAuthToken which authorizes this client to edit the user's data. + kwargs: Other parameters to pass to gdata.client.post(). + + Returns: + gdata.sites.data.ContentEntry of the created page. 
+ """ + new_entry = gdata.sites.data.ContentEntry( + title=atom.data.Title(text=title), kind='webattachment', + content=gdata.sites.data.Content(src=src, type=content_type)) + + if isinstance(parent, gdata.sites.data.ContentEntry): + link = atom.data.Link(rel=gdata.sites.data.SITES_PARENT_LINK_REL, + type='application/atom+xml', + href=parent.GetSelfLink().href) + elif parent is not None: + link = atom.data.Link(rel=gdata.sites.data.SITES_PARENT_LINK_REL, + type='application/atom+xml', href=parent) + + new_entry.link.append(link) + + # Add file decription if it was specified + if description is not None: + new_entry.summary = gdata.sites.data.Summary(type='text', + text=description) + + return self.post(new_entry, self.make_content_feed_uri(), + auth_token=auth_token, **kwargs) + + CreateWebAttachment = create_webattachment + + def upload_attachment(self, file_handle, parent, content_type=None, + title=None, description=None, folder_name=None, + auth_token=None, **kwargs): + """Uploads an attachment to a parent page. + + Args: + file_handle: MediaSource or string A gdata.data.MediaSource object + containing the file to be uploaded or the full path name to the + file on disk. + parent: gdata.sites.data.ContentEntry or string The parent page to + upload the file to or the full URI of the entry's self link. + content_type: string (optional) The MIME type of the file + (e.g 'application/pdf'). This should be provided if file is not a + MediaSource object. + title: string (optional) The title to name the attachment. If not + included, the filepath or media source's filename is used. + description: string (optional) A summary/description for the attachment. + folder_name: string (optional) The name of an existing folder to upload + the attachment to. This only applies when the parent parameter points + to a filecabinet entry. + auth_token: (optional) gdata.gauth.ClientLoginToken, AuthSubToken, or + OAuthToken which authorizes this client to edit the user's data. + kwargs: Other parameters to pass to self.__upload(). + + Returns: + A gdata.sites.data.ContentEntry containing information about the created + attachment. + """ + if isinstance(parent, gdata.sites.data.ContentEntry): + link = atom.data.Link(rel=gdata.sites.data.SITES_PARENT_LINK_REL, + type='application/atom+xml', + href=parent.GetSelfLink().href) + else: + link = atom.data.Link(rel=gdata.sites.data.SITES_PARENT_LINK_REL, + type='application/atom+xml', + href=parent) + + if not isinstance(file_handle, gdata.data.MediaSource): + ms = gdata.data.MediaSource(file_path=file_handle, + content_type=content_type) + else: + ms = file_handle + + # If no title specified, use the file name + if title is None: + title = ms.file_name + + new_entry = gdata.sites.data.ContentEntry(kind='attachment') + new_entry.title = atom.data.Title(text=title) + new_entry.link.append(link) + + # Add file decription if it was specified + if description is not None: + new_entry.summary = gdata.sites.data.Summary(type='text', + text=description) + + # Upload the attachment to a filecabinet folder? + if parent.Kind() == 'filecabinet' and folder_name is not None: + folder_category = atom.data.Category( + scheme=gdata.sites.data.FOLDER_KIND_TERM, term=folder_name) + new_entry.category.append(folder_category) + + return self.__upload(new_entry, ms, auth_token=auth_token, **kwargs) + + UploadAttachment = upload_attachment + + def download_attachment(self, uri_or_entry, file_path): + """Downloads an attachment file to disk. 
+ + Args: + uri_or_entry: string The full URL to download the file from. + file_path: string The full path to save the file to. + + Raises: + gdata.client.RequestError: on error response from server. + """ + uri = uri_or_entry + if isinstance(uri_or_entry, gdata.sites.data.ContentEntry): + uri = uri_or_entry.content.src + + f = open(file_path, 'wb') + try: + f.write(self._get_file_content(uri)) + except gdata.client.RequestError, e: + f.close() + raise e + f.flush() + f.close() + + DownloadAttachment = download_attachment diff --git a/gam/gdata/sites/data.py b/gam/gdata/sites/data.py new file mode 100755 index 00000000000..dc8dfb2db64 --- /dev/null +++ b/gam/gdata/sites/data.py @@ -0,0 +1,376 @@ +#!/usr/bin/python +# +# Copyright 2009 Google Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Data model classes for parsing and generating XML for the Sites Data API.""" + +__author__ = 'e.bidelman (Eric Bidelman)' + + +import atom.core +import atom.data +import gdata.acl.data +import gdata.data + +# XML Namespaces used in Google Sites entities. +SITES_NAMESPACE = 'http://schemas.google.com/sites/2008' +SITES_TEMPLATE = '{http://schemas.google.com/sites/2008}%s' +SPREADSHEETS_NAMESPACE = 'http://schemas.google.com/spreadsheets/2006' +SPREADSHEETS_TEMPLATE = '{http://schemas.google.com/spreadsheets/2006}%s' +DC_TERMS_TEMPLATE = '{http://purl.org/dc/terms}%s' +THR_TERMS_TEMPLATE = '{http://purl.org/syndication/thread/1.0}%s' +XHTML_NAMESPACE = 'http://www.w3.org/1999/xhtml' +XHTML_TEMPLATE = '{http://www.w3.org/1999/xhtml}%s' + +SITES_PARENT_LINK_REL = SITES_NAMESPACE + '#parent' +SITES_REVISION_LINK_REL = SITES_NAMESPACE + '#revision' +SITES_SOURCE_LINK_REL = SITES_NAMESPACE + '#source' + +SITES_KIND_SCHEME = 'http://schemas.google.com/g/2005#kind' +ANNOUNCEMENT_KIND_TERM = SITES_NAMESPACE + '#announcement' +ANNOUNCEMENT_PAGE_KIND_TERM = SITES_NAMESPACE + '#announcementspage' +ATTACHMENT_KIND_TERM = SITES_NAMESPACE + '#attachment' +COMMENT_KIND_TERM = SITES_NAMESPACE + '#comment' +FILECABINET_KIND_TERM = SITES_NAMESPACE + '#filecabinet' +LISTITEM_KIND_TERM = SITES_NAMESPACE + '#listitem' +LISTPAGE_KIND_TERM = SITES_NAMESPACE + '#listpage' +WEBPAGE_KIND_TERM = SITES_NAMESPACE + '#webpage' +WEBATTACHMENT_KIND_TERM = SITES_NAMESPACE + '#webattachment' +FOLDER_KIND_TERM = SITES_NAMESPACE + '#folder' + +SUPPORT_KINDS = [ + 'announcement', 'announcementspage', 'attachment', 'comment', 'filecabinet', + 'listitem', 'listpage', 'webpage', 'webattachment' + ] + + +class Revision(atom.core.XmlElement): + """Google Sites <sites:revision>.""" + _qname = SITES_TEMPLATE % 'revision' + + +class PageName(atom.core.XmlElement): + """Google Sites <sites:pageName>.""" + _qname = SITES_TEMPLATE % 'pageName' + + +class SiteName(atom.core.XmlElement): + """Google Sites <sites:siteName>.""" + _qname = SITES_TEMPLATE % 'siteName' + + +class Theme(atom.core.XmlElement): + """Google Sites <sites:theme>.""" + _qname = SITES_TEMPLATE % 'theme' + + +class Deleted(atom.core.XmlElement): + """Google 
Sites <gd:deleted>.""" + _qname = gdata.data.GDATA_TEMPLATE % 'deleted' + + +class Publisher(atom.core.XmlElement): + """Google Sites <dc:pulisher>.""" + _qname = DC_TERMS_TEMPLATE % 'publisher' + + +class Worksheet(atom.core.XmlElement): + """Google Sites List Page <gs:worksheet>.""" + + _qname = SPREADSHEETS_TEMPLATE % 'worksheet' + name = 'name' + + +class Header(atom.core.XmlElement): + """Google Sites List Page <gs:header>.""" + + _qname = SPREADSHEETS_TEMPLATE % 'header' + row = 'row' + + +class Column(atom.core.XmlElement): + """Google Sites List Page <gs:column>.""" + + _qname = SPREADSHEETS_TEMPLATE % 'column' + index = 'index' + name = 'name' + + +class Data(atom.core.XmlElement): + """Google Sites List Page <gs:data>.""" + + _qname = SPREADSHEETS_TEMPLATE % 'data' + startRow = 'startRow' + column = [Column] + + +class Field(atom.core.XmlElement): + """Google Sites List Item <gs:field>.""" + + _qname = SPREADSHEETS_TEMPLATE % 'field' + index = 'index' + name = 'name' + + +class InReplyTo(atom.core.XmlElement): + """Google Sites List Item <thr:in-reply-to>.""" + + _qname = THR_TERMS_TEMPLATE % 'in-reply-to' + href = 'href' + ref = 'ref' + source = 'source' + type = 'type' + + +class Content(atom.data.Content): + """Google Sites version of <atom:content> that encapsulates XHTML.""" + + def __init__(self, html=None, type=None, **kwargs): + if type is None and html: + type = 'xhtml' + super(Content, self).__init__(type=type, **kwargs) + if html is not None: + self.html = html + + def _get_html(self): + if self.children: + return self.children[0] + else: + return '' + + def _set_html(self, html): + if not html: + self.children = [] + return + + if type(html) == str: + html = atom.core.parse(html) + if not html.namespace: + html.namespace = XHTML_NAMESPACE + + self.children = [html] + + html = property(_get_html, _set_html) + + +class Summary(atom.data.Summary): + """Google Sites version of <atom:summary>.""" + + def __init__(self, html=None, type=None, text=None, **kwargs): + if type is None and html: + type = 'xhtml' + + super(Summary, self).__init__(type=type, text=text, **kwargs) + if html is not None: + self.html = html + + def _get_html(self): + if self.children: + return self.children[0] + else: + return '' + + def _set_html(self, html): + if not html: + self.children = [] + return + + if type(html) == str: + html = atom.core.parse(html) + if not html.namespace: + html.namespace = XHTML_NAMESPACE + + self.children = [html] + + html = property(_get_html, _set_html) + + +class BaseSiteEntry(gdata.data.GDEntry): + """Google Sites Entry.""" + + def __init__(self, kind=None, **kwargs): + super(BaseSiteEntry, self).__init__(**kwargs) + if kind is not None: + self.category.append( + atom.data.Category(scheme=SITES_KIND_SCHEME, + term='%s#%s' % (SITES_NAMESPACE, kind), + label=kind)) + + def __find_category_scheme(self, scheme): + for category in self.category: + if category.scheme == scheme: + return category + return None + + def kind(self): + kind = self.__find_category_scheme(SITES_KIND_SCHEME) + if kind is not None: + return kind.term[len(SITES_NAMESPACE) + 1:] + else: + return None + + Kind = kind + + def get_node_id(self): + return self.id.text[self.id.text.rfind('/') + 1:] + + GetNodeId = get_node_id + + def find_parent_link(self): + return self.find_url(SITES_PARENT_LINK_REL) + + FindParentLink = find_parent_link + + def is_deleted(self): + return self.deleted is not None + + IsDeleted = is_deleted + + +class ContentEntry(BaseSiteEntry): + """Google Sites Content Entry.""" + 
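+  # Editorial comment (sketch, not in the upstream file): the attributes below
+  # declare which child XML elements are parsed onto a content entry, so a
+  # webpage's XHTML body can be read roughly like:
+  #   feed = client.GetContentFeed()
+  #   page = feed.GetWebpages()[0]
+  #   xhtml_tree = page.content.html   # see the Content class defined above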
content = Content + deleted = Deleted + publisher = Publisher + in_reply_to = InReplyTo + worksheet = Worksheet + header = Header + data = Data + field = [Field] + revision = Revision + page_name = PageName + feed_link = gdata.data.FeedLink + + def find_revison_link(self): + return self.find_url(SITES_REVISION_LINK_REL) + + FindRevisionLink = find_revison_link + + +class ContentFeed(gdata.data.GDFeed): + """Google Sites Content Feed. + + The Content feed is a feed containing the current, editable site content. + """ + entry = [ContentEntry] + + def __get_entry_type(self, kind): + matches = [] + for entry in self.entry: + if entry.Kind() == kind: + matches.append(entry) + return matches + + def get_announcements(self): + return self.__get_entry_type('announcement') + + GetAnnouncements = get_announcements + + def get_announcement_pages(self): + return self.__get_entry_type('announcementspage') + + GetAnnouncementPages = get_announcement_pages + + def get_attachments(self): + return self.__get_entry_type('attachment') + + GetAttachments = get_attachments + + def get_comments(self): + return self.__get_entry_type('comment') + + GetComments = get_comments + + def get_file_cabinets(self): + return self.__get_entry_type('filecabinet') + + GetFileCabinets = get_file_cabinets + + def get_list_items(self): + return self.__get_entry_type('listitem') + + GetListItems = get_list_items + + def get_list_pages(self): + return self.__get_entry_type('listpage') + + GetListPages = get_list_pages + + def get_webpages(self): + return self.__get_entry_type('webpage') + + GetWebpages = get_webpages + + def get_webattachments(self): + return self.__get_entry_type('webattachment') + + GetWebattachments = get_webattachments + + +class ActivityEntry(BaseSiteEntry): + """Google Sites Activity Entry.""" + summary = Summary + + +class ActivityFeed(gdata.data.GDFeed): + """Google Sites Activity Feed. + + The Activity feed is a feed containing recent Site activity. + """ + entry = [ActivityEntry] + + +class RevisionEntry(BaseSiteEntry): + """Google Sites Revision Entry.""" + content = Content + + +class RevisionFeed(gdata.data.GDFeed): + """Google Sites Revision Feed. + + The Activity feed is a feed containing recent Site activity. + """ + entry = [RevisionEntry] + + +class SiteEntry(gdata.data.GDEntry): + """Google Sites Site Feed Entry.""" + site_name = SiteName + theme = Theme + + def find_source_link(self): + return self.find_url(SITES_SOURCE_LINK_REL) + + FindSourceLink = find_source_link + + +class SiteFeed(gdata.data.GDFeed): + """Google Sites Site Feed. + + The Site feed can be used to list a user's sites and create new sites. + """ + entry = [SiteEntry] + + +class AclEntry(gdata.acl.data.AclEntry): + """Google Sites ACL Entry.""" + + +class AclFeed(gdata.acl.data.AclFeed): + """Google Sites ACL Feed. + + The ACL feed can be used to modify the sharing permissions of a Site. + """ + entry = [AclEntry] diff --git a/gam/gdata/spreadsheet/__init__.py b/gam/gdata/spreadsheet/__init__.py new file mode 100755 index 00000000000..e9a0fb3dc7c --- /dev/null +++ b/gam/gdata/spreadsheet/__init__.py @@ -0,0 +1,474 @@ +#!/usr/bin/python +# +# Copyright (C) 2007 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Contains extensions to Atom objects used with Google Spreadsheets. +""" + +__author__ = 'api.laurabeth@gmail.com (Laura Beth Lincoln)' + + +try: + from xml.etree import cElementTree as ElementTree +except ImportError: + try: + import cElementTree as ElementTree + except ImportError: + try: + from xml.etree import ElementTree + except ImportError: + from elementtree import ElementTree +import atom +import gdata +import re +import string + + +# XML namespaces which are often used in Google Spreadsheets entities. +GSPREADSHEETS_NAMESPACE = 'http://schemas.google.com/spreadsheets/2006' +GSPREADSHEETS_TEMPLATE = '{http://schemas.google.com/spreadsheets/2006}%s' + +GSPREADSHEETS_EXTENDED_NAMESPACE = ('http://schemas.google.com/spreadsheets' + '/2006/extended') +GSPREADSHEETS_EXTENDED_TEMPLATE = ('{http://schemas.google.com/spreadsheets' + '/2006/extended}%s') + + +class ColCount(atom.AtomBase): + """The Google Spreadsheets colCount element """ + + _tag = 'colCount' + _namespace = GSPREADSHEETS_NAMESPACE + _children = atom.AtomBase._children.copy() + _attributes = atom.AtomBase._attributes.copy() + + def __init__(self, text=None, extension_elements=None, + extension_attributes=None): + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + +def ColCountFromString(xml_string): + return atom.CreateClassFromXMLString(ColCount, xml_string) + + +class RowCount(atom.AtomBase): + """The Google Spreadsheets rowCount element """ + + _tag = 'rowCount' + _namespace = GSPREADSHEETS_NAMESPACE + _children = atom.AtomBase._children.copy() + _attributes = atom.AtomBase._attributes.copy() + + def __init__(self, text=None, extension_elements=None, + extension_attributes=None): + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + +def RowCountFromString(xml_string): + return atom.CreateClassFromXMLString(RowCount, xml_string) + + +class Cell(atom.AtomBase): + """The Google Spreadsheets cell element """ + + _tag = 'cell' + _namespace = GSPREADSHEETS_NAMESPACE + _children = atom.AtomBase._children.copy() + _attributes = atom.AtomBase._attributes.copy() + _attributes['row'] = 'row' + _attributes['col'] = 'col' + _attributes['inputValue'] = 'inputValue' + _attributes['numericValue'] = 'numericValue' + + def __init__(self, text=None, row=None, col=None, inputValue=None, + numericValue=None, extension_elements=None, extension_attributes=None): + self.text = text + self.row = row + self.col = col + self.inputValue = inputValue + self.numericValue = numericValue + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + +def CellFromString(xml_string): + return atom.CreateClassFromXMLString(Cell, xml_string) + + +class Custom(atom.AtomBase): + """The Google Spreadsheets custom element""" + + _namespace = GSPREADSHEETS_EXTENDED_NAMESPACE + _children = atom.AtomBase._children.copy() + _attributes = atom.AtomBase._attributes.copy() + + def __init__(self, column=None, text=None, extension_elements=None, + 
extension_attributes=None): + self.column = column # The name of the column + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + def _BecomeChildElement(self, tree): + new_child = ElementTree.Element('') + tree.append(new_child) + new_child.tag = '{%s}%s' % (self.__class__._namespace, + self.column) + self._AddMembersToElementTree(new_child) + + def _ToElementTree(self): + new_tree = ElementTree.Element('{%s}%s' % (self.__class__._namespace, + self.column)) + self._AddMembersToElementTree(new_tree) + return new_tree + + def _HarvestElementTree(self, tree): + namespace_uri, local_tag = string.split(tree.tag[1:], "}", 1) + self.column = local_tag + # Fill in the instance members from the contents of the XML tree. + for child in tree: + self._ConvertElementTreeToMember(child) + for attribute, value in tree.attrib.iteritems(): + self._ConvertElementAttributeToMember(attribute, value) + self.text = tree.text + + +def CustomFromString(xml_string): + element_tree = ElementTree.fromstring(xml_string) + return _CustomFromElementTree(element_tree) + + +def _CustomFromElementTree(element_tree): + namespace_uri, local_tag = string.split(element_tree.tag[1:], "}", 1) + if namespace_uri == GSPREADSHEETS_EXTENDED_NAMESPACE: + new_custom = Custom() + new_custom._HarvestElementTree(element_tree) + new_custom.column = local_tag + return new_custom + return None + + + + + +class SpreadsheetsSpreadsheet(gdata.GDataEntry): + """A Google Spreadsheets flavor of a Spreadsheet Atom Entry """ + + _tag = 'entry' + _namespace = atom.ATOM_NAMESPACE + _children = gdata.GDataEntry._children.copy() + _attributes = gdata.GDataEntry._attributes.copy() + + def __init__(self, author=None, category=None, content=None, + contributor=None, atom_id=None, link=None, published=None, rights=None, + source=None, summary=None, title=None, control=None, updated=None, + text=None, extension_elements=None, extension_attributes=None): + self.author = author or [] + self.category = category or [] + self.content = content + self.contributor = contributor or [] + self.id = atom_id + self.link = link or [] + self.published = published + self.rights = rights + self.source = source + self.summary = summary + self.control = control + self.title = title + self.updated = updated + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + +def SpreadsheetsSpreadsheetFromString(xml_string): + return atom.CreateClassFromXMLString(SpreadsheetsSpreadsheet, + xml_string) + + +class SpreadsheetsWorksheet(gdata.GDataEntry): + """A Google Spreadsheets flavor of a Worksheet Atom Entry """ + + _tag = 'entry' + _namespace = atom.ATOM_NAMESPACE + _children = gdata.GDataEntry._children.copy() + _attributes = gdata.GDataEntry._attributes.copy() + _children['{%s}rowCount' % GSPREADSHEETS_NAMESPACE] = ('row_count', + RowCount) + _children['{%s}colCount' % GSPREADSHEETS_NAMESPACE] = ('col_count', + ColCount) + + def __init__(self, author=None, category=None, content=None, + contributor=None, atom_id=None, link=None, published=None, rights=None, + source=None, summary=None, title=None, control=None, updated=None, + row_count=None, col_count=None, text=None, extension_elements=None, + extension_attributes=None): + self.author = author or [] + self.category = category or [] + self.content = content + self.contributor = contributor or [] + self.id = atom_id + self.link = link or [] + self.published = published + 
self.rights = rights + self.source = source + self.summary = summary + self.control = control + self.title = title + self.updated = updated + self.row_count = row_count + self.col_count = col_count + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + +def SpreadsheetsWorksheetFromString(xml_string): + return atom.CreateClassFromXMLString(SpreadsheetsWorksheet, + xml_string) + + +class SpreadsheetsCell(gdata.BatchEntry): + """A Google Spreadsheets flavor of a Cell Atom Entry """ + + _tag = 'entry' + _namespace = atom.ATOM_NAMESPACE + _children = gdata.BatchEntry._children.copy() + _attributes = gdata.BatchEntry._attributes.copy() + _children['{%s}cell' % GSPREADSHEETS_NAMESPACE] = ('cell', Cell) + + def __init__(self, author=None, category=None, content=None, + contributor=None, atom_id=None, link=None, published=None, rights=None, + source=None, summary=None, title=None, control=None, updated=None, + cell=None, batch_operation=None, batch_id=None, batch_status=None, + text=None, extension_elements=None, extension_attributes=None): + self.author = author or [] + self.category = category or [] + self.content = content + self.contributor = contributor or [] + self.id = atom_id + self.link = link or [] + self.published = published + self.rights = rights + self.source = source + self.summary = summary + self.control = control + self.title = title + self.batch_operation = batch_operation + self.batch_id = batch_id + self.batch_status = batch_status + self.updated = updated + self.cell = cell + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + +def SpreadsheetsCellFromString(xml_string): + return atom.CreateClassFromXMLString(SpreadsheetsCell, + xml_string) + + +class SpreadsheetsList(gdata.GDataEntry): + """A Google Spreadsheets flavor of a List Atom Entry """ + + _tag = 'entry' + _namespace = atom.ATOM_NAMESPACE + _children = gdata.GDataEntry._children.copy() + _attributes = gdata.GDataEntry._attributes.copy() + + def __init__(self, author=None, category=None, content=None, + contributor=None, atom_id=None, link=None, published=None, rights=None, + source=None, summary=None, title=None, control=None, updated=None, + custom=None, + text=None, extension_elements=None, extension_attributes=None): + self.author = author or [] + self.category = category or [] + self.content = content + self.contributor = contributor or [] + self.id = atom_id + self.link = link or [] + self.published = published + self.rights = rights + self.source = source + self.summary = summary + self.control = control + self.title = title + self.updated = updated + self.custom = custom or {} + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + # We need to overwrite _ConvertElementTreeToMember to add special logic to + # convert custom attributes to members + def _ConvertElementTreeToMember(self, child_tree): + # Find the element's tag in this class's list of child members + if self.__class__._children.has_key(child_tree.tag): + member_name = self.__class__._children[child_tree.tag][0] + member_class = self.__class__._children[child_tree.tag][1] + # If the class member is supposed to contain a list, make sure the + # matching member is set to a list, then append the new member + # instance to the list. 
+ if isinstance(member_class, list): + if getattr(self, member_name) is None: + setattr(self, member_name, []) + getattr(self, member_name).append(atom._CreateClassFromElementTree( + member_class[0], child_tree)) + else: + setattr(self, member_name, + atom._CreateClassFromElementTree(member_class, child_tree)) + elif child_tree.tag.find('{%s}' % GSPREADSHEETS_EXTENDED_NAMESPACE) == 0: + # If this is in the custom namespace, make add it to the custom dict. + name = child_tree.tag[child_tree.tag.index('}')+1:] + custom = _CustomFromElementTree(child_tree) + if custom: + self.custom[name] = custom + else: + atom.ExtensionContainer._ConvertElementTreeToMember(self, child_tree) + + # We need to overwtite _AddMembersToElementTree to add special logic to + # convert custom members to XML nodes. + def _AddMembersToElementTree(self, tree): + # Convert the members of this class which are XML child nodes. + # This uses the class's _children dictionary to find the members which + # should become XML child nodes. + member_node_names = [values[0] for tag, values in + self.__class__._children.iteritems()] + for member_name in member_node_names: + member = getattr(self, member_name) + if member is None: + pass + elif isinstance(member, list): + for instance in member: + instance._BecomeChildElement(tree) + else: + member._BecomeChildElement(tree) + # Convert the members of this class which are XML attributes. + for xml_attribute, member_name in self.__class__._attributes.iteritems(): + member = getattr(self, member_name) + if member is not None: + tree.attrib[xml_attribute] = member + # Convert all special custom item attributes to nodes + for name, custom in self.custom.iteritems(): + custom._BecomeChildElement(tree) + # Lastly, call the ExtensionContainers's _AddMembersToElementTree to + # convert any extension attributes. 
+ atom.ExtensionContainer._AddMembersToElementTree(self, tree) + + +def SpreadsheetsListFromString(xml_string): + return atom.CreateClassFromXMLString(SpreadsheetsList, + xml_string) + element_tree = ElementTree.fromstring(xml_string) + return _SpreadsheetsListFromElementTree(element_tree) + + +class SpreadsheetsSpreadsheetsFeed(gdata.GDataFeed): + """A feed containing Google Spreadsheets Spreadsheets""" + + _tag = 'feed' + _namespace = atom.ATOM_NAMESPACE + _children = gdata.GDataFeed._children.copy() + _attributes = gdata.GDataFeed._attributes.copy() + _children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry', + [SpreadsheetsSpreadsheet]) + + +def SpreadsheetsSpreadsheetsFeedFromString(xml_string): + return atom.CreateClassFromXMLString(SpreadsheetsSpreadsheetsFeed, + xml_string) + + +class SpreadsheetsWorksheetsFeed(gdata.GDataFeed): + """A feed containing Google Spreadsheets Spreadsheets""" + + _tag = 'feed' + _namespace = atom.ATOM_NAMESPACE + _children = gdata.GDataFeed._children.copy() + _attributes = gdata.GDataFeed._attributes.copy() + _children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry', + [SpreadsheetsWorksheet]) + + +def SpreadsheetsWorksheetsFeedFromString(xml_string): + return atom.CreateClassFromXMLString(SpreadsheetsWorksheetsFeed, + xml_string) + + +class SpreadsheetsCellsFeed(gdata.BatchFeed): + """A feed containing Google Spreadsheets Cells""" + + _tag = 'feed' + _namespace = atom.ATOM_NAMESPACE + _children = gdata.BatchFeed._children.copy() + _attributes = gdata.BatchFeed._attributes.copy() + _children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry', + [SpreadsheetsCell]) + _children['{%s}rowCount' % GSPREADSHEETS_NAMESPACE] = ('row_count', + RowCount) + _children['{%s}colCount' % GSPREADSHEETS_NAMESPACE] = ('col_count', + ColCount) + + def __init__(self, author=None, category=None, contributor=None, + generator=None, icon=None, atom_id=None, link=None, logo=None, + rights=None, subtitle=None, title=None, updated=None, + entry=None, total_results=None, start_index=None, + items_per_page=None, extension_elements=None, + extension_attributes=None, text=None, row_count=None, + col_count=None, interrupted=None): + gdata.BatchFeed.__init__(self, author=author, category=category, + contributor=contributor, generator=generator, + icon=icon, atom_id=atom_id, link=link, + logo=logo, rights=rights, subtitle=subtitle, + title=title, updated=updated, entry=entry, + total_results=total_results, + start_index=start_index, + items_per_page=items_per_page, + extension_elements=extension_elements, + extension_attributes=extension_attributes, + text=text, interrupted=interrupted) + self.row_count = row_count + self.col_count = col_count + + def GetBatchLink(self): + for link in self.link: + if link.rel == 'http://schemas.google.com/g/2005#batch': + return link + return None + + +def SpreadsheetsCellsFeedFromString(xml_string): + return atom.CreateClassFromXMLString(SpreadsheetsCellsFeed, + xml_string) + + +class SpreadsheetsListFeed(gdata.GDataFeed): + """A feed containing Google Spreadsheets Spreadsheets""" + + _tag = 'feed' + _namespace = atom.ATOM_NAMESPACE + _children = gdata.GDataFeed._children.copy() + _attributes = gdata.GDataFeed._attributes.copy() + _children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry', + [SpreadsheetsList]) + + +def SpreadsheetsListFeedFromString(xml_string): + return atom.CreateClassFromXMLString(SpreadsheetsListFeed, + xml_string) diff --git a/gam/gdata/spreadsheet/service.py b/gam/gdata/spreadsheet/service.py new file mode 100755 index 
00000000000..7449c18565e --- /dev/null +++ b/gam/gdata/spreadsheet/service.py @@ -0,0 +1,484 @@ +#!/usr/bin/python +# +# Copyright (C) 2007 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""SpreadsheetsService extends the GDataService to streamline Google +Spreadsheets operations. + + SpreadsheetService: Provides methods to query feeds and manipulate items. + Extends GDataService. + + DictionaryToParamList: Function which converts a dictionary into a list of + URL arguments (represented as strings). This is a + utility function used in CRUD operations. +""" + +__author__ = 'api.laurabeth@gmail.com (Laura Beth Lincoln)' + + +import gdata +import atom.service +import gdata.service +import gdata.spreadsheet +import atom + + +class Error(Exception): + """Base class for exceptions in this module.""" + pass + + +class RequestError(Error): + pass + + +class SpreadsheetsService(gdata.service.GDataService): + """Client for the Google Spreadsheets service.""" + + def __init__(self, email=None, password=None, source=None, + server='spreadsheets.google.com', additional_headers=None, + **kwargs): + """Creates a client for the Google Spreadsheets service. + + Args: + email: string (optional) The user's email address, used for + authentication. + password: string (optional) The user's password. + source: string (optional) The name of the user's application. + server: string (optional) The name of the server to which a connection + will be opened. Default value: 'spreadsheets.google.com'. + **kwargs: The other parameters to pass to gdata.service.GDataService + constructor. + """ + gdata.service.GDataService.__init__( + self, email=email, password=password, service='wise', source=source, + server=server, additional_headers=additional_headers, **kwargs) + + def GetSpreadsheetsFeed(self, key=None, query=None, visibility='private', + projection='full'): + """Gets a spreadsheets feed or a specific entry if a key is defined + Args: + key: string (optional) The spreadsheet key defined in /ccc?key= + query: DocumentQuery (optional) Query parameters + + Returns: + If there is no key, then a SpreadsheetsSpreadsheetsFeed. + If there is a key, then a SpreadsheetsSpreadsheet. 
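+
+    Example (editorial sketch; assumes ProgrammaticLogin() has already been
+    called and 'pk123' stands in for a real spreadsheet key):
+
+      feed = client.GetSpreadsheetsFeed()
+      for entry in feed.entry:
+        print entry.title.text
+      one_spreadsheet = client.GetSpreadsheetsFeed(key='pk123')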
+ """ + + uri = ('http://%s/feeds/spreadsheets/%s/%s' + % (self.server, visibility, projection)) + + if key is not None: + uri = '%s/%s' % (uri, key) + + if query != None: + query.feed = uri + uri = query.ToUri() + + if key: + return self.Get(uri, + converter=gdata.spreadsheet.SpreadsheetsSpreadsheetFromString) + else: + return self.Get(uri, + converter=gdata.spreadsheet.SpreadsheetsSpreadsheetsFeedFromString) + + def GetWorksheetsFeed(self, key, wksht_id=None, query=None, + visibility='private', projection='full'): + """Gets a worksheets feed or a specific entry if a wksht is defined + Args: + key: string The spreadsheet key defined in /ccc?key= + wksht_id: string (optional) The id for a specific worksheet entry + query: DocumentQuery (optional) Query parameters + + Returns: + If there is no wksht_id, then a SpreadsheetsWorksheetsFeed. + If there is a wksht_id, then a SpreadsheetsWorksheet. + """ + + uri = ('http://%s/feeds/worksheets/%s/%s/%s' + % (self.server, key, visibility, projection)) + + if wksht_id != None: + uri = '%s/%s' % (uri, wksht_id) + + if query != None: + query.feed = uri + uri = query.ToUri() + + if wksht_id: + return self.Get(uri, + converter=gdata.spreadsheet.SpreadsheetsWorksheetFromString) + else: + return self.Get(uri, + converter=gdata.spreadsheet.SpreadsheetsWorksheetsFeedFromString) + + def AddWorksheet(self, title, row_count, col_count, key): + """Creates a new worksheet in the desired spreadsheet. + + The new worksheet is appended to the end of the list of worksheets. The + new worksheet will only have the available number of columns and cells + specified. + + Args: + title: str The title which will be displayed in the list of worksheets. + row_count: int or str The number of rows in the new worksheet. + col_count: int or str The number of columns in the new worksheet. + key: str The spreadsheet key to the spreadsheet to which the new + worksheet should be added. + + Returns: + A SpreadsheetsWorksheet if the new worksheet was created succesfully. + """ + new_worksheet = gdata.spreadsheet.SpreadsheetsWorksheet( + title=atom.Title(text=title), + row_count=gdata.spreadsheet.RowCount(text=str(row_count)), + col_count=gdata.spreadsheet.ColCount(text=str(col_count))) + return self.Post(new_worksheet, + 'http://%s/feeds/worksheets/%s/private/full' % (self.server, key), + converter=gdata.spreadsheet.SpreadsheetsWorksheetFromString) + + def UpdateWorksheet(self, worksheet_entry, url=None): + """Changes the size and/or title of the desired worksheet. + + Args: + worksheet_entry: SpreadsheetWorksheet The new contents of the + worksheet. + url: str (optional) The URL to which the edited worksheet entry should + be sent. If the url is None, the edit URL from the worksheet will + be used. + + Returns: + A SpreadsheetsWorksheet with the new information about the worksheet. + """ + target_url = url or worksheet_entry.GetEditLink().href + return self.Put(worksheet_entry, target_url, + converter=gdata.spreadsheet.SpreadsheetsWorksheetFromString) + + def DeleteWorksheet(self, worksheet_entry=None, url=None): + """Removes the desired worksheet from the spreadsheet + + Args: + worksheet_entry: SpreadsheetWorksheet (optional) The worksheet to + be deleted. If this is none, then the DELETE reqest is sent to + the url specified in the url parameter. + url: str (optaional) The URL to which the DELETE request should be + sent. If left as None, the worksheet's edit URL is used. + + Returns: + True if the worksheet was deleted successfully. 
+ """ + if url: + target_url = url + else: + target_url = worksheet_entry.GetEditLink().href + return self.Delete(target_url) + + def GetCellsFeed(self, key, wksht_id='default', cell=None, query=None, + visibility='private', projection='full'): + """Gets a cells feed or a specific entry if a cell is defined + Args: + key: string The spreadsheet key defined in /ccc?key= + wksht_id: string The id for a specific worksheet entry + cell: string (optional) The R1C1 address of the cell + query: DocumentQuery (optional) Query parameters + + Returns: + If there is no cell, then a SpreadsheetsCellsFeed. + If there is a cell, then a SpreadsheetsCell. + """ + + uri = ('http://%s/feeds/cells/%s/%s/%s/%s' + % (self.server, key, wksht_id, visibility, projection)) + + if cell != None: + uri = '%s/%s' % (uri, cell) + + if query != None: + query.feed = uri + uri = query.ToUri() + + if cell: + return self.Get(uri, + converter=gdata.spreadsheet.SpreadsheetsCellFromString) + else: + return self.Get(uri, + converter=gdata.spreadsheet.SpreadsheetsCellsFeedFromString) + + def GetListFeed(self, key, wksht_id='default', row_id=None, query=None, + visibility='private', projection='full'): + """Gets a list feed or a specific entry if a row_id is defined + Args: + key: string The spreadsheet key defined in /ccc?key= + wksht_id: string The id for a specific worksheet entry + row_id: string (optional) The row_id of a row in the list + query: DocumentQuery (optional) Query parameters + + Returns: + If there is no row_id, then a SpreadsheetsListFeed. + If there is a row_id, then a SpreadsheetsList. + """ + + uri = ('http://%s/feeds/list/%s/%s/%s/%s' + % (self.server, key, wksht_id, visibility, projection)) + + if row_id is not None: + uri = '%s/%s' % (uri, row_id) + + if query is not None: + query.feed = uri + uri = query.ToUri() + + if row_id: + return self.Get(uri, + converter=gdata.spreadsheet.SpreadsheetsListFromString) + else: + return self.Get(uri, + converter=gdata.spreadsheet.SpreadsheetsListFeedFromString) + + def UpdateCell(self, row, col, inputValue, key, wksht_id='default'): + """Updates an existing cell. + + Args: + row: int The row the cell to be editted is in + col: int The column the cell to be editted is in + inputValue: str the new value of the cell + key: str The key of the spreadsheet in which this cell resides. + wksht_id: str The ID of the worksheet which holds this cell. + + Returns: + The updated cell entry + """ + row = str(row) + col = str(col) + # make the new cell + new_cell = gdata.spreadsheet.Cell(row=row, col=col, inputValue=inputValue) + # get the edit uri and PUT + cell = 'R%sC%s' % (row, col) + entry = self.GetCellsFeed(key, wksht_id, cell) + for a_link in entry.link: + if a_link.rel == 'edit': + entry.cell = new_cell + return self.Put(entry, a_link.href, + converter=gdata.spreadsheet.SpreadsheetsCellFromString) + + def _GenerateCellsBatchUrl(self, spreadsheet_key, worksheet_id): + return ('http://spreadsheets.google.com/feeds/cells/%s/%s/' + 'private/full/batch' % (spreadsheet_key, worksheet_id)) + + def ExecuteBatch(self, batch_feed, url=None, spreadsheet_key=None, + worksheet_id=None, + converter=gdata.spreadsheet.SpreadsheetsCellsFeedFromString): + """Sends a batch request feed to the server. + + The batch request needs to be sent to the batch URL for a particular + worksheet. You can specify the worksheet by providing the spreadsheet_key + and worksheet_id, or by sending the URL from the cells feed's batch link. 
+ + Args: + batch_feed: gdata.spreadsheet.SpreadsheetsCellFeed A feed containing + BatchEntry elements which contain the desired CRUD operation and + any necessary data to modify a cell. + url: str (optional) The batch URL for the cells feed to which these + changes should be applied. This can be found by calling + cells_feed.GetBatchLink().href. + spreadsheet_key: str (optional) Used to generate the batch request URL + if the url argument is None. If using the spreadsheet key to + generate the URL, the worksheet id is also required. + worksheet_id: str (optional) Used if the url is not provided, it is + oart of the batch feed target URL. This is used with the spreadsheet + key. + converter: Function (optional) Function to be executed on the server's + response. This function should take one string as a parameter. The + default value is SpreadsheetsCellsFeedFromString which will turn the result + into a gdata.spreadsheet.SpreadsheetsCellsFeed object. + + Returns: + A gdata.BatchFeed containing the results. + """ + + if url is None: + url = self._GenerateCellsBatchUrl(spreadsheet_key, worksheet_id) + return self.Post(batch_feed, url, converter=converter) + + def InsertRow(self, row_data, key, wksht_id='default'): + """Inserts a new row with the provided data + + Args: + uri: string The post uri of the list feed + row_data: dict A dictionary of column header to row data + + Returns: + The inserted row + """ + new_entry = gdata.spreadsheet.SpreadsheetsList() + for k, v in row_data.iteritems(): + new_custom = gdata.spreadsheet.Custom() + new_custom.column = k + new_custom.text = v + new_entry.custom[new_custom.column] = new_custom + # Generate the post URL for the worksheet which will receive the new entry. + post_url = 'http://spreadsheets.google.com/feeds/list/%s/%s/private/full'%( + key, wksht_id) + return self.Post(new_entry, post_url, + converter=gdata.spreadsheet.SpreadsheetsListFromString) + + def UpdateRow(self, entry, new_row_data): + """Updates a row with the provided data + + If you want to add additional information to a row, it is often + easier to change the values in entry.custom, then use the Put + method instead of UpdateRow. This UpdateRow method will replace + the contents of the row with new_row_data - it will change all columns + not just the columns specified in the new_row_data dict. 
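+
+    Example (editorial sketch; 'key' is a placeholder spreadsheet key and the
+    column headers shown are illustrative):
+
+      row = client.GetListFeed(key).entry[0]
+      updated = client.UpdateRow(row, {'name': 'Bob',
+                                       'email': 'bob2@example.com'})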
+ + Args: + entry: gdata.spreadsheet.SpreadsheetsList The entry to be updated + new_row_data: dict A dictionary of column header to row data + + Returns: + The updated row + """ + entry.custom = {} + for k, v in new_row_data.iteritems(): + new_custom = gdata.spreadsheet.Custom() + new_custom.column = k + new_custom.text = v + entry.custom[k] = new_custom + for a_link in entry.link: + if a_link.rel == 'edit': + return self.Put(entry, a_link.href, + converter=gdata.spreadsheet.SpreadsheetsListFromString) + + def DeleteRow(self, entry): + """Deletes a row, the provided entry + + Args: + entry: gdata.spreadsheet.SpreadsheetsList The row to be deleted + + Returns: + The delete response + """ + for a_link in entry.link: + if a_link.rel == 'edit': + return self.Delete(a_link.href) + + +class DocumentQuery(gdata.service.Query): + + def _GetTitleQuery(self): + return self['title'] + + def _SetTitleQuery(self, document_query): + self['title'] = document_query + + title = property(_GetTitleQuery, _SetTitleQuery, + doc="""The title query parameter""") + + def _GetTitleExactQuery(self): + return self['title-exact'] + + def _SetTitleExactQuery(self, document_query): + self['title-exact'] = document_query + + title_exact = property(_GetTitleExactQuery, _SetTitleExactQuery, + doc="""The title-exact query parameter""") + + +class CellQuery(gdata.service.Query): + + def _GetMinRowQuery(self): + return self['min-row'] + + def _SetMinRowQuery(self, cell_query): + self['min-row'] = cell_query + + min_row = property(_GetMinRowQuery, _SetMinRowQuery, + doc="""The min-row query parameter""") + + def _GetMaxRowQuery(self): + return self['max-row'] + + def _SetMaxRowQuery(self, cell_query): + self['max-row'] = cell_query + + max_row = property(_GetMaxRowQuery, _SetMaxRowQuery, + doc="""The max-row query parameter""") + + def _GetMinColQuery(self): + return self['min-col'] + + def _SetMinColQuery(self, cell_query): + self['min-col'] = cell_query + + min_col = property(_GetMinColQuery, _SetMinColQuery, + doc="""The min-col query parameter""") + + def _GetMaxColQuery(self): + return self['max-col'] + + def _SetMaxColQuery(self, cell_query): + self['max-col'] = cell_query + + max_col = property(_GetMaxColQuery, _SetMaxColQuery, + doc="""The max-col query parameter""") + + def _GetRangeQuery(self): + return self['range'] + + def _SetRangeQuery(self, cell_query): + self['range'] = cell_query + + range = property(_GetRangeQuery, _SetRangeQuery, + doc="""The range query parameter""") + + def _GetReturnEmptyQuery(self): + return self['return-empty'] + + def _SetReturnEmptyQuery(self, cell_query): + self['return-empty'] = cell_query + + return_empty = property(_GetReturnEmptyQuery, _SetReturnEmptyQuery, + doc="""The return-empty query parameter""") + + +class ListQuery(gdata.service.Query): + + def _GetSpreadsheetQuery(self): + return self['sq'] + + def _SetSpreadsheetQuery(self, list_query): + self['sq'] = list_query + + sq = property(_GetSpreadsheetQuery, _SetSpreadsheetQuery, + doc="""The sq query parameter""") + + def _GetOrderByQuery(self): + return self['orderby'] + + def _SetOrderByQuery(self, list_query): + self['orderby'] = list_query + + orderby = property(_GetOrderByQuery, _SetOrderByQuery, + doc="""The orderby query parameter""") + + def _GetReverseQuery(self): + return self['reverse'] + + def _SetReverseQuery(self, list_query): + self['reverse'] = list_query + + reverse = property(_GetReverseQuery, _SetReverseQuery, + doc="""The reverse query parameter""") diff --git a/gam/gdata/spreadsheet/text_db.py 
b/gam/gdata/spreadsheet/text_db.py new file mode 100755 index 00000000000..a8de5463c2b --- /dev/null +++ b/gam/gdata/spreadsheet/text_db.py @@ -0,0 +1,559 @@ +#!/usr/bin/python +# +# Copyright Google 2007-2008, all rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +import StringIO +import gdata +import gdata.service +import gdata.spreadsheet +import gdata.spreadsheet.service +import gdata.docs +import gdata.docs.service + + +"""Make the Google Documents API feel more like using a database. + +This module contains a client and other classes which make working with the +Google Documents List Data API and the Google Spreadsheets Data API look a +bit more like working with a heirarchical database. Using the DatabaseClient, +you can create or find spreadsheets and use them like a database, with +worksheets representing tables and rows representing records. + +Example Usage: +# Create a new database, a new table, and add records. +client = gdata.spreadsheet.text_db.DatabaseClient(username='jo@example.com', + password='12345') +database = client.CreateDatabase('My Text Database') +table = database.CreateTable('addresses', ['name','email', + 'phonenumber', 'mailingaddress']) +record = table.AddRecord({'name':'Bob', 'email':'bob@example.com', + 'phonenumber':'555-555-1234', 'mailingaddress':'900 Imaginary St.'}) + +# Edit a record +record.content['email'] = 'bob2@example.com' +record.Push() + +# Delete a table +table.Delete + +Warnings: +Care should be exercised when using this module on spreadsheets +which contain formulas. This module treats all rows as containing text and +updating a row will overwrite any formula with the output of the formula. +The intended use case is to allow easy storage of text data in a spreadsheet. + + Error: Domain specific extension of Exception. + BadCredentials: Error raised is username or password was incorrect. + CaptchaRequired: Raised if a login attempt failed and a CAPTCHA challenge + was issued. + DatabaseClient: Communicates with Google Docs APIs servers. + Database: Represents a spreadsheet and interacts with tables. + Table: Represents a worksheet and interacts with records. + RecordResultSet: A list of records in a table. + Record: Represents a row in a worksheet allows manipulation of text data. +""" + + +__author__ = 'api.jscudder (Jeffrey Scudder)' + + +class Error(Exception): + pass + + +class BadCredentials(Error): + pass + + +class CaptchaRequired(Error): + pass + + +class DatabaseClient(object): + """Allows creation and finding of Google Spreadsheets databases. + + The DatabaseClient simplifies the process of creating and finding Google + Spreadsheets and will talk to both the Google Spreadsheets API and the + Google Documents List API. + """ + + def __init__(self, username=None, password=None): + """Constructor for a Database Client. + + If the username and password are present, the constructor will contact + the Google servers to authenticate. 
+ + Args: + username: str (optional) Example: jo@example.com + password: str (optional) + """ + self.__docs_client = gdata.docs.service.DocsService() + self.__spreadsheets_client = ( + gdata.spreadsheet.service.SpreadsheetsService()) + self.SetCredentials(username, password) + + def SetCredentials(self, username, password): + """Attempts to log in to Google APIs using the provided credentials. + + If the username or password are None, the client will not request auth + tokens. + + Args: + username: str (optional) Example: jo@example.com + password: str (optional) + """ + self.__docs_client.email = username + self.__docs_client.password = password + self.__spreadsheets_client.email = username + self.__spreadsheets_client.password = password + if username and password: + try: + self.__docs_client.ProgrammaticLogin() + self.__spreadsheets_client.ProgrammaticLogin() + except gdata.service.CaptchaRequired: + raise CaptchaRequired('Please visit https://www.google.com/accounts/' + 'DisplayUnlockCaptcha to unlock your account.') + except gdata.service.BadAuthentication: + raise BadCredentials('Username or password incorrect.') + + def CreateDatabase(self, name): + """Creates a new Google Spreadsheet with the desired name. + + Args: + name: str The title for the spreadsheet. + + Returns: + A Database instance representing the new spreadsheet. + """ + # Create a Google Spreadsheet to form the foundation of this database. + # Spreadsheet is created by uploading a file to the Google Documents + # List API. + virtual_csv_file = StringIO.StringIO(',,,') + virtual_media_source = gdata.MediaSource(file_handle=virtual_csv_file, content_type='text/csv', content_length=3) + db_entry = self.__docs_client.UploadSpreadsheet(virtual_media_source, name) + return Database(spreadsheet_entry=db_entry, database_client=self) + + def GetDatabases(self, spreadsheet_key=None, name=None): + """Finds spreadsheets which have the unique key or title. + + If querying on the spreadsheet_key there will be at most one result, but + searching by name could yield multiple results. + + Args: + spreadsheet_key: str The unique key for the spreadsheet, this + usually in the the form 'pk23...We' or 'o23...423.12,,,3'. + name: str The title of the spreadsheets. + + Returns: + A list of Database objects representing the desired spreadsheets. + """ + if spreadsheet_key: + db_entry = self.__docs_client.GetDocumentListEntry( + r'/feeds/documents/private/full/spreadsheet%3A' + spreadsheet_key) + return [Database(spreadsheet_entry=db_entry, database_client=self)] + else: + title_query = gdata.docs.service.DocumentQuery() + title_query['title'] = name + db_feed = self.__docs_client.QueryDocumentListFeed(title_query.ToUri()) + matching_databases = [] + for entry in db_feed.entry: + matching_databases.append(Database(spreadsheet_entry=entry, + database_client=self)) + return matching_databases + + def _GetDocsClient(self): + return self.__docs_client + + def _GetSpreadsheetsClient(self): + return self.__spreadsheets_client + + +class Database(object): + """Provides interface to find and create tables. + + The database represents a Google Spreadsheet. + """ + + def __init__(self, spreadsheet_entry=None, database_client=None): + """Constructor for a database object. + + Args: + spreadsheet_entry: gdata.docs.DocumentListEntry The + Atom entry which represents the Google Spreadsheet. The + spreadsheet's key is extracted from the entry and stored as a + member. 
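Beyond the creation example in the module docstring, an existing database can also be located after the fact. A short sketch using GetDatabases; the title, key and credentials are placeholders:

import gdata.spreadsheet.text_db

client = gdata.spreadsheet.text_db.DatabaseClient(username='jo@example.com',
                                                  password='12345')
# Search by title; this may match more than one spreadsheet.
databases = client.GetDatabases(name='My Text Database')
if databases:
    database = databases[0]
# Alternatively, look one up directly by its unique key.
# databases = client.GetDatabases(spreadsheet_key='pk23...We')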
+ database_client: DatabaseClient A client which can talk to the + Google Spreadsheets servers to perform operations on worksheets + within this spreadsheet. + """ + self.entry = spreadsheet_entry + if self.entry: + id_parts = spreadsheet_entry.id.text.split('/') + self.spreadsheet_key = id_parts[-1].replace('spreadsheet%3A', '') + self.client = database_client + + def CreateTable(self, name, fields=None): + """Add a new worksheet to this spreadsheet and fill in column names. + + Args: + name: str The title of the new worksheet. + fields: list of strings The column names which are placed in the + first row of this worksheet. These names are converted into XML + tags by the server. To avoid changes during the translation + process I recommend using all lowercase alphabetic names. For + example ['somelongname', 'theothername'] + + Returns: + Table representing the newly created worksheet. + """ + worksheet = self.client._GetSpreadsheetsClient().AddWorksheet(title=name, + row_count=1, col_count=len(fields), key=self.spreadsheet_key) + return Table(name=name, worksheet_entry=worksheet, + database_client=self.client, + spreadsheet_key=self.spreadsheet_key, fields=fields) + + def GetTables(self, worksheet_id=None, name=None): + """Searches for a worksheet with the specified ID or name. + + The list of results should have one table at most, or no results + if the id or name were not found. + + Args: + worksheet_id: str The ID of the worksheet, example: 'od6' + name: str The title of the worksheet. + + Returns: + A list of length 0 or 1 containing the desired Table. A list is returned + to make this method feel like GetDatabases and GetRecords. + """ + if worksheet_id: + worksheet_entry = self.client._GetSpreadsheetsClient().GetWorksheetsFeed( + self.spreadsheet_key, wksht_id=worksheet_id) + return [Table(name=worksheet_entry.title.text, + worksheet_entry=worksheet_entry, database_client=self.client, + spreadsheet_key=self.spreadsheet_key)] + else: + matching_tables = [] + query = None + if name: + query = gdata.spreadsheet.service.DocumentQuery() + query.title = name + + worksheet_feed = self.client._GetSpreadsheetsClient().GetWorksheetsFeed( + self.spreadsheet_key, query=query) + for entry in worksheet_feed.entry: + matching_tables.append(Table(name=entry.title.text, + worksheet_entry=entry, database_client=self.client, + spreadsheet_key=self.spreadsheet_key)) + return matching_tables + + def Delete(self): + """Deletes the entire database spreadsheet from Google Spreadsheets.""" + entry = self.client._GetDocsClient().Get( + r'http://docs.google.com/feeds/documents/private/full/spreadsheet%3A' + + self.spreadsheet_key) + self.client._GetDocsClient().Delete(entry.GetEditLink().href) + + +class Table(object): + + def __init__(self, name=None, worksheet_entry=None, database_client=None, + spreadsheet_key=None, fields=None): + self.name = name + self.entry = worksheet_entry + id_parts = worksheet_entry.id.text.split('/') + self.worksheet_id = id_parts[-1] + self.spreadsheet_key = spreadsheet_key + self.client = database_client + self.fields = fields or [] + if fields: + self.SetFields(fields) + + def LookupFields(self): + """Queries to find the column names in the first row of the worksheet. + + Useful when you have retrieved the table from the server and you don't + know the column names. 
+ """ + if self.entry: + first_row_contents = [] + query = gdata.spreadsheet.service.CellQuery() + query.max_row = '1' + query.min_row = '1' + feed = self.client._GetSpreadsheetsClient().GetCellsFeed( + self.spreadsheet_key, wksht_id=self.worksheet_id, query=query) + for entry in feed.entry: + first_row_contents.append(entry.content.text) + # Get the next set of cells if needed. + next_link = feed.GetNextLink() + while next_link: + feed = self.client._GetSpreadsheetsClient().Get(next_link.href, + converter=gdata.spreadsheet.SpreadsheetsCellsFeedFromString) + for entry in feed.entry: + first_row_contents.append(entry.content.text) + next_link = feed.GetNextLink() + # Convert the contents of the cells to valid headers. + self.fields = ConvertStringsToColumnHeaders(first_row_contents) + + def SetFields(self, fields): + """Changes the contents of the cells in the first row of this worksheet. + + Args: + fields: list of strings The names in the list comprise the + first row of the worksheet. These names are converted into XML + tags by the server. To avoid changes during the translation + process I recommend using all lowercase alphabetic names. For + example ['somelongname', 'theothername'] + """ + # TODO: If the table already had fields, we might want to clear out the, + # current column headers. + self.fields = fields + i = 0 + for column_name in fields: + i = i + 1 + # TODO: speed this up by using a batch request to update cells. + self.client._GetSpreadsheetsClient().UpdateCell(1, i, column_name, + self.spreadsheet_key, self.worksheet_id) + + def Delete(self): + """Deletes this worksheet from the spreadsheet.""" + worksheet = self.client._GetSpreadsheetsClient().GetWorksheetsFeed( + self.spreadsheet_key, wksht_id=self.worksheet_id) + self.client._GetSpreadsheetsClient().DeleteWorksheet( + worksheet_entry=worksheet) + + def AddRecord(self, data): + """Adds a new row to this worksheet. + + Args: + data: dict of strings Mapping of string values to column names. + + Returns: + Record which represents this row of the spreadsheet. + """ + new_row = self.client._GetSpreadsheetsClient().InsertRow(data, + self.spreadsheet_key, wksht_id=self.worksheet_id) + return Record(content=data, row_entry=new_row, + spreadsheet_key=self.spreadsheet_key, worksheet_id=self.worksheet_id, + database_client=self.client) + + def GetRecord(self, row_id=None, row_number=None): + """Gets a single record from the worksheet based on row ID or number. + + Args: + row_id: The ID for the individual row. + row_number: str or int The position of the desired row. Numbering + begins at 1, which refers to the second row in the worksheet since + the first row is used for column names. + + Returns: + Record for the desired row. 
+ """ + if row_id: + row_entry = self.client._GetSpreadsheetsClient().GetListFeed( + self.spreadsheet_key, wksht_id=self.worksheet_id, row_id=row_id) + return Record(content=None, row_entry=row_entry, + spreadsheet_key=self.spreadsheet_key, + worksheet_id=self.worksheet_id, database_client=self.client) + else: + row_query = gdata.spreadsheet.service.ListQuery() + row_query.start_index = str(row_number) + row_query.max_results = '1' + row_feed = self.client._GetSpreadsheetsClient().GetListFeed( + self.spreadsheet_key, wksht_id=self.worksheet_id, query=row_query) + if len(row_feed.entry) >= 1: + return Record(content=None, row_entry=row_feed.entry[0], + spreadsheet_key=self.spreadsheet_key, + worksheet_id=self.worksheet_id, database_client=self.client) + else: + return None + + def GetRecords(self, start_row, end_row): + """Gets all rows between the start and end row numbers inclusive. + + Args: + start_row: str or int + end_row: str or int + + Returns: + RecordResultSet for the desired rows. + """ + start_row = int(start_row) + end_row = int(end_row) + max_rows = end_row - start_row + 1 + row_query = gdata.spreadsheet.service.ListQuery() + row_query.start_index = str(start_row) + row_query.max_results = str(max_rows) + rows_feed = self.client._GetSpreadsheetsClient().GetListFeed( + self.spreadsheet_key, wksht_id=self.worksheet_id, query=row_query) + return RecordResultSet(rows_feed, self.client, self.spreadsheet_key, + self.worksheet_id) + + def FindRecords(self, query_string): + """Performs a query against the worksheet to find rows which match. + + For details on query string syntax see the section on sq under + http://code.google.com/apis/spreadsheets/reference.html#list_Parameters + + Args: + query_string: str Examples: 'name == john' to find all rows with john + in the name column, '(cost < 19.50 and name != toy) or cost > 500' + + Returns: + RecordResultSet with the first group of matches. + """ + row_query = gdata.spreadsheet.service.ListQuery() + row_query.sq = query_string + matching_feed = self.client._GetSpreadsheetsClient().GetListFeed( + self.spreadsheet_key, wksht_id=self.worksheet_id, query=row_query) + return RecordResultSet(matching_feed, self.client, + self.spreadsheet_key, self.worksheet_id) + + +class RecordResultSet(list): + """A collection of rows which allows fetching of the next set of results. + + The server may not send all rows in the requested range because there are + too many. Using this result set you can access the first set of results + as if it is a list, then get the next batch (if there are more results) by + calling GetNext(). + """ + + def __init__(self, feed, client, spreadsheet_key, worksheet_id): + self.client = client + self.spreadsheet_key = spreadsheet_key + self.worksheet_id = worksheet_id + self.feed = feed + list(self) + for entry in self.feed.entry: + self.append(Record(content=None, row_entry=entry, + spreadsheet_key=spreadsheet_key, worksheet_id=worksheet_id, + database_client=client)) + + def GetNext(self): + """Fetches the next batch of rows in the result set. + + Returns: + A new RecordResultSet. + """ + next_link = self.feed.GetNextLink() + if next_link and next_link.href: + new_feed = self.client._GetSpreadsheetsClient().Get(next_link.href, + converter=gdata.spreadsheet.SpreadsheetsListFeedFromString) + return RecordResultSet(new_feed, self.client, self.spreadsheet_key, + self.worksheet_id) + + +class Record(object): + """Represents one row in a worksheet and provides a dictionary of values. 
+ + Attributes: + custom: dict Represents the contents of the row with cell values mapped + to column headers. + """ + + def __init__(self, content=None, row_entry=None, spreadsheet_key=None, + worksheet_id=None, database_client=None): + """Constructor for a record. + + Args: + content: dict of strings Mapping of string values to column names. + row_entry: gdata.spreadsheet.SpreadsheetsList The Atom entry + representing this row in the worksheet. + spreadsheet_key: str The ID of the spreadsheet in which this row + belongs. + worksheet_id: str The ID of the worksheet in which this row belongs. + database_client: DatabaseClient The client which can be used to talk + the Google Spreadsheets server to edit this row. + """ + self.entry = row_entry + self.spreadsheet_key = spreadsheet_key + self.worksheet_id = worksheet_id + if row_entry: + self.row_id = row_entry.id.text.split('/')[-1] + else: + self.row_id = None + self.client = database_client + self.content = content or {} + if not content: + self.ExtractContentFromEntry(row_entry) + + def ExtractContentFromEntry(self, entry): + """Populates the content and row_id based on content of the entry. + + This method is used in the Record's contructor. + + Args: + entry: gdata.spreadsheet.SpreadsheetsList The Atom entry + representing this row in the worksheet. + """ + self.content = {} + if entry: + self.row_id = entry.id.text.split('/')[-1] + for label, custom in entry.custom.iteritems(): + self.content[label] = custom.text + + def Push(self): + """Send the content of the record to spreadsheets to edit the row. + + All items in the content dictionary will be sent. Items which have been + removed from the content may remain in the row. The content member + of the record will not be modified so additional fields in the row + might be absent from this local copy. + """ + self.entry = self.client._GetSpreadsheetsClient().UpdateRow(self.entry, self.content) + + def Pull(self): + """Query Google Spreadsheets to get the latest data from the server. + + Fetches the entry for this row and repopulates the content dictionary + with the data found in the row. + """ + if self.row_id: + self.entry = self.client._GetSpreadsheetsClient().GetListFeed( + self.spreadsheet_key, wksht_id=self.worksheet_id, row_id=self.row_id) + self.ExtractContentFromEntry(self.entry) + + def Delete(self): + self.client._GetSpreadsheetsClient().DeleteRow(self.entry) + + +def ConvertStringsToColumnHeaders(proposed_headers): + """Converts a list of strings to column names which spreadsheets accepts. + + When setting values in a record, the keys which represent column names must + fit certain rules. They are all lower case, contain no spaces or special + characters. If two columns have the same name after being sanitized, the + columns further to the right have _2, _3 _4, etc. appended to them. + + If there are column names which consist of all special characters, or if + the column header is blank, an obfuscated value will be used for a column + name. This method does not handle blank column names or column names with + only special characters. + """ + headers = [] + for input_string in proposed_headers: + # TODO: probably a more efficient way to do this. Perhaps regex. + sanitized = input_string.lower().replace('_', '').replace( + ':', '').replace(' ', '') + # When the same sanitized header appears multiple times in the first row + # of a spreadsheet, _n is appended to the name to make it unique. 
+ header_count = headers.count(sanitized) + if header_count > 0: + headers.append('%s_%i' % (sanitized, header_count+1)) + else: + headers.append(sanitized) + return headers diff --git a/gam/gdata/spreadsheets/__init__.py b/gam/gdata/spreadsheets/__init__.py new file mode 100755 index 00000000000..e69de29bb2d diff --git a/gam/gdata/spreadsheets/client.py b/gam/gdata/spreadsheets/client.py new file mode 100755 index 00000000000..e00b75c2ef2 --- /dev/null +++ b/gam/gdata/spreadsheets/client.py @@ -0,0 +1,451 @@ +#!/usr/bin/env python +# +# Copyright (C) 2009 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +"""Contains a client to communicate with the Google Spreadsheets servers. + +For documentation on the Spreadsheets API, see: +http://code.google.com/apis/spreadsheets/ +""" + + +__author__ = 'j.s@google.com (Jeff Scudder)' + + +import gdata.client +import gdata.gauth +import gdata.spreadsheets.data +import atom.data +import atom.http_core + + +SPREADSHEETS_URL = ('http://spreadsheets.google.com/feeds/spreadsheets' + '/private/full') +WORKSHEETS_URL = ('http://spreadsheets.google.com/feeds/worksheets/' + '%s/private/full') +WORKSHEET_URL = ('http://spreadsheets.google.com/feeds/worksheets/' + '%s/private/full/%s') +TABLES_URL = 'http://spreadsheets.google.com/feeds/%s/tables' +RECORDS_URL = 'http://spreadsheets.google.com/feeds/%s/records/%s' +RECORD_URL = 'http://spreadsheets.google.com/feeds/%s/records/%s/%s' + + +class SpreadsheetsClient(gdata.client.GDClient): + api_version = '3' + auth_service = 'wise' + auth_scopes = gdata.gauth.AUTH_SCOPES['wise'] + + def get_spreadsheets(self, auth_token=None, + desired_class=gdata.spreadsheets.data.SpreadsheetsFeed, + **kwargs): + """Obtains a feed with the spreadsheets belonging to the current user. + + Args: + auth_token: An object which sets the Authorization HTTP header in its + modify_request method. Recommended classes include + gdata.gauth.ClientLoginToken and gdata.gauth.AuthSubToken + among others. Represents the current user. Defaults to None + and if None, this method will look for a value in the + auth_token member of SpreadsheetsClient. + desired_class: class descended from atom.core.XmlElement to which a + successful response should be converted. If there is no + converter function specified (converter=None) then the + desired_class will be used in calling the + atom.core.parse function. If neither + the desired_class nor the converter is specified, an + HTTP reponse object will be returned. Defaults to + gdata.spreadsheets.data.SpreadsheetsFeed. + """ + return self.get_feed(SPREADSHEETS_URL, auth_token=auth_token, + desired_class=desired_class, **kwargs) + + GetSpreadsheets = get_spreadsheets + + def get_worksheets(self, spreadsheet_key, auth_token=None, + desired_class=gdata.spreadsheets.data.WorksheetsFeed, + **kwargs): + """Finds the worksheets within a given spreadsheet. + + Args: + spreadsheet_key: str, The unique ID of this containing spreadsheet. 
This + can be the ID from the URL or as provided in a + Spreadsheet entry. + auth_token: An object which sets the Authorization HTTP header in its + modify_request method. Recommended classes include + gdata.gauth.ClientLoginToken and gdata.gauth.AuthSubToken + among others. Represents the current user. Defaults to None + and if None, this method will look for a value in the + auth_token member of SpreadsheetsClient. + desired_class: class descended from atom.core.XmlElement to which a + successful response should be converted. If there is no + converter function specified (converter=None) then the + desired_class will be used in calling the + atom.core.parse function. If neither + the desired_class nor the converter is specified, an + HTTP reponse object will be returned. Defaults to + gdata.spreadsheets.data.WorksheetsFeed. + """ + return self.get_feed(WORKSHEETS_URL % spreadsheet_key, + auth_token=auth_token, desired_class=desired_class, + **kwargs) + + GetWorksheets = get_worksheets + + def add_worksheet(self, spreadsheet_key, title, rows, cols, + auth_token=None, **kwargs): + """Creates a new worksheet entry in the spreadsheet. + + Args: + spreadsheet_key: str, The unique ID of this containing spreadsheet. This + can be the ID from the URL or as provided in a + Spreadsheet entry. + title: str, The title to be used in for the worksheet. + rows: str or int, The number of rows this worksheet should start with. + cols: str or int, The number of columns this worksheet should start with. + auth_token: An object which sets the Authorization HTTP header in its + modify_request method. Recommended classes include + gdata.gauth.ClientLoginToken and gdata.gauth.AuthSubToken + among others. Represents the current user. Defaults to None + and if None, this method will look for a value in the + auth_token member of SpreadsheetsClient. + """ + new_worksheet = gdata.spreadsheets.data.WorksheetEntry( + title=atom.data.Title(text=title), + row_count=gdata.spreadsheets.data.RowCount(text=str(rows)), + col_count=gdata.spreadsheets.data.ColCount(text=str(cols))) + return self.post(new_worksheet, WORKSHEETS_URL % spreadsheet_key, + auth_token=auth_token, **kwargs) + + AddWorksheet = add_worksheet + + def get_worksheet(self, spreadsheet_key, worksheet_id, + desired_class=gdata.spreadsheets.data.WorksheetEntry, + auth_token=None, **kwargs): + """Retrieves a single worksheet. + + Args: + spreadsheet_key: str, The unique ID of this containing spreadsheet. This + can be the ID from the URL or as provided in a + Spreadsheet entry. + worksheet_id: str, The unique ID for the worksheet withing the desired + spreadsheet. + auth_token: An object which sets the Authorization HTTP header in its + modify_request method. Recommended classes include + gdata.gauth.ClientLoginToken and gdata.gauth.AuthSubToken + among others. Represents the current user. Defaults to None + and if None, this method will look for a value in the + auth_token member of SpreadsheetsClient. + desired_class: class descended from atom.core.XmlElement to which a + successful response should be converted. If there is no + converter function specified (converter=None) then the + desired_class will be used in calling the + atom.core.parse function. If neither + the desired_class nor the converter is specified, an + HTTP reponse object will be returned. Defaults to + gdata.spreadsheets.data.WorksheetEntry. 
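A sketch of basic SpreadsheetsClient calls; client_login comes from the gdata.client.GDClient base class, and the credentials, source string and worksheet title are placeholders:

import gdata.spreadsheets.client

client = gdata.spreadsheets.client.SpreadsheetsClient()
# ClientLogin-style auth; any auth_token accepted by gdata.client works too.
client.client_login('jo@example.com', '12345', 'example-app')

# List the user's spreadsheets and take the key from the first entry's id.
feed = client.get_spreadsheets()
spreadsheet_key = feed.entry[0].id.text.split('/')[-1]

# Enumerate worksheets, then add one with 10 rows and 4 columns.
worksheets = client.get_worksheets(spreadsheet_key)
new_sheet = client.add_worksheet(spreadsheet_key, 'expenses', rows=10, cols=4)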
+ + """ + return self.get_entry(WORKSHEET_URL % (spreadsheet_key, worksheet_id,), + auth_token=auth_token, desired_class=desired_class, + **kwargs) + + GetWorksheet = get_worksheet + + def add_table(self, spreadsheet_key, title, summary, worksheet_name, + header_row, num_rows, start_row, insertion_mode, + column_headers, auth_token=None, **kwargs): + """Creates a new table within the worksheet. + + Args: + spreadsheet_key: str, The unique ID of this containing spreadsheet. This + can be the ID from the URL or as provided in a + Spreadsheet entry. + title: str, The title for the new table within a worksheet. + summary: str, A description of the table. + worksheet_name: str The name of the worksheet in which this table + should live. + header_row: int or str, The number of the row in the worksheet which + will contain the column names for the data in this table. + num_rows: int or str, The number of adjacent rows in this table. + start_row: int or str, The number of the row at which the data begins. + insertion_mode: str + column_headers: dict of strings, maps the column letters (A, B, C) to + the desired name which will be viewable in the + worksheet. + auth_token: An object which sets the Authorization HTTP header in its + modify_request method. Recommended classes include + gdata.gauth.ClientLoginToken and gdata.gauth.AuthSubToken + among others. Represents the current user. Defaults to None + and if None, this method will look for a value in the + auth_token member of SpreadsheetsClient. + """ + data = gdata.spreadsheets.data.Data( + insertion_mode=insertion_mode, num_rows=str(num_rows), + start_row=str(start_row)) + for index, name in column_headers.iteritems(): + data.column.append(gdata.spreadsheets.data.Column( + index=index, name=name)) + new_table = gdata.spreadsheets.data.Table( + title=atom.data.Title(text=title), summary=atom.data.Summary(summary), + worksheet=gdata.spreadsheets.data.Worksheet(name=worksheet_name), + header=gdata.spreadsheets.data.Header(row=str(header_row)), data=data) + return self.post(new_table, TABLES_URL % spreadsheet_key, + auth_token=auth_token, **kwargs) + + AddTable = add_table + + def get_tables(self, spreadsheet_key, + desired_class=gdata.spreadsheets.data.TablesFeed, + auth_token=None, **kwargs): + """Retrieves a feed listing the tables in this spreadsheet. + + Args: + spreadsheet_key: str, The unique ID of this containing spreadsheet. This + can be the ID from the URL or as provided in a + Spreadsheet entry. + desired_class: class descended from atom.core.XmlElement to which a + successful response should be converted. If there is no + converter function specified (converter=None) then the + desired_class will be used in calling the + atom.core.parse function. If neither + the desired_class nor the converter is specified, an + HTTP reponse object will be returned. Defaults to + gdata.spreadsheets.data.TablesFeed. + auth_token: An object which sets the Authorization HTTP header in its + modify_request method. Recommended classes include + gdata.gauth.ClientLoginToken and gdata.gauth.AuthSubToken + among others. Represents the current user. Defaults to None + and if None, this method will look for a value in the + auth_token member of SpreadsheetsClient. + """ + return self.get_feed(TABLES_URL % spreadsheet_key, + desired_class=desired_class, auth_token=auth_token, + **kwargs) + + GetTables = get_tables + + def add_record(self, spreadsheet_key, table_id, fields, + title=None, auth_token=None, **kwargs): + """Adds a new row to the table. 
+ + Args: + spreadsheet_key: str, The unique ID of this containing spreadsheet. This + can be the ID from the URL or as provided in a + Spreadsheet entry. + table_id: str, The ID of the table within the worksheet which should + receive this new record. The table ID can be found using the + get_table_id method of a gdata.spreadsheets.data.Table. + fields: dict of strings mapping column names to values. + title: str, optional The title for this row. + auth_token: An object which sets the Authorization HTTP header in its + modify_request method. Recommended classes include + gdata.gauth.ClientLoginToken and gdata.gauth.AuthSubToken + among others. Represents the current user. Defaults to None + and if None, this method will look for a value in the + auth_token member of SpreadsheetsClient. + """ + new_record = gdata.spreadsheets.data.Record() + if title is not None: + new_record.title = atom.data.Title(text=title) + for name, value in fields.iteritems(): + new_record.field.append(gdata.spreadsheets.data.Field( + name=name, text=value)) + return self.post(new_record, RECORDS_URL % (spreadsheet_key, table_id), + auth_token=auth_token, **kwargs) + + AddRecord = add_record + + def get_records(self, spreadsheet_key, table_id, + desired_class=gdata.spreadsheets.data.RecordsFeed, + auth_token=None, **kwargs): + """Retrieves the records in a table. + + Args: + spreadsheet_key: str, The unique ID of this containing spreadsheet. This + can be the ID from the URL or as provided in a + Spreadsheet entry. + table_id: str, The ID of the table within the worksheet whose records + we would like to fetch. The table ID can be found using the + get_table_id method of a gdata.spreadsheets.data.Table. + desired_class: class descended from atom.core.XmlElement to which a + successful response should be converted. If there is no + converter function specified (converter=None) then the + desired_class will be used in calling the + atom.core.parse function. If neither + the desired_class nor the converter is specified, an + HTTP reponse object will be returned. Defaults to + gdata.spreadsheets.data.RecordsFeed. + auth_token: An object which sets the Authorization HTTP header in its + modify_request method. Recommended classes include + gdata.gauth.ClientLoginToken and gdata.gauth.AuthSubToken + among others. Represents the current user. Defaults to None + and if None, this method will look for a value in the + auth_token member of SpreadsheetsClient. + """ + return self.get_feed(RECORDS_URL % (spreadsheet_key, table_id), + desired_class=desired_class, auth_token=auth_token, + **kwargs) + + GetRecords = get_records + + def get_record(self, spreadsheet_key, table_id, record_id, + desired_class=gdata.spreadsheets.data.Record, + auth_token=None, **kwargs): + """Retrieves a single record from the table. + + Args: + spreadsheet_key: str, The unique ID of this containing spreadsheet. This + can be the ID from the URL or as provided in a + Spreadsheet entry. + table_id: str, The ID of the table within the worksheet whose records + we would like to fetch. The table ID can be found using the + get_table_id method of a gdata.spreadsheets.data.Table. + record_id: str, The ID of the record within this table which we want to + fetch. You can find the record ID using get_record_id() on + an instance of the gdata.spreadsheets.data.Record class. + desired_class: class descended from atom.core.XmlElement to which a + successful response should be converted. 
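Record-level calls follow the same pattern. A sketch continuing from the table created above; get_table_id, get_record_id and value_for_name are defined on the data classes in gdata.spreadsheets.data later in this patch:

table_id = table.get_table_id()

# Append a record whose keys match the table's column names.
new_record = client.add_record(spreadsheet_key, table_id,
                               {'name': 'Bob', 'email': 'bob@example.com'})

# Read records back, as a feed or individually.
records = client.get_records(spreadsheet_key, table_id)
first = client.get_record(spreadsheet_key, table_id,
                          records.entry[0].get_record_id())
print first.value_for_name('email')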
If there is no + converter function specified (converter=None) then the + desired_class will be used in calling the + atom.core.parse function. If neither + the desired_class nor the converter is specified, an + HTTP reponse object will be returned. Defaults to + gdata.spreadsheets.data.RecordsFeed. + auth_token: An object which sets the Authorization HTTP header in its + modify_request method. Recommended classes include + gdata.gauth.ClientLoginToken and gdata.gauth.AuthSubToken + among others. Represents the current user. Defaults to None + and if None, this method will look for a value in the + auth_token member of SpreadsheetsClient.""" + return self.get_entry(RECORD_URL % (spreadsheet_key, table_id, record_id), + desired_class=desired_class, auth_token=auth_token, + **kwargs) + + GetRecord = get_record + + +class SpreadsheetQuery(gdata.client.Query): + + def __init__(self, title=None, title_exact=None, **kwargs): + """Adds Spreadsheets feed query parameters to a request. + + Args: + title: str Specifies the search terms for the title of a document. + This parameter used without title-exact will only submit partial + queries, not exact queries. + title_exact: str Specifies whether the title query should be taken as an + exact string. Meaningless without title. Possible values are + 'true' and 'false'. + """ + gdata.client.Query.__init__(self, **kwargs) + self.title = title + self.title_exact = title_exact + + def modify_request(self, http_request): + gdata.client._add_query_param('title', self.title, http_request) + gdata.client._add_query_param('title-exact', self.title_exact, + http_request) + gdata.client.Query.modify_request(self, http_request) + + ModifyRequest = modify_request + + +class WorksheetQuery(SpreadsheetQuery): + pass + + +class ListQuery(gdata.client.Query): + + def __init__(self, order_by=None, reverse=None, sq=None, **kwargs): + """Adds List-feed specific query parameters to a request. + + Args: + order_by: str Specifies what column to use in ordering the entries in + the feed. By position (the default): 'position' returns + rows in the order in which they appear in the GUI. Row 1, then + row 2, then row 3, and so on. By column: + 'column:columnName' sorts rows in ascending order based on the + values in the column with the given columnName, where + columnName is the value in the header row for that column. + reverse: str Specifies whether to sort in descending or ascending order. + Reverses default sort order: 'true' results in a descending + sort; 'false' (the default) results in an ascending sort. + sq: str Structured query on the full text in the worksheet. 
+ [columnName][binaryOperator][value] + Supported binaryOperators are: + - (), for overriding order of operations + - = or ==, for strict equality + - <> or !=, for strict inequality + - and or &&, for boolean and + - or or ||, for boolean or + """ + gdata.client.Query.__init__(self, **kwargs) + self.order_by = order_by + self.reverse = reverse + self.sq = sq + + def modify_request(self, http_request): + gdata.client._add_query_param('orderby', self.order_by, http_request) + gdata.client._add_query_param('reverse', self.reverse, http_request) + gdata.client._add_query_param('sq', self.sq, http_request) + gdata.client.Query.modify_request(self, http_request) + + ModifyRequest = modify_request + + +class TableQuery(ListQuery): + pass + + +class CellQuery(gdata.client.Query): + + def __init__(self, min_row=None, max_row=None, min_col=None, max_col=None, + range=None, return_empty=None, **kwargs): + """Adds Cells-feed specific query parameters to a request. + + Args: + min_row: str or int Positional number of minimum row returned in query. + max_row: str or int Positional number of maximum row returned in query. + min_col: str or int Positional number of minimum column returned in query. + max_col: str or int Positional number of maximum column returned in query. + range: str A single cell or a range of cells. Use standard spreadsheet + cell-range notations, using a colon to separate start and end of + range. Examples: + - 'A1' and 'R1C1' both specify only cell A1. + - 'D1:F3' and 'R1C4:R3C6' both specify the rectangle of cells with + corners at D1 and F3. + return_empty: str If 'true' then empty cells will be returned in the feed. + If omitted, the default is 'false'. + """ + gdata.client.Query.__init__(self, **kwargs) + self.min_row = min_row + self.max_row = max_row + self.min_col = min_col + self.max_col = max_col + self.range = range + self.return_empty = return_empty + + def modify_request(self, http_request): + gdata.client._add_query_param('min-row', self.min_row, http_request) + gdata.client._add_query_param('max-row', self.max_row, http_request) + gdata.client._add_query_param('min-col', self.min_col, http_request) + gdata.client._add_query_param('max-col', self.max_col, http_request) + gdata.client._add_query_param('range', self.range, http_request) + gdata.client._add_query_param('return-empty', self.return_empty, + http_request) + gdata.client.Query.modify_request(self, http_request) + + ModifyRequest = modify_request diff --git a/gam/gdata/spreadsheets/data.py b/gam/gdata/spreadsheets/data.py new file mode 100755 index 00000000000..efb729f49a8 --- /dev/null +++ b/gam/gdata/spreadsheets/data.py @@ -0,0 +1,317 @@ +#!/usr/bin/env python +# +# Copyright (C) 2009 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +# This module is used for version 2 of the Google Data APIs. + + +"""Provides classes and constants for the XML in the Google Spreadsheets API. 
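The SpreadsheetQuery, ListQuery and CellQuery classes above only attach URL parameters via modify_request, so this sketch shows construction only; how a given client call accepts the query object is not shown in this hunk, and the parameter values are placeholders:

import gdata.spreadsheets.client

# Restrict a cells feed to the header row, including blank cells.
cell_query = gdata.spreadsheets.client.CellQuery(
    min_row=1, max_row=1, return_empty='true')

# Order a list feed by a column and filter it with a structured query.
list_query = gdata.spreadsheets.client.ListQuery(
    order_by='column:name', reverse='false', sq='cost > 500')

# Exact-title lookup for a spreadsheets or worksheets feed.
title_query = gdata.spreadsheets.client.SpreadsheetQuery(
    title='expenses', title_exact='true')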
+ +Documentation for the raw XML which these classes represent can be found here: +http://code.google.com/apis/spreadsheets/docs/3.0/reference.html#Elements +""" + + +__author__ = 'j.s@google.com (Jeff Scudder)' + + +import atom.core +import gdata.data + + +GS_TEMPLATE = '{http://schemas.google.com/spreadsheets/2006}%s' +GSX_NAMESPACE = 'http://schemas.google.com/spreadsheets/2006/extended' + + +INSERT_MODE = 'insert' +OVERWRITE_MODE = 'overwrite' + + +WORKSHEETS_REL = 'http://schemas.google.com/spreadsheets/2006#worksheetsfeed' + + +class Error(Exception): + pass + + +class FieldMissing(Exception): + pass + + +class HeaderNotSet(Error): + """The desired column header had no value for the row in the list feed.""" + + +class Cell(atom.core.XmlElement): + """The gs:cell element. + + A cell in the worksheet. The <gs:cell> element can appear only as a child + of <atom:entry>. + """ + _qname = GS_TEMPLATE % 'cell' + col = 'col' + input_value = 'inputValue' + numeric_value = 'numericValue' + row = 'row' + + +class ColCount(atom.core.XmlElement): + """The gs:colCount element. + + Indicates the number of columns in the worksheet, including columns that + contain only empty cells. The <gs:colCount> element can appear as a child + of <atom:entry> or <atom:feed> + """ + _qname = GS_TEMPLATE % 'colCount' + + +class Field(atom.core.XmlElement): + """The gs:field element. + + A field single cell within a record. Contained in an <atom:entry>. + """ + _qname = GS_TEMPLATE % 'field' + index = 'index' + name = 'name' + + +class Column(Field): + """The gs:column element.""" + _qname = GS_TEMPLATE % 'column' + + +class Data(atom.core.XmlElement): + """The gs:data element. + + A data region of a table. Contained in an <atom:entry> element. + """ + _qname = GS_TEMPLATE % 'data' + column = [Column] + insertion_mode = 'insertionMode' + num_rows = 'numRows' + start_row = 'startRow' + + +class Header(atom.core.XmlElement): + """The gs:header element. + + Indicates which row is the header row. Contained in an <atom:entry>. + """ + _qname = GS_TEMPLATE % 'header' + row = 'row' + + +class RowCount(atom.core.XmlElement): + """The gs:rowCount element. + + Indicates the number of total rows in the worksheet, including rows that + contain only empty cells. The <gs:rowCount> element can appear as a + child of <atom:entry> or <atom:feed>. + """ + _qname = GS_TEMPLATE % 'rowCount' + + +class Worksheet(atom.core.XmlElement): + """The gs:worksheet element. + + The worksheet where the table lives.Contained in an <atom:entry>. + """ + _qname = GS_TEMPLATE % 'worksheet' + name = 'name' + + +class Spreadsheet(gdata.data.GDEntry): + """An Atom entry which represents a Google Spreadsheet.""" + + def find_worksheets_feed(self): + return self.find_url(WORKSHEETS_REL) + + FindWorksheetsFeed = find_worksheets_feed + + +class SpreadsheetsFeed(gdata.data.GDFeed): + """An Atom feed listing a user's Google Spreadsheets.""" + entry = [Spreadsheet] + + +class WorksheetEntry(gdata.data.GDEntry): + """An Atom entry representing a single worksheet in a spreadsheet.""" + row_count = RowCount + col_count = ColCount + + +class WorksheetsFeed(gdata.data.GDFeed): + """A feed containing the worksheets in a single spreadsheet.""" + entry = [WorksheetEntry] + + +class Table(gdata.data.GDEntry): + """An Atom entry that represents a subsection of a worksheet. + + A table allows you to treat part or all of a worksheet somewhat like a + table in a database that is, as a set of structured data items. 
Tables + don't exist until you explicitly create them before you can use a table + feed, you have to explicitly define where the table data comes from. + """ + data = Data + header = Header + worksheet = Worksheet + + def get_table_id(self): + if self.id.text: + return self.id.text.split('/')[-1] + return None + + GetTableId = get_table_id + + +class TablesFeed(gdata.data.GDFeed): + """An Atom feed containing the tables defined within a worksheet.""" + entry = [Table] + + +class Record(gdata.data.GDEntry): + """An Atom entry representing a single record in a table. + + Note that the order of items in each record is the same as the order of + columns in the table definition, which may not match the order of + columns in the GUI. + """ + field = [Field] + + def value_for_index(self, column_index): + for field in self.field: + if field.index == column_index: + return field.text + raise FieldMissing('There is no field for %s' % column_index) + + ValueForIndex = value_for_index + + def value_for_name(self, name): + for field in self.field: + if field.name == name: + return field.text + raise FieldMissing('There is no field for %s' % name) + + ValueForName = value_for_name + + def get_record_id(self): + if self.id.text: + return self.id.text.split('/')[-1] + return None + + +class RecordsFeed(gdata.data.GDFeed): + """An Atom feed containing the individuals records in a table.""" + entry = [Record] + + +class ListRow(atom.core.XmlElement): + """A gsx column value within a row. + + The local tag in the _qname is blank and must be set to the column + name. For example, when adding to a ListEntry, do: + col_value = ListRow(text='something') + col_value._qname = col_value._qname % 'mycolumnname' + """ + _qname = '{http://schemas.google.com/spreadsheets/2006/extended}%s' + + +class ListEntry(gdata.data.GDEntry): + """An Atom entry representing a worksheet row in the list feed. + + The values for a particular column can be get and set using + x.get_value('columnheader') and x.set_value('columnheader', 'value'). + See also the explanation of column names in the ListFeed class. + """ + + def get_value(self, column_name): + """Returns the displayed text for the desired column in this row. + + The formula or input which generated the displayed value is not accessible + through the list feed, to see the user's input, use the cells feed. + + If a column is not present in this spreadsheet, or there is no value + for a column in this row, this method will return None. + """ + values = self.get_elements(column_name, GSX_NAMESPACE) + if len(values) == 0: + return None + return values[0].text + + def set_value(self, column_name, value): + """Changes the value of cell in this row under the desired column name. + + Warning: if the cell contained a formula, it will be wiped out by setting + the value using the list feed since the list feed only works with + displayed values. + + No client side checking is performed on the column_name, you need to + ensure that the column_name is the local tag name in the gsx tag for the + column. For example, the column_name will not contain special characters, + spaces, uppercase letters, etc. + """ + # Try to find the column in this row to change an existing value. + values = self.get_elements(column_name, GSX_NAMESPACE) + if len(values) > 0: + values[0].text = value + else: + # There is no value in this row for the desired column, so add a new + # gsx:column_name element. 
+ new_value = ListRow(text=value) + new_value._qname = new_value._qname % (column_name,) + self._other_elements.append(new_value) + + +class ListsFeed(gdata.data.GDFeed): + """An Atom feed in which each entry represents a row in a worksheet. + + The first row in the worksheet is used as the column names for the values + in each row. If a header cell is empty, then a unique column ID is used + for the gsx element name. + + Spaces in a column name are removed from the name of the corresponding + gsx element. + + Caution: The columnNames are case-insensitive. For example, if you see + a <gsx:e-mail> element in a feed, you can't know whether the column + heading in the original worksheet was "e-mail" or "E-Mail". + + Note: If two or more columns have the same name, then subsequent columns + of the same name have _n appended to the columnName. For example, if the + first column name is "e-mail", followed by columns named "E-Mail" and + "E-mail", then the columnNames will be gsx:e-mail, gsx:e-mail_2, and + gsx:e-mail_3 respectively. + """ + entry = [ListEntry] + + +class CellEntry(gdata.data.BatchEntry): + """An Atom entry representing a single cell in a worksheet.""" + cell = Cell + + +class CellsFeed(gdata.data.BatchFeed): + """An Atom feed contains one entry per cell in a worksheet. + + The cell feed supports batch operations, you can send multiple cell + operations in one HTTP request. + """ + entry = [CellEntry] + + def batch_set_cell(row, col, input): + pass + diff --git a/gam/gdata/test_config.py b/gam/gdata/test_config.py new file mode 100755 index 00000000000..3aef1eed0bd --- /dev/null +++ b/gam/gdata/test_config.py @@ -0,0 +1,408 @@ +#!/usr/bin/env python + +# Copyright (C) 2009 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +import sys +import unittest +import getpass +import inspect +import atom.mock_http_core +import gdata.gauth + + +"""Loads configuration for tests which connect to Google servers. + +Settings used in tests are stored in a ConfigCollection instance in this +module called options. If your test needs to get a test related setting, +use + +import gdata.test_config +option_value = gdata.test_config.options.get_value('x') + +The above will check the command line for an '--x' argument, and if not +found will either use the default value for 'x' or prompt the user to enter +one. + +Your test can override the value specified by the user by performing: + +gdata.test_config.options.set_value('x', 'y') + +If your test uses a new option which you would like to allow the user to +specify on the command line or via a prompt, you can use the register_option +method as follows: + +gdata.test_config.options.register( + 'option_name', 'Prompt shown to the user', secret=False #As for password. 
+ 'This is the description of the option, shown when help is requested.', + 'default value, provide only if you do not want the user to be prompted') +""" + + +class Option(object): + + def __init__(self, name, prompt, secret=False, description=None, default=None): + self.name = name + self.prompt = prompt + self.secret = secret + self.description = description + self.default = default + + def get(self): + value = self.default + # Check for a command line parameter. + for i in xrange(len(sys.argv)): + if sys.argv[i].startswith('--%s=' % self.name): + value = sys.argv[i].split('=')[1] + elif sys.argv[i] == '--%s' % self.name: + value = sys.argv[i + 1] + # If the param was not on the command line, ask the user to input the + # value. + # In order for this to prompt the user, the default value for the option + # must be None. + if value is None: + prompt = '%s: ' % self.prompt + if self.secret: + value = getpass.getpass(prompt) + else: + print 'You can specify this on the command line using --%s' % self.name + value = raw_input(prompt) + return value + + +class ConfigCollection(object): + + def __init__(self, options=None): + self.options = options or {} + self.values = {} + + def register_option(self, option): + self.options[option.name] = option + + def register(self, *args, **kwargs): + self.register_option(Option(*args, **kwargs)) + + def get_value(self, option_name): + if option_name in self.values: + return self.values[option_name] + value = self.options[option_name].get() + if value is not None: + self.values[option_name] = value + return value + + def set_value(self, option_name, value): + self.values[option_name] = value + + def render_usage(self): + message_parts = [] + for opt_name, option in self.options.iteritems(): + message_parts.append('--%s: %s' % (opt_name, option.description)) + return '\n'.join(message_parts) + + +options = ConfigCollection() + + +# Register the default options. +options.register( + 'username', + 'Please enter the email address of your test account', + description=('The email address you want to sign in with. ' + 'Make sure this is a test account as these tests may edit' + ' or delete data.')) +options.register( + 'password', + 'Please enter the password for your test account', + secret=True, description='The test account password.') +options.register( + 'clearcache', + 'Delete cached data? (enter true or false)', + description=('If set to true, any temporary files which cache test' + ' requests and responses will be deleted.'), + default='true') +options.register( + 'savecache', + 'Save requests and responses in a temporary file? (enter true or false)', + description=('If set to true, requests to the server and responses will' + ' be saved in temporary files.'), + default='false') +options.register( + 'runlive', + 'Run the live tests which contact the server? (enter true or false)', + description=('If set to true, the tests will make real HTTP requests to' + ' the servers. This slows down test execution and may' + ' modify the users data, be sure to use a test account.'), + default='true') +options.register( + 'ssl', + 'Run the live tests over SSL (enter true or false)', + description='If set to true, all tests will be performed over HTTPS (SSL)', + default='false') +options.register( + 'appsusername', + 'Please enter the email address of your test Apps domain account', + description=('The email address you want to sign in with. 
' + 'Make sure this is a test account on your Apps domain as ' + 'these tests may edit or delete data.')) +options.register( + 'appspassword', + 'Please enter the password for your test Apps domain account', + secret=True, description='The test Apps account password.') + +# Other options which may be used if needed. +BLOG_ID_OPTION = Option( + 'blogid', + 'Please enter the ID of your test blog', + description=('The blog ID for the blog which should have test posts added' + ' to it. Example 7682659670455539811')) +TEST_IMAGE_LOCATION_OPTION = Option( + 'imgpath', + 'Please enter the full path to a test image to upload', + description=('This test image will be uploaded to a service which' + ' accepts a media file, it must be a jpeg.')) +SPREADSHEET_ID_OPTION = Option( + 'spreadsheetid', + 'Please enter the ID of a spreadsheet to use in these tests', + description=('The spreadsheet ID for the spreadsheet which should be' + ' modified by theses tests.')) +APPS_DOMAIN_OPTION = Option( + 'appsdomain', + 'Please enter your Google Apps domain', + description=('The domain the Google Apps is hosted on or leave blank' + ' if n/a')) +SITES_NAME_OPTION = Option( + 'sitename', + 'Please enter name of your Google Site', + description='The webspace name of the Site found in its URL.') +PROJECT_NAME_OPTION = Option( + 'project_name', + 'Please enter the name of your project hosting project', + description=('The name of the project which should have test issues added' + ' to it. Example gdata-python-client')) +ISSUE_ASSIGNEE_OPTION = Option( + 'issue_assignee', + 'Enter the email address of the target owner of the updated issue.', + description=('The email address of the user a created issue\'s owner will ' + ' become. Example testuser2@gmail.com')) +GA_TABLE_ID = Option( + 'table_id', + 'Enter the Table ID of the Google Analytics profile to test', + description=('The Table ID of the Google Analytics profile to test.' + ' Example ga:1174')) +TARGET_USERNAME_OPTION = Option( + 'targetusername', + 'Please enter the username (without domain) of the user which will be' + ' affected by the tests', + description=('The username of the user to be tested')) + +# Functions to inject a cachable HTTP client into a service client. +def configure_client(client, case_name, service_name, use_apps_auth=False): + """Sets up a mock client which will reuse a saved session. + + Should be called during setUp of each unit test. + + Handles authentication to allow the GDClient to make requests which + require an auth header. +where + Args: + client: a gdata.GDClient whose http_client member should be replaced + with a atom.mock_http_core.MockHttpClient so that repeated + executions can used cached responses instead of contacting + the server. + case_name: str The name of the test case class. Examples: 'BloggerTest', + 'ContactsTest'. Used to save a session + for the ClientLogin auth token request, so the case_name + should be reused if and only if the same username, password, + and service are being used. + service_name: str The service name as used for ClientLogin to identify + the Google Data API being accessed. Example: 'blogger', + 'wise', etc. + use_apps_auth: bool (optional) If set to True, use appsusername and + appspassword command-line args instead of username and + password respectively. + """ + # Use a mock HTTP client which will record and replay the HTTP traffic + # from these tests. 
+ client.http_client = atom.mock_http_core.MockHttpClient() + client.http_client.cache_case_name = case_name + # Getting the auth token only needs to be done once in the course of test + # runs. + auth_token_key = '%s_auth_token' % service_name + if (auth_token_key not in options.values + and options.get_value('runlive') == 'true'): + client.http_client.cache_test_name = 'client_login' + cache_name = client.http_client.get_cache_file_name() + if options.get_value('clearcache') == 'true': + client.http_client.delete_session(cache_name) + client.http_client.use_cached_session(cache_name) + if not use_apps_auth: + username = options.get_value('username') + password = options.get_value('password') + else: + username = options.get_value('appsusername') + password = options.get_value('appspassword') + auth_token = client.request_client_login_token(username, password, + case_name, service=service_name) + options.values[auth_token_key] = gdata.gauth.token_to_blob(auth_token) + client.http_client.close_session() + # Allow a config auth_token of False to prevent the client's auth header + # from being modified. + if auth_token_key in options.values: + client.auth_token = gdata.gauth.token_from_blob( + options.values[auth_token_key]) + + +def configure_cache(client, test_name): + """Loads or begins a cached session to record HTTP traffic. + + Should be called at the beginning of each test method. + + Args: + client: a gdata.GDClient whose http_client member has been replaced + with a atom.mock_http_core.MockHttpClient so that repeated + executions can used cached responses instead of contacting + the server. + test_name: str The name of this test method. Examples: + 'TestClass.test_x_works', 'TestClass.test_crud_operations'. + This is used to name the recording of the HTTP requests and + responses, so it should be unique to each test method in the + test case. + """ + # Auth token is obtained in configure_client which is called as part of + # setUp. + client.http_client.cache_test_name = test_name + cache_name = client.http_client.get_cache_file_name() + if options.get_value('clearcache') == 'true': + client.http_client.delete_session(cache_name) + client.http_client.use_cached_session(cache_name) + + +def close_client(client): + """Saves the recoded responses to a temp file if the config file allows. + + This should be called in the unit test's tearDown method. + + Checks to see if the 'savecache' option is set to 'true', to make sure we + only save sessions to repeat if the user desires. + """ + if client and options.get_value('savecache') == 'true': + # If this was a live request, save the recording. + client.http_client.close_session() + + +def configure_service(service, case_name, service_name): + """Sets up a mock GDataService v1 client to reuse recorded sessions. + + Should be called during setUp of each unit test. This is a duplicate of + configure_client, modified to handle old v1 service classes. + """ + service.http_client.v2_http_client = atom.mock_http_core.MockHttpClient() + service.http_client.v2_http_client.cache_case_name = case_name + # Getting the auth token only needs to be done once in the course of test + # runs. 
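A sketch of wiring these helpers into a test case, following the setUp/tearDown guidance in the docstrings above; the test class, the 'wise' service name and the test method are illustrative, and build_suite is defined just below:

import unittest

import gdata.spreadsheets.client
import gdata.test_config as conf


class SpreadsheetsLiveTest(unittest.TestCase):

  def setUp(self):
    self.client = gdata.spreadsheets.client.SpreadsheetsClient()
    conf.configure_client(self.client, 'SpreadsheetsLiveTest', 'wise')

  def tearDown(self):
    conf.close_client(self.client)

  def test_get_spreadsheets(self):
    if conf.options.get_value('runlive') != 'true':
      return
    conf.configure_cache(self.client, 'test_get_spreadsheets')
    feed = self.client.get_spreadsheets()
    self.assert_(feed is not None)


suite = conf.build_suite([SpreadsheetsLiveTest])
unittest.TextTestRunner().run(suite)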
+ auth_token_key = 'service_%s_auth_token' % service_name + if (auth_token_key not in options.values + and options.get_value('runlive') == 'true'): + service.http_client.v2_http_client.cache_test_name = 'client_login' + cache_name = service.http_client.v2_http_client.get_cache_file_name() + if options.get_value('clearcache') == 'true': + service.http_client.v2_http_client.delete_session(cache_name) + service.http_client.v2_http_client.use_cached_session(cache_name) + service.ClientLogin(options.get_value('username'), + options.get_value('password'), + service=service_name, source=case_name) + options.values[auth_token_key] = service.GetClientLoginToken() + service.http_client.v2_http_client.close_session() + if auth_token_key in options.values: + service.SetClientLoginToken(options.values[auth_token_key]) + + +def configure_service_cache(service, test_name): + """Loads or starts a session recording for a v1 Service object. + + Duplicates the behavior of configure_cache, but the target for this + function is a v1 Service object instead of a v2 Client. + """ + service.http_client.v2_http_client.cache_test_name = test_name + cache_name = service.http_client.v2_http_client.get_cache_file_name() + if options.get_value('clearcache') == 'true': + service.http_client.v2_http_client.delete_session(cache_name) + service.http_client.v2_http_client.use_cached_session(cache_name) + + +def close_service(service): + if service and options.get_value('savecache') == 'true': + # If this was a live request, save the recording. + service.http_client.v2_http_client.close_session() + + +def build_suite(classes): + """Creates a TestSuite for all unit test classes in the list. + + Assumes that each of the classes in the list has unit test methods which + begin with 'test'. Calls unittest.makeSuite. + + Returns: + A new unittest.TestSuite containing a test suite for all classes. + """ + suites = [unittest.makeSuite(a_class, 'test') for a_class in classes] + return unittest.TestSuite(suites) + + +def check_data_classes(test, classes): + import inspect + for data_class in classes: + test.assert_(data_class.__doc__ is not None, + 'The class %s should have a docstring' % data_class) + if hasattr(data_class, '_qname'): + qname_versions = None + if isinstance(data_class._qname, tuple): + qname_versions = data_class._qname + else: + qname_versions = (data_class._qname,) + for versioned_qname in qname_versions: + test.assert_(isinstance(versioned_qname, str), + 'The class %s has a non-string _qname' % data_class) + test.assert_(not versioned_qname.endswith('}'), + 'The _qname for class %s is only a namespace' % ( + data_class)) + + for attribute_name, value in data_class.__dict__.iteritems(): + # Ignore all elements that start with _ (private members) + if not attribute_name.startswith('_'): + try: + if not (isinstance(value, str) or inspect.isfunction(value) + or (isinstance(value, list) + and issubclass(value[0], atom.core.XmlElement)) + or type(value) == property # Allow properties. + or inspect.ismethod(value) # Allow methods. 
+                  or issubclass(value, atom.core.XmlElement)):
+            test.fail(
+                'XmlElement member should have an attribute, XML class,'
+                ' or list of XML classes as attributes.')
+
+        except TypeError:
+          test.fail('Element %s in %s was of type %s' % (
+              attribute_name, data_class._qname, type(value)))
+
+
+def check_clients_with_auth(test, classes):
+  for client_class in classes:
+    test.assert_(hasattr(client_class, 'api_version'))
+    test.assert_(isinstance(client_class.auth_service, (str, unicode, int)))
+    test.assert_(hasattr(client_class, 'auth_service'))
+    test.assert_(isinstance(client_class.auth_service, (str, unicode)))
+    test.assert_(hasattr(client_class, 'auth_scopes'))
+    test.assert_(isinstance(client_class.auth_scopes, (list, tuple)))
diff --git a/gam/gdata/test_config_template.py b/gam/gdata/test_config_template.py
new file mode 100755
index 00000000000..e27f71f0687
--- /dev/null
+++ b/gam/gdata/test_config_template.py
@@ -0,0 +1,96 @@
+#!/usr/bin/env python
+
+
+# Copyright (C) 2009 Google Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+"""Fill in this module with configuration data to use in tests.
+
+See comments in the source code for explanations of the settings.
+"""
+
+import os
+
+
+# To actually run the tests which use this configuration information you must
+# change RUN_LIVE_TESTS to True.
+RUN_LIVE_TESTS = False
+
+
+# If set to True, the client will save responses from the server and reuse
+# them in future runs of the test.
+CACHE_RESPONSES = True
+
+# If set to True, the client will make HTTP requests to the server regardless
+# of a cache file. If True, caches from previous sessions will be deleted.
+# If False (the default), cached sessions will be reused if they exist.
+CLEAR_CACHE = True
+
+
+GOOGLE_ACCOUNT_EMAIL = '<your email>'
+GOOGLE_ACCOUNT_PASSWORD = '<your password>'
+# For example, the TEST_FILES_DIR might be
+# '/home/username/svn/gdata-python-client/tests'
+TEST_FILES_DIR = '<location of the tests directory>'
+
+
+class NoAuthConfig(object):
+  auth_token = False
+
+
+class TestConfig(object):
+  service = None
+  auth_token = None
+
+  def email(cls):
+    """Provides email to log into the test account for this service.
+
+    By default uses GOOGLE_ACCOUNT_EMAIL, so override this function if you
+    have a service-specific test account.
+    """
+    return GOOGLE_ACCOUNT_EMAIL
+
+  email = classmethod(email)
+
+  def password(cls):
+    """Provides password to log into the test account for this service.
+
+    By default uses GOOGLE_ACCOUNT_PASSWORD, so override this function if
+    you have a service-specific test account.
+    """
+    return GOOGLE_ACCOUNT_PASSWORD
+
+  password = classmethod(password)
+
+
+class BloggerConfig(TestConfig):
+  service = 'blogger'
+  title = 'A Test Post'
+  content = 'This is a <b>test</b>.'
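+  # The values above, together with blog_id below, are read by the Blogger
+  # tests; blog_id should name an existing blog owned by the test account.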
+ blog_id = '<your test blog\'s id>' + + +class ContactsConfig(TestConfig): + service = 'cp' + + def get_image_location(cls): + return os.path.join(TEST_FILES_DIR, 'files', 'testimage.jpg') + + get_image_location = classmethod(get_image_location) + +class MapsConfig(TestConfig): + service = 'local' + map_title = 'Some test map' + map_summary = 'A test description' diff --git a/gam/gdata/test_data.py b/gam/gdata/test_data.py new file mode 100755 index 00000000000..d9f62c5db7d --- /dev/null +++ b/gam/gdata/test_data.py @@ -0,0 +1,5397 @@ +#!/usr/bin/python +# +# Copyright (C) 2006 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + + +XML_ENTRY_1 = """<?xml version='1.0'?> +<entry xmlns='http://www.w3.org/2005/Atom' + xmlns:g='http://base.google.com/ns/1.0'> + <category scheme="http://base.google.com/categories/itemtypes" + term="products"/> + <id> http://www.google.com/test/id/url </id> + <title type='text'>Testing 2000 series laptop + +

    + + + + Computer + Laptop + testing laptop + products +""" + + +TEST_BASE_ENTRY = """ + + + Testing 2000 series laptop + +
    A Testing Laptop
    +
    + + yes + + + + Computer + Laptop + testing laptop + products +
    """ + + +BIG_FEED = """ + + dive into mark + + A <em>lot</em> of effort + went into making this effortless + + 2005-07-31T12:29:29Z + tag:example.org,2003:3 + + + Copyright (c) 2003, Mark Pilgrim + + Example Toolkit + + + Atom draft-07 snapshot + + + tag:example.org,2003:3.2397 + 2005-07-31T12:29:29Z + 2003-12-13T08:29:29-04:00 + + Mark Pilgrim + http://example.org/ + f8dy@example.com + + + Sam Ruby + + + Joe Gregorio + + +
    +

    [Update: The Atom draft is finished.]

    +
    +
    +
    +
    +""" + +SMALL_FEED = """ + + Example Feed + + 2003-12-13T18:30:02Z + + John Doe + + urn:uuid:60a76c80-d399-11d9-b93C-0003939e0af6 + + Atom-Powered Robots Run Amok + + urn:uuid:1225c695-cfb8-4ebb-aaaa-80da344efa6a + 2003-12-13T18:30:02Z + Some text. + + +""" + +GBASE_FEED = """ + +http://www.google.com/base/feeds/snippets +2007-02-08T23:18:21.935Z +Items matching query: digital camera + + + + + + + + +GoogleBase +2171885 +1 +25 + +http://www.google.com/base/feeds/snippets/13246453826751927533 +2007-02-08T13:23:27.000Z +2007-02-08T16:40:57.000Z + + +Digital Camera Battery Notebook Computer 12v DC Power Cable - 5.5mm x 2.5mm (Center +) Camera Connecting Cables +Notebook Computer 12v DC Power Cable - 5.5mm x 2.1mm (Center +) This connection cable will allow any Digital Pursuits battery pack to power portable computers that operate with 12v power and have a 2.1mm power connector (center +) Digital ... + + + + + +B&H Photo-Video +anon-szot0wdsq0at@base.google.com + +PayPal & Bill Me Later credit available online only. +new +420 9th Ave. 10001 +305668-REG +Products +Digital Camera Battery +2007-03-10T13:23:27.000Z +1172711 +34.95 usd +Digital Photography>Camera Connecting Cables +EN +DCB5092 +US +1.0 +http://base.google.com/base_image?q=http%3A%2F%2Fwww.bhphotovideo.com%2Fimages%2Fitems%2F305668.jpg&dhm=ffffffff84c9a95e&size=6 + + +http://www.google.com/base/feeds/snippets/10145771037331858608 +2007-02-08T13:23:27.000Z +2007-02-08T16:40:57.000Z + + +Digital Camera Battery Electronic Device 5v DC Power Cable - 5.5mm x 2.5mm (Center +) Camera Connecting Cables +Electronic Device 5v DC Power Cable - 5.5mm x 2.5mm (Center +) This connection cable will allow any Digital Pursuits battery pack to power any electronic device that operates with 5v power and has a 2.5mm power connector (center +) Digital ... + + + + + +B&H Photo-Video +anon-szot0wdsq0at@base.google.com + +420 9th Ave. 10001 +new +0.18 +US +Digital Photography>Camera Connecting Cables +PayPal & Bill Me Later credit available online only. +305656-REG +http://base.google.com/base_image?q=http%3A%2F%2Fwww.bhphotovideo.com%2Fimages%2Fitems%2F305656.jpg&dhm=7315bdc8&size=6 +DCB5108 +838098005108 +34.95 usd +EN +Digital Camera Battery +1172711 +Products +2007-03-10T13:23:27.000Z + + +http://www.google.com/base/feeds/snippets/3128608193804768644 +2007-02-08T02:21:27.000Z +2007-02-08T15:40:13.000Z + + +Digital Camera Battery Power Cable for Kodak 645 Pro-Back ProBack & DCS-300 Series Camera Connecting Cables +Camera Connection Cable - to Power Kodak 645 Pro-Back DCS-300 Series Digital Cameras This connection cable will allow any Digital Pursuits battery pack to power the following digital cameras: Kodak DCS Pro Back 645 DCS-300 series Digital Photography ... + + + + + +B&H Photo-Video +anon-szot0wdsq0at@base.google.com + +0.3 +DCB6006 +http://base.google.com/base_image?q=http%3A%2F%2Fwww.bhphotovideo.com%2Fimages%2Fitems%2F305685.jpg&dhm=72f0ca0a&size=6 +420 9th Ave. 10001 +PayPal & Bill Me Later credit available online only. 
+Products +US +digital kodak camera +Digital Camera Battery +2007-03-10T02:21:27.000Z +EN +new +34.95 usd +1172711 +Digital Photography>Camera Connecting Cables +305685-REG + +""" + +EXTENSION_TREE = """ + + + John Doe + Bar + + + +""" + +TEST_AUTHOR = """ + + John Doe + johndoes@someemailadress.com + http://www.google.com + +""" + +TEST_LINK = """ + +""" + +TEST_GBASE_ATTRIBUTE = """ + Digital Camera Battery +""" + + +CALENDAR_FEED = """ + + http://www.google.com/calendar/feeds/default + 2007-03-20T22:48:57.833Z + GData Ops Demo's Calendar List + + + + + GData Ops Demo + gdata.ops.demo@gmail.com + + + Google Calendar + 1 + + + http://www.google.com/calendar/feeds/default/gdata.ops.demo%40gmail.com + 2007-03-20T22:48:57.837Z + 2007-03-20T22:48:52.000Z + GData Ops Demo + + + + + + GData Ops Demo + gdata.ops.demo@gmail.com + + + + + + + + + http://www.google.com/calendar/feeds/default/jnh21ovnjgfph21h32gvms2758%40group.calendar.google.com + 2007-03-20T22:48:57.837Z + 2007-03-20T22:48:53.000Z + GData Ops Demo Secondary Calendar + + + + + + + GData Ops Demo Secondary Calendar + + + + + + + + +""" + +CALENDAR_FULL_EVENT_FEED = """ + + + http://www.google.com/calendar/feeds/default/private/full + 2007-03-20T21:29:57.000Z + + GData Ops Demo + GData Ops Demo + + + + + + + + GData Ops Demo + gdata.ops.demo@gmail.com + + + Google Calendar + 10 + 1 + 25 + + + + http://www.google.com/calendar/feeds/default/private/full/o99flmgmkfkfrr8u745ghr3100 + 2007-03-20T21:29:52.000Z + 2007-03-20T21:29:57.000Z + + test deleted + + + + + + + + GData Ops Demo + gdata.ops.demo@gmail.com + + + + + + + + + + + + + + + + + + + + + http://www.google.com/calendar/feeds/default/private/full/2qt3ao5hbaq7m9igr5ak9esjo0 + 2007-03-20T21:26:04.000Z + 2007-03-20T21:28:46.000Z + + Afternoon at Dolores Park with Kim + + + + + + + + GData Ops Demo + gdata.ops.demo@gmail.com + + + + + + + + + + + + + + + + + + + + + + + + + + + + + http://www.google.com/calendar/feeds/default/private/full/uvsqhg7klnae40v50vihr1pvos + 2007-03-20T21:28:37.000Z + 2007-03-20T21:28:37.000Z + + Team meeting + + + + + + + + GData Ops Demo + gdata.ops.demo@gmail.com + + DTSTART;TZID=America/Los_Angeles:20070323T090000 + DTEND;TZID=America/Los_Angeles:20070323T100000 + RRULE:FREQ=WEEKLY;BYDAY=FR;UNTIL=20070817T160000Z;WKST=SU + BEGIN:VTIMEZONE TZID:America/Los_Angeles + X-LIC-LOCATION:America/Los_Angeles BEGIN:STANDARD + TZOFFSETFROM:-0700 TZOFFSETTO:-0800 TZNAME:PST + DTSTART:19701025T020000 RRULE:FREQ=YEARLY;BYMONTH=10;BYDAY=-1SU + END:STANDARD BEGIN:DAYLIGHT TZOFFSETFROM:-0800 TZOFFSETTO:-0700 + TZNAME:PDT DTSTART:19700405T020000 + RRULE:FREQ=YEARLY;BYMONTH=4;BYDAY=1SU END:DAYLIGHT + END:VTIMEZONE + + + + + + + + + + + + + + http://www.google.com/calendar/feeds/default/private/full/st4vk9kiffs6rasrl32e4a7alo + 2007-03-20T21:25:46.000Z + 2007-03-20T21:25:46.000Z + + Movie with Kim and danah + + + + + + + + GData Ops Demo + gdata.ops.demo@gmail.com + + + + + + + + + + + + + + + + + + + + + http://www.google.com/calendar/feeds/default/private/full/ofl1e45ubtsoh6gtu127cls2oo + 2007-03-20T21:24:43.000Z + 2007-03-20T21:25:08.000Z + + Dinner with Kim and Sarah + + + + + + + + GData Ops Demo + gdata.ops.demo@gmail.com + + + + + + + + + + + + + + + + + + + + + http://www.google.com/calendar/feeds/default/private/full/b69s2avfi2joigsclecvjlc91g + 2007-03-20T21:24:19.000Z + 2007-03-20T21:25:05.000Z + + Dinner with Jane and John + + + + + + + + GData Ops Demo + gdata.ops.demo@gmail.com + + + + + + + + + + + + + + + + + + + + + 
http://www.google.com/calendar/feeds/default/private/full/u9p66kkiotn8bqh9k7j4rcnjjc + 2007-03-20T21:24:33.000Z + 2007-03-20T21:24:33.000Z + + Tennis with Elizabeth + + + + + + + + GData Ops Demo + gdata.ops.demo@gmail.com + + + + + + + + + + + + + + + + + + + + + http://www.google.com/calendar/feeds/default/private/full/76oj2kceidob3s708tvfnuaq3c + 2007-03-20T21:24:00.000Z + 2007-03-20T21:24:00.000Z + + Lunch with Jenn + + + + + + + + GData Ops Demo + gdata.ops.demo@gmail.com + + + + + + + + + + + + + + + + + + + + + http://www.google.com/calendar/feeds/default/private/full/5np9ec8m7uoauk1vedh5mhodco + 2007-03-20T07:50:02.000Z + 2007-03-20T20:39:26.000Z + + test entry + test desc + + + + + + + GData Ops Demo + gdata.ops.demo@gmail.com + + + + + + + + + + + + + + + + + + + + + + + + + + + + + http://www.google.com/calendar/feeds/default/private/full/fu6sl0rqakf3o0a13oo1i1a1mg + 2007-02-14T23:23:37.000Z + 2007-02-14T23:25:30.000Z + + test + + + + + + + + + + + GData Ops Demo + gdata.ops.demo@gmail.com + + + + + + + + + + + + + + + + + + + + + http://www.google.com/calendar/feeds/default/private/full/h7a0haa4da8sil3rr19ia6luvc + 2007-07-16T22:13:28.000Z + 2007-07-16T22:13:29.000Z + + + + + + + + + + + + + GData Ops Demo + gdata.ops.demo@gmail.com + + + + + + + + + + + + + +""" + +CALENDAR_BATCH_REQUEST = """ + + + + 1 + + + Event inserted via batch + + + 2 + + http://www.google.com/calendar/feeds/default/private/full/glcs0kv2qqa0gf52qi1jo018gc + + Event queried via batch + + + 3 + + http://www.google.com/calendar/feeds/default/private/full/ujm0go5dtngdkr6u91dcqvj0qs + + Event updated via batch + + + + + + 4 + + http://www.google.com/calendar/feeds/default/private/full/d8qbg9egk1n6lhsgq1sjbqffqc + + Event deleted via batch + + + + + +""" + +CALENDAR_BATCH_RESPONSE = """ + + http://www.google.com/calendar/feeds/default/private/full + 2007-09-21T23:01:00.380Z + + Batch Feed + + + + + 1 + + + http://www.google.com/calendar/feeds/default/private/full/n9ug78gd9tv53ppn4hdjvk68ek + + Event inserted via batch + + + + + + 2 + + + http://www.google.com/calendar/feeds/default/private/full/glsc0kv2aqa0ff52qi1jo018gc + + Event queried via batch + + + + + + 3 + + + http://www.google.com/calendar/feeds/default/private/full/ujm0go5dtngdkr6u91dcqvj0qs + + Event updated via batch + + + + 3 + + + + + 4 + + + http://www.google.com/calendar/feeds/default/private/full/d8qbg9egk1n6lhsgq1sjbqffqc + + Event deleted via batch + Deleted + + +""" + +GBASE_ATTRIBUTE_FEED = """ + + http://www.google.com/base/feeds/attributes + 2006-11-01T20:35:59.578Z + + + Attribute histogram for query: [item type:jobs] + + + + GoogleBase + 16 + 1 + 16 + + http://www.google.com/base/feeds/attributes/job+industry%28text%29N%5Bitem+type%3Ajobs%5D + 2006-11-01T20:36:00.100Z + job industry(text) + Attribute"job industry" of type text. + + + + it internet + healthcare + information technology + accounting + clerical and administrative + other + sales and sales management + information systems + engineering and architecture + sales + + + +""" + + +GBASE_ATTRIBUTE_ENTRY = """ + + http://www.google.com/base/feeds/attributes/job+industry%28text%29N%5Bitem+type%3Ajobs%5D + 2006-11-01T20:36:00.100Z + job industry(text) + Attribute"job industry" of type text. 
+ + + + it internet + healthcare + information technology + accounting + clerical and administrative + other + sales and sales management + information systems + engineering and architecture + sales + + +""" + +GBASE_LOCALES_FEED = """ + + http://www.google.com/base/feeds/locales/ + 2006-06-13T18:11:40.120Z + Locales + + + + + Google Inc. + base@google.com + + GoogleBase + 3 + 25 + + + http://www.google.com/base/feeds/locales/en_US + 2006-03-27T22:27:36.658Z + + + en_US + en_US + + + + + + http://www.google.com/base/feeds/locales/en_GB + 2006-06-13T18:14:18.601Z + + en_GB + en_GB + + + + + http://www.google.com/base/feeds/locales/de_DE + 2006-06-13T18:14:18.601Z + + de_DE + de_DE + + + +""" + +GBASE_STRING_ENCODING_ENTRY = """ + + http://www.google.com/base/feeds/snippets/17495780256183230088 + 2007-12-09T03:13:07.000Z + 2008-01-07T03:26:46.000Z + + Digital Camera Cord Fits SONY Cybershot DSC-R1 S40 + SONY \xC2\xB7 Cybershot Digital Camera Usb Cable DESCRIPTION + This is a 2.5 USB 2.0 A to Mini B (5 Pin) high quality digital camera + cable used for connecting your Sony Digital Cameras and Camcoders. Backward + Compatible with USB 2.0, 1.0 and 1.1. Fully ... + + + + eBay + + Products + EN + US + 0.99 usd + http://thumbs.ebaystatic.com/pict/270195049057_1.jpg + Cameras & Photo>Digital Camera Accessories>Cables + Cords & Connectors>USB Cables>For Other Brands + 11729 + 270195049057 + 2008-02-06T03:26:46Z +""" + + +RECURRENCE_EXCEPTION_ENTRY = """ + + http://www.google.com/calendar/feeds/default/private/composite/i7lgfj69mjqjgnodklif3vbm7g + 2007-04-05T21:51:49.000Z + 2007-04-05T21:51:49.000Z + + testDavid + + + + + + gdata ops + gdata.ops.test@gmail.com + + + + + + + + + + DTSTART;TZID=America/Anchorage:20070403T100000 + DTEND;TZID=America/Anchorage:20070403T110000 + RRULE:FREQ=DAILY;UNTIL=20070408T180000Z;WKST=SU + EXDATE;TZID=America/Anchorage:20070407T100000 + EXDATE;TZID=America/Anchorage:20070405T100000 + EXDATE;TZID=America/Anchorage:20070404T100000 BEGIN:VTIMEZONE + TZID:America/Anchorage X-LIC-LOCATION:America/Anchorage + BEGIN:STANDARD TZOFFSETFROM:-0800 TZOFFSETTO:-0900 TZNAME:AKST + DTSTART:19701025T020000 RRULE:FREQ=YEARLY;BYMONTH=10;BYDAY=-1SU + END:STANDARD BEGIN:DAYLIGHT TZOFFSETFROM:-0900 TZOFFSETTO:-0800 + TZNAME:AKDT DTSTART:19700405T020000 + RRULE:FREQ=YEARLY;BYMONTH=4;BYDAY=1SU END:DAYLIGHT + END:VTIMEZONE + + + + + + i7lgfj69mjqjgnodklif3vbm7g_20070407T180000Z + 2007-04-05T21:51:49.000Z + 2007-04-05T21:52:58.000Z + + testDavid + + + + gdata ops + gdata.ops.test@gmail.com + + + + + + + + + + + + + + + + + + + 2007-04-05T21:54:09.285Z + + + Comments for: testDavid + + + + + + + + + + + + """ + +NICK_ENTRY = """ + + https://apps-apis.google.com/a/feeds/example.com/nickname/2.0/Foo + 1970-01-01T00:00:00.000Z + + Foo + + + + +""" + +NICK_FEED = """ + + + http://apps-apis.google.com/a/feeds/example.com/nickname/2.0 + + 1970-01-01T00:00:00.000Z + + Nicknames for user SusanJones + + + + 1 + 2 + + + http://apps-apis.google.com/a/feeds/example.com/nickname/2.0/Foo + + + Foo + + + + + + + + http://apps-apis.google.com/a/feeds/example.com/nickname/2.0/suse + + + suse + + + + + +""" + +USER_ENTRY = """ + + https://apps-apis.google.com/a/feeds/example.com/user/2.0/TestUser + 1970-01-01T00:00:00.000Z + + TestUser + + + + + + + +""" + +USER_FEED = """ + + + http://apps-apis.google.com/a/feeds/example.com/user/2.0 + + 1970-01-01T00:00:00.000Z + + Users + """ + +EMAIL_LIST_ENTRY = """ + + + https://apps-apis.google.com/a/feeds/example.com/emailList/2.0/testlist + + 1970-01-01T00:00:00.000Z 
+ + testlist + + + + +""" + +EMAIL_LIST_FEED = """ + + + http://apps-apis.google.com/a/feeds/example.com/emailList/2.0 + + 1970-01-01T00:00:00.000Z + + EmailLists + """ + +EMAIL_LIST_RECIPIENT_ENTRY = """ + + https://apps-apis.google.com/a/feeds/example.com/emailList/2.0/us-sales/recipient/TestUser%40example.com + 1970-01-01T00:00:00.000Z + + TestUser + + + +""" + +EMAIL_LIST_RECIPIENT_FEED = """ + + + http://apps-apis.google.com/a/feeds/example.com/emailList/2.0/us-sales/recipient + + 1970-01-01T00:00:00.000Z + + Recipients for email list us-sales + """ + +ACL_FEED = """ + + http://www.google.com/calendar/feeds/liz%40gmail.com/acl/full + 2007-04-21T00:52:04.000Z + Elizabeth Bennet's access control list + + + + + + + + + Google Calendar + 2 + 1 + + http://www.google.com/calendar/feeds/liz%40gmail.com/acl/full/user%3Aliz%40gmail.com + 2007-04-21T00:52:04.000Z + + + owner + + + + + + + Elizabeth Bennet + liz@gmail.com + + + + + + + http://www.google.com/calendar/feeds/liz%40gmail.com/acl/full/default + 2007-04-21T00:52:04.000Z + + + read + + + + + + + Elizabeth Bennet + liz@gmail.com + + + + + + """ + +ACL_ENTRY = """ + + http://www.google.com/calendar/feeds/liz%40gmail.com/acl/full/user%3Aliz%40gmail.com + 2007-04-21T00:52:04.000Z + + + owner + + + + + + + Elizabeth Bennet + liz@gmail.com + + + + + """ + +DOCUMENT_LIST_FEED = """ +21test.usertest.user@gmail.comhttp://docs.google.com/feeds/documents/private/full/spreadsheet%3AsupercalifragilisticexpeadociousTest Spreadsheet2007-07-03T18:03:32.045Z + +document:dfrkj84g_3348jbxpxcd + + test.user + test.user@gmail.com + +2009-03-05T07:48:21.493Z + +test.usertest.user@gmail.comhttp://docs.google.com/feeds/documents/private/full/document%3Agr00vyTest Document2007-07-03T18:02:50.338Z + + + test.user + test.user@gmail.com + + + 2009-03-05T07:48:21.493Z +http://docs.google.com/feeds/documents/private/fullAvailable +Documents - +test.user@gmail.com2007-07-09T23:07:21.898Z + +""" + +DOCUMENT_LIST_ENTRY = """ + +test.usertest.user@gmail.com +http://docs.google.com/feeds/documents/private/full/spreadsheet%3Asupercalifragilisticexpealidocious + +Test Spreadsheet2007-07-03T18:03:32.045Z +spreadsheet:supercalifragilisticexpealidocious + + test.user + test.user@gmail.com + +2009-03-05T07:48:21.493Z + + +""" + +DOCUMENT_LIST_ENTRY_V3 = """ + +test.usertest.user@gmail.com +http://docs.google.com/feeds/documents/private/full/spreadsheet%3Asupercalifragilisticexpealidocious + + +Test Spreadsheet2007-07-03T18:03:32.045Z +spreadsheet:supercalifragilisticexpealidocious + + test.user + test.user@gmail.com + +2009-03-05T07:48:21.493Z + +1000 + + + +""" + +DOCUMENT_LIST_ACL_ENTRY = """ + + + + +""" + +DOCUMENT_LIST_ACL_FEED = """ + +http://docs.google.com/feeds/acl/private/full/spreadsheet%3ApFrmMi8feTQYCgZpwUQ +2009-02-22T03:48:25.895Z + +Document Permissions + + + + +2 +1 + + http://docs.google.com/feeds/acl/private/full/spreadsheet%3ApFrmMi8feTQp4pwUwUQ/user%3Auser%40gmail.com + 2009-02-22T03:48:25.896Z + + Document Permission - user@gmail.com + + + + + + + http://docs.google.com/feeds/acl/private/full/spreadsheet%3ApFrmMi8fCgZp4pwUwUQ/user%3Auser2%40google.com + 2009-02-22T03:48:26.257Z + + Document Permission - user2@google.com + + + + + +""" + +DOCUMENT_LIST_REVISION_FEED = """ + +https://docs.google.com/feeds/default/private/full/resource_id/revisions +2009-08-17T04:22:10.378Z +Document Revisions + + + +6 +1 + + https://docs.google.com/feeds/id/resource_id/revisions/2 + 2009-08-17T04:22:10.440Z + 2009-08-14T07:11:34.197Z + Revision 2 + + + + + + another_user + 
another_user@gmail.com + + + + + + +""" + +BATCH_ENTRY = """ + + http://www.google.com/base/feeds/items/2173859253842813008 + 2006-07-11T14:51:43.560Z + 2006-07-11T14:51: 43.560Z + title + content + + + recipes + + itemB + +""" + +BATCH_FEED_REQUEST = """ + + My Batch Feed + + http://www.google.com/base/feeds/items/13308004346459454600 + + + + http://www.google.com/base/feeds/items/17437536661927313949 + + + + ... + ... + itemA + + recipes + + + ... + ... + itemB + + recipes + +""" + +BATCH_FEED_RESULT = """ + + http://www.google.com/base/feeds/items + 2006-07-11T14:51:42.894Z + My Batch + + + + + http://www.google.com/base/feeds/items/2173859253842813008 + 2006-07-11T14:51:43.560Z + 2006-07-11T14:51: 43.560Z + ... + ... + + + recipes + + itemB + + + + http://www.google.com/base/feeds/items/11974645606383737963 + 2006-07-11T14:51:43.247Z + 2006-07-11T14:51: 43.247Z + ... + ... + + + recipes + + itemA + + + + http://www.google.com/base/feeds/items/13308004346459454600 + 2006-07-11T14:51:42.894Z + Error + Bad request + + + + + + + + http://www.google.com/base/feeds/items/17437536661927313949 + 2006-07-11T14:51:43.246Z + Deleted + + + +""" + +ALBUM_FEED = """ + + http://picasaweb.google.com/data/feed/api/user/sample.user/albumid/1 + 2007-09-21T18:23:05.000Z + + Test + + public + http://lh6.google.com/sample.user/Rt8WNoDZEJE/AAAAAAAAABk/HQGlDhpIgWo/s160-c/Test.jpg + + + + + + sample + http://picasaweb.google.com/sample.user + + Picasaweb 4 + 1 + 500 + 1 + Test + + public 1188975600000 + 2 + sample.user + sample + true + 0 + http://picasaweb.google.com/data/entry/api/user/sample.user/albumid/1/photoid/2 + 2007-09-05T20:49:23.000Z + 2007-09-21T18:23:05.000Z + + Aqua Blue.jpg + Blue + + + + 2 + 1190398985145172 + 0.0 + 1 2560 + 1600 + 883405 + + + 1189025362000 + true + c041ce17aaa637eb656c81d9cf526c24 + + true + 1 + + Aqua Blue.jpg Blue + tag, test + + + + + sample + + + + http://picasaweb.google.com/data/entry/api/user/sample.user/albumid/1/photoid/3 + 2007-09-05T20:49:24.000Z + 2007-09-21T18:19:38.000Z + + Aqua Graphite.jpg + Gray + + + + + 3 + 1190398778006402 + 1.0 + 1 + 2560 + 1600 + 798334 + + + 1189025363000 + + true + a5ce2e36b9df7d3cb081511c72e73926 + + true + 0 + + Aqua Graphite.jpg + Gray + + + + + + sample + + + + http://picasaweb.google.com/data/entry/api/user/sample.user/albumid/1/tag/tag + 2007-09-05T20:49:24.000Z + + tag + tag + + + + sample + http://picasaweb.google.com/sample.user + + + + http://picasaweb.google.com/data/entry/api/user/sample.user/albumid/1/tag/test + 2007-09-05T20:49:24.000Z + + test + test + + + + sample + http://picasaweb.google.com/sample.user + + +""" + +CODE_SEARCH_FEED = """ + +http://www.google.com/codesearch/feeds/search?q=malloc +2007-12-19T16:08:04Z +Google Code Search +Google Code Search +2530000 +1 + +Google Code Search + +http://www.google.com/codesearch + + + + + +http://www.google.com/codesearch?hl=en&q=+malloc+show:LDjwp-Iqc7U:84hEYaYsZk8:xDGReDhvNi0&sa=N&ct=rx&cd=1&cs_p=http://www.gnu.org&cs_f=software/autoconf/manual/autoconf-2.60/autoconf.html-002&cs_p=http://www.gnu.org&cs_f=software/autoconf/manual/autoconf-2.60/autoconf.html-002#first2007-12-19T16:08:04ZCode owned by external author.software/autoconf/manual/autoconf-2.60/autoconf.html<pre> 8: void *<b>malloc</b> (); + + +</pre><pre> #undef <b>malloc</b> +</pre><pre> void *<b>malloc</b> (); + +</pre><pre> rpl_<b>malloc</b> (size_t n) +</pre><pre> return <b>malloc</b> (n); + +</pre> 
+http://www.google.com/codesearch?hl=en&q=+malloc+show:h4hfh-fV-jI:niBq_bwWZNs:H0OhClf0HWQ&sa=N&ct=rx&cd=2&cs_p=ftp://ftp.gnu.org/gnu/guile/guile-1.6.8.tar.gz&cs_f=guile-1.6.8/libguile/mallocs.c&cs_p=ftp://ftp.gnu.org/gnu/guile/guile-1.6.8.tar.gz&cs_f=guile-1.6.8/libguile/mallocs.c#first2007-12-19T16:08:04ZCode owned by external author.guile-1.6.8/libguile/mallocs.c<pre> 86: { + scm_t_bits mem = n ? (scm_t_bits) <b>malloc</b> (n) : 0; + if (n &amp;&amp; !mem) + +</pre><pre>#include &lt;<b>malloc</b>.h&gt; +</pre><pre>scm_t_bits scm_tc16_<b>malloc</b>; + +</pre><pre><b>malloc</b>_free (SCM ptr) +</pre><pre><b>malloc</b>_print (SCM exp, SCM port, scm_print_state *pstate SCM_UNUSED) + +</pre><pre> scm_puts(&quot;#&lt;<b>malloc</b> &quot;, port); +</pre><pre> scm_t_bits mem = n ? (scm_t_bits) <b>malloc</b> (n) : 0; + +</pre><pre> SCM_RETURN_NEWSMOB (scm_tc16_<b>malloc</b>, mem); +</pre><pre> scm_tc16_<b>malloc</b> = scm_make_smob_type (&quot;<b>malloc</b>&quot;, 0); + +</pre><pre> scm_set_smob_free (scm_tc16_<b>malloc</b>, <b>malloc</b>_free); +</pre>GPL + +http://www.google.com/codesearch?hl=en&q=+malloc+show:9wyZUG-N_30:7_dFxoC1ZrY:C0_iYbFj90M&sa=N&ct=rx&cd=3&cs_p=http://ftp.gnu.org/gnu/bash/bash-3.0.tar.gz&cs_f=bash-3.0/lib/malloc/alloca.c&cs_p=http://ftp.gnu.org/gnu/bash/bash-3.0.tar.gz&cs_f=bash-3.0/lib/malloc/alloca.c#first2007-12-19T16:08:04ZCode owned by external author.bash-3.0/lib/malloc/alloca.c<pre> 78: #ifndef emacs + #define <b>malloc</b> x<b>malloc</b> + extern pointer x<b>malloc</b> (); + +</pre><pre> <b>malloc</b>. The Emacs executable needs alloca to call x<b>malloc</b>, because +</pre><pre> ordinary <b>malloc</b> isn&#39;t protected from input signals. On the other + +</pre><pre> hand, the utilities in lib-src need alloca to call <b>malloc</b>; some of +</pre><pre> them are very simple, and don&#39;t have an x<b>malloc</b> routine. + +</pre><pre> Callers below should use <b>malloc</b>. */ +</pre><pre>#define <b>malloc</b> x<b>malloc</b> + +</pre><pre>extern pointer x<b>malloc</b> (); +</pre><pre> It is very important that sizeof(header) agree with <b>malloc</b> + +</pre><pre> register pointer new = <b>malloc</b> (sizeof (header) + size); +</pre>GPL +http://www.google.com/codesearch?hl=en&q=+malloc+show:uhVCKyPcT6k:8juMxxzmUJw:H7_IDsTB2L4&sa=N&ct=rx&cd=4&cs_p=http://ftp.mozilla.org/pub/mozilla.org/mozilla/releases/mozilla1.7b/src/mozilla-source-1.7b-source.tar.bz2&cs_f=mozilla/xpcom/build/malloc.c&cs_p=http://ftp.mozilla.org/pub/mozilla.org/mozilla/releases/mozilla1.7b/src/mozilla-source-1.7b-source.tar.bz2&cs_f=mozilla/xpcom/build/malloc.c#first2007-12-19T16:08:04ZCode owned by external author.mozilla/xpcom/build/malloc.c<pre> 54: http://gee.cs.oswego.edu/dl/html/<b>malloc</b>.html + + You may already by default be using a c library containing a <b>malloc</b> + +</pre><pre>/* ---------- To make a <b>malloc</b>.h, start cutting here ------------ */ +</pre><pre> Note: There may be an updated version of this <b>malloc</b> obtainable at + +</pre><pre> ftp://gee.cs.oswego.edu/pub/misc/<b>malloc</b>.c +</pre><pre>* Why use this <b>malloc</b>? + +</pre><pre> most tunable <b>malloc</b> ever written. However it is among the fastest +</pre><pre> allocator for <b>malloc</b>-intensive programs. 
+ +</pre><pre> http://gee.cs.oswego.edu/dl/html/<b>malloc</b>.html +</pre><pre> You may already by default be using a c library containing a <b>malloc</b> + +</pre><pre> that is somehow based on some version of this <b>malloc</b> (for example in +</pre>Mozilla +http://www.google.com/codesearch?hl=en&q=+malloc+show:4n1P2HVOISs:Ybbpph0wR2M:OhIN_sDrG0U&sa=N&ct=rx&cd=5&cs_p=http://regexps.srparish.net/src/hackerlab/hackerlab-1.0pre2.tar.gz&cs_f=hackerlab-1.0pre2/src/hackerlab/tests/mem-tests/unit-must-malloc.sh&cs_p=http://regexps.srparish.net/src/hackerlab/hackerlab-1.0pre2.tar.gz&cs_f=hackerlab-1.0pre2/src/hackerlab/tests/mem-tests/unit-must-malloc.sh#first2007-12-19T16:08:04ZCode owned by external author.hackerlab-1.0pre2/src/hackerlab/tests/mem-tests/unit-must-malloc.sh<pre> 11: echo ================ unit-must-<b>malloc</b> tests ================ + ./unit-must-<b>malloc</b> + echo ...passed + +</pre><pre># tag: Tom Lord Tue Dec 4 14:54:29 2001 (mem-tests/unit-must-<b>malloc</b>.sh) +</pre><pre>echo ================ unit-must-<b>malloc</b> tests ================ + +</pre><pre>./unit-must-<b>malloc</b> +</pre>GPL +http://www.google.com/codesearch?hl=en&q=+malloc+show:GzkwiWG266M:ykuz3bG00ws:2sTvVSif08g&sa=N&ct=rx&cd=6&cs_p=http://ftp.gnu.org/gnu/tar/tar-1.14.tar.bz2&cs_f=tar-1.14/lib/malloc.c&cs_p=http://ftp.gnu.org/gnu/tar/tar-1.14.tar.bz2&cs_f=tar-1.14/lib/malloc.c#first2007-12-19T16:08:04ZCode owned by external author.tar-1.14/lib/malloc.c<pre> 22: #endif + #undef <b>malloc</b> + + +</pre><pre>/* Work around bug on some systems where <b>malloc</b> (0) fails. +</pre><pre>#undef <b>malloc</b> + +</pre><pre>rpl_<b>malloc</b> (size_t n) +</pre><pre> return <b>malloc</b> (n); + +</pre>GPL +http://www.google.com/codesearch?hl=en&q=+malloc+show:o_TFIeBY6dY:ktI_dt8wPao:AI03BD1Dz0Y&sa=N&ct=rx&cd=7&cs_p=http://ftp.gnu.org/gnu/tar/tar-1.16.1.tar.gz&cs_f=tar-1.16.1/lib/malloc.c&cs_p=http://ftp.gnu.org/gnu/tar/tar-1.16.1.tar.gz&cs_f=tar-1.16.1/lib/malloc.c#first2007-12-19T16:08:04ZCode owned by external author.tar-1.16.1/lib/malloc.c<pre> 21: #include &lt;config.h&gt; + #undef <b>malloc</b> + + +</pre><pre>/* <b>malloc</b>() function that is glibc compatible. +</pre><pre>#undef <b>malloc</b> + +</pre><pre>rpl_<b>malloc</b> (size_t n) +</pre><pre> return <b>malloc</b> (n); + +</pre>GPL +http://www.google.com/codesearch?hl=en&q=+malloc+show:_ibw-VLkMoI:jBOtIJSmFd4:-0NUEVeCwfY&sa=N&ct=rx&cd=8&cs_p=http://freshmeat.net/redir/uclibc/20616/url_bz2/uClibc-0.9.28.1.tar.bz2&cs_f=uClibc-0.9.29/include/malloc.h&cs_p=http://freshmeat.net/redir/uclibc/20616/url_bz2/uClibc-0.9.28.1.tar.bz2&cs_f=uClibc-0.9.29/include/malloc.h#first2007-12-19T16:08:04ZCode owned by external author.uClibc-0.9.29/include/malloc.h<pre> 1: /* Prototypes and definition for <b>malloc</b> implementation. + Copyright (C) 1996, 1997, 1999, 2000 Free Software Foundation, Inc. + +</pre><pre>/* Prototypes and definition for <b>malloc</b> implementation. +</pre><pre> `pt<b>malloc</b>&#39;, a <b>malloc</b> implementation for multiple threads without + +</pre><pre> See the files `pt<b>malloc</b>.c&#39; or `COPYRIGHT&#39; for copying conditions. +</pre><pre> This work is mainly derived from <b>malloc</b>-2.6.4 by Doug Lea + +</pre><pre> ftp://g.oswego.edu/pub/misc/<b>malloc</b>.c +</pre><pre> `pt<b>malloc</b>.c&#39;. 
+ +</pre><pre># define __<b>malloc</b>_ptr_t void * +</pre><pre># define __<b>malloc</b>_ptr_t char * + +</pre><pre># define __<b>malloc</b>_size_t size_t +</pre>LGPL +http://www.google.com/codesearch?hl=en&q=+malloc+show:F6qHcZ9vefo:bTX7o9gKfks:hECF4r_eKC0&sa=N&ct=rx&cd=9&cs_p=http://ftp.gnu.org/gnu/glibc/glibc-2.0.1.tar.gz&cs_f=glibc-2.0.1/hurd/hurdmalloc.h&cs_p=http://ftp.gnu.org/gnu/glibc/glibc-2.0.1.tar.gz&cs_f=glibc-2.0.1/hurd/hurdmalloc.h#first2007-12-19T16:08:04ZCode owned by external author.glibc-2.0.1/hurd/hurdmalloc.h<pre> 15: #define <b>malloc</b> _hurd_<b>malloc</b> + #define realloc _hurd_realloc + +</pre><pre> All hurd-internal code which uses <b>malloc</b> et al includes this file so it +</pre><pre> will use the internal <b>malloc</b> routines _hurd_{<b>malloc</b>,realloc,free} + +</pre><pre> of <b>malloc</b> et al is the unixoid one using sbrk. +</pre><pre>extern void *_hurd_<b>malloc</b> (size_t); + +</pre><pre>#define <b>malloc</b> _hurd_<b>malloc</b> +</pre>GPL + +http://www.google.com/codesearch?hl=en&q=+malloc+show:CHUvHYzyLc8:pdcAfzDA6lY:wjofHuNLTHg&sa=N&ct=rx&cd=10&cs_p=ftp://apache.mirrors.pair.com/httpd/httpd-2.2.4.tar.bz2&cs_f=httpd-2.2.4/srclib/apr/include/arch/netware/apr_private.h&cs_p=ftp://apache.mirrors.pair.com/httpd/httpd-2.2.4.tar.bz2&cs_f=httpd-2.2.4/srclib/apr/include/arch/netware/apr_private.h#first2007-12-19T16:08:04ZCode owned by external author.httpd-2.2.4/srclib/apr/include/arch/netware/apr_private.h<pre> 173: #undef <b>malloc</b> + #define <b>malloc</b>(x) library_<b>malloc</b>(gLibHandle,x) + +</pre><pre>/* Redefine <b>malloc</b> to use the library <b>malloc</b> call so +</pre><pre>#undef <b>malloc</b> + +</pre><pre>#define <b>malloc</b>(x) library_<b>malloc</b>(gLibHandle,x) +</pre>Apache + +""" + +YOUTUBE_VIDEO_FEED = """http://gdata.youtube.com/feeds/api/standardfeeds/top_rated2008-05-14T02:24:07.000-07:00Top Ratedhttp://www.youtube.com/img/pic_youtubelogo_123x63.gifYouTubehttp://www.youtube.com/YouTube data API100125 +http://gdata.youtube.com/feeds/api/videos/C71ypXYGho82008-03-20T10:17:27.000-07:002008-05-14T04:26:37.000-07:00Me odeio por te amar - KARYN GARCIAhttp://www.karyngarcia.com.brTvKarynGarciahttp://gdata.youtube.com/feeds/api/users/tvkaryngarciaMe odeio por te amar - KARYN GARCIAhttp://www.karyngarcia.com.bramar, boyfriend, garcia, karyn, me, odeio, por, teMusictest111test222 +http://gdata.youtube.com/feeds/api/videos/gsVaTyb1tBw2008-02-15T04:31:45.000-08:002008-05-14T05:09:42.000-07:00extreme helmet cam Kani, Keil and Patotrimmedperaltamagichttp://gdata.youtube.com/feeds/api/users/peraltamagicextreme helmet cam Kani, Keil and Patotrimmedalcala, cam, campillo, dirt, extreme, helmet, kani, patoSports +""" + +YOUTUBE_ENTRY_PRIVATE = """ + + http://gdata.youtube.com/feeds/videos/UMFI1hdm96E + 2007-01-07T01:50:15.000Z + 2007-01-07T01:50:15.000Z + + + + + + + + + "Crazy (Gnarles Barkley)" - Acoustic Cover + <div style="color: #000000;font-family: + Arial, Helvetica, sans-serif; font-size:12px; font-size: 12px; + width: 555px;"><table cellspacing="0" cellpadding="0" + border="0"><tbody><tr><td width="140" + valign="top" rowspan="2"><div style="border: 1px solid + #999999; margin: 0px 10px 5px 0px;"><a + href="http://www.youtube.com/watch?v=UMFI1hdm96E"><img + alt="" + src="http://img.youtube.com/vi/UMFI1hdm96E/2.jpg"></a></div></td> + <td width="256" valign="top"><div style="font-size: + 12px; font-weight: bold;"><a style="font-size: 15px; + font-weight: bold; font-decoration: none;" + 
href="http://www.youtube.com/watch?v=UMFI1hdm96E">&quot;Crazy + (Gnarles Barkley)&quot; - Acoustic Cover</a> + <br></div> <div style="font-size: 12px; margin: + 3px 0px;"><span>Gnarles Barkley acoustic cover + http://www.myspace.com/davidchoimusic</span></div></td> + <td style="font-size: 11px; line-height: 1.4em; padding-left: + 20px; padding-top: 1px;" width="146" + valign="top"><div><span style="color: #666666; + font-size: 11px;">From:</span> <a + href="http://www.youtube.com/profile?user=davidchoimusic">davidchoimusic</a></div> + <div><span style="color: #666666; font-size: + 11px;">Views:</span> 113321</div> <div + style="white-space: nowrap;text-align: left"><img + style="border: 0px none; margin: 0px; padding: 0px; + vertical-align: middle; font-size: 11px;" align="top" alt="" + src="http://gdata.youtube.com/static/images/icn_star_full_11x11.gif"> + <img style="border: 0px none; margin: 0px; padding: 0px; + vertical-align: middle; font-size: 11px;" align="top" alt="" + src="http://gdata.youtube.com/static/images/icn_star_full_11x11.gif"> + <img style="border: 0px none; margin: 0px; padding: 0px; + vertical-align: middle; font-size: 11px;" align="top" alt="" + src="http://gdata.youtube.com/static/images/icn_star_full_11x11.gif"> + <img style="border: 0px none; margin: 0px; padding: 0px; + vertical-align: middle; font-size: 11px;" align="top" alt="" + src="http://gdata.youtube.com/static/images/icn_star_full_11x11.gif"> + <img style="border: 0px none; margin: 0px; padding: 0px; + vertical-align: middle; font-size: 11px;" align="top" alt="" + src="http://gdata.youtube.com/static/images/icn_star_half_11x11.gif"></div> + <div style="font-size: 11px;">1005 <span style="color: + #666666; font-size: + 11px;">ratings</span></div></td></tr> + <tr><td><span style="color: #666666; font-size: + 11px;">Time:</span> <span style="color: #000000; + font-size: 11px; font-weight: + bold;">04:15</span></td> <td style="font-size: + 11px; padding-left: 20px;"><span style="color: #666666; + font-size: 11px;">More in</span> <a + href="http://www.youtube.com/categories_portal?c=10">Music</a></td></tr></tbody></table></div> + + + + + + davidchoimusic + http://gdata.youtube.com/feeds/users/davidchoimusic + + + "Crazy (Gnarles Barkley)" - Acoustic Cover + Gnarles Barkley acoustic cover http://www.myspace.com/davidchoimusic + music, singing, gnarls, barkley, acoustic, cover + + + Music + + DeveloperTag1 + + + + + + + + + + + + + 37.398529052734375 -122.0635986328125 + + + + + + + + yes + + The content of this video may violate the terms of use. 
+ +""" + +YOUTUBE_COMMENT_FEED = """ +http://gdata.youtube.com/feeds/videos/2Idhz9ef5oU/comments2008-05-19T21:45:45.261ZCommentshttp://www.youtube.com/img/pic_youtubelogo_123x63.gifYouTubehttp://www.youtube.com/YouTube data API0125 + + http://gdata.youtube.com/feeds/videos/2Idhz9ef5oU/comments/91F809A3DE2EB81B + 2008-02-22T15:27:15.000-08:002008-02-22T15:27:15.000-08:00 + + test66 + test66 + + + + apitestjhartmannhttp://gdata.youtube.com/feeds/users/apitestjhartmann + + + http://gdata.youtube.com/feeds/videos/2Idhz9ef5oU/comments/A261AEEFD23674AA + 2008-02-22T15:27:01.000-08:002008-02-22T15:27:01.000-08:00 + + test333 + test333 + + + + apitestjhartmannhttp://gdata.youtube.com/feeds/users/apitestjhartmann + + + http://gdata.youtube.com/feeds/videos/2Idhz9ef5oU/comments/0DCF1E3531B3FF85 + 2008-02-22T15:11:06.000-08:002008-02-22T15:11:06.000-08:00 + + test2 + test2 + + + + apitestjhartmannhttp://gdata.youtube.com/feeds/users/apitestjhartmann + +""" + +YOUTUBE_PLAYLIST_FEED = """ + + http://gdata.youtube.com/feeds/users/andyland74/playlists?start-index=1&max-results=25 + 2008-02-26T00:26:15.635Z + + andyland74's Playlists + http://www.youtube.com/img/pic_youtubelogo_123x63.gif + + + + + + andyland74 + http://gdata.youtube.com/feeds/users/andyland74 + + YouTube data API + 1 + 1 + 25 + + My new playlist Description + + http://gdata.youtube.com/feeds/users/andyland74/playlists/8BCDD04DE8F771B2 + 2007-11-04T17:30:27.000-08:00 + 2008-02-22T09:55:14.000-08:00 + + My New Playlist Title + My new playlist Description + + + + + andyland74 + http://gdata.youtube.com/feeds/users/andyland74 + + +""" + +YOUTUBE_PLAYLIST_VIDEO_FEED = """http://gdata.youtube.com/feeds/api/playlists/BCB3BB96DF51B5052008-05-16T12:03:17.000-07:00Test PlaylistTest playlist 1http://www.youtube.com/img/pic_youtubelogo_123x63.gifgdpythonhttp://gdata.youtube.com/feeds/api/users/gdpythonYouTube data API1125Test PlaylistTest playlist 1http://gdata.youtube.com/feeds/api/playlists/BCB3BB96DF51B505/B0F29389E537F8882008-05-16T20:54:08.520ZUploading YouTube Videos with the PHP Client LibraryJochen Hartmann demonstrates the basics of how to use the PHP Client Library with the YouTube Data API. + +PHP Developer's Guide: +http://code.google.com/apis/youtube/developers_guide_php.html + +Other documentation: +http://code.google.com/apis/youtube/GoogleDevelopershttp://gdata.youtube.com/feeds/api/users/googledevelopersUploading YouTube Videos with the PHP Client LibraryJochen Hartmann demonstrates the basics of how to use the PHP Client Library with the YouTube Data API. 
+ +PHP Developer's Guide: +http://code.google.com/apis/youtube/developers_guide_php.html + +Other documentation: +http://code.google.com/apis/youtube/api, data, demo, php, screencast, tutorial, uploading, walkthrough, youtubeEducationundefined1""" + +YOUTUBE_SUBSCRIPTION_FEED = """ + + http://gdata.youtube.com/feeds/users/andyland74/subscriptions?start-index=1&max-results=25 + 2008-02-26T00:26:15.635Z + + andyland74's Subscriptions + http://www.youtube.com/img/pic_youtubelogo_123x63.gif + + + + + + andyland74 + http://gdata.youtube.com/feeds/users/andyland74 + + YouTube data API + 1 + 1 + 25 + + http://gdata.youtube.com/feeds/users/andyland74/subscriptions/d411759045e2ad8c + 2007-11-04T17:30:27.000-08:00 + 2008-02-22T09:55:14.000-08:00 + + + Videos published by : NBC + + + + + andyland74 + http://gdata.youtube.com/feeds/users/andyland74 + + NBC + + +""" + +YOUTUBE_VIDEO_RESPONSE_FEED = """ + + http://gdata.youtube.com/feeds/videos/2c3q9K4cHzY/responses2008-05-19T22:37:34.076ZVideos responses to 'Giant NES controller coffee table'http://www.youtube.com/img/pic_youtubelogo_123x63.gifYouTubehttp://www.youtube.com/YouTube data API8125 + + http://gdata.youtube.com/feeds/videos/7b9EnRI9VbY2008-03-11T19:08:53.000-07:002008-05-18T21:33:10.000-07:00 + + + + + + + + + + + + Catnip Partysnipped + + + + + PismoBeachhttp://gdata.youtube.com/feeds/users/pismobeach + + Catnip Party + Uncle, Hillary, Hankette, and B4 all but overdose on the patioBrattman, cat, catmint, catnip, cats, chat, drug, gato, gatto, kat, kato, katt, Katze, kedi, kissa, OD, overdose, party, sex, Uncle + + Animals + + + + + + + + + + + + + + + + +""" + + +YOUTUBE_PROFILE = """ + + http://gdata.youtube.com/feeds/users/andyland74 + 2006-10-16T00:09:45.000-07:00 + 2008-02-26T11:48:21.000-08:00 + + + andyland74 Channel + + + + andyland74 + http://gdata.youtube.com/feeds/users/andyland74 + + 33 + andyland74 + andy + example + Catch-22 + m + Google + Testing YouTube APIs + Somewhere + US + Aqua Teen Hungerforce + Elliott Smith + Technical Writer + University of North Carolina + + + + + + + + +""" + +YOUTUBE_CONTACTS_FEED = """ + http://gdata.youtube.com/feeds/users/apitestjhartmann/contacts2008-05-16T19:24:34.916Zapitestjhartmann's Contactshttp://www.youtube.com/img/pic_youtubelogo_123x63.gifapitestjhartmannhttp://gdata.youtube.com/feeds/users/apitestjhartmannYouTube data API2125 + + http://gdata.youtube.com/feeds/users/apitestjhartmann/contacts/test898990902008-02-04T11:27:54.000-08:002008-05-16T19:24:34.916Ztest89899090apitestjhartmannhttp://gdata.youtube.com/feeds/users/apitestjhartmanntest89899090requested + + http://gdata.youtube.com/feeds/users/apitestjhartmann/contacts/testjfisher2008-02-26T14:13:03.000-08:002008-05-16T19:24:34.916Ztestjfisherapitestjhartmannhttp://gdata.youtube.com/feeds/users/apitestjhartmanntestjfisherpending +""" + +NEW_CONTACT = """ + + http://www.google.com/m8/feeds/contacts/liz%40gmail.com/base/8411573 + 2008-02-28T18:47:02.303Z + + Fitzgerald + Notes + + + + + (206)555-1212 + 456-123-2133 + (206)555-1213 + + + + + + + 1600 Amphitheatre Pkwy Mountain View +""" + +CONTACTS_FEED = """ + + http://www.google.com/m8/feeds/contacts/liz%40gmail.com/base + 2008-03-05T12:36:38.836Z + + Contacts + + + + + + Elizabeth Bennet + liz@gmail.com + + + Contacts + + 1 + 1 + 25 + + + http://www.google.com/m8/feeds/contacts/liz%40gmail.com/base/c9012de + + 2008-03-05T12:36:38.835Z + + Fitzgerald + + + + + + 456 + + + + +""" + + +CONTACT_GROUPS_FEED = """ + + jo@gmail.com + 2008-05-21T21:11:25.237Z + + Jo's Contact Groups + + + 
+ + + + Jo Brown + jo@gmail.com + + Contacts + 3 + 1 + 25 + + http://google.com/m8/feeds/groups/jo%40gmail.com/base/270f + 2008-05-14T13:10:19.070Z + + joggers + joggers + + + +""" + +CONTACT_GROUP_ENTRY = """ + + + http://www.google.com/feeds/groups/jo%40gmail.com/base/1234 + 2005-01-18T21:00:00Z + 2006-01-01T00:00:00Z + Salsa group + Salsa group + + + + Very nice people. + +""" + +CALENDAR_RESOURCE_ENTRY = """ + + + + + +""" + +CALENDAR_RESOURCES_FEED = """ + + https://apps-apis.google.com/a/feeds/calendar/resource/2.0/yourdomain.com + 2008-10-17T15:29:21.064Z + + + + + 1 + + https://apps-apis.google.com/a/feeds/calendar/resource/2.0/yourdomain.com/CR-NYC-14-12-BR + 2008-10-17T15:29:21.064Z + + + + + + + + + + https://apps-apis.google.com/a/feeds/calendar/resource/2.0/yourdomain.com/?start=(Bike)-London-43-Lobby-Bike-1 + 2008-10-17T15:29:21.064Z + + + + + + + + +""" + +BLOG_ENTRY = """ + tag:blogger.com,1999:blog-blogID.post-postID + 2006-08-02T18:44:43.089-07:00 + 2006-11-08T18:10:23.020-08:00 + Lizzy's Diary + Being the journal of Elizabeth Bennet + + + + + + + + + + + + Elizabeth Bennet + liz@gmail.com + +""" + +BLOG_POST = """ + Marriage! + +
    +

    Mr. Darcy has proposed marriage to me!

    +

    He is the last man on earth I would ever desire to marry.

    +

    Whatever shall I do?

    +
    +
    + + Elizabeth Bennet + liz@gmail.com + +
    """ + +BLOG_POSTS_FEED = """ + tag:blogger.com,1999:blog-blogID + 2006-11-08T18:10:23.020-08:00 + Lizzy's Diary + + + + + + + + Elizabeth Bennet + liz@gmail.com + + Blogger + + tag:blogger.com,1999:blog-blogID.post-postID + 2006-11-08T18:10:00.000-08:00 + 2006-11-08T18:10:14.954-08:00 + Quite disagreeable + <p>I met Mr. Bingley's friend Mr. Darcy + this evening. I found him quite disagreeable.</p> + + + + + + + + Elizabeth Bennet + liz@gmail.com + + +""" + +BLOG_COMMENTS_FEED = """ + tag:blogger.com,1999:blog-blogID.postpostID..comments + 2007-04-04T21:56:29.803-07:00 + My Blog : Time to relax + + + + + Blog Author name + + Blogger + 1 + 1 + + tag:blogger.com,1999:blog-blogID.post-commentID + 2007-04-04T21:56:00.000-07:00 + 2007-04-04T21:56:29.803-07:00 + This is my first comment + This is my first comment + + + + + Blog Author name + + + +""" + + +SITES_FEED = """ + https://www.google.com/webmasters/tools/feeds/sites + Sites + 1 + + + + + 2008-10-02T07:26:51.833Z + + http://www.example.com + http://www.example.com + + + + 2007-11-17T18:27:32.543Z + + + + true + 2008-09-14T08:59:28.000 + US + none + normal + true + false + + + 456456-google.html + +""" + + +SITEMAPS_FEED = """ + http://www.example.com + http://www.example.com/ + 2006-11-17T18:27:32.543Z + + + + HTML + WAP + + + Value1 + Value2 + Value3 + + + http://www.example.com/sitemap-index.xml + http://www.example.com/sitemap-index.xml + + 2006-11-17T18:27:32.543Z + WEB + StatusValue + 2006-11-18T19:27:32.543Z + 102 + + + http://www.example.com/mobile/sitemap-index.xml + http://www.example.com/mobile/sitemap-index.xml + + 2006-11-17T18:27:32.543Z + StatusValue + 2006-11-18T19:27:32.543Z + 102 + HTML + + + http://www.example.com/news/sitemap-index.xml + http://www.example.com/news/sitemap-index.xml + + 2006-11-17T18:27:32.543Z + StatusValue + 2006-11-18T19:27:32.543Z + 102 + LabelValue + +""" + +HEALTH_CCR_NOTICE_PAYLOAD = """ + + + + + Start date + 2007-04-04T07:00:00Z + + + Aortic valve disorders + + 410.10 + ICD9 + 2004 + + + Active + + + +""" + +HEALTH_PROFILE_ENTRY_DIGEST = """ + + https://www.google.com/health/feeds/profile/default/vneCn5qdEIY_digest + 2008-09-29T07:52:17.176Z + + + + + + vneCn5qdEIY + + English + + en + ISO-639-1 + + + V1.0 + + 2008-09-29T07:52:17.176Z + + + Google Health Profile + + + + + + Pregnancy status + + + Not pregnant + + + + + user@google.com + + Patient + + + + + + + Breastfeeding status + + + Not breastfeeding + + + + + user@gmail.com + + Patient + + + + + + + + Hn0FE0IlcY-FMFFgSTxkvA/CONDITION/0 + + + Start date + + 2007-04-04T07:00:00Z + + + Aortic valve disorders + + 410.10 + ICD9 + 2004 + + + + Active + + + + example.com + + Information Provider + + + + + + + + Malaria + + 136.9 + ICD9_Broader + + + 084.6 + ICD9 + + + + ACTIVE + + + + user@gmail.com + + Patient + + + + + + + + + + + + Race + + S15814 + HL7 + + + + White + + + + + user@gmail.com + + Patient + + + + + + + + + + + + + + Allergy + + + A-Fil + + + ACTIVE + + + + user@gmail.com + + Patient + + + + + + + Severe + + + + + + Allergy + + + A.E.R Traveler + + + ACTIVE + + + + user@gmail.com + + Patient + + + + + + + Severe + + + + + + + + + + ACTIVE + + + + user@gmail.com + + Patient + + + + + + A& D + + + + 0 + + + + + + + + + + 0 + + + + To skin + + C38305 + FDA + + 0 + + + + + + + + + + + ACTIVE + + + + user@gmail.com + + Patient + + + + + + A-Fil + + + + 0 + + + + + + + + + + 0 + + + + To skin + + C38305 + FDA + + 0 + + + + + + + + + + + ACTIVE + + + + user@gmail.com + + Patient + + + + + + Lipitor + + + + 0 + + + + + + + + + + 0 + 
+ + + By mouth + + C38288 + FDA + + 0 + + + + + + + + + + + + + + + user@gmail.com + + Patient + + + + + + Chickenpox Vaccine + + 21 + HL7 + + + + + + + + + + + + + + + + + + + + user@gmail.com + + Patient + + + + + + + + Height + + + + 0 + + 70 + + inches + + + + + + + + + + + + user@gmail.com + + Patient + + + + + + + + Weight + + + + 0 + + 2480 + + ounces + + + + + + + + + + + + user@gmail.com + + Patient + + + + + + + + Blood Type + + + + 0 + + O+ + + + + + + + + + + + + + + user@gmail.com + + Patient + + + + + + + + Collection start date + + 2008-09-03 + + + + Acetaldehyde - Blood + + + + 0 + + + + + + + + + + + + Abdominal Ultrasound + + + + + user@gmail.com + + Patient + + + + + + + + Abdominoplasty + + + + + user@gmail.com + + Patient + + + + + + + + + Google Health Profile + + + + + + + + 1984-07-22 + + + Male + + + + + + user@gmail.com + + Patient + + + + + + +""" + +HEALTH_PROFILE_FEED = """ +https://www.google.com/health/feeds/profile/default +2008-09-30T01:07:17.888Z + +Profile Feed + + + + +1 + + https://www.google.com/health/feeds/profile/default/DysasdfARnFAao + 2008-09-29T03:12:50.850Z + 2008-09-29T03:12:50.850Z + + + + + <content type="html"/> + <link rel="http://schemas.google.com/health/data#complete" type="application/atom+xml" href="https://www.google.com/health/feeds/profile/default/-/MEDICATION/%7Bhttp%3A%2F%2Fschemas.google.com%2Fg%2F2005%23kind%7Dhttp%3A%2F%2Fschemas.google.com%2Fhealth%2Fkinds%23profile/%7Bhttp%3A%2F%2Fschemas.google.com%2Fhealth%2Fitem%7DA%26+D"/> + <link rel="self" type="application/atom+xml" href="https://www.google.com/health/feeds/profile/default/DysasdfARnFAao"/> + <link rel="edit" type="application/atom+xml" href="https://www.google.com/health/feeds/profile/default/DysasdfARnFAao"/> + <author> + <name>User Name</name> + <email>user@gmail.com</email> + </author> + <ContinuityOfCareRecord xmlns="urn:astm-org:CCR"> + <CCRDocumentObjectID>hiD9sEigSzdk8nNT0evR4g</CCRDocumentObjectID> + <Language/> + <DateTime> + <Type/> + </DateTime> + <Patient/> + <Body> + <Medications> + <Medication> + <Type/> + <Description/> + <Status> + <Text>ACTIVE</Text> + </Status> + <Source> + <Actor> + <ActorID>user@gmail.com</ActorID> + <ActorRole> + <Text>Patient</Text> + </ActorRole> + </Actor> + </Source> + <Product> + <ProductName> + <Text>A& D</Text> + </ProductName> + <Strength> + <Units/> + <StrengthSequencePosition>0</StrengthSequencePosition> + <VariableStrengthModifier/> + </Strength> + </Product> + <Directions> + <Direction> + <Description/> + <DeliveryMethod/> + <Dose> + <Units/> + <DoseSequencePosition>0</DoseSequencePosition> + <VariableDoseModifier/> + </Dose> + <Route> + <Text>To skin</Text> + <Code> + <Value>C38305</Value> + <CodingSystem>FDA</CodingSystem> + </Code> + <RouteSequencePosition>0</RouteSequencePosition> + <MultipleRouteModifier/> + </Route> + </Direction> + </Directions> + <Refills/> + </Medication> + </Medications> + </Body> + </ContinuityOfCareRecord> +</entry> +<entry> + <id>https://www.google.com/health/feeds/profile/default/7I1WQzZrgp4</id> + <published>2008-09-29T03:27:14.909Z</published> + <updated>2008-09-29T03:27:14.909Z</updated> + <category scheme="http://schemas.google.com/g/2005#kind" term="http://schemas.google.com/health/kinds#profile"/> + <category scheme="http://schemas.google.com/health/item" term="A-Fil"/> + <category term="ALLERGY"/> + <title type="text"/> + <content type="html"/> + <link rel="http://schemas.google.com/health/data#complete" type="application/atom+xml" 
href="https://www.google.com/health/feeds/profile/default/-/%7Bhttp%3A%2F%2Fschemas.google.com%2Fg%2F2005%23kind%7Dhttp%3A%2F%2Fschemas.google.com%2Fhealth%2Fkinds%23profile/%7Bhttp%3A%2F%2Fschemas.google.com%2Fhealth%2Fitem%7DA-Fil/ALLERGY"/> + <link rel="self" type="application/atom+xml" href="https://www.google.com/health/feeds/profile/default/7I1WQzZrgp4"/> + <link rel="edit" type="application/atom+xml" href="https://www.google.com/health/feeds/profile/default/7I1WQzZrgp4"/> + <author> + <name>User Name</name> + <email>user@gmail.com</email> + </author> + <ContinuityOfCareRecord xmlns="urn:astm-org:CCR"> + <CCRDocumentObjectID>YOyHDxQUiECCPgnsjV8SlQ</CCRDocumentObjectID> + <Language/> + <DateTime> + <Type/> + </DateTime> + <Patient/> + <Body> + <Alerts> + <Alert> + <Type> + <Text>Allergy</Text> + </Type> + <Description> + <Text>A-Fil</Text> + </Description> + <Status> + <Text>ACTIVE</Text> + </Status> + <Source> + <Actor> + <ActorID>user@gmail.com</ActorID> + <ActorRole> + <Text>Patient</Text> + </ActorRole> + </Actor> + </Source> + <Reaction> + <Description/> + <Severity> + <Text>Severe</Text> + </Severity> + </Reaction> + </Alert> + </Alerts> + </Body> + </ContinuityOfCareRecord> +</entry> +<entry> + <id>https://www.google.com/health/feeds/profile/default/Dz9wV83sKFg</id> + <published>2008-09-29T03:12:52.166Z</published> + <updated>2008-09-29T03:12:52.167Z</updated> + <category term="MEDICATION"/> + <category scheme="http://schemas.google.com/g/2005#kind" term="http://schemas.google.com/health/kinds#profile"/> + <category scheme="http://schemas.google.com/health/item" term="A-Fil"/> + <title type="text"/> + <content type="html"/> + <link rel="http://schemas.google.com/health/data#complete" type="application/atom+xml" href="https://www.google.com/health/feeds/profile/default/-/MEDICATION/%7Bhttp%3A%2F%2Fschemas.google.com%2Fg%2F2005%23kind%7Dhttp%3A%2F%2Fschemas.google.com%2Fhealth%2Fkinds%23profile/%7Bhttp%3A%2F%2Fschemas.google.com%2Fhealth%2Fitem%7DA-Fil"/> + <link rel="self" type="application/atom+xml" href="https://www.google.com/health/feeds/profile/default/Dz9wV83sKFg"/> + <link rel="edit" type="application/atom+xml" href="https://www.google.com/health/feeds/profile/default/Dz9wV83sKFg"/> + <author> + <name>User Name</name> + <email>user@gmail.com</email> + </author> + <ContinuityOfCareRecord xmlns="urn:astm-org:CCR"> + <CCRDocumentObjectID>7w.XFEPeuIYN3Rn32pUiUw</CCRDocumentObjectID> + <Language/> + <DateTime> + <Type/> + </DateTime> + <Patient/> + <Body> + <Medications> + <Medication> + <Type/> + <Description/> + <Status> + <Text>ACTIVE</Text> + </Status> + <Source> + <Actor> + <ActorID>user@gmail.com</ActorID> + <ActorRole> + <Text>Patient</Text> + </ActorRole> + </Actor> + </Source> + <Product> + <ProductName> + <Text>A-Fil</Text> + </ProductName> + <Strength> + <Units/> + <StrengthSequencePosition>0</StrengthSequencePosition> + <VariableStrengthModifier/> + </Strength> + </Product> + <Directions> + <Direction> + <Description/> + <DeliveryMethod/> + <Dose> + <Units/> + <DoseSequencePosition>0</DoseSequencePosition> + <VariableDoseModifier/> + </Dose> + <Route> + <Text>To skin</Text> + <Code> + <Value>C38305</Value> + <CodingSystem>FDA</CodingSystem> + </Code> + <RouteSequencePosition>0</RouteSequencePosition> + <MultipleRouteModifier/> + </Route> + </Direction> + </Directions> + <Refills/> + </Medication> + </Medications> + </Body> + </ContinuityOfCareRecord> +</entry> +<entry> + <id>https://www.google.com/health/feeds/profile/default/lzsxVzqZUyw</id> + 
<published>2008-09-29T03:13:07.496Z</published> + <updated>2008-09-29T03:13:07.497Z</updated> + <category scheme="http://schemas.google.com/health/item" term="A.E.R Traveler"/> + <category scheme="http://schemas.google.com/g/2005#kind" term="http://schemas.google.com/health/kinds#profile"/> + <category term="ALLERGY"/> + <title type="text"/> + <content type="html"/> + <link rel="http://schemas.google.com/health/data#complete" type="application/atom+xml" href="https://www.google.com/health/feeds/profile/default/-/%7Bhttp%3A%2F%2Fschemas.google.com%2Fhealth%2Fitem%7DA.E.R+Traveler/%7Bhttp%3A%2F%2Fschemas.google.com%2Fg%2F2005%23kind%7Dhttp%3A%2F%2Fschemas.google.com%2Fhealth%2Fkinds%23profile/ALLERGY"/> + <link rel="self" type="application/atom+xml" href="https://www.google.com/health/feeds/profile/default/lzsxVzqZUyw"/> + <link rel="edit" type="application/atom+xml" href="https://www.google.com/health/feeds/profile/default/lzsxVzqZUyw"/> + <author> + <name>User Name</name> + <email>user@gmail.com</email> + </author> + <ContinuityOfCareRecord xmlns="urn:astm-org:CCR"> + <CCRDocumentObjectID>5efFB0J2WgEHNUvk2z3A1A</CCRDocumentObjectID> + <Language/> + <DateTime> + <Type/> + </DateTime> + <Patient/> + <Body> + <Alerts> + <Alert> + <Type> + <Text>Allergy</Text> + </Type> + <Description> + <Text>A.E.R Traveler</Text> + </Description> + <Status> + <Text>ACTIVE</Text> + </Status> + <Source> + <Actor> + <ActorID>user@gmail.com</ActorID> + <ActorRole> + <Text>Patient</Text> + </ActorRole> + </Actor> + </Source> + <Reaction> + <Description/> + <Severity> + <Text>Severe</Text> + </Severity> + </Reaction> + </Alert> + </Alerts> + </Body> + </ContinuityOfCareRecord> +</entry> +<entry> + <id>https://www.google.com/health/feeds/profile/default/6PvhfKAXyYw</id> + <published>2008-09-29T03:13:02.123Z</published> + <updated>2008-09-29T03:13:02.124Z</updated> + <category scheme="http://schemas.google.com/g/2005#kind" term="http://schemas.google.com/health/kinds#profile"/> + <category term="PROCEDURE"/> + <category scheme="http://schemas.google.com/health/item" term="Abdominal Ultrasound"/> + <title type="text"/> + <content type="html"/> + <link rel="http://schemas.google.com/health/data#complete" type="application/atom+xml" href="https://www.google.com/health/feeds/profile/default/-/%7Bhttp%3A%2F%2Fschemas.google.com%2Fg%2F2005%23kind%7Dhttp%3A%2F%2Fschemas.google.com%2Fhealth%2Fkinds%23profile/PROCEDURE/%7Bhttp%3A%2F%2Fschemas.google.com%2Fhealth%2Fitem%7DAbdominal+Ultrasound"/> + <link rel="self" type="application/atom+xml" href="https://www.google.com/health/feeds/profile/default/6PvhfKAXyYw"/> + <link rel="edit" type="application/atom+xml" href="https://www.google.com/health/feeds/profile/default/6PvhfKAXyYw"/> + <author> + <name>User Name</name> + <email>user@gmail.com</email> + </author> + <ContinuityOfCareRecord xmlns="urn:astm-org:CCR"> + <CCRDocumentObjectID>W3Wbvx_QHwG5pxVchpuF1A</CCRDocumentObjectID> + <Language/> + <DateTime> + <Type/> + </DateTime> + <Patient/> + <Body> + <Procedures> + <Procedure> + <Type/> + <Description> + <Text>Abdominal Ultrasound</Text> + </Description> + <Status/> + <Source> + <Actor> + <ActorID>user@gmail.com</ActorID> + <ActorRole> + <Text>Patient</Text> + </ActorRole> + </Actor> + </Source> + </Procedure> + </Procedures> + </Body> + </ContinuityOfCareRecord> +</entry> +<entry> + <id>https://www.google.com/health/feeds/profile/default/r2zGPGewCeU</id> + <published>2008-09-29T03:13:03.434Z</published> + <updated>2008-09-29T03:13:03.435Z</updated> + <category 
scheme="http://schemas.google.com/health/item" term="Abdominoplasty"/> + <category scheme="http://schemas.google.com/g/2005#kind" term="http://schemas.google.com/health/kinds#profile"/> + <category term="PROCEDURE"/> + <title type="text"/> + <content type="html"/> + <link rel="http://schemas.google.com/health/data#complete" type="application/atom+xml" href="https://www.google.com/health/feeds/profile/default/-/%7Bhttp%3A%2F%2Fschemas.google.com%2Fhealth%2Fitem%7DAbdominoplasty/%7Bhttp%3A%2F%2Fschemas.google.com%2Fg%2F2005%23kind%7Dhttp%3A%2F%2Fschemas.google.com%2Fhealth%2Fkinds%23profile/PROCEDURE"/> + <link rel="self" type="application/atom+xml" href="https://www.google.com/health/feeds/profile/default/r2zGPGewCeU"/> + <link rel="edit" type="application/atom+xml" href="https://www.google.com/health/feeds/profile/default/r2zGPGewCeU"/> + <author> + <name>User Name</name> + <email>user@gmail.com</email> + </author> + <ContinuityOfCareRecord xmlns="urn:astm-org:CCR"> + <CCRDocumentObjectID>OUKgj5X0KMnbkC5sDL.yHA</CCRDocumentObjectID> + <Language/> + <DateTime> + <Type/> + </DateTime> + <Patient/> + <Body> + <Procedures> + <Procedure> + <Type/> + <Description> + <Text>Abdominoplasty</Text> + </Description> + <Status/> + <Source> + <Actor> + <ActorID>user@gmail.com</ActorID> + <ActorRole> + <Text>Patient</Text> + </ActorRole> + </Actor> + </Source> + </Procedure> + </Procedures> + </Body> + </ContinuityOfCareRecord> +</entry> +<entry> + <id>https://www.google.com/health/feeds/profile/default/_cCCbQ0O3ug</id> + <published>2008-09-29T03:13:29.041Z</published> + <updated>2008-09-29T03:13:29.042Z</updated> + <category scheme="http://schemas.google.com/g/2005#kind" term="http://schemas.google.com/health/kinds#profile"/> + <category scheme="http://schemas.google.com/health/item" term="Acetaldehyde - Blood"/> + <category term="LABTEST"/> + <title type="text"/> + <content type="html"/> + <link rel="http://schemas.google.com/health/data#complete" type="application/atom+xml" href="https://www.google.com/health/feeds/profile/default/-/%7Bhttp%3A%2F%2Fschemas.google.com%2Fg%2F2005%23kind%7Dhttp%3A%2F%2Fschemas.google.com%2Fhealth%2Fkinds%23profile/%7Bhttp%3A%2F%2Fschemas.google.com%2Fhealth%2Fitem%7DAcetaldehyde+-+Blood/LABTEST"/> + <link rel="self" type="application/atom+xml" href="https://www.google.com/health/feeds/profile/default/_cCCbQ0O3ug"/> + <link rel="edit" type="application/atom+xml" href="https://www.google.com/health/feeds/profile/default/_cCCbQ0O3ug"/> + <author> + <name>User Name</name> + <email>user@gmail.com</email> + </author> + <ContinuityOfCareRecord xmlns="urn:astm-org:CCR"> + <CCRDocumentObjectID>YWtomFb8aG.DueZ7z7fyug</CCRDocumentObjectID> + <Language/> + <DateTime> + <Type/> + </DateTime> + <Patient/> + <Body> + <Results> + <Result> + <Type/> + <Description/> + <Status/> + <Source> + <Actor> + <ActorID>user@gmail.com</ActorID> + <ActorRole> + <Text>Patient</Text> + </ActorRole> + </Actor> + </Source> + <Substance/> + <Test> + <DateTime> + <Type> + <Text>Collection start date</Text> + </Type> + <ExactDateTime>2008-09-03</ExactDateTime> + </DateTime> + <Type/> + <Description> + <Text>Acetaldehyde - Blood</Text> + </Description> + <Status/> + <TestResult> + <ResultSequencePosition>0</ResultSequencePosition> + <VariableResultModifier/> + <Units/> + </TestResult> + <ConfidenceValue/> + </Test> + </Result> + </Results> + </Body> + </ContinuityOfCareRecord> +</entry> +<entry> + <id>https://www.google.com/health/feeds/profile/default/BdyA3iJZyCc</id> + 
<published>2008-09-29T03:00:45.915Z</published> + <updated>2008-09-29T03:00:45.915Z</updated> + <category scheme="http://schemas.google.com/health/item" term="Aortic valve disorders"/> + <category scheme="http://schemas.google.com/g/2005#kind" term="http://schemas.google.com/health/kinds#profile"/> + <category term="CONDITION"/> + <title type="text">Aortic valve disorders + + + + + example.com + example.com + + + h1ljpoeKJ85li.1FHsG9Gw + + + + Hn0FE0IlcY-FMFFgSTxkvA/CONDITION/0 + + + Start date + + 2007-04-04T07:00:00Z + + + Aortic valve disorders + + 410.10 + ICD9 + 2004 + + + + Active + + + + example.com + + Information Provider + + + + + + + + + + https://www.google.com/health/feeds/profile/default/Cl.aMWIH5VA + 2008-09-29T03:13:34.996Z + 2008-09-29T03:13:34.997Z + + + + + <content type="html"/> + <link rel="http://schemas.google.com/health/data#complete" type="application/atom+xml" href="https://www.google.com/health/feeds/profile/default/-/%7Bhttp%3A%2F%2Fschemas.google.com%2Fg%2F2005%23kind%7Dhttp%3A%2F%2Fschemas.google.com%2Fhealth%2Fkinds%23profile/%7Bhttp%3A%2F%2Fschemas.google.com%2Fhealth%2Fitem%7DChickenpox+Vaccine/IMMUNIZATION"/> + <link rel="self" type="application/atom+xml" href="https://www.google.com/health/feeds/profile/default/Cl.aMWIH5VA"/> + <link rel="edit" type="application/atom+xml" href="https://www.google.com/health/feeds/profile/default/Cl.aMWIH5VA"/> + <author> + <name>User Name</name> + <email>user@gmail.com</email> + </author> + <ContinuityOfCareRecord xmlns="urn:astm-org:CCR"> + <CCRDocumentObjectID>KlhUqfftgELIitpKbqYalw</CCRDocumentObjectID> + <Language/> + <DateTime> + <Type/> + </DateTime> + <Patient/> + <Body> + <Immunizations> + <Immunization> + <Type/> + <Description/> + <Status/> + <Source> + <Actor> + <ActorID>user@gmail.com</ActorID> + <ActorRole> + <Text>Patient</Text> + </ActorRole> + </Actor> + </Source> + <Product> + <ProductName> + <Text>Chickenpox Vaccine</Text> + <Code> + <Value>21</Value> + <CodingSystem>HL7</CodingSystem> + </Code> + </ProductName> + </Product> + <Directions> + <Direction> + <Description/> + <DeliveryMethod/> + </Direction> + </Directions> + <Refills/> + </Immunization> + </Immunizations> + </Body> + </ContinuityOfCareRecord> +</entry> +<entry> + <id>https://www.google.com/health/feeds/profile/default/l0a7.FlX3_0</id> + <published>2008-09-29T03:14:47.461Z</published> + <updated>2008-09-29T03:14:47.461Z</updated> + <category scheme="http://schemas.google.com/g/2005#kind" term="http://schemas.google.com/health/kinds#profile"/> + <category term="DEMOGRAPHICS"/> + <category scheme="http://schemas.google.com/health/item" term="Demographics"/> + <title type="text">Demographics + + + + + + User Name + user@gmail.com + + + U5GDAVOxFbexQw3iyvqPYg + + + + + + + + + + + + + + + + 1984-07-22 + + + Male + + + + + + user@gmail.com + + Patient + + + + + + + + + https://www.google.com/health/feeds/profile/default/oIBDdgwFLyo + 2008-09-29T03:14:47.690Z + 2008-09-29T03:14:47.691Z + + + + FunctionalStatus + + + + + + User Name + user@gmail.com + + + W.EJcnhxb7W5M4eR4Tr1YA + + + + + + + + + + Pregnancy status + + + Not pregnant + + + + + user@gmail.com + + Patient + + + + + + + Breastfeeding status + + + Not breastfeeding + + + + + user@gmail.com + + Patient + + + + + + + + + + https://www.google.com/health/feeds/profile/default/wwljIlXuTVg + 2008-09-29T03:26:10.080Z + 2008-09-29T03:26:10.081Z + + + + + <content type="html"/> + <link rel="http://schemas.google.com/health/data#complete" type="application/atom+xml" 
href="https://www.google.com/health/feeds/profile/default/-/MEDICATION/%7Bhttp%3A%2F%2Fschemas.google.com%2Fg%2F2005%23kind%7Dhttp%3A%2F%2Fschemas.google.com%2Fhealth%2Fkinds%23profile/%7Bhttp%3A%2F%2Fschemas.google.com%2Fhealth%2Fitem%7DLipitor"/> + <link rel="self" type="application/atom+xml" href="https://www.google.com/health/feeds/profile/default/wwljIlXuTVg"/> + <link rel="edit" type="application/atom+xml" href="https://www.google.com/health/feeds/profile/default/wwljIlXuTVg"/> + <author> + <name>User Name</name> + <email>user@gmail.com</email> + </author> + <ContinuityOfCareRecord xmlns="urn:astm-org:CCR"> + <CCRDocumentObjectID>OrpghzvvbG_YaO5koqT2ug</CCRDocumentObjectID> + <Language/> + <DateTime> + <Type/> + </DateTime> + <Patient/> + <Body> + <Medications> + <Medication> + <Type/> + <Description/> + <Status> + <Text>ACTIVE</Text> + </Status> + <Source> + <Actor> + <ActorID>user@gmail.com</ActorID> + <ActorRole> + <Text>Patient</Text> + </ActorRole> + </Actor> + </Source> + <Product> + <ProductName> + <Text>Lipitor</Text> + </ProductName> + <Strength> + <Units/> + <StrengthSequencePosition>0</StrengthSequencePosition> + <VariableStrengthModifier/> + </Strength> + </Product> + <Directions> + <Direction> + <Description/> + <DeliveryMethod/> + <Dose> + <Units/> + <DoseSequencePosition>0</DoseSequencePosition> + <VariableDoseModifier/> + </Dose> + <Route> + <Text>By mouth</Text> + <Code> + <Value>C38288</Value> + <CodingSystem>FDA</CodingSystem> + </Code> + <RouteSequencePosition>0</RouteSequencePosition> + <MultipleRouteModifier/> + </Route> + </Direction> + </Directions> + <Refills/> + </Medication> + </Medications> + </Body> + </ContinuityOfCareRecord> +</entry> +<entry> + <id>https://www.google.com/health/feeds/profile/default/dd09TR12SiY</id> + <published>2008-09-29T07:52:17.175Z</published> + <updated>2008-09-29T07:52:17.176Z</updated> + <category scheme="http://schemas.google.com/g/2005#kind" term="http://schemas.google.com/health/kinds#profile"/> + <category scheme="http://schemas.google.com/health/item" term="Malaria"/> + <category term="CONDITION"/> + <title type="text"/> + <content type="html"/> + <link rel="http://schemas.google.com/health/data#complete" type="application/atom+xml" href="https://www.google.com/health/feeds/profile/default/-/%7Bhttp%3A%2F%2Fschemas.google.com%2Fg%2F2005%23kind%7Dhttp%3A%2F%2Fschemas.google.com%2Fhealth%2Fkinds%23profile/%7Bhttp%3A%2F%2Fschemas.google.com%2Fhealth%2Fitem%7DMalaria/CONDITION"/> + <link rel="self" type="application/atom+xml" href="https://www.google.com/health/feeds/profile/default/dd09TR12SiY"/> + <link rel="edit" type="application/atom+xml" href="https://www.google.com/health/feeds/profile/default/dd09TR12SiY"/> + <author> + <name>User Name</name> + <email>user@gmail.com</email> + </author> + <ContinuityOfCareRecord xmlns="urn:astm-org:CCR"> + <CCRDocumentObjectID>XF99N6X4lpy.jfPUPLMMSQ</CCRDocumentObjectID> + <Language/> + <DateTime> + <Type/> + </DateTime> + <Patient/> + <Body> + <Problems> + <Problem> + <Type/> + <Description> + <Text>Malaria</Text> + <Code> + <Value>136.9</Value> + <CodingSystem>ICD9_Broader</CodingSystem> + </Code> + <Code> + <Value>084.6</Value> + <CodingSystem>ICD9</CodingSystem> + </Code> + </Description> + <Status> + <Text>ACTIVE</Text> + </Status> + <Source> + <Actor> + <ActorID>user@gmail.com</ActorID> + <ActorRole> + <Text>Patient</Text> + </ActorRole> + </Actor> + </Source> + <HealthStatus> + <Description/> + </HealthStatus> + </Problem> + </Problems> + </Body> + </ContinuityOfCareRecord> 
+</entry> +<entry> + <id>https://www.google.com/health/feeds/profile/default/aS0Cf964DPs</id> + <published>2008-09-29T03:14:47.463Z</published> + <updated>2008-09-29T03:14:47.463Z</updated> + <category scheme="http://schemas.google.com/g/2005#kind" term="http://schemas.google.com/health/kinds#profile"/> + <category term="DEMOGRAPHICS"/> + <category scheme="http://schemas.google.com/health/item" term="SocialHistory (Drinking, Smoking)"/> + <title type="text">SocialHistory (Drinking, Smoking) + + + + + + User Name + user@gmail.com + + + kXylGU5YXLBzriv61xPGZQ + + + + + + + + + + Race + + S15814 + HL7 + + + + White + + + + + user@gmail.com + + Patient + + + + + + + + + + + + + + + https://www.google.com/health/feeds/profile/default/s5lII5xfj_g + 2008-09-29T03:14:47.544Z + 2008-09-29T03:14:47.545Z + + + + VitalSigns + + + + + + User Name + user@gmail.com + + + FTTIiY0TVVj35kZqFFjPjQ + + + + + + + + + + + + + + user@gmail.com + + Patient + + + + + + + + Height + + + + 0 + + 70 + + inches + + + + + + + + + + + + user@gmail.com + + Patient + + + + + + + + Weight + + + + 0 + + 2480 + + ounces + + + + + + + + + + + + user@gmail.com + + Patient + + + + + + + + Blood Type + + + + 0 + + O+ + + + + + + + + + +""" + +HEALTH_PROFILE_LIST_ENTRY = """ + + https://www.google.com/health/feeds/profile/list/vndCn5sdfwdEIY + 1970-01-01T00:00:00.000Z + profile name + vndCn5sdfwdEIY + + + + user@gmail.com + +""" + +BOOK_ENTRY = """"""\ + """"""\ + """http://www.google.com/books/feeds/volumes/b7GZr5Btp30C"""\ + """2009-04-24T23:35:16.000Z"""\ + """"""\ + """A theory of justice"""\ + """"""\ + """"""\ + """"""\ + """"""\ + """"""\ + """"""\ + """"""\ + """"""\ + """"""\ + """John Rawls"""\ + """1999"""\ + """p Since it appeared in 1971, John Rawls's i A Theory of Justice /i has become a classic. The author has now revised the original edition to clear up a number of difficulties he and others have found in the original book. /p p Rawls aims to express an essential part of the common core of the democratic tradition--justice as fairness--and to provide an alternative to utilitarianism, which had dominated the Anglo-Saxon tradition of political thought since the nineteenth century. Rawls substitutes the ideal of the social contract as a more satisfactory account of the basic rights and liberties of citizens as free and equal persons. "Each person," writes Rawls, "possesses an inviolability founded on justice that even the welfare of society as a whole cannot override." Advancing the ideas of Rousseau, Kant, Emerson, and Lincoln, Rawls's theory is as powerful today as it was when first published. /p"""\ + """538 pages"""\ + """b7GZr5Btp30C"""\ + """ISBN:0198250541"""\ + """ISBN:9780198250548"""\ + """en"""\ + """Oxford University Press"""\ + """A theory of justice"""\ +"""""" + +BOOK_FEED = """"""\ + """"""\ + """http://www.google.com/books/feeds/volumes"""\ + """2009-04-24T23:39:47.000Z"""\ + """"""\ + """Search results for 9780198250548"""\ + """"""\ + """"""\ + """"""\ + """"""\ + """Google Books Search"""\ + """http://www.google.com"""\ + """"""\ + """Google Book Search data API"""\ + """1"""\ + """1"""\ + """20"""\ + """"""\ + """http://www.google.com/books/feeds/volumes/b7GZr5Btp30C"""\ + """2009-04-24T23:39:47.000Z"""\ + """"""\ + """A theory of justice"""\ + """"""\ + """"""\ + """"""\ + """"""\ + """"""\ + """"""\ + """"""\ + """"""\ + """"""\ + """John Rawls"""\ + """1999"""\ + """... 
9780198250548 ..."""\ + """538 pages"""\ + """b7GZr5Btp30C"""\ + """ISBN:0198250541"""\ + """ISBN:9780198250548"""\ + """Law"""\ + """A theory of justice"""\ + """"""\ +"""""" + +MAP_FEED = """ + + http://maps.google.com/maps/feeds/maps/208825816854482607313 + 2009-07-27T18:48:29.631Z + + My maps + + + + + + + Roman + + 1 + 1 + 1 + + http://maps.google.com/maps/feeds/maps/208825816854482607313/00046fb45f88fa910bcea + 2009-07-27T18:46:34.451Z + 2009-07-27T18:48:29.631Z + 2009-07-27T18:48:29.631Z + + yes + + + Untitled +
    + + + + + + Roman + + + +""" + +MAP_ENTRY = """ + + http://maps.google.com/maps/feeds/maps/208825816854482607313/00046fb45f88fa910bcea + 2009-07-27T18:46:34.451Z + 2009-07-27T18:48:29.631Z + 2009-07-27T18:48:29.631Z + + yes + + + Untitled + + + + + + + Roman + + +""" + +MAP_FEATURE_FEED = """ + + http://maps.google.com/maps/feeds/features/208825816854482607313/00046fb45f88fa910bcea + 2009-07-27T18:48:29.631Z + + Untitled + + + + + 4 + 1 + 4 + + http://maps.google.com/maps/feeds/features/208825816854482607313/00046fb45f88fa910bcea/00046fb4632573b19e0b7 + 2009-07-27T18:47:35.037Z + 2009-07-27T18:47:35.037Z + 2009-07-27T18:47:35.037Z + + Some feature title + + + Some feature title + Some feature content]]> + + + -113.818359,41.442726,0.0 + + + + + + + Roman + + + Roman + + + + http://maps.google.com/maps/feeds/features/208825816854482607313/00046fb45f88fa910bcea/00046fb46325e839a11e6 + 2009-07-27T18:47:35.067Z + 2009-07-27T18:48:22.184Z + 2009-07-27T18:48:22.184Z + + A cool poly! + + + A cool poly! + And a description]]> + + + + + 1 + -109.775391,47.457809,0.0 -99.755859,51.508742,0.0 -92.900391,48.04871,0.0 -92.8125,44.339565,0.0 -95.273437,44.402392,0.0 -97.207031,46.619261,0.0 -100.898437,46.073231,0.0 -102.480469,43.068888,0.0 -110.742187,45.274886,0.0 -109.775391,47.457809,0.0 + + + + + + + + + Roman + + + Roman + + + + http://maps.google.com/maps/feeds/features/208825816854482607313/00046fb45f88fa910bcea/00046fb465f5002e56b7a + 2009-07-27T18:48:22.194Z + 2009-07-27T18:48:22.194Z + 2009-07-27T18:48:22.194Z + + New Mexico + + + New Mexico + Word.]]> + + + 1 + -110.039062,37.788081,0.0 -103.183594,37.926868,0.0 -103.183594,32.472695,0.0 -108.896484,32.026706,0.0 -109.863281,31.203405,0.0 -110.039062,37.788081,0.0 + + + + + + + Roman + + + Roman + + + +""" + +MAP_FEATURE_ENTRY = """ + + http://maps.google.com/maps/feeds/features/208825816854482607313/00046fb45f88fa910bcea/00046fb4632573b19e0b7 + 2009-07-27T18:47:35.037Z + 2009-07-27T18:47:35.037Z + 2009-07-27T18:47:35.037Z + + Some feature title + + + Some feature title + Some feature content]]> + + + -113.818359,41.442726,0.0 + + + + + + + Roman + + + Roman + + +""" + +MAP_FEATURE_KML = """ + Some feature title + Some feature content]]> + + + -113.818359,41.442726,0.0 + + +""" + +SITES_LISTPAGE_ENTRY = ''' + + http:///sites.google.com/feeds/content/site/gdatatestsite/1712987567114738703 + 2009-06-16T00:37:37.393Z + + ListPagesTitle + +
    + +
    stuff go here
    asdf
    +
    sdf
    +
    +
    +
    +
    +
    +
    +
    + + + + Test User + test@gmail.com + + + + + + + + + + + +
    ''' + +SITES_COMMENT_ENTRY = ''' + + http://sites.google.com/feeds/content/site/gdatatestsite/abc123 + 2009-06-15T18:40:22.407Z + + + <content type="xhtml"> + <div xmlns="http://www.w3.org/1999/xhtml">first comment</div> + </content> + <link rel="http://schemas.google.com/sites/2008#parent" type="application/atom+xml" href="http://sites.google.com/feeds/content/site/gdatatestsite/abc123parent"/> + <link rel="self" type="application/atom+xml" href="http://sites.google.com/feeds/content/site/gdatatestsite/abc123"/> + <link rel="edit" type="application/atom+xml" href="http://sites.google.com/feeds/content/site/gdatatestsite/abc123"/> + <author> + <name>Test User</name> + <email>test@gmail.com</email> + </author> + <thr:in-reply-to xmlns:thr="http://purl.org/syndication/thread/1.0" href="http://sites.google.com/site/gdatatestsite/annoucment/testpost" ref="http://sites.google.com/feeds/content/site/gdatatestsite/abc123" source="http://sites.google.com/feeds/content/site/gdatatestsite" type="text/html"/> +</entry>''' + +SITES_LISTITEM_ENTRY = '''<?xml version="1.0" encoding="UTF-8"?> +<entry xmlns="http://www.w3.org/2005/Atom"> + <id>http://sites.google.com/feeds/content/site/gdatatestsite/abc123</id> + <updated>2009-06-16T00:34:55.633Z</updated> + <category scheme="http://schemas.google.com/g/2005#kind" term="http://schemas.google.com/sites/2008#listitem"/> + <title type="text"/> + <link rel="http://schemas.google.com/sites/2008#parent" type="application/atom+xml" href="http://sites.google.com/feeds/content/site/gdatatestsite/abc123def"/> + <link rel="self" type="application/atom+xml" href="http://sites.google.com/feeds/content/site/gdatatestsite/abc123"/> + <link rel="edit" type="application/atom+xml" href="http://sites.google.com/feeds/content/site/gdatatestsite/abc123"/> + <author> + <name>Test User</name> + <email>test@gmail.com</email> + </author> + <gs:field xmlns:gs="http://schemas.google.com/spreadsheets/2006" index="A" name="Owner">test value</gs:field> + <gs:field xmlns:gs="http://schemas.google.com/spreadsheets/2006" index="B" name="Description">test</gs:field> + <gs:field xmlns:gs="http://schemas.google.com/spreadsheets/2006" index="C" name="Resolution">90</gs:field> + <gs:field xmlns:gs="http://schemas.google.com/spreadsheets/2006" index="D" name="Complete"/> + <gs:field xmlns:gs="http://schemas.google.com/spreadsheets/2006" index="E" name="MyCo">2009-05-31</gs:field> +</entry>''' + +SITES_CONTENT_FEED = '''<?xml version="1.0" encoding="UTF-8"?> +<feed xmlns="http://www.w3.org/2005/Atom" xmlns:openSearch="http://a9.com/-/spec/opensearch/1.1/" +xmlns:sites="http://schemas.google.com/sites/2008" xmlns:gs="http://schemas.google.com/spreadsheets/2006" +xmlns:dc="http://purl.org/dc/terms" xmlns:batch="http://schemas.google.com/gdata/batch" +xmlns:gd="http://schemas.google.com/g/2005" xmlns:thr="http://purl.org/syndication/thread/1.0"> +<id>http://sites.google.com/feeds/content/site/gdatatestsite</id> +<updated>2009-06-15T21:35:43.282Z</updated> +<link rel="http://schemas.google.com/g/2005#feed" type="application/atom+xml" href="http://sites.google.com/feeds/content/site/gdatatestsite"/> +<link rel="http://schemas.google.com/g/2005#post" type="application/atom+xml" href="http://sites.google.com/feeds/content/site/gdatatestsite"/> +<link rel="self" type="application/atom+xml" href="http://sites.google.com/feeds/content/site/gdatatestsite"/> +<generator version="1" uri="http://sites.google.com">Google Sites</generator> +<openSearch:startIndex>1</openSearch:startIndex> +<entry> + 
<id>http:///sites.google.com/feeds/content/site/gdatatestsite/1712987567114738703</id> + <updated>2009-06-16T00:37:37.393Z</updated> + <category scheme="http://schemas.google.com/g/2005#kind" term="http://schemas.google.com/sites/2008#listpage"/> + <title type="text">ListPagesTitle + +
    + +
    stuff go here
    asdf
    +
    sdf
    +
    +
    +
    +
    +
    +
    +
    + + + + + + Test User + test@gmail.com + + + + + + + + + + + 2 + + home + +
    + + http://sites.google.com/feeds/content/site/gdatatestsite/abc123 + 2009-06-17T00:40:37.082Z + + filecabinet + +
    + +
    sdf
    +
    +
    +
    + + + + + + Test User + test@gmail.com + + +
    + + http://sites.google.com/feeds/content/site/gdatatestsite/abc123 + 2009-06-16T00:34:55.633Z + + + <link rel="http://schemas.google.com/sites/2008#parent" type="application/atom+xml" href="http://sites.google.com/feeds/content/site/gdatatestsite/abc123def"/> + <link rel="self" type="application/atom+xml" href="http://sites.google.com/feeds/content/site/gdatatestsite/abc123"/> + <link rel="edit" type="application/atom+xml" href="http://sites.google.com/feeds/content/site/gdatatestsite/abc123"/> + <link rel="http://schemas.google.com/sites/2008#revision" type="application/atom+xml" href="http:///sites.google.com/feeds/content/site/gdatatestsite/abc123"/> + <author> + <name>Test User</name> + <email>test@gmail.com</email> + </author> + <gs:field xmlns:gs="http://schemas.google.com/spreadsheets/2006" index="A" name="Owner">test value</gs:field> + <gs:field xmlns:gs="http://schemas.google.com/spreadsheets/2006" index="B" name="Description">test</gs:field> + <gs:field xmlns:gs="http://schemas.google.com/spreadsheets/2006" index="C" name="Resolution">90</gs:field> + <gs:field xmlns:gs="http://schemas.google.com/spreadsheets/2006" index="D" name="Complete"/> + <gs:field xmlns:gs="http://schemas.google.com/spreadsheets/2006" index="E" name="MyCo">2009-05-31</gs:field> +</entry> +<entry> + <id>http://sites.google.com/feeds/content/site/gdatatestsite/abc123</id> + <updated>2009-06-15T18:40:32.922Z</updated> + <category scheme="http://schemas.google.com/g/2005#kind" term="http://schemas.google.com/sites/2008#attachment"/> + <title type="text">testFile.ods + + + + + + + Test User + test@gmail.com + + + something else + + + http://sites.google.com/feeds/content/site/gdatatestsite/abc123 + 2009-06-15T18:40:22.407Z + + + <content type="xhtml"> + <div xmlns="http://www.w3.org/1999/xhtml">first comment</div> + </content> + <link rel="http://schemas.google.com/sites/2008#parent" type="application/atom+xml" href="http://sites.google.com/feeds/content/site/gdatatestsite/abc123"/> + <link rel="self" type="application/atom+xml" href="http://sites.google.com/feeds/content/site/gdatatestsite/abc123"/> + <link rel="edit" type="application/atom+xml" href="http://sites.google.com/feeds/content/site/gdatatestsite/abc123"/> + <link rel="http://schemas.google.com/sites/2008#revision" type="application/atom+xml" href="http:///sites.google.com/feeds/content/site/gdatatestsite/abc123"/> + <author> + <name>Test User</name> + <email>test@gmail.com</email> + </author> + <thr:in-reply-to xmlns:thr="http://purl.org/syndication/thread/1.0" href="http://sites.google.com/site/gdatatestsite/annoucment/testpost" ref="http://sites.google.com/feeds/content/site/gdatatestsite/abc123" source="http://sites.google.com/feeds/content/site/gdatatestsite" type="text/html"/> +</entry> +<entry> + <id>http://sites.google.com/feeds/content/site/gdatatestsite/abc123</id> + <updated>2009-06-15T18:40:16.388Z</updated> + <category scheme="http://schemas.google.com/g/2005#kind" term="http://schemas.google.com/sites/2008#announcement"/> + <title type="text">TestPost + +
    + +
    content goes here
    +
    +
    +
    + + + + + + Test User + test@gmail.com + +
    + + http://sites.google.com/feeds/content/site/gdatatestsite/abc123 + 2009-06-12T23:37:59.417Z + + Home + +
    + +
    Some Content goes here
    +
    +
    +
    + +
    +
    +
    +
    +
    +
    + + + + + Test User + test@gmail.com + +
    + + http://sites.google.com/feeds/content/site/gdatatestsite/2639323850129333500 + 2009-06-12T23:32:09.191Z + + annoucment + +
    +
    +
    + + + + + Test User + test@gmail.com + + +
    +''' + +SITES_ACTIVITY_FEED = ''' + +http://sites.google.com/feeds/activity/site/siteName +2009-08-19T05:46:01.503Z +Activity + + + +Google Sites +1 + +http://sites.google.com/feeds/activity/site/siteName/197441951793148343 +2009-08-17T00:08:19.387Z + +NewWebpage3 +
    + + + + + + + User + user@gmail.com + + + +http://sites.google.com/feeds/activity/site/siteName/7299542210274956360 +2009-08-17T00:08:03.711Z + +NewWebpage3 + +
    User edited NewWebpage3 +
    +
    + + + + + User + user@gmail.com + +
    +''' + +SITES_REVISION_FEED = ''' + +http://sites.google.com/feeds/revision/site/siteName/2947510322163358574 +2009-08-19T06:20:18.151Z +Revisions + + +Google Sites +1 + +http://sites.google.com/feeds/revision/site/siteName/2947510322163358574/1 +2009-08-19T04:33:14.856Z + + +<content type="xhtml"> + <div xmlns="http://www.w3.org/1999/xhtml"> + <table cellspacing="0" class="sites-layout-name-one-column sites-layout-hbox"> + <tbody> + <tr> + <td class="sites-layout-tile sites-tile-name-content-1">testcomment</td> + </tr> + </tbody> + </table> +</div> +</content> +<link rel="http://schemas.google.com/sites/2008#parent" type="application/atom+xml" href="http://sites.google.com/feeds/content/site/siteName/54395424125706119"/> +<link rel="alternate" type="text" href="http://sites.google.com/site/system/app/pages/admin/compare?wuid=wuid%3Agx%3A28e7a9057c581b6e&rev1=1"/> +<link rel="self" type="application/atom+xml" href="http://sites.google.com/feeds/revision/site/siteName/2947510322163358574/1"/> +<author> + <name>User</name> + <email>user@gmail.com</email> +</author> +<thr:in-reply-to href="http://sites.google.com/site/siteName/code/js" ref="http://sites.google.com/feeds/content/site/siteName/54395424125706119" source="http://sites.google.com/feeds/content/google.com/siteName" type="text/html;charset=UTF-8"/> +<sites:revision>1</sites:revision> +</entry> +</feed>''' + +SITES_SITE_FEED = ''' +<feed xmlns="http://www.w3.org/2005/Atom" xmlns:openSearch="http://a9.com/-/spec/opensearch/1.1/" xmlns:gAcl="http://schemas.google.com/acl/2007" xmlns:sites="http://schemas.google.com/sites/2008" xmlns:gs="http://schemas.google.com/spreadsheets/2006" xmlns:dc="http://purl.org/dc/terms" xmlns:batch="http://schemas.google.com/gdata/batch" xmlns:gd="http://schemas.google.com/g/2005" xmlns:thr="http://purl.org/syndication/thread/1.0"> +<id>https://sites.google.com/feeds/site/example.com</id> +<updated>2009-12-09T01:05:54.631Z</updated> +<title>Site + + + +Google Sites +1 + +https://sites.google.com/feeds/site/example.com/new-test-site +2009-12-02T22:55:31.040Z +2009-12-02T22:55:31.040Z +New Test Site +A new site to hold memories + + + + + +new-test-site +iceberg + + +https://sites.google.com/feeds/site/example.com/newautosite2 +2009-12-05T00:28:01.077Z +2009-12-05T00:28:01.077Z +newAutoSite3 +A new site to hold memories2 + + + + +newautosite2 +default + +''' + +SITES_ACL_FEED = ''' + +https://sites.google.comsites.google.com/feeds/acl/site/example.com/new-test-site +2009-12-09T01:24:59.080Z + +Acl + + + +Google Sites +1 + + https://sites.google.com/feeds/acl/site/google.com/new-test-site/user%3Auser%40example.com + 2009-12-09T01:24:59.080Z + 2009-12-09T01:24:59.080Z + + + + + + +''' + +ANALYTICS_ACCOUNT_FEED_old = ''' + +http://www.google.com/analytics/feeds/accounts/abc@test.com +2009-06-25T03:55:22.000-07:00 +Profile list for abc@test.com + + +Google Analytics + +Google Analytics +12 +1 +12 + +http://www.google.com/analytics/feeds/accounts/ga:1174 +2009-06-25T03:55:22.000-07:00 +www.googlestore.com + +ga:1174 + + + + + + + +''' + +ANALYTICS_ACCOUNT_FEED = ''' + + http://www.google.com/analytics/feeds/accounts/api.nickm@google.com + 2009-10-14T09:14:25.000-07:00 + Profile list for abc@test.com + + + Google Analytics + + Google Analytics + 37 + 1 + 37 + + ga:operatingSystem==iPhone + + + http://www.google.com/analytics/feeds/accounts/ga:1174 + 2009-10-14T09:14:25.000-07:00 + www.googlestore.com + + + + + + + + + + + + + + + + + + + + + + ga:1174 + +''' + +ANALYTICS_DATA_FEED = ''' + + 
http://www.google.com/analytics/feeds/data?ids=ga:1174&dimensions=ga:medium,ga:source&metrics=ga:bounces,ga:visits&filters=ga:medium%3D%3Dreferral&start-date=2008-10-01&end-date=2008-10-31 + 2008-10-31T16:59:59.999-07:00 + Google Analytics Data for Profile 1174 + + + + Google Analytics + + Google Analytics + 6451 + 1 + 2 + 2008-10-01 + 2008-10-31 + + ga:operatingSystem==iPhone + + + + + + + ga:1174 + www.googlestore.com + + + + + + http://www.google.com/analytics/feeds/data?ids=ga:1174&ga:medium=referral&ga:source=blogger.com&filters=ga:medium%3D%3Dreferral&start-date=2008-10-01&end-date=2008-10-31 + 2008-10-30T17:00:00.001-07:00 + ga:source=blogger.com | ga:medium=referral + + + + + + +''' diff --git a/gam/gdata/tlslite/BaseDB.py b/gam/gdata/tlslite/BaseDB.py new file mode 100755 index 00000000000..ca8dff6b408 --- /dev/null +++ b/gam/gdata/tlslite/BaseDB.py @@ -0,0 +1,120 @@ +"""Base class for SharedKeyDB and VerifierDB.""" + +import anydbm +import thread + +class BaseDB: + def __init__(self, filename, type): + self.type = type + self.filename = filename + if self.filename: + self.db = None + else: + self.db = {} + self.lock = thread.allocate_lock() + + def create(self): + """Create a new on-disk database. + + @raise anydbm.error: If there's a problem creating the database. + """ + if self.filename: + self.db = anydbm.open(self.filename, "n") #raises anydbm.error + self.db["--Reserved--type"] = self.type + self.db.sync() + else: + self.db = {} + + def open(self): + """Open a pre-existing on-disk database. + + @raise anydbm.error: If there's a problem opening the database. + @raise ValueError: If the database is not of the right type. + """ + if not self.filename: + raise ValueError("Can only open on-disk databases") + self.db = anydbm.open(self.filename, "w") #raises anydbm.error + try: + if self.db["--Reserved--type"] != self.type: + raise ValueError("Not a %s database" % self.type) + except KeyError: + raise ValueError("Not a recognized database") + + def __getitem__(self, username): + if self.db == None: + raise AssertionError("DB not open") + + self.lock.acquire() + try: + valueStr = self.db[username] + finally: + self.lock.release() + + return self._getItem(username, valueStr) + + def __setitem__(self, username, value): + if self.db == None: + raise AssertionError("DB not open") + + valueStr = self._setItem(username, value) + + self.lock.acquire() + try: + self.db[username] = valueStr + if self.filename: + self.db.sync() + finally: + self.lock.release() + + def __delitem__(self, username): + if self.db == None: + raise AssertionError("DB not open") + + self.lock.acquire() + try: + del(self.db[username]) + if self.filename: + self.db.sync() + finally: + self.lock.release() + + def __contains__(self, username): + """Check if the database contains the specified username. + + @type username: str + @param username: The username to check for. + + @rtype: bool + @return: True if the database contains the username, False + otherwise. + + """ + if self.db == None: + raise AssertionError("DB not open") + + self.lock.acquire() + try: + return self.db.has_key(username) + finally: + self.lock.release() + + def check(self, username, param): + value = self.__getitem__(username) + return self._checkItem(value, username, param) + + def keys(self): + """Return a list of usernames in the database. + + @rtype: list + @return: The usernames in the database. 
+ """ + if self.db == None: + raise AssertionError("DB not open") + + self.lock.acquire() + try: + usernames = self.db.keys() + finally: + self.lock.release() + usernames = [u for u in usernames if not u.startswith("--Reserved--")] + return usernames \ No newline at end of file diff --git a/gam/gdata/tlslite/Checker.py b/gam/gdata/tlslite/Checker.py new file mode 100755 index 00000000000..f978697628e --- /dev/null +++ b/gam/gdata/tlslite/Checker.py @@ -0,0 +1,146 @@ +"""Class for post-handshake certificate checking.""" + +from utils.cryptomath import hashAndBase64 +from X509 import X509 +from X509CertChain import X509CertChain +from errors import * + + +class Checker: + """This class is passed to a handshake function to check the other + party's certificate chain. + + If a handshake function completes successfully, but the Checker + judges the other party's certificate chain to be missing or + inadequate, a subclass of + L{tlslite.errors.TLSAuthenticationError} will be raised. + + Currently, the Checker can check either an X.509 or a cryptoID + chain (for the latter, cryptoIDlib must be installed). + """ + + def __init__(self, cryptoID=None, protocol=None, + x509Fingerprint=None, + x509TrustList=None, x509CommonName=None, + checkResumedSession=False): + """Create a new Checker instance. + + You must pass in one of these argument combinations: + - cryptoID[, protocol] (requires cryptoIDlib) + - x509Fingerprint + - x509TrustList[, x509CommonName] (requires cryptlib_py) + + @type cryptoID: str + @param cryptoID: A cryptoID which the other party's certificate + chain must match. The cryptoIDlib module must be installed. + Mutually exclusive with all of the 'x509...' arguments. + + @type protocol: str + @param protocol: A cryptoID protocol URI which the other + party's certificate chain must match. Requires the 'cryptoID' + argument. + + @type x509Fingerprint: str + @param x509Fingerprint: A hex-encoded X.509 end-entity + fingerprint which the other party's end-entity certificate must + match. Mutually exclusive with the 'cryptoID' and + 'x509TrustList' arguments. + + @type x509TrustList: list of L{tlslite.X509.X509} + @param x509TrustList: A list of trusted root certificates. The + other party must present a certificate chain which extends to + one of these root certificates. The cryptlib_py module must be + installed. Mutually exclusive with the 'cryptoID' and + 'x509Fingerprint' arguments. + + @type x509CommonName: str + @param x509CommonName: The end-entity certificate's 'CN' field + must match this value. For a web server, this is typically a + server name such as 'www.amazon.com'. Mutually exclusive with + the 'cryptoID' and 'x509Fingerprint' arguments. Requires the + 'x509TrustList' argument. + + @type checkResumedSession: bool + @param checkResumedSession: If resumed sessions should be + checked. This defaults to False, on the theory that if the + session was checked once, we don't need to bother + re-checking it. 
+ """ + + if cryptoID and (x509Fingerprint or x509TrustList): + raise ValueError() + if x509Fingerprint and x509TrustList: + raise ValueError() + if x509CommonName and not x509TrustList: + raise ValueError() + if protocol and not cryptoID: + raise ValueError() + if cryptoID: + import cryptoIDlib #So we raise an error here + if x509TrustList: + import cryptlib_py #So we raise an error here + self.cryptoID = cryptoID + self.protocol = protocol + self.x509Fingerprint = x509Fingerprint + self.x509TrustList = x509TrustList + self.x509CommonName = x509CommonName + self.checkResumedSession = checkResumedSession + + def __call__(self, connection): + """Check a TLSConnection. + + When a Checker is passed to a handshake function, this will + be called at the end of the function. + + @type connection: L{tlslite.TLSConnection.TLSConnection} + @param connection: The TLSConnection to examine. + + @raise tlslite.errors.TLSAuthenticationError: If the other + party's certificate chain is missing or bad. + """ + if not self.checkResumedSession and connection.resumed: + return + + if self.cryptoID or self.x509Fingerprint or self.x509TrustList: + if connection._client: + chain = connection.session.serverCertChain + else: + chain = connection.session.clientCertChain + + if self.x509Fingerprint or self.x509TrustList: + if isinstance(chain, X509CertChain): + if self.x509Fingerprint: + if chain.getFingerprint() != self.x509Fingerprint: + raise TLSFingerprintError(\ + "X.509 fingerprint mismatch: %s, %s" % \ + (chain.getFingerprint(), self.x509Fingerprint)) + else: #self.x509TrustList + if not chain.validate(self.x509TrustList): + raise TLSValidationError("X.509 validation failure") + if self.x509CommonName and \ + (chain.getCommonName() != self.x509CommonName): + raise TLSAuthorizationError(\ + "X.509 Common Name mismatch: %s, %s" % \ + (chain.getCommonName(), self.x509CommonName)) + elif chain: + raise TLSAuthenticationTypeError() + else: + raise TLSNoAuthenticationError() + elif self.cryptoID: + import cryptoIDlib.CertChain + if isinstance(chain, cryptoIDlib.CertChain.CertChain): + if chain.cryptoID != self.cryptoID: + raise TLSFingerprintError(\ + "cryptoID mismatch: %s, %s" % \ + (chain.cryptoID, self.cryptoID)) + if self.protocol: + if not chain.checkProtocol(self.protocol): + raise TLSAuthorizationError(\ + "cryptoID protocol mismatch") + if not chain.validate(): + raise TLSValidationError("cryptoID validation failure") + elif chain: + raise TLSAuthenticationTypeError() + else: + raise TLSNoAuthenticationError() + diff --git a/gam/gdata/tlslite/FileObject.py b/gam/gdata/tlslite/FileObject.py new file mode 100755 index 00000000000..6ee02b2436b --- /dev/null +++ b/gam/gdata/tlslite/FileObject.py @@ -0,0 +1,220 @@ +"""Class returned by TLSConnection.makefile().""" + +class FileObject: + """This class provides a file object interface to a + L{tlslite.TLSConnection.TLSConnection}. + + Call makefile() on a TLSConnection to create a FileObject instance. + + This class was copied, with minor modifications, from the + _fileobject class in socket.py. 
Note that fileno() is not + implemented.""" + + default_bufsize = 16384 #TREV: changed from 8192 + + def __init__(self, sock, mode='rb', bufsize=-1): + self._sock = sock + self.mode = mode # Not actually used in this version + if bufsize < 0: + bufsize = self.default_bufsize + self.bufsize = bufsize + self.softspace = False + if bufsize == 0: + self._rbufsize = 1 + elif bufsize == 1: + self._rbufsize = self.default_bufsize + else: + self._rbufsize = bufsize + self._wbufsize = bufsize + self._rbuf = "" # A string + self._wbuf = [] # A list of strings + + def _getclosed(self): + return self._sock is not None + closed = property(_getclosed, doc="True if the file is closed") + + def close(self): + try: + if self._sock: + for result in self._sock._decrefAsync(): #TREV + pass + finally: + self._sock = None + + def __del__(self): + try: + self.close() + except: + # close() may fail if __init__ didn't complete + pass + + def flush(self): + if self._wbuf: + buffer = "".join(self._wbuf) + self._wbuf = [] + self._sock.sendall(buffer) + + #def fileno(self): + # raise NotImplementedError() #TREV + + def write(self, data): + data = str(data) # XXX Should really reject non-string non-buffers + if not data: + return + self._wbuf.append(data) + if (self._wbufsize == 0 or + self._wbufsize == 1 and '\n' in data or + self._get_wbuf_len() >= self._wbufsize): + self.flush() + + def writelines(self, list): + # XXX We could do better here for very long lists + # XXX Should really reject non-string non-buffers + self._wbuf.extend(filter(None, map(str, list))) + if (self._wbufsize <= 1 or + self._get_wbuf_len() >= self._wbufsize): + self.flush() + + def _get_wbuf_len(self): + buf_len = 0 + for x in self._wbuf: + buf_len += len(x) + return buf_len + + def read(self, size=-1): + data = self._rbuf + if size < 0: + # Read until EOF + buffers = [] + if data: + buffers.append(data) + self._rbuf = "" + if self._rbufsize <= 1: + recv_size = self.default_bufsize + else: + recv_size = self._rbufsize + while True: + data = self._sock.recv(recv_size) + if not data: + break + buffers.append(data) + return "".join(buffers) + else: + # Read until size bytes or EOF seen, whichever comes first + buf_len = len(data) + if buf_len >= size: + self._rbuf = data[size:] + return data[:size] + buffers = [] + if data: + buffers.append(data) + self._rbuf = "" + while True: + left = size - buf_len + recv_size = max(self._rbufsize, left) + data = self._sock.recv(recv_size) + if not data: + break + buffers.append(data) + n = len(data) + if n >= left: + self._rbuf = data[left:] + buffers[-1] = data[:left] + break + buf_len += n + return "".join(buffers) + + def readline(self, size=-1): + data = self._rbuf + if size < 0: + # Read until \n or EOF, whichever comes first + if self._rbufsize <= 1: + # Speed up unbuffered case + assert data == "" + buffers = [] + recv = self._sock.recv + while data != "\n": + data = recv(1) + if not data: + break + buffers.append(data) + return "".join(buffers) + nl = data.find('\n') + if nl >= 0: + nl += 1 + self._rbuf = data[nl:] + return data[:nl] + buffers = [] + if data: + buffers.append(data) + self._rbuf = "" + while True: + data = self._sock.recv(self._rbufsize) + if not data: + break + buffers.append(data) + nl = data.find('\n') + if nl >= 0: + nl += 1 + self._rbuf = data[nl:] + buffers[-1] = data[:nl] + break + return "".join(buffers) + else: + # Read until size bytes or \n or EOF seen, whichever comes first + nl = data.find('\n', 0, size) + if nl >= 0: + nl += 1 + self._rbuf = data[nl:] + return data[:nl] + 
buf_len = len(data) + if buf_len >= size: + self._rbuf = data[size:] + return data[:size] + buffers = [] + if data: + buffers.append(data) + self._rbuf = "" + while True: + data = self._sock.recv(self._rbufsize) + if not data: + break + buffers.append(data) + left = size - buf_len + nl = data.find('\n', 0, left) + if nl >= 0: + nl += 1 + self._rbuf = data[nl:] + buffers[-1] = data[:nl] + break + n = len(data) + if n >= left: + self._rbuf = data[left:] + buffers[-1] = data[:left] + break + buf_len += n + return "".join(buffers) + + def readlines(self, sizehint=0): + total = 0 + list = [] + while True: + line = self.readline() + if not line: + break + list.append(line) + total += len(line) + if sizehint and total >= sizehint: + break + return list + + # Iterator protocols + + def __iter__(self): + return self + + def next(self): + line = self.readline() + if not line: + raise StopIteration + return line diff --git a/gam/gdata/tlslite/HandshakeSettings.py b/gam/gdata/tlslite/HandshakeSettings.py new file mode 100755 index 00000000000..c7c3223e515 --- /dev/null +++ b/gam/gdata/tlslite/HandshakeSettings.py @@ -0,0 +1,159 @@ +"""Class for setting handshake parameters.""" + +from constants import CertificateType +from utils import cryptomath +from utils import cipherfactory + +class HandshakeSettings: + """This class encapsulates various parameters that can be used with + a TLS handshake. + @sort: minKeySize, maxKeySize, cipherNames, certificateTypes, + minVersion, maxVersion + + @type minKeySize: int + @ivar minKeySize: The minimum bit length for asymmetric keys. + + If the other party tries to use SRP, RSA, or Diffie-Hellman + parameters smaller than this length, an alert will be + signalled. The default is 1023. + + @type maxKeySize: int + @ivar maxKeySize: The maximum bit length for asymmetric keys. + + If the other party tries to use SRP, RSA, or Diffie-Hellman + parameters larger than this length, an alert will be signalled. + The default is 8193. + + @type cipherNames: list + @ivar cipherNames: The allowed ciphers, in order of preference. + + The allowed values in this list are 'aes256', 'aes128', '3des', and + 'rc4'. If these settings are used with a client handshake, they + determine the order of the ciphersuites offered in the ClientHello + message. + + If these settings are used with a server handshake, the server will + choose whichever ciphersuite matches the earliest entry in this + list. + + NOTE: If '3des' is used in this list, but TLS Lite can't find an + add-on library that supports 3DES, then '3des' will be silently + removed. + + The default value is ['aes256', 'aes128', '3des', 'rc4']. + + @type certificateTypes: list + @ivar certificateTypes: The allowed certificate types, in order of + preference. + + The allowed values in this list are 'x509' and 'cryptoID'. This + list is only used with a client handshake. The client will + advertise to the server which certificate types are supported, and + will check that the server uses one of the appropriate types. + + NOTE: If 'cryptoID' is used in this list, but cryptoIDlib is not + installed, then 'cryptoID' will be silently removed. + + @type minVersion: tuple + @ivar minVersion: The minimum allowed SSL/TLS version. + + This variable can be set to (3,0) for SSL 3.0, (3,1) for + TLS 1.0, or (3,2) for TLS 1.1. If the other party wishes to + use a lower version, a protocol_version alert will be signalled. + The default is (3,0). + + @type maxVersion: tuple + @ivar maxVersion: The maximum allowed SSL/TLS version. 
+ + This variable can be set to (3,0) for SSL 3.0, (3,1) for + TLS 1.0, or (3,2) for TLS 1.1. If the other party wishes to + use a higher version, a protocol_version alert will be signalled. + The default is (3,2). (WARNING: Some servers may (improperly) + reject clients which offer support for TLS 1.1. In this case, + try lowering maxVersion to (3,1)). + """ + def __init__(self): + self.minKeySize = 1023 + self.maxKeySize = 8193 + self.cipherNames = ["aes256", "aes128", "3des", "rc4"] + self.cipherImplementations = ["cryptlib", "openssl", "pycrypto", + "python"] + self.certificateTypes = ["x509", "cryptoID"] + self.minVersion = (3,0) + self.maxVersion = (3,2) + + #Filters out options that are not supported + def _filter(self): + other = HandshakeSettings() + other.minKeySize = self.minKeySize + other.maxKeySize = self.maxKeySize + other.cipherNames = self.cipherNames + other.cipherImplementations = self.cipherImplementations + other.certificateTypes = self.certificateTypes + other.minVersion = self.minVersion + other.maxVersion = self.maxVersion + + if not cipherfactory.tripleDESPresent: + other.cipherNames = [e for e in self.cipherNames if e != "3des"] + if len(other.cipherNames)==0: + raise ValueError("No supported ciphers") + + try: + import cryptoIDlib + except ImportError: + other.certificateTypes = [e for e in self.certificateTypes \ + if e != "cryptoID"] + if len(other.certificateTypes)==0: + raise ValueError("No supported certificate types") + + if not cryptomath.cryptlibpyLoaded: + other.cipherImplementations = [e for e in \ + self.cipherImplementations if e != "cryptlib"] + if not cryptomath.m2cryptoLoaded: + other.cipherImplementations = [e for e in \ + other.cipherImplementations if e != "openssl"] + if not cryptomath.pycryptoLoaded: + other.cipherImplementations = [e for e in \ + other.cipherImplementations if e != "pycrypto"] + if len(other.cipherImplementations)==0: + raise ValueError("No supported cipher implementations") + + if other.minKeySize<512: + raise ValueError("minKeySize too small") + if other.minKeySize>16384: + raise ValueError("minKeySize too large") + if other.maxKeySize<512: + raise ValueError("maxKeySize too small") + if other.maxKeySize>16384: + raise ValueError("maxKeySize too large") + for s in other.cipherNames: + if s not in ("aes256", "aes128", "rc4", "3des"): + raise ValueError("Unknown cipher name: '%s'" % s) + for s in other.cipherImplementations: + if s not in ("cryptlib", "openssl", "python", "pycrypto"): + raise ValueError("Unknown cipher implementation: '%s'" % s) + for s in other.certificateTypes: + if s not in ("x509", "cryptoID"): + raise ValueError("Unknown certificate type: '%s'" % s) + + if other.minVersion > other.maxVersion: + raise ValueError("Versions set incorrectly") + + if not other.minVersion in ((3,0), (3,1), (3,2)): + raise ValueError("minVersion set incorrectly") + + if not other.maxVersion in ((3,0), (3,1), (3,2)): + raise ValueError("maxVersion set incorrectly") + + return other + + def _getCertificateTypes(self): + l = [] + for ct in self.certificateTypes: + if ct == "x509": + l.append(CertificateType.x509) + elif ct == "cryptoID": + l.append(CertificateType.cryptoID) + else: + raise AssertionError() + return l diff --git a/gam/gdata/tlslite/Session.py b/gam/gdata/tlslite/Session.py new file mode 100755 index 00000000000..a951f458942 --- /dev/null +++ b/gam/gdata/tlslite/Session.py @@ -0,0 +1,131 @@ +"""Class representing a TLS session.""" + +from utils.compat import * +from mathtls import * +from constants import * + +class 
Session: + """ + This class represents a TLS session. + + TLS distinguishes between connections and sessions. A new + handshake creates both a connection and a session. Data is + transmitted over the connection. + + The session contains a more permanent record of the handshake. The + session can be inspected to determine handshake results. The + session can also be used to create a new connection through + "session resumption". If the client and server both support this, + they can create a new connection based on an old session without + the overhead of a full handshake. + + The session for a L{tlslite.TLSConnection.TLSConnection} can be + retrieved from the connection's 'session' attribute. + + @type srpUsername: str + @ivar srpUsername: The client's SRP username (or None). + + @type sharedKeyUsername: str + @ivar sharedKeyUsername: The client's shared-key username (or + None). + + @type clientCertChain: L{tlslite.X509CertChain.X509CertChain} or + L{cryptoIDlib.CertChain.CertChain} + @ivar clientCertChain: The client's certificate chain (or None). + + @type serverCertChain: L{tlslite.X509CertChain.X509CertChain} or + L{cryptoIDlib.CertChain.CertChain} + @ivar serverCertChain: The server's certificate chain (or None). + """ + + def __init__(self): + self.masterSecret = createByteArraySequence([]) + self.sessionID = createByteArraySequence([]) + self.cipherSuite = 0 + self.srpUsername = None + self.sharedKeyUsername = None + self.clientCertChain = None + self.serverCertChain = None + self.resumable = False + self.sharedKey = False + + def _clone(self): + other = Session() + other.masterSecret = self.masterSecret + other.sessionID = self.sessionID + other.cipherSuite = self.cipherSuite + other.srpUsername = self.srpUsername + other.sharedKeyUsername = self.sharedKeyUsername + other.clientCertChain = self.clientCertChain + other.serverCertChain = self.serverCertChain + other.resumable = self.resumable + other.sharedKey = self.sharedKey + return other + + def _calcMasterSecret(self, version, premasterSecret, clientRandom, + serverRandom): + if version == (3,0): + self.masterSecret = PRF_SSL(premasterSecret, + concatArrays(clientRandom, serverRandom), 48) + elif version in ((3,1), (3,2)): + self.masterSecret = PRF(premasterSecret, "master secret", + concatArrays(clientRandom, serverRandom), 48) + else: + raise AssertionError() + + def valid(self): + """If this session can be used for session resumption. + + @rtype: bool + @return: If this session can be used for session resumption. + """ + return self.resumable or self.sharedKey + + def _setResumable(self, boolean): + #Only let it be set if this isn't a shared key + if not self.sharedKey: + #Only let it be set to True if the sessionID is non-null + if (not boolean) or (boolean and self.sessionID): + self.resumable = boolean + + def getCipherName(self): + """Get the name of the cipher used with this connection. + + @rtype: str + @return: The name of the cipher used with this connection. + Either 'aes128', 'aes256', 'rc4', or '3des'. 
+ """ + if self.cipherSuite in CipherSuite.aes128Suites: + return "aes128" + elif self.cipherSuite in CipherSuite.aes256Suites: + return "aes256" + elif self.cipherSuite in CipherSuite.rc4Suites: + return "rc4" + elif self.cipherSuite in CipherSuite.tripleDESSuites: + return "3des" + else: + return None + + def _createSharedKey(self, sharedKeyUsername, sharedKey): + if len(sharedKeyUsername)>16: + raise ValueError() + if len(sharedKey)>47: + raise ValueError() + + self.sharedKeyUsername = sharedKeyUsername + + self.sessionID = createByteArrayZeros(16) + for x in range(len(sharedKeyUsername)): + self.sessionID[x] = ord(sharedKeyUsername[x]) + + premasterSecret = createByteArrayZeros(48) + sharedKey = chr(len(sharedKey)) + sharedKey + for x in range(48): + premasterSecret[x] = ord(sharedKey[x % len(sharedKey)]) + + self.masterSecret = PRF(premasterSecret, "shared secret", + createByteArraySequence([]), 48) + self.sharedKey = True + return self + + diff --git a/gam/gdata/tlslite/SessionCache.py b/gam/gdata/tlslite/SessionCache.py new file mode 100755 index 00000000000..34cf0b0ec4e --- /dev/null +++ b/gam/gdata/tlslite/SessionCache.py @@ -0,0 +1,103 @@ +"""Class for caching TLS sessions.""" + +import thread +import time + +class SessionCache: + """This class is used by the server to cache TLS sessions. + + Caching sessions allows the client to use TLS session resumption + and avoid the expense of a full handshake. To use this class, + simply pass a SessionCache instance into the server handshake + function. + + This class is thread-safe. + """ + + #References to these instances + #are also held by the caller, who may change the 'resumable' + #flag, so the SessionCache must return the same instances + #it was passed in. + + def __init__(self, maxEntries=10000, maxAge=14400): + """Create a new SessionCache. + + @type maxEntries: int + @param maxEntries: The maximum size of the cache. When this + limit is reached, the oldest sessions will be deleted as + necessary to make room for new ones. The default is 10000. + + @type maxAge: int + @param maxAge: The number of seconds before a session expires + from the cache. The default is 14400 (i.e. 4 hours).""" + + self.lock = thread.allocate_lock() + + # Maps sessionIDs to sessions + self.entriesDict = {} + + #Circular list of (sessionID, timestamp) pairs + self.entriesList = [(None,None)] * maxEntries + + self.firstIndex = 0 + self.lastIndex = 0 + self.maxAge = maxAge + + def __getitem__(self, sessionID): + self.lock.acquire() + try: + self._purge() #Delete old items, so we're assured of a new one + session = self.entriesDict[sessionID] + + #When we add sessions they're resumable, but it's possible + #for the session to be invalidated later on (if a fatal alert + #is returned), so we have to check for resumability before + #returning the session. 
+ + if session.valid(): + return session + else: + raise KeyError() + finally: + self.lock.release() + + + def __setitem__(self, sessionID, session): + self.lock.acquire() + try: + #Add the new element + self.entriesDict[sessionID] = session + self.entriesList[self.lastIndex] = (sessionID, time.time()) + self.lastIndex = (self.lastIndex+1) % len(self.entriesList) + + #If the cache is full, we delete the oldest element to make an + #empty space + if self.lastIndex == self.firstIndex: + del(self.entriesDict[self.entriesList[self.firstIndex][0]]) + self.firstIndex = (self.firstIndex+1) % len(self.entriesList) + finally: + self.lock.release() + + #Delete expired items + def _purge(self): + currentTime = time.time() + + #Search through the circular list, deleting expired elements until + #we reach a non-expired element. Since elements in list are + #ordered in time, we can break once we reach the first non-expired + #element + index = self.firstIndex + while index != self.lastIndex: + if currentTime - self.entriesList[index][1] > self.maxAge: + del(self.entriesDict[self.entriesList[index][0]]) + index = (index+1) % len(self.entriesList) + else: + break + self.firstIndex = index + +def _test(): + import doctest, SessionCache + return doctest.testmod(SessionCache) + +if __name__ == "__main__": + _test() diff --git a/gam/gdata/tlslite/SharedKeyDB.py b/gam/gdata/tlslite/SharedKeyDB.py new file mode 100755 index 00000000000..3246ec7f155 --- /dev/null +++ b/gam/gdata/tlslite/SharedKeyDB.py @@ -0,0 +1,58 @@ +"""Class for storing shared keys.""" + +from utils.cryptomath import * +from utils.compat import * +from mathtls import * +from Session import Session +from BaseDB import BaseDB + +class SharedKeyDB(BaseDB): + """This class represent an in-memory or on-disk database of shared + keys. + + A SharedKeyDB can be passed to a server handshake function to + authenticate a client based on one of the shared keys. + + This class is thread-safe. + """ + + def __init__(self, filename=None): + """Create a new SharedKeyDB. + + @type filename: str + @param filename: Filename for an on-disk database, or None for + an in-memory database. If the filename already exists, follow + this with a call to open(). To create a new on-disk database, + follow this with a call to create(). + """ + BaseDB.__init__(self, filename, "shared key") + + def _getItem(self, username, valueStr): + session = Session() + session._createSharedKey(username, valueStr) + return session + + def __setitem__(self, username, sharedKey): + """Add a shared key to the database. + + @type username: str + @param username: The username to associate the shared key with. + Must be less than or equal to 16 characters in length, and must + not already be in the database. + + @type sharedKey: str + @param sharedKey: The shared key to add. Must be less than 48 + characters in length. + """ + BaseDB.__setitem__(self, username, sharedKey) + + def _setItem(self, username, value): + if len(username)>16: + raise ValueError("username too long") + if len(value)>=48: + raise ValueError("shared key too long") + return value + + def _checkItem(self, value, username, param): + newSession = self._getItem(username, param) + return value.masterSecret == newSession.masterSecret \ No newline at end of file diff --git a/gam/gdata/tlslite/TLSConnection.py b/gam/gdata/tlslite/TLSConnection.py new file mode 100755 index 00000000000..d125f8f0a40 --- /dev/null +++ b/gam/gdata/tlslite/TLSConnection.py @@ -0,0 +1,1600 @@ +""" +MAIN CLASS FOR TLS LITE (START HERE!). 
+""" +from __future__ import generators + +import socket +from utils.compat import formatExceptionTrace +from TLSRecordLayer import TLSRecordLayer +from Session import Session +from constants import * +from utils.cryptomath import getRandomBytes +from errors import * +from messages import * +from mathtls import * +from HandshakeSettings import HandshakeSettings + + +class TLSConnection(TLSRecordLayer): + """ + This class wraps a socket and provides TLS handshaking and data + transfer. + + To use this class, create a new instance, passing a connected + socket into the constructor. Then call some handshake function. + If the handshake completes without raising an exception, then a TLS + connection has been negotiated. You can transfer data over this + connection as if it were a socket. + + This class provides both synchronous and asynchronous versions of + its key functions. The synchronous versions should be used when + writing single-or multi-threaded code using blocking sockets. The + asynchronous versions should be used when performing asynchronous, + event-based I/O with non-blocking sockets. + + Asynchronous I/O is a complicated subject; typically, you should + not use the asynchronous functions directly, but should use some + framework like asyncore or Twisted which TLS Lite integrates with + (see + L{tlslite.integration.TLSAsyncDispatcherMixIn.TLSAsyncDispatcherMixIn} or + L{tlslite.integration.TLSTwistedProtocolWrapper.TLSTwistedProtocolWrapper}). + """ + + + def __init__(self, sock): + """Create a new TLSConnection instance. + + @param sock: The socket data will be transmitted on. The + socket should already be connected. It may be in blocking or + non-blocking mode. + + @type sock: L{socket.socket} + """ + TLSRecordLayer.__init__(self, sock) + + def handshakeClientSRP(self, username, password, session=None, + settings=None, checker=None, async=False): + """Perform an SRP handshake in the role of client. + + This function performs a TLS/SRP handshake. SRP mutually + authenticates both parties to each other using only a + username and password. This function may also perform a + combined SRP and server-certificate handshake, if the server + chooses to authenticate itself with a certificate chain in + addition to doing SRP. + + TLS/SRP is non-standard. Most TLS implementations don't + support it. See + U{http://www.ietf.org/html.charters/tls-charter.html} or + U{http://trevp.net/tlssrp/} for the latest information on + TLS/SRP. + + Like any handshake function, this can be called on a closed + TLS connection, or on a TLS connection that is already open. + If called on an open connection it performs a re-handshake. + + If the function completes without raising an exception, the + TLS connection will be open and available for data transfer. + + If an exception is raised, the connection will have been + automatically closed (if it was ever open). + + @type username: str + @param username: The SRP username. + + @type password: str + @param password: The SRP password. + + @type session: L{tlslite.Session.Session} + @param session: A TLS session to attempt to resume. This + session must be an SRP session performed with the same username + and password as were passed in. If the resumption does not + succeed, a full SRP handshake will be performed. + + @type settings: L{tlslite.HandshakeSettings.HandshakeSettings} + @param settings: Various settings which can be used to control + the ciphersuites, certificate types, and SSL/TLS versions + offered by the client. 
+ + @type checker: L{tlslite.Checker.Checker} + @param checker: A Checker instance. This instance will be + invoked to examine the other party's authentication + credentials, if the handshake completes succesfully. + + @type async: bool + @param async: If False, this function will block until the + handshake is completed. If True, this function will return a + generator. Successive invocations of the generator will + return 0 if it is waiting to read from the socket, 1 if it is + waiting to write to the socket, or will raise StopIteration if + the handshake operation is completed. + + @rtype: None or an iterable + @return: If 'async' is True, a generator object will be + returned. + + @raise socket.error: If a socket error occurs. + @raise tlslite.errors.TLSAbruptCloseError: If the socket is closed + without a preceding alert. + @raise tlslite.errors.TLSAlert: If a TLS alert is signalled. + @raise tlslite.errors.TLSAuthenticationError: If the checker + doesn't like the other party's authentication credentials. + """ + handshaker = self._handshakeClientAsync(srpParams=(username, password), + session=session, settings=settings, checker=checker) + if async: + return handshaker + for result in handshaker: + pass + + def handshakeClientCert(self, certChain=None, privateKey=None, + session=None, settings=None, checker=None, + async=False): + """Perform a certificate-based handshake in the role of client. + + This function performs an SSL or TLS handshake. The server + will authenticate itself using an X.509 or cryptoID certificate + chain. If the handshake succeeds, the server's certificate + chain will be stored in the session's serverCertChain attribute. + Unless a checker object is passed in, this function does no + validation or checking of the server's certificate chain. + + If the server requests client authentication, the + client will send the passed-in certificate chain, and use the + passed-in private key to authenticate itself. If no + certificate chain and private key were passed in, the client + will attempt to proceed without client authentication. The + server may or may not allow this. + + Like any handshake function, this can be called on a closed + TLS connection, or on a TLS connection that is already open. + If called on an open connection it performs a re-handshake. + + If the function completes without raising an exception, the + TLS connection will be open and available for data transfer. + + If an exception is raised, the connection will have been + automatically closed (if it was ever open). + + @type certChain: L{tlslite.X509CertChain.X509CertChain} or + L{cryptoIDlib.CertChain.CertChain} + @param certChain: The certificate chain to be used if the + server requests client authentication. + + @type privateKey: L{tlslite.utils.RSAKey.RSAKey} + @param privateKey: The private key to be used if the server + requests client authentication. + + @type session: L{tlslite.Session.Session} + @param session: A TLS session to attempt to resume. If the + resumption does not succeed, a full handshake will be + performed. + + @type settings: L{tlslite.HandshakeSettings.HandshakeSettings} + @param settings: Various settings which can be used to control + the ciphersuites, certificate types, and SSL/TLS versions + offered by the client. + + @type checker: L{tlslite.Checker.Checker} + @param checker: A Checker instance. This instance will be + invoked to examine the other party's authentication + credentials, if the handshake completes succesfully. 
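# Illustrative sketch, not part of the patch: a blocking certificate-based
# client handshake as documented here, with no client authentication and no
# checker, so the server's certificate chain is NOT validated.  `sock` is a
# placeholder and the `tlslite` package name is an assumption.
from tlslite.TLSConnection import TLSConnection

connection = TLSConnection(sock)
connection.handshakeClientCert()                   # server authenticates with its certificate
serverChain = connection.session.serverCertChain   # chain the server presented, if any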
+ + @type async: bool + @param async: If False, this function will block until the + handshake is completed. If True, this function will return a + generator. Successive invocations of the generator will + return 0 if it is waiting to read from the socket, 1 if it is + waiting to write to the socket, or will raise StopIteration if + the handshake operation is completed. + + @rtype: None or an iterable + @return: If 'async' is True, a generator object will be + returned. + + @raise socket.error: If a socket error occurs. + @raise tlslite.errors.TLSAbruptCloseError: If the socket is closed + without a preceding alert. + @raise tlslite.errors.TLSAlert: If a TLS alert is signalled. + @raise tlslite.errors.TLSAuthenticationError: If the checker + doesn't like the other party's authentication credentials. + """ + handshaker = self._handshakeClientAsync(certParams=(certChain, + privateKey), session=session, settings=settings, + checker=checker) + if async: + return handshaker + for result in handshaker: + pass + + def handshakeClientUnknown(self, srpCallback=None, certCallback=None, + session=None, settings=None, checker=None, + async=False): + """Perform a to-be-determined type of handshake in the role of client. + + This function performs an SSL or TLS handshake. If the server + requests client certificate authentication, the + certCallback will be invoked and should return a (certChain, + privateKey) pair. If the callback returns None, the library + will attempt to proceed without client authentication. The + server may or may not allow this. + + If the server requests SRP authentication, the srpCallback + will be invoked and should return a (username, password) pair. + If the callback returns None, the local implementation will + signal a user_canceled error alert. + + After the handshake completes, the client can inspect the + connection's session attribute to determine what type of + authentication was performed. + + Like any handshake function, this can be called on a closed + TLS connection, or on a TLS connection that is already open. + If called on an open connection it performs a re-handshake. + + If the function completes without raising an exception, the + TLS connection will be open and available for data transfer. + + If an exception is raised, the connection will have been + automatically closed (if it was ever open). + + @type srpCallback: callable + @param srpCallback: The callback to be used if the server + requests SRP authentication. If None, the client will not + offer support for SRP ciphersuites. + + @type certCallback: callable + @param certCallback: The callback to be used if the server + requests client certificate authentication. + + @type session: L{tlslite.Session.Session} + @param session: A TLS session to attempt to resume. If the + resumption does not succeed, a full handshake will be + performed. + + @type settings: L{tlslite.HandshakeSettings.HandshakeSettings} + @param settings: Various settings which can be used to control + the ciphersuites, certificate types, and SSL/TLS versions + offered by the client. + + @type checker: L{tlslite.Checker.Checker} + @param checker: A Checker instance. This instance will be + invoked to examine the other party's authentication + credentials, if the handshake completes succesfully. + + @type async: bool + @param async: If False, this function will block until the + handshake is completed. If True, this function will return a + generator. 
Successive invocations of the generator will + return 0 if it is waiting to read from the socket, 1 if it is + waiting to write to the socket, or will raise StopIteration if + the handshake operation is completed. + + @rtype: None or an iterable + @return: If 'async' is True, a generator object will be + returned. + + @raise socket.error: If a socket error occurs. + @raise tlslite.errors.TLSAbruptCloseError: If the socket is closed + without a preceding alert. + @raise tlslite.errors.TLSAlert: If a TLS alert is signalled. + @raise tlslite.errors.TLSAuthenticationError: If the checker + doesn't like the other party's authentication credentials. + """ + handshaker = self._handshakeClientAsync(unknownParams=(srpCallback, + certCallback), session=session, settings=settings, + checker=checker) + if async: + return handshaker + for result in handshaker: + pass + + def handshakeClientSharedKey(self, username, sharedKey, settings=None, + checker=None, async=False): + """Perform a shared-key handshake in the role of client. + + This function performs a shared-key handshake. Using shared + symmetric keys of high entropy (128 bits or greater) mutually + authenticates both parties to each other. + + TLS with shared-keys is non-standard. Most TLS + implementations don't support it. See + U{http://www.ietf.org/html.charters/tls-charter.html} for the + latest information on TLS with shared-keys. If the shared-keys + Internet-Draft changes or is superceded, TLS Lite will track + those changes, so the shared-key support in later versions of + TLS Lite may become incompatible with this version. + + Like any handshake function, this can be called on a closed + TLS connection, or on a TLS connection that is already open. + If called on an open connection it performs a re-handshake. + + If the function completes without raising an exception, the + TLS connection will be open and available for data transfer. + + If an exception is raised, the connection will have been + automatically closed (if it was ever open). + + @type username: str + @param username: The shared-key username. + + @type sharedKey: str + @param sharedKey: The shared key. + + @type settings: L{tlslite.HandshakeSettings.HandshakeSettings} + @param settings: Various settings which can be used to control + the ciphersuites, certificate types, and SSL/TLS versions + offered by the client. + + @type checker: L{tlslite.Checker.Checker} + @param checker: A Checker instance. This instance will be + invoked to examine the other party's authentication + credentials, if the handshake completes succesfully. + + @type async: bool + @param async: If False, this function will block until the + handshake is completed. If True, this function will return a + generator. Successive invocations of the generator will + return 0 if it is waiting to read from the socket, 1 if it is + waiting to write to the socket, or will raise StopIteration if + the handshake operation is completed. + + @rtype: None or an iterable + @return: If 'async' is True, a generator object will be + returned. + + @raise socket.error: If a socket error occurs. + @raise tlslite.errors.TLSAbruptCloseError: If the socket is closed + without a preceding alert. + @raise tlslite.errors.TLSAlert: If a TLS alert is signalled. + @raise tlslite.errors.TLSAuthenticationError: If the checker + doesn't like the other party's authentication credentials. 
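# Illustrative sketch, not part of the patch: driving one of the asynchronous
# handshake generators by hand, following the contract described above
# (0 = wait until the socket is readable, 1 = wait until it is writable,
# StopIteration = handshake finished).  `sock` is assumed to be a connected
# non-blocking socket; the `tlslite` package name is an assumption.
import select
from tlslite.TLSConnection import TLSConnection

connection = TLSConnection(sock)
handshaker = connection.handshakeClientSharedKey("alice", "0123456789abcdef",
                                                 async=True)
for state in handshaker:
    if state == 0:
        select.select([sock], [], [])   # wait until readable
    elif state == 1:
        select.select([], [sock], [])   # wait until writable
# the generator raises StopIteration (ending the loop) once the handshake is done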
+ """ + handshaker = self._handshakeClientAsync(sharedKeyParams=(username, + sharedKey), settings=settings, checker=checker) + if async: + return handshaker + for result in handshaker: + pass + + def _handshakeClientAsync(self, srpParams=(), certParams=(), + unknownParams=(), sharedKeyParams=(), + session=None, settings=None, checker=None, + recursive=False): + + handshaker = self._handshakeClientAsyncHelper(srpParams=srpParams, + certParams=certParams, unknownParams=unknownParams, + sharedKeyParams=sharedKeyParams, session=session, + settings=settings, recursive=recursive) + for result in self._handshakeWrapperAsync(handshaker, checker): + yield result + + + def _handshakeClientAsyncHelper(self, srpParams, certParams, unknownParams, + sharedKeyParams, session, settings, recursive): + if not recursive: + self._handshakeStart(client=True) + + #Unpack parameters + srpUsername = None # srpParams + password = None # srpParams + clientCertChain = None # certParams + privateKey = None # certParams + srpCallback = None # unknownParams + certCallback = None # unknownParams + #session # sharedKeyParams (or session) + #settings # settings + + if srpParams: + srpUsername, password = srpParams + elif certParams: + clientCertChain, privateKey = certParams + elif unknownParams: + srpCallback, certCallback = unknownParams + elif sharedKeyParams: + session = Session()._createSharedKey(*sharedKeyParams) + + if not settings: + settings = HandshakeSettings() + settings = settings._filter() + + #Validate parameters + if srpUsername and not password: + raise ValueError("Caller passed a username but no password") + if password and not srpUsername: + raise ValueError("Caller passed a password but no username") + + if clientCertChain and not privateKey: + raise ValueError("Caller passed a certChain but no privateKey") + if privateKey and not clientCertChain: + raise ValueError("Caller passed a privateKey but no certChain") + + if clientCertChain: + foundType = False + try: + import cryptoIDlib.CertChain + if isinstance(clientCertChain, cryptoIDlib.CertChain.CertChain): + if "cryptoID" not in settings.certificateTypes: + raise ValueError("Client certificate doesn't "\ + "match Handshake Settings") + settings.certificateTypes = ["cryptoID"] + foundType = True + except ImportError: + pass + if not foundType and isinstance(clientCertChain, + X509CertChain): + if "x509" not in settings.certificateTypes: + raise ValueError("Client certificate doesn't match "\ + "Handshake Settings") + settings.certificateTypes = ["x509"] + foundType = True + if not foundType: + raise ValueError("Unrecognized certificate type") + + + if session: + if not session.valid(): + session = None #ignore non-resumable sessions... 
+ elif session.resumable and \ + (session.srpUsername != srpUsername): + raise ValueError("Session username doesn't match") + + #Add Faults to parameters + if srpUsername and self.fault == Fault.badUsername: + srpUsername += "GARBAGE" + if password and self.fault == Fault.badPassword: + password += "GARBAGE" + if sharedKeyParams: + identifier = sharedKeyParams[0] + sharedKey = sharedKeyParams[1] + if self.fault == Fault.badIdentifier: + identifier += "GARBAGE" + session = Session()._createSharedKey(identifier, sharedKey) + elif self.fault == Fault.badSharedKey: + sharedKey += "GARBAGE" + session = Session()._createSharedKey(identifier, sharedKey) + + + #Initialize locals + serverCertChain = None + cipherSuite = 0 + certificateType = CertificateType.x509 + premasterSecret = None + + #Get client nonce + clientRandom = getRandomBytes(32) + + #Initialize acceptable ciphersuites + cipherSuites = [] + if srpParams: + cipherSuites += CipherSuite.getSrpRsaSuites(settings.cipherNames) + cipherSuites += CipherSuite.getSrpSuites(settings.cipherNames) + elif certParams: + cipherSuites += CipherSuite.getRsaSuites(settings.cipherNames) + elif unknownParams: + if srpCallback: + cipherSuites += \ + CipherSuite.getSrpRsaSuites(settings.cipherNames) + cipherSuites += \ + CipherSuite.getSrpSuites(settings.cipherNames) + cipherSuites += CipherSuite.getRsaSuites(settings.cipherNames) + elif sharedKeyParams: + cipherSuites += CipherSuite.getRsaSuites(settings.cipherNames) + else: + cipherSuites += CipherSuite.getRsaSuites(settings.cipherNames) + + #Initialize acceptable certificate types + certificateTypes = settings._getCertificateTypes() + + #Tentatively set the version to the client's minimum version. + #We'll use this for the ClientHello, and if an error occurs + #parsing the Server Hello, we'll use this version for the response + self.version = settings.maxVersion + + #Either send ClientHello (with a resumable session)... + if session: + #If it's a resumable (i.e. 
not a shared-key session), then its + #ciphersuite must be one of the acceptable ciphersuites + if (not sharedKeyParams) and \ + session.cipherSuite not in cipherSuites: + raise ValueError("Session's cipher suite not consistent "\ + "with parameters") + else: + clientHello = ClientHello() + clientHello.create(settings.maxVersion, clientRandom, + session.sessionID, cipherSuites, + certificateTypes, session.srpUsername) + + #Or send ClientHello (without) + else: + clientHello = ClientHello() + clientHello.create(settings.maxVersion, clientRandom, + createByteArraySequence([]), cipherSuites, + certificateTypes, srpUsername) + for result in self._sendMsg(clientHello): + yield result + + #Get ServerHello (or missing_srp_username) + for result in self._getMsg((ContentType.handshake, + ContentType.alert), + HandshakeType.server_hello): + if result in (0,1): + yield result + else: + break + msg = result + + if isinstance(msg, ServerHello): + serverHello = msg + elif isinstance(msg, Alert): + alert = msg + + #If it's not a missing_srp_username, re-raise + if alert.description != AlertDescription.missing_srp_username: + self._shutdown(False) + raise TLSRemoteAlert(alert) + + #If we're not in SRP callback mode, we won't have offered SRP + #without a username, so we shouldn't get this alert + if not srpCallback: + for result in self._sendError(\ + AlertDescription.unexpected_message): + yield result + srpParams = srpCallback() + #If the callback returns None, cancel the handshake + if srpParams == None: + for result in self._sendError(AlertDescription.user_canceled): + yield result + + #Recursively perform handshake + for result in self._handshakeClientAsyncHelper(srpParams, + None, None, None, None, settings, True): + yield result + return + + #Get the server version. 
Do this before anything else, so any + #error alerts will use the server's version + self.version = serverHello.server_version + + #Future responses from server must use this version + self._versionCheck = True + + #Check ServerHello + if serverHello.server_version < settings.minVersion: + for result in self._sendError(\ + AlertDescription.protocol_version, + "Too old version: %s" % str(serverHello.server_version)): + yield result + if serverHello.server_version > settings.maxVersion: + for result in self._sendError(\ + AlertDescription.protocol_version, + "Too new version: %s" % str(serverHello.server_version)): + yield result + if serverHello.cipher_suite not in cipherSuites: + for result in self._sendError(\ + AlertDescription.illegal_parameter, + "Server responded with incorrect ciphersuite"): + yield result + if serverHello.certificate_type not in certificateTypes: + for result in self._sendError(\ + AlertDescription.illegal_parameter, + "Server responded with incorrect certificate type"): + yield result + if serverHello.compression_method != 0: + for result in self._sendError(\ + AlertDescription.illegal_parameter, + "Server responded with incorrect compression method"): + yield result + + #Get the server nonce + serverRandom = serverHello.random + + #If the server agrees to resume + if session and session.sessionID and \ + serverHello.session_id == session.sessionID: + + #If a shared-key, we're flexible about suites; otherwise the + #server-chosen suite has to match the session's suite + if sharedKeyParams: + session.cipherSuite = serverHello.cipher_suite + elif serverHello.cipher_suite != session.cipherSuite: + for result in self._sendError(\ + AlertDescription.illegal_parameter,\ + "Server's ciphersuite doesn't match session"): + yield result + + #Set the session for this connection + self.session = session + + #Calculate pending connection states + self._calcPendingStates(clientRandom, serverRandom, + settings.cipherImplementations) + + #Exchange ChangeCipherSpec and Finished messages + for result in self._getFinished(): + yield result + for result in self._sendFinished(): + yield result + + #Mark the connection as open + self._handshakeDone(resumed=True) + + #If server DOES NOT agree to resume + else: + + if sharedKeyParams: + for result in self._sendError(\ + AlertDescription.user_canceled, + "Was expecting a shared-key resumption"): + yield result + + #We've already validated these + cipherSuite = serverHello.cipher_suite + certificateType = serverHello.certificate_type + + #If the server chose an SRP suite... + if cipherSuite in CipherSuite.srpSuites: + #Get ServerKeyExchange, ServerHelloDone + for result in self._getMsg(ContentType.handshake, + HandshakeType.server_key_exchange, cipherSuite): + if result in (0,1): + yield result + else: + break + serverKeyExchange = result + + for result in self._getMsg(ContentType.handshake, + HandshakeType.server_hello_done): + if result in (0,1): + yield result + else: + break + serverHelloDone = result + + #If the server chose an SRP+RSA suite... 
+ elif cipherSuite in CipherSuite.srpRsaSuites: + #Get Certificate, ServerKeyExchange, ServerHelloDone + for result in self._getMsg(ContentType.handshake, + HandshakeType.certificate, certificateType): + if result in (0,1): + yield result + else: + break + serverCertificate = result + + for result in self._getMsg(ContentType.handshake, + HandshakeType.server_key_exchange, cipherSuite): + if result in (0,1): + yield result + else: + break + serverKeyExchange = result + + for result in self._getMsg(ContentType.handshake, + HandshakeType.server_hello_done): + if result in (0,1): + yield result + else: + break + serverHelloDone = result + + #If the server chose an RSA suite... + elif cipherSuite in CipherSuite.rsaSuites: + #Get Certificate[, CertificateRequest], ServerHelloDone + for result in self._getMsg(ContentType.handshake, + HandshakeType.certificate, certificateType): + if result in (0,1): + yield result + else: + break + serverCertificate = result + + for result in self._getMsg(ContentType.handshake, + (HandshakeType.server_hello_done, + HandshakeType.certificate_request)): + if result in (0,1): + yield result + else: + break + msg = result + + certificateRequest = None + if isinstance(msg, CertificateRequest): + certificateRequest = msg + for result in self._getMsg(ContentType.handshake, + HandshakeType.server_hello_done): + if result in (0,1): + yield result + else: + break + serverHelloDone = result + elif isinstance(msg, ServerHelloDone): + serverHelloDone = msg + else: + raise AssertionError() + + + #Calculate SRP premaster secret, if server chose an SRP or + #SRP+RSA suite + if cipherSuite in CipherSuite.srpSuites + \ + CipherSuite.srpRsaSuites: + #Get and check the server's group parameters and B value + N = serverKeyExchange.srp_N + g = serverKeyExchange.srp_g + s = serverKeyExchange.srp_s + B = serverKeyExchange.srp_B + + if (g,N) not in goodGroupParameters: + for result in self._sendError(\ + AlertDescription.untrusted_srp_parameters, + "Unknown group parameters"): + yield result + if numBits(N) < settings.minKeySize: + for result in self._sendError(\ + AlertDescription.untrusted_srp_parameters, + "N value is too small: %d" % numBits(N)): + yield result + if numBits(N) > settings.maxKeySize: + for result in self._sendError(\ + AlertDescription.untrusted_srp_parameters, + "N value is too large: %d" % numBits(N)): + yield result + if B % N == 0: + for result in self._sendError(\ + AlertDescription.illegal_parameter, + "Suspicious B value"): + yield result + + #Check the server's signature, if server chose an + #SRP+RSA suite + if cipherSuite in CipherSuite.srpRsaSuites: + #Hash ServerKeyExchange/ServerSRPParams + hashBytes = serverKeyExchange.hash(clientRandom, + serverRandom) + + #Extract signature bytes from ServerKeyExchange + sigBytes = serverKeyExchange.signature + if len(sigBytes) == 0: + for result in self._sendError(\ + AlertDescription.illegal_parameter, + "Server sent an SRP ServerKeyExchange "\ + "message without a signature"): + yield result + + #Get server's public key from the Certificate message + for result in self._getKeyFromChain(serverCertificate, + settings): + if result in (0,1): + yield result + else: + break + publicKey, serverCertChain = result + + #Verify signature + if not publicKey.verify(sigBytes, hashBytes): + for result in self._sendError(\ + AlertDescription.decrypt_error, + "Signature failed to verify"): + yield result + + + #Calculate client's ephemeral DH values (a, A) + a = bytesToNumber(getRandomBytes(32)) + A = powMod(g, a, N) + + #Calculate 
client's static DH values (x, v) + x = makeX(bytesToString(s), srpUsername, password) + v = powMod(g, x, N) + + #Calculate u + u = makeU(N, A, B) + + #Calculate premaster secret + k = makeK(N, g) + S = powMod((B - (k*v)) % N, a+(u*x), N) + + if self.fault == Fault.badA: + A = N + S = 0 + premasterSecret = numberToBytes(S) + + #Send ClientKeyExchange + for result in self._sendMsg(\ + ClientKeyExchange(cipherSuite).createSRP(A)): + yield result + + + #Calculate RSA premaster secret, if server chose an RSA suite + elif cipherSuite in CipherSuite.rsaSuites: + + #Handle the presence of a CertificateRequest + if certificateRequest: + if unknownParams and certCallback: + certParamsNew = certCallback() + if certParamsNew: + clientCertChain, privateKey = certParamsNew + + #Get server's public key from the Certificate message + for result in self._getKeyFromChain(serverCertificate, + settings): + if result in (0,1): + yield result + else: + break + publicKey, serverCertChain = result + + + #Calculate premaster secret + premasterSecret = getRandomBytes(48) + premasterSecret[0] = settings.maxVersion[0] + premasterSecret[1] = settings.maxVersion[1] + + if self.fault == Fault.badPremasterPadding: + premasterSecret[0] = 5 + if self.fault == Fault.shortPremasterSecret: + premasterSecret = premasterSecret[:-1] + + #Encrypt premaster secret to server's public key + encryptedPreMasterSecret = publicKey.encrypt(premasterSecret) + + #If client authentication was requested, send Certificate + #message, either with certificates or empty + if certificateRequest: + clientCertificate = Certificate(certificateType) + + if clientCertChain: + #Check to make sure we have the same type of + #certificates the server requested + wrongType = False + if certificateType == CertificateType.x509: + if not isinstance(clientCertChain, X509CertChain): + wrongType = True + elif certificateType == CertificateType.cryptoID: + if not isinstance(clientCertChain, + cryptoIDlib.CertChain.CertChain): + wrongType = True + if wrongType: + for result in self._sendError(\ + AlertDescription.handshake_failure, + "Client certificate is of wrong type"): + yield result + + clientCertificate.create(clientCertChain) + + for result in self._sendMsg(clientCertificate): + yield result + else: + #The server didn't request client auth, so we + #zeroize these so the clientCertChain won't be + #stored in the session. 
+ privateKey = None + clientCertChain = None + + #Send ClientKeyExchange + clientKeyExchange = ClientKeyExchange(cipherSuite, + self.version) + clientKeyExchange.createRSA(encryptedPreMasterSecret) + for result in self._sendMsg(clientKeyExchange): + yield result + + #If client authentication was requested and we have a + #private key, send CertificateVerify + if certificateRequest and privateKey: + if self.version == (3,0): + #Create a temporary session object, just for the + #purpose of creating the CertificateVerify + session = Session() + session._calcMasterSecret(self.version, + premasterSecret, + clientRandom, + serverRandom) + verifyBytes = self._calcSSLHandshakeHash(\ + session.masterSecret, "") + elif self.version in ((3,1), (3,2)): + verifyBytes = stringToBytes(\ + self._handshake_md5.digest() + \ + self._handshake_sha.digest()) + if self.fault == Fault.badVerifyMessage: + verifyBytes[0] = ((verifyBytes[0]+1) % 256) + signedBytes = privateKey.sign(verifyBytes) + certificateVerify = CertificateVerify() + certificateVerify.create(signedBytes) + for result in self._sendMsg(certificateVerify): + yield result + + + #Create the session object + self.session = Session() + self.session._calcMasterSecret(self.version, premasterSecret, + clientRandom, serverRandom) + self.session.sessionID = serverHello.session_id + self.session.cipherSuite = cipherSuite + self.session.srpUsername = srpUsername + self.session.clientCertChain = clientCertChain + self.session.serverCertChain = serverCertChain + + #Calculate pending connection states + self._calcPendingStates(clientRandom, serverRandom, + settings.cipherImplementations) + + #Exchange ChangeCipherSpec and Finished messages + for result in self._sendFinished(): + yield result + for result in self._getFinished(): + yield result + + #Mark the connection as open + self.session._setResumable(True) + self._handshakeDone(resumed=False) + + + + def handshakeServer(self, sharedKeyDB=None, verifierDB=None, + certChain=None, privateKey=None, reqCert=False, + sessionCache=None, settings=None, checker=None): + """Perform a handshake in the role of server. + + This function performs an SSL or TLS handshake. Depending on + the arguments and the behavior of the client, this function can + perform a shared-key, SRP, or certificate-based handshake. It + can also perform a combined SRP and server-certificate + handshake. + + Like any handshake function, this can be called on a closed + TLS connection, or on a TLS connection that is already open. + If called on an open connection it performs a re-handshake. + This function does not send a Hello Request message before + performing the handshake, so if re-handshaking is required, + the server must signal the client to begin the re-handshake + through some other means. + + If the function completes without raising an exception, the + TLS connection will be open and available for data transfer. + + If an exception is raised, the connection will have been + automatically closed (if it was ever open). + + @type sharedKeyDB: L{tlslite.SharedKeyDB.SharedKeyDB} + @param sharedKeyDB: A database of shared symmetric keys + associated with usernames. If the client performs a + shared-key handshake, the session's sharedKeyUsername + attribute will be set. + + @type verifierDB: L{tlslite.VerifierDB.VerifierDB} + @param verifierDB: A database of SRP password verifiers + associated with usernames. If the client performs an SRP + handshake, the session's srpUsername attribute will be set. 
+ + @type certChain: L{tlslite.X509CertChain.X509CertChain} or + L{cryptoIDlib.CertChain.CertChain} + @param certChain: The certificate chain to be used if the + client requests server certificate authentication. + + @type privateKey: L{tlslite.utils.RSAKey.RSAKey} + @param privateKey: The private key to be used if the client + requests server certificate authentication. + + @type reqCert: bool + @param reqCert: Whether to request client certificate + authentication. This only applies if the client chooses server + certificate authentication; if the client chooses SRP or + shared-key authentication, this will be ignored. If the client + performs a client certificate authentication, the sessions's + clientCertChain attribute will be set. + + @type sessionCache: L{tlslite.SessionCache.SessionCache} + @param sessionCache: An in-memory cache of resumable sessions. + The client can resume sessions from this cache. Alternatively, + if the client performs a full handshake, a new session will be + added to the cache. + + @type settings: L{tlslite.HandshakeSettings.HandshakeSettings} + @param settings: Various settings which can be used to control + the ciphersuites and SSL/TLS version chosen by the server. + + @type checker: L{tlslite.Checker.Checker} + @param checker: A Checker instance. This instance will be + invoked to examine the other party's authentication + credentials, if the handshake completes succesfully. + + @raise socket.error: If a socket error occurs. + @raise tlslite.errors.TLSAbruptCloseError: If the socket is closed + without a preceding alert. + @raise tlslite.errors.TLSAlert: If a TLS alert is signalled. + @raise tlslite.errors.TLSAuthenticationError: If the checker + doesn't like the other party's authentication credentials. + """ + for result in self.handshakeServerAsync(sharedKeyDB, verifierDB, + certChain, privateKey, reqCert, sessionCache, settings, + checker): + pass + + + def handshakeServerAsync(self, sharedKeyDB=None, verifierDB=None, + certChain=None, privateKey=None, reqCert=False, + sessionCache=None, settings=None, checker=None): + """Start a server handshake operation on the TLS connection. + + This function returns a generator which behaves similarly to + handshakeServer(). Successive invocations of the generator + will return 0 if it is waiting to read from the socket, 1 if it is + waiting to write to the socket, or it will raise StopIteration + if the handshake operation is complete. + + @rtype: iterable + @return: A generator; see above for details. 
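# Illustrative sketch, not part of the patch: a server handshake that accepts
# either shared-key or certificate-based clients, using the SharedKeyDB class
# added earlier in this patch.  `sock`, `certChain` and `privateKey` are
# placeholders; the `tlslite` package name and the need to call create() on an
# in-memory database before adding keys are assumptions.
from tlslite.SharedKeyDB import SharedKeyDB
from tlslite.TLSConnection import TLSConnection

sharedKeyDB = SharedKeyDB()                 # in-memory database (filename=None)
sharedKeyDB.create()                        # initialise it before adding keys
sharedKeyDB["alice"] = "0123456789abcdef"   # username <= 16 chars, key < 48 chars

connection = TLSConnection(sock)
connection.handshakeServer(sharedKeyDB=sharedKeyDB,
                           certChain=certChain, privateKey=privateKey)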
+ """ + handshaker = self._handshakeServerAsyncHelper(\ + sharedKeyDB=sharedKeyDB, + verifierDB=verifierDB, certChain=certChain, + privateKey=privateKey, reqCert=reqCert, + sessionCache=sessionCache, settings=settings) + for result in self._handshakeWrapperAsync(handshaker, checker): + yield result + + + def _handshakeServerAsyncHelper(self, sharedKeyDB, verifierDB, + certChain, privateKey, reqCert, sessionCache, + settings): + + self._handshakeStart(client=False) + + if (not sharedKeyDB) and (not verifierDB) and (not certChain): + raise ValueError("Caller passed no authentication credentials") + if certChain and not privateKey: + raise ValueError("Caller passed a certChain but no privateKey") + if privateKey and not certChain: + raise ValueError("Caller passed a privateKey but no certChain") + + if not settings: + settings = HandshakeSettings() + settings = settings._filter() + + #Initialize acceptable cipher suites + cipherSuites = [] + if verifierDB: + if certChain: + cipherSuites += \ + CipherSuite.getSrpRsaSuites(settings.cipherNames) + cipherSuites += CipherSuite.getSrpSuites(settings.cipherNames) + if sharedKeyDB or certChain: + cipherSuites += CipherSuite.getRsaSuites(settings.cipherNames) + + #Initialize acceptable certificate type + certificateType = None + if certChain: + try: + import cryptoIDlib.CertChain + if isinstance(certChain, cryptoIDlib.CertChain.CertChain): + certificateType = CertificateType.cryptoID + except ImportError: + pass + if isinstance(certChain, X509CertChain): + certificateType = CertificateType.x509 + if certificateType == None: + raise ValueError("Unrecognized certificate type") + + #Initialize locals + clientCertChain = None + serverCertChain = None #We may set certChain to this later + postFinishedError = None + + #Tentatively set version to most-desirable version, so if an error + #occurs parsing the ClientHello, this is what we'll use for the + #error alert + self.version = settings.maxVersion + + #Get ClientHello + for result in self._getMsg(ContentType.handshake, + HandshakeType.client_hello): + if result in (0,1): + yield result + else: + break + clientHello = result + + #If client's version is too low, reject it + if clientHello.client_version < settings.minVersion: + self.version = settings.minVersion + for result in self._sendError(\ + AlertDescription.protocol_version, + "Too old version: %s" % str(clientHello.client_version)): + yield result + + #If client's version is too high, propose my highest version + elif clientHello.client_version > settings.maxVersion: + self.version = settings.maxVersion + + else: + #Set the version to the client's version + self.version = clientHello.client_version + + #Get the client nonce; create server nonce + clientRandom = clientHello.random + serverRandom = getRandomBytes(32) + + #Calculate the first cipher suite intersection. + #This is the 'privileged' ciphersuite. We'll use it if we're + #doing a shared-key resumption or a new negotiation. In fact, + #the only time we won't use it is if we're resuming a non-sharedkey + #session, in which case we use the ciphersuite from the session. + # + #Given the current ciphersuite ordering, this means we prefer SRP + #over non-SRP. + for cipherSuite in cipherSuites: + if cipherSuite in clientHello.cipher_suites: + break + else: + for result in self._sendError(\ + AlertDescription.handshake_failure): + yield result + + #If resumption was requested... 
+ if clientHello.session_id and (sharedKeyDB or sessionCache): + session = None + + #Check in the sharedKeys container + if sharedKeyDB and len(clientHello.session_id)==16: + try: + #Trim off zero padding, if any + for x in range(16): + if clientHello.session_id[x]==0: + break + self.allegedSharedKeyUsername = bytesToString(\ + clientHello.session_id[:x]) + session = sharedKeyDB[self.allegedSharedKeyUsername] + if not session.sharedKey: + raise AssertionError() + #use privileged ciphersuite + session.cipherSuite = cipherSuite + except KeyError: + pass + + #Then check in the session cache + if sessionCache and not session: + try: + session = sessionCache[bytesToString(\ + clientHello.session_id)] + if session.sharedKey: + raise AssertionError() + if not session.resumable: + raise AssertionError() + #Check for consistency with ClientHello + if session.cipherSuite not in cipherSuites: + for result in self._sendError(\ + AlertDescription.handshake_failure): + yield result + if session.cipherSuite not in clientHello.cipher_suites: + for result in self._sendError(\ + AlertDescription.handshake_failure): + yield result + if clientHello.srp_username: + if clientHello.srp_username != session.srpUsername: + for result in self._sendError(\ + AlertDescription.handshake_failure): + yield result + except KeyError: + pass + + #If a session is found.. + if session: + #Set the session + self.session = session + + #Send ServerHello + serverHello = ServerHello() + serverHello.create(self.version, serverRandom, + session.sessionID, session.cipherSuite, + certificateType) + for result in self._sendMsg(serverHello): + yield result + + #From here on, the client's messages must have the right version + self._versionCheck = True + + #Calculate pending connection states + self._calcPendingStates(clientRandom, serverRandom, + settings.cipherImplementations) + + #Exchange ChangeCipherSpec and Finished messages + for result in self._sendFinished(): + yield result + for result in self._getFinished(): + yield result + + #Mark the connection as open + self._handshakeDone(resumed=True) + return + + + #If not a resumption... + + #TRICKY: we might have chosen an RSA suite that was only deemed + #acceptable because of the shared-key resumption. If the shared- + #key resumption failed, because the identifier wasn't recognized, + #we might fall through to here, where we have an RSA suite + #chosen, but no certificate. + if cipherSuite in CipherSuite.rsaSuites and not certChain: + for result in self._sendError(\ + AlertDescription.handshake_failure): + yield result + + #If an RSA suite is chosen, check for certificate type intersection + #(We do this check down here because if the mismatch occurs but the + # client is using a shared-key session, it's okay) + if cipherSuite in CipherSuite.rsaSuites + \ + CipherSuite.srpRsaSuites: + if certificateType not in clientHello.certificate_types: + for result in self._sendError(\ + AlertDescription.handshake_failure, + "the client doesn't support my certificate type"): + yield result + + #Move certChain -> serverCertChain, now that we're using it + serverCertChain = certChain + + + #Create sessionID + if sessionCache: + sessionID = getRandomBytes(32) + else: + sessionID = createByteArraySequence([]) + + #If we've selected an SRP suite, exchange keys and calculate + #premaster secret: + if cipherSuite in CipherSuite.srpSuites + CipherSuite.srpRsaSuites: + + #If there's no SRP username... 
+ if not clientHello.srp_username: + + #Ask the client to re-send ClientHello with one + for result in self._sendMsg(Alert().create(\ + AlertDescription.missing_srp_username, + AlertLevel.warning)): + yield result + + #Get ClientHello + for result in self._getMsg(ContentType.handshake, + HandshakeType.client_hello): + if result in (0,1): + yield result + else: + break + clientHello = result + + #Check ClientHello + #If client's version is too low, reject it (COPIED CODE; BAD!) + if clientHello.client_version < settings.minVersion: + self.version = settings.minVersion + for result in self._sendError(\ + AlertDescription.protocol_version, + "Too old version: %s" % str(clientHello.client_version)): + yield result + + #If client's version is too high, propose my highest version + elif clientHello.client_version > settings.maxVersion: + self.version = settings.maxVersion + + else: + #Set the version to the client's version + self.version = clientHello.client_version + + #Recalculate the privileged cipher suite, making sure to + #pick an SRP suite + cipherSuites = [c for c in cipherSuites if c in \ + CipherSuite.srpSuites + \ + CipherSuite.srpRsaSuites] + for cipherSuite in cipherSuites: + if cipherSuite in clientHello.cipher_suites: + break + else: + for result in self._sendError(\ + AlertDescription.handshake_failure): + yield result + + #Get the client nonce; create server nonce + clientRandom = clientHello.random + serverRandom = getRandomBytes(32) + + #The username better be there, this time + if not clientHello.srp_username: + for result in self._sendError(\ + AlertDescription.illegal_parameter, + "Client resent a hello, but without the SRP"\ + " username"): + yield result + + + #Get username + self.allegedSrpUsername = clientHello.srp_username + + #Get parameters from username + try: + entry = verifierDB[self.allegedSrpUsername] + except KeyError: + for result in self._sendError(\ + AlertDescription.unknown_srp_username): + yield result + (N, g, s, v) = entry + + #Calculate server's ephemeral DH values (b, B) + b = bytesToNumber(getRandomBytes(32)) + k = makeK(N, g) + B = (powMod(g, b, N) + (k*v)) % N + + #Create ServerKeyExchange, signing it if necessary + serverKeyExchange = ServerKeyExchange(cipherSuite) + serverKeyExchange.createSRP(N, g, stringToBytes(s), B) + if cipherSuite in CipherSuite.srpRsaSuites: + hashBytes = serverKeyExchange.hash(clientRandom, + serverRandom) + serverKeyExchange.signature = privateKey.sign(hashBytes) + + #Send ServerHello[, Certificate], ServerKeyExchange, + #ServerHelloDone + msgs = [] + serverHello = ServerHello() + serverHello.create(self.version, serverRandom, sessionID, + cipherSuite, certificateType) + msgs.append(serverHello) + if cipherSuite in CipherSuite.srpRsaSuites: + certificateMsg = Certificate(certificateType) + certificateMsg.create(serverCertChain) + msgs.append(certificateMsg) + msgs.append(serverKeyExchange) + msgs.append(ServerHelloDone()) + for result in self._sendMsgs(msgs): + yield result + + #From here on, the client's messages must have the right version + self._versionCheck = True + + #Get and check ClientKeyExchange + for result in self._getMsg(ContentType.handshake, + HandshakeType.client_key_exchange, + cipherSuite): + if result in (0,1): + yield result + else: + break + clientKeyExchange = result + A = clientKeyExchange.srp_A + if A % N == 0: + postFinishedError = (AlertDescription.illegal_parameter, + "Suspicious A value") + #Calculate u + u = makeU(N, A, B) + + #Calculate premaster secret + S = powMod((A * powMod(v,u,N)) % N, 
b, N) + premasterSecret = numberToBytes(S) + + + #If we've selected an RSA suite, exchange keys and calculate + #premaster secret: + elif cipherSuite in CipherSuite.rsaSuites: + + #Send ServerHello, Certificate[, CertificateRequest], + #ServerHelloDone + msgs = [] + msgs.append(ServerHello().create(self.version, serverRandom, + sessionID, cipherSuite, certificateType)) + msgs.append(Certificate(certificateType).create(serverCertChain)) + if reqCert: + msgs.append(CertificateRequest()) + msgs.append(ServerHelloDone()) + for result in self._sendMsgs(msgs): + yield result + + #From here on, the client's messages must have the right version + self._versionCheck = True + + #Get [Certificate,] (if was requested) + if reqCert: + if self.version == (3,0): + for result in self._getMsg((ContentType.handshake, + ContentType.alert), + HandshakeType.certificate, + certificateType): + if result in (0,1): + yield result + else: + break + msg = result + + if isinstance(msg, Alert): + #If it's not a no_certificate alert, re-raise + alert = msg + if alert.description != \ + AlertDescription.no_certificate: + self._shutdown(False) + raise TLSRemoteAlert(alert) + elif isinstance(msg, Certificate): + clientCertificate = msg + if clientCertificate.certChain and \ + clientCertificate.certChain.getNumCerts()!=0: + clientCertChain = clientCertificate.certChain + else: + raise AssertionError() + elif self.version in ((3,1), (3,2)): + for result in self._getMsg(ContentType.handshake, + HandshakeType.certificate, + certificateType): + if result in (0,1): + yield result + else: + break + clientCertificate = result + if clientCertificate.certChain and \ + clientCertificate.certChain.getNumCerts()!=0: + clientCertChain = clientCertificate.certChain + else: + raise AssertionError() + + #Get ClientKeyExchange + for result in self._getMsg(ContentType.handshake, + HandshakeType.client_key_exchange, + cipherSuite): + if result in (0,1): + yield result + else: + break + clientKeyExchange = result + + #Decrypt ClientKeyExchange + premasterSecret = privateKey.decrypt(\ + clientKeyExchange.encryptedPreMasterSecret) + + randomPreMasterSecret = getRandomBytes(48) + versionCheck = (premasterSecret[0], premasterSecret[1]) + if not premasterSecret: + premasterSecret = randomPreMasterSecret + elif len(premasterSecret)!=48: + premasterSecret = randomPreMasterSecret + elif versionCheck != clientHello.client_version: + if versionCheck != self.version: #Tolerate buggy IE clients + premasterSecret = randomPreMasterSecret + + #Get and check CertificateVerify, if relevant + if clientCertChain: + if self.version == (3,0): + #Create a temporary session object, just for the purpose + #of checking the CertificateVerify + session = Session() + session._calcMasterSecret(self.version, premasterSecret, + clientRandom, serverRandom) + verifyBytes = self._calcSSLHandshakeHash(\ + session.masterSecret, "") + elif self.version in ((3,1), (3,2)): + verifyBytes = stringToBytes(self._handshake_md5.digest() +\ + self._handshake_sha.digest()) + for result in self._getMsg(ContentType.handshake, + HandshakeType.certificate_verify): + if result in (0,1): + yield result + else: + break + certificateVerify = result + publicKey = clientCertChain.getEndEntityPublicKey() + if len(publicKey) < settings.minKeySize: + postFinishedError = (AlertDescription.handshake_failure, + "Client's public key too small: %d" % len(publicKey)) + if len(publicKey) > settings.maxKeySize: + postFinishedError = (AlertDescription.handshake_failure, + "Client's public key too large: %d" % 
len(publicKey)) + + if not publicKey.verify(certificateVerify.signature, + verifyBytes): + postFinishedError = (AlertDescription.decrypt_error, + "Signature failed to verify") + + + #Create the session object + self.session = Session() + self.session._calcMasterSecret(self.version, premasterSecret, + clientRandom, serverRandom) + self.session.sessionID = sessionID + self.session.cipherSuite = cipherSuite + self.session.srpUsername = self.allegedSrpUsername + self.session.clientCertChain = clientCertChain + self.session.serverCertChain = serverCertChain + + #Calculate pending connection states + self._calcPendingStates(clientRandom, serverRandom, + settings.cipherImplementations) + + #Exchange ChangeCipherSpec and Finished messages + for result in self._getFinished(): + yield result + + #If we were holding a post-finished error until receiving the client + #finished message, send it now. We delay the call until this point + #because calling sendError() throws an exception, and our caller might + #shut down the socket upon receiving the exception. If he did, and the + #client was still sending its ChangeCipherSpec or Finished messages, it + #would cause a socket error on the client side. This is a lot of + #consideration to show to misbehaving clients, but this would also + #cause problems with fault-testing. + if postFinishedError: + for result in self._sendError(*postFinishedError): + yield result + + for result in self._sendFinished(): + yield result + + #Add the session object to the session cache + if sessionCache and sessionID: + sessionCache[bytesToString(sessionID)] = self.session + + #Mark the connection as open + self.session._setResumable(True) + self._handshakeDone(resumed=False) + + + def _handshakeWrapperAsync(self, handshaker, checker): + if not self.fault: + try: + for result in handshaker: + yield result + if checker: + try: + checker(self) + except TLSAuthenticationError: + alert = Alert().create(AlertDescription.close_notify, + AlertLevel.fatal) + for result in self._sendMsg(alert): + yield result + raise + except: + self._shutdown(False) + raise + else: + try: + for result in handshaker: + yield result + if checker: + try: + checker(self) + except TLSAuthenticationError: + alert = Alert().create(AlertDescription.close_notify, + AlertLevel.fatal) + for result in self._sendMsg(alert): + yield result + raise + except socket.error, e: + raise TLSFaultError("socket error!") + except TLSAbruptCloseError, e: + raise TLSFaultError("abrupt close error!") + except TLSAlert, alert: + if alert.description not in Fault.faultAlerts[self.fault]: + raise TLSFaultError(str(alert)) + else: + pass + except: + self._shutdown(False) + raise + else: + raise TLSFaultError("No error!") + + + def _getKeyFromChain(self, certificate, settings): + #Get and check cert chain from the Certificate message + certChain = certificate.certChain + if not certChain or certChain.getNumCerts() == 0: + for result in self._sendError(AlertDescription.illegal_parameter, + "Other party sent a Certificate message without "\ + "certificates"): + yield result + + #Get and check public key from the cert chain + publicKey = certChain.getEndEntityPublicKey() + if len(publicKey) < settings.minKeySize: + for result in self._sendError(AlertDescription.handshake_failure, + "Other party's public key too small: %d" % len(publicKey)): + yield result + if len(publicKey) > settings.maxKeySize: + for result in self._sendError(AlertDescription.handshake_failure, + "Other party's public key too large: %d" % len(publicKey)): + yield 
result + + yield publicKey, certChain diff --git a/gam/gdata/tlslite/TLSRecordLayer.py b/gam/gdata/tlslite/TLSRecordLayer.py new file mode 100755 index 00000000000..875ce800700 --- /dev/null +++ b/gam/gdata/tlslite/TLSRecordLayer.py @@ -0,0 +1,1123 @@ +"""Helper class for TLSConnection.""" +from __future__ import generators + +from utils.compat import * +from utils.cryptomath import * +from utils.cipherfactory import createAES, createRC4, createTripleDES +from utils.codec import * +from errors import * +from messages import * +from mathtls import * +from constants import * +from utils.cryptomath import getRandomBytes +from utils import hmac +from FileObject import FileObject +import sha +import md5 +import socket +import errno +import traceback + +class _ConnectionState: + def __init__(self): + self.macContext = None + self.encContext = None + self.seqnum = 0 + + def getSeqNumStr(self): + w = Writer(8) + w.add(self.seqnum, 8) + seqnumStr = bytesToString(w.bytes) + self.seqnum += 1 + return seqnumStr + + +class TLSRecordLayer: + """ + This class handles data transmission for a TLS connection. + + Its only subclass is L{tlslite.TLSConnection.TLSConnection}. We've + separated the code in this class from TLSConnection to make things + more readable. + + + @type sock: socket.socket + @ivar sock: The underlying socket object. + + @type session: L{tlslite.Session.Session} + @ivar session: The session corresponding to this connection. + + Due to TLS session resumption, multiple connections can correspond + to the same underlying session. + + @type version: tuple + @ivar version: The TLS version being used for this connection. + + (3,0) means SSL 3.0, and (3,1) means TLS 1.0. + + @type closed: bool + @ivar closed: If this connection is closed. + + @type resumed: bool + @ivar resumed: If this connection is based on a resumed session. + + @type allegedSharedKeyUsername: str or None + @ivar allegedSharedKeyUsername: This is set to the shared-key + username asserted by the client, whether the handshake succeeded or + not. If the handshake fails, this can be inspected to + determine if a guessing attack is in progress against a particular + user account. + + @type allegedSrpUsername: str or None + @ivar allegedSrpUsername: This is set to the SRP username + asserted by the client, whether the handshake succeeded or not. + If the handshake fails, this can be inspected to determine + if a guessing attack is in progress against a particular user + account. + + @type closeSocket: bool + @ivar closeSocket: If the socket should be closed when the + connection is closed (writable). + + If you set this to True, TLS Lite will assume the responsibility of + closing the socket when the TLS Connection is shutdown (either + through an error or through the user calling close()). The default + is False. + + @type ignoreAbruptClose: bool + @ivar ignoreAbruptClose: If an abrupt close of the socket should + raise an error (writable). + + If you set this to True, TLS Lite will not raise a + L{tlslite.errors.TLSAbruptCloseError} exception if the underlying + socket is unexpectedly closed. Such an unexpected closure could be + caused by an attacker. However, it also occurs with some incorrect + TLS implementations. + + You should set this to True only if you're not worried about an + attacker truncating the connection, and only if necessary to avoid + spurious errors. The default is False. 
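# Illustrative sketch, not part of the patch: the two writable flags described
# above are plain instance attributes, set after the connection is created.
# `sock` is a placeholder; the `tlslite` package name is an assumption.
from tlslite.TLSConnection import TLSConnection

connection = TLSConnection(sock)
connection.closeSocket = True         # let TLS Lite close the socket on shutdown
connection.ignoreAbruptClose = True   # tolerate peers that skip close_notify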
+ + @sort: __init__, read, readAsync, write, writeAsync, close, closeAsync, + getCipherImplementation, getCipherName + """ + + def __init__(self, sock): + self.sock = sock + + #My session object (Session instance; read-only) + self.session = None + + #Am I a client or server? + self._client = None + + #Buffers for processing messages + self._handshakeBuffer = [] + self._readBuffer = "" + + #Handshake digests + self._handshake_md5 = md5.md5() + self._handshake_sha = sha.sha() + + #TLS Protocol Version + self.version = (0,0) #read-only + self._versionCheck = False #Once we choose a version, this is True + + #Current and Pending connection states + self._writeState = _ConnectionState() + self._readState = _ConnectionState() + self._pendingWriteState = _ConnectionState() + self._pendingReadState = _ConnectionState() + + #Is the connection open? + self.closed = True #read-only + self._refCount = 0 #Used to trigger closure + + #Is this a resumed (or shared-key) session? + self.resumed = False #read-only + + #What username did the client claim in his handshake? + self.allegedSharedKeyUsername = None + self.allegedSrpUsername = None + + #On a call to close(), do we close the socket? (writeable) + self.closeSocket = False + + #If the socket is abruptly closed, do we ignore it + #and pretend the connection was shut down properly? (writeable) + self.ignoreAbruptClose = False + + #Fault we will induce, for testing purposes + self.fault = None + + #********************************************************* + # Public Functions START + #********************************************************* + + def read(self, max=None, min=1): + """Read some data from the TLS connection. + + This function will block until at least 'min' bytes are + available (or the connection is closed). + + If an exception is raised, the connection will have been + automatically closed. + + @type max: int + @param max: The maximum number of bytes to return. + + @type min: int + @param min: The minimum number of bytes to return + + @rtype: str + @return: A string of no more than 'max' bytes, and no fewer + than 'min' (unless the connection has been closed, in which + case fewer than 'min' bytes may be returned). + + @raise socket.error: If a socket error occurs. + @raise tlslite.errors.TLSAbruptCloseError: If the socket is closed + without a preceding alert. + @raise tlslite.errors.TLSAlert: If a TLS alert is signalled. + """ + for result in self.readAsync(max, min): + pass + return result + + def readAsync(self, max=None, min=1): + """Start a read operation on the TLS connection. + + This function returns a generator which behaves similarly to + read(). Successive invocations of the generator will return 0 + if it is waiting to read from the socket, 1 if it is waiting + to write to the socket, or a string if the read operation has + completed. + + @rtype: iterable + @return: A generator; see above for details. + """ + try: + while len(self._readBuffer)= len(s): + break + if endIndex > len(s): + endIndex = len(s) + block = stringToBytes(s[startIndex : endIndex]) + applicationData = ApplicationData().create(block) + for result in self._sendMsg(applicationData, skipEmptyFrag): + yield result + skipEmptyFrag = True #only send an empy fragment on 1st message + index += 1 + except: + self._shutdown(False) + raise + + def close(self): + """Close the TLS connection. + + This function will block until it has exchanged close_notify + alerts with the other party. After doing so, it will shut down the + TLS connection. 
Further attempts to read through this connection + will return "". Further attempts to write through this connection + will raise ValueError. + + If makefile() has been called on this connection, the connection + will be not be closed until the connection object and all file + objects have been closed. + + Even if an exception is raised, the connection will have been + closed. + + @raise socket.error: If a socket error occurs. + @raise tlslite.errors.TLSAbruptCloseError: If the socket is closed + without a preceding alert. + @raise tlslite.errors.TLSAlert: If a TLS alert is signalled. + """ + if not self.closed: + for result in self._decrefAsync(): + pass + + def closeAsync(self): + """Start a close operation on the TLS connection. + + This function returns a generator which behaves similarly to + close(). Successive invocations of the generator will return 0 + if it is waiting to read from the socket, 1 if it is waiting + to write to the socket, or will raise StopIteration if the + close operation has completed. + + @rtype: iterable + @return: A generator; see above for details. + """ + if not self.closed: + for result in self._decrefAsync(): + yield result + + def _decrefAsync(self): + self._refCount -= 1 + if self._refCount == 0 and not self.closed: + try: + for result in self._sendMsg(Alert().create(\ + AlertDescription.close_notify, AlertLevel.warning)): + yield result + alert = None + while not alert: + for result in self._getMsg((ContentType.alert, \ + ContentType.application_data)): + if result in (0,1): + yield result + if result.contentType == ContentType.alert: + alert = result + if alert.description == AlertDescription.close_notify: + self._shutdown(True) + else: + raise TLSRemoteAlert(alert) + except (socket.error, TLSAbruptCloseError): + #If the other side closes the socket, that's okay + self._shutdown(True) + except: + self._shutdown(False) + raise + + def getCipherName(self): + """Get the name of the cipher used with this connection. + + @rtype: str + @return: The name of the cipher used with this connection. + Either 'aes128', 'aes256', 'rc4', or '3des'. + """ + if not self._writeState.encContext: + return None + return self._writeState.encContext.name + + def getCipherImplementation(self): + """Get the name of the cipher implementation used with + this connection. + + @rtype: str + @return: The name of the cipher implementation used with + this connection. Either 'python', 'cryptlib', 'openssl', + or 'pycrypto'. + """ + if not self._writeState.encContext: + return None + return self._writeState.encContext.implementation + + + + #Emulate a socket, somewhat - + def send(self, s): + """Send data to the TLS connection (socket emulation). + + @raise socket.error: If a socket error occurs. + """ + self.write(s) + return len(s) + + def sendall(self, s): + """Send data to the TLS connection (socket emulation). + + @raise socket.error: If a socket error occurs. + """ + self.write(s) + + def recv(self, bufsize): + """Get some data from the TLS connection (socket emulation). + + @raise socket.error: If a socket error occurs. + @raise tlslite.errors.TLSAbruptCloseError: If the socket is closed + without a preceding alert. + @raise tlslite.errors.TLSAlert: If a TLS alert is signalled. + """ + return self.read(bufsize) + + def makefile(self, mode='r', bufsize=-1): + """Create a file object for the TLS connection (socket emulation). 
+ + @rtype: L{tlslite.FileObject.FileObject} + """ + self._refCount += 1 + return FileObject(self, mode, bufsize) + + def getsockname(self): + """Return the socket's own address (socket emulation).""" + return self.sock.getsockname() + + def getpeername(self): + """Return the remote address to which the socket is connected + (socket emulation).""" + return self.sock.getpeername() + + def settimeout(self, value): + """Set a timeout on blocking socket operations (socket emulation).""" + return self.sock.settimeout(value) + + def gettimeout(self): + """Return the timeout associated with socket operations (socket + emulation).""" + return self.sock.gettimeout() + + def setsockopt(self, level, optname, value): + """Set the value of the given socket option (socket emulation).""" + return self.sock.setsockopt(level, optname, value) + + + #********************************************************* + # Public Functions END + #********************************************************* + + def _shutdown(self, resumable): + self._writeState = _ConnectionState() + self._readState = _ConnectionState() + #Don't do this: self._readBuffer = "" + self.version = (0,0) + self._versionCheck = False + self.closed = True + if self.closeSocket: + self.sock.close() + + #Even if resumable is False, we'll never toggle this on + if not resumable and self.session: + self.session.resumable = False + + + def _sendError(self, alertDescription, errorStr=None): + alert = Alert().create(alertDescription, AlertLevel.fatal) + for result in self._sendMsg(alert): + yield result + self._shutdown(False) + raise TLSLocalAlert(alert, errorStr) + + def _sendMsgs(self, msgs): + skipEmptyFrag = False + for msg in msgs: + for result in self._sendMsg(msg, skipEmptyFrag): + yield result + skipEmptyFrag = True + + def _sendMsg(self, msg, skipEmptyFrag=False): + bytes = msg.write() + contentType = msg.contentType + + #Whenever we're connected and asked to send a message, + #we first send an empty Application Data message. This prevents + #an attacker from launching a chosen-plaintext attack based on + #knowing the next IV. 
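+        #(In more detail: with CBC cipher suites in TLS 1.0 the IV for the
+        # next record is the last ciphertext block of the previous record,
+        # which the attacker has already observed.  Sending a zero-length
+        # application_data record first means the record that actually
+        # carries data is encrypted under an IV that was not predictable
+        # when its plaintext was chosen.)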
+ if not self.closed and not skipEmptyFrag and self.version == (3,1): + if self._writeState.encContext: + if self._writeState.encContext.isBlockCipher: + for result in self._sendMsg(ApplicationData(), + skipEmptyFrag=True): + yield result + + #Update handshake hashes + if contentType == ContentType.handshake: + bytesStr = bytesToString(bytes) + self._handshake_md5.update(bytesStr) + self._handshake_sha.update(bytesStr) + + #Calculate MAC + if self._writeState.macContext: + seqnumStr = self._writeState.getSeqNumStr() + bytesStr = bytesToString(bytes) + mac = self._writeState.macContext.copy() + mac.update(seqnumStr) + mac.update(chr(contentType)) + if self.version == (3,0): + mac.update( chr( int(len(bytes)/256) ) ) + mac.update( chr( int(len(bytes)%256) ) ) + elif self.version in ((3,1), (3,2)): + mac.update(chr(self.version[0])) + mac.update(chr(self.version[1])) + mac.update( chr( int(len(bytes)/256) ) ) + mac.update( chr( int(len(bytes)%256) ) ) + else: + raise AssertionError() + mac.update(bytesStr) + macString = mac.digest() + macBytes = stringToBytes(macString) + if self.fault == Fault.badMAC: + macBytes[0] = (macBytes[0]+1) % 256 + + #Encrypt for Block or Stream Cipher + if self._writeState.encContext: + #Add padding and encrypt (for Block Cipher): + if self._writeState.encContext.isBlockCipher: + + #Add TLS 1.1 fixed block + if self.version == (3,2): + bytes = self.fixedIVBlock + bytes + + #Add padding: bytes = bytes + (macBytes + paddingBytes) + currentLength = len(bytes) + len(macBytes) + 1 + blockLength = self._writeState.encContext.block_size + paddingLength = blockLength-(currentLength % blockLength) + + paddingBytes = createByteArraySequence([paddingLength] * \ + (paddingLength+1)) + if self.fault == Fault.badPadding: + paddingBytes[0] = (paddingBytes[0]+1) % 256 + endBytes = concatArrays(macBytes, paddingBytes) + bytes = concatArrays(bytes, endBytes) + #Encrypt + plaintext = stringToBytes(bytes) + ciphertext = self._writeState.encContext.encrypt(plaintext) + bytes = stringToBytes(ciphertext) + + #Encrypt (for Stream Cipher) + else: + bytes = concatArrays(bytes, macBytes) + plaintext = bytesToString(bytes) + ciphertext = self._writeState.encContext.encrypt(plaintext) + bytes = stringToBytes(ciphertext) + + #Add record header and send + r = RecordHeader3().create(self.version, contentType, len(bytes)) + s = bytesToString(concatArrays(r.write(), bytes)) + while 1: + try: + bytesSent = self.sock.send(s) #Might raise socket.error + except socket.error, why: + if why[0] == errno.EWOULDBLOCK: + yield 1 + continue + else: + raise + if bytesSent == len(s): + return + s = s[bytesSent:] + yield 1 + + + def _getMsg(self, expectedType, secondaryType=None, constructorType=None): + try: + if not isinstance(expectedType, tuple): + expectedType = (expectedType,) + + #Spin in a loop, until we've got a non-empty record of a type we + #expect. The loop will be repeated if: + # - we receive a renegotiation attempt; we send no_renegotiation, + # then try again + # - we receive an empty application-data fragment; we try again + while 1: + for result in self._getNextRecord(): + if result in (0,1): + yield result + recordHeader, p = result + + #If this is an empty application-data fragment, try again + if recordHeader.type == ContentType.application_data: + if p.index == len(p.bytes): + continue + + #If we received an unexpected record type... + if recordHeader.type not in expectedType: + + #If we received an alert... 
+ if recordHeader.type == ContentType.alert: + alert = Alert().parse(p) + + #We either received a fatal error, a warning, or a + #close_notify. In any case, we're going to close the + #connection. In the latter two cases we respond with + #a close_notify, but ignore any socket errors, since + #the other side might have already closed the socket. + if alert.level == AlertLevel.warning or \ + alert.description == AlertDescription.close_notify: + + #If the sendMsg() call fails because the socket has + #already been closed, we will be forgiving and not + #report the error nor invalidate the "resumability" + #of the session. + try: + alertMsg = Alert() + alertMsg.create(AlertDescription.close_notify, + AlertLevel.warning) + for result in self._sendMsg(alertMsg): + yield result + except socket.error: + pass + + if alert.description == \ + AlertDescription.close_notify: + self._shutdown(True) + elif alert.level == AlertLevel.warning: + self._shutdown(False) + + else: #Fatal alert: + self._shutdown(False) + + #Raise the alert as an exception + raise TLSRemoteAlert(alert) + + #If we received a renegotiation attempt... + if recordHeader.type == ContentType.handshake: + subType = p.get(1) + reneg = False + if self._client: + if subType == HandshakeType.hello_request: + reneg = True + else: + if subType == HandshakeType.client_hello: + reneg = True + #Send no_renegotiation, then try again + if reneg: + alertMsg = Alert() + alertMsg.create(AlertDescription.no_renegotiation, + AlertLevel.warning) + for result in self._sendMsg(alertMsg): + yield result + continue + + #Otherwise: this is an unexpected record, but neither an + #alert nor renegotiation + for result in self._sendError(\ + AlertDescription.unexpected_message, + "received type=%d" % recordHeader.type): + yield result + + break + + #Parse based on content_type + if recordHeader.type == ContentType.change_cipher_spec: + yield ChangeCipherSpec().parse(p) + elif recordHeader.type == ContentType.alert: + yield Alert().parse(p) + elif recordHeader.type == ContentType.application_data: + yield ApplicationData().parse(p) + elif recordHeader.type == ContentType.handshake: + #Convert secondaryType to tuple, if it isn't already + if not isinstance(secondaryType, tuple): + secondaryType = (secondaryType,) + + #If it's a handshake message, check handshake header + if recordHeader.ssl2: + subType = p.get(1) + if subType != HandshakeType.client_hello: + for result in self._sendError(\ + AlertDescription.unexpected_message, + "Can only handle SSLv2 ClientHello messages"): + yield result + if HandshakeType.client_hello not in secondaryType: + for result in self._sendError(\ + AlertDescription.unexpected_message): + yield result + subType = HandshakeType.client_hello + else: + subType = p.get(1) + if subType not in secondaryType: + for result in self._sendError(\ + AlertDescription.unexpected_message, + "Expecting %s, got %s" % (str(secondaryType), subType)): + yield result + + #Update handshake hashes + sToHash = bytesToString(p.bytes) + self._handshake_md5.update(sToHash) + self._handshake_sha.update(sToHash) + + #Parse based on handshake type + if subType == HandshakeType.client_hello: + yield ClientHello(recordHeader.ssl2).parse(p) + elif subType == HandshakeType.server_hello: + yield ServerHello().parse(p) + elif subType == HandshakeType.certificate: + yield Certificate(constructorType).parse(p) + elif subType == HandshakeType.certificate_request: + yield CertificateRequest().parse(p) + elif subType == HandshakeType.certificate_verify: + yield 
CertificateVerify().parse(p) + elif subType == HandshakeType.server_key_exchange: + yield ServerKeyExchange(constructorType).parse(p) + elif subType == HandshakeType.server_hello_done: + yield ServerHelloDone().parse(p) + elif subType == HandshakeType.client_key_exchange: + yield ClientKeyExchange(constructorType, \ + self.version).parse(p) + elif subType == HandshakeType.finished: + yield Finished(self.version).parse(p) + else: + raise AssertionError() + + #If an exception was raised by a Parser or Message instance: + except SyntaxError, e: + for result in self._sendError(AlertDescription.decode_error, + formatExceptionTrace(e)): + yield result + + + #Returns next record or next handshake message + def _getNextRecord(self): + + #If there's a handshake message waiting, return it + if self._handshakeBuffer: + recordHeader, bytes = self._handshakeBuffer[0] + self._handshakeBuffer = self._handshakeBuffer[1:] + yield (recordHeader, Parser(bytes)) + return + + #Otherwise... + #Read the next record header + bytes = createByteArraySequence([]) + recordHeaderLength = 1 + ssl2 = False + while 1: + try: + s = self.sock.recv(recordHeaderLength-len(bytes)) + except socket.error, why: + if why[0] == errno.EWOULDBLOCK: + yield 0 + continue + else: + raise + + #If the connection was abruptly closed, raise an error + if len(s)==0: + raise TLSAbruptCloseError() + + bytes += stringToBytes(s) + if len(bytes)==1: + if bytes[0] in ContentType.all: + ssl2 = False + recordHeaderLength = 5 + elif bytes[0] == 128: + ssl2 = True + recordHeaderLength = 2 + else: + raise SyntaxError() + if len(bytes) == recordHeaderLength: + break + + #Parse the record header + if ssl2: + r = RecordHeader2().parse(Parser(bytes)) + else: + r = RecordHeader3().parse(Parser(bytes)) + + #Check the record header fields + if r.length > 18432: + for result in self._sendError(AlertDescription.record_overflow): + yield result + + #Read the record contents + bytes = createByteArraySequence([]) + while 1: + try: + s = self.sock.recv(r.length - len(bytes)) + except socket.error, why: + if why[0] == errno.EWOULDBLOCK: + yield 0 + continue + else: + raise + + #If the connection is closed, raise a socket error + if len(s)==0: + raise TLSAbruptCloseError() + + bytes += stringToBytes(s) + if len(bytes) == r.length: + break + + #Check the record header fields (2) + #We do this after reading the contents from the socket, so that + #if there's an error, we at least don't leave extra bytes in the + #socket.. + # + # THIS CHECK HAS NO SECURITY RELEVANCE (?), BUT COULD HURT INTEROP. + # SO WE LEAVE IT OUT FOR NOW. 
+ # + #if self._versionCheck and r.version != self.version: + # for result in self._sendError(AlertDescription.protocol_version, + # "Version in header field: %s, should be %s" % (str(r.version), + # str(self.version))): + # yield result + + #Decrypt the record + for result in self._decryptRecord(r.type, bytes): + if result in (0,1): + yield result + else: + break + bytes = result + p = Parser(bytes) + + #If it doesn't contain handshake messages, we can just return it + if r.type != ContentType.handshake: + yield (r, p) + #If it's an SSLv2 ClientHello, we can return it as well + elif r.ssl2: + yield (r, p) + else: + #Otherwise, we loop through and add the handshake messages to the + #handshake buffer + while 1: + if p.index == len(bytes): #If we're at the end + if not self._handshakeBuffer: + for result in self._sendError(\ + AlertDescription.decode_error, \ + "Received empty handshake record"): + yield result + break + #There needs to be at least 4 bytes to get a header + if p.index+4 > len(bytes): + for result in self._sendError(\ + AlertDescription.decode_error, + "A record has a partial handshake message (1)"): + yield result + p.get(1) # skip handshake type + msgLength = p.get(3) + if p.index+msgLength > len(bytes): + for result in self._sendError(\ + AlertDescription.decode_error, + "A record has a partial handshake message (2)"): + yield result + + handshakePair = (r, bytes[p.index-4 : p.index+msgLength]) + self._handshakeBuffer.append(handshakePair) + p.index += msgLength + + #We've moved at least one handshake message into the + #handshakeBuffer, return the first one + recordHeader, bytes = self._handshakeBuffer[0] + self._handshakeBuffer = self._handshakeBuffer[1:] + yield (recordHeader, Parser(bytes)) + + + def _decryptRecord(self, recordType, bytes): + if self._readState.encContext: + + #Decrypt if it's a block cipher + if self._readState.encContext.isBlockCipher: + blockLength = self._readState.encContext.block_size + if len(bytes) % blockLength != 0: + for result in self._sendError(\ + AlertDescription.decryption_failed, + "Encrypted data not a multiple of blocksize"): + yield result + ciphertext = bytesToString(bytes) + plaintext = self._readState.encContext.decrypt(ciphertext) + if self.version == (3,2): #For TLS 1.1, remove explicit IV + plaintext = plaintext[self._readState.encContext.block_size : ] + bytes = stringToBytes(plaintext) + + #Check padding + paddingGood = True + paddingLength = bytes[-1] + if (paddingLength+1) > len(bytes): + paddingGood=False + totalPaddingLength = 0 + else: + if self.version == (3,0): + totalPaddingLength = paddingLength+1 + elif self.version in ((3,1), (3,2)): + totalPaddingLength = paddingLength+1 + paddingBytes = bytes[-totalPaddingLength:-1] + for byte in paddingBytes: + if byte != paddingLength: + paddingGood = False + totalPaddingLength = 0 + else: + raise AssertionError() + + #Decrypt if it's a stream cipher + else: + paddingGood = True + ciphertext = bytesToString(bytes) + plaintext = self._readState.encContext.decrypt(ciphertext) + bytes = stringToBytes(plaintext) + totalPaddingLength = 0 + + #Check MAC + macGood = True + macLength = self._readState.macContext.digest_size + endLength = macLength + totalPaddingLength + if endLength > len(bytes): + macGood = False + else: + #Read MAC + startIndex = len(bytes) - endLength + endIndex = startIndex + macLength + checkBytes = bytes[startIndex : endIndex] + + #Calculate MAC + seqnumStr = self._readState.getSeqNumStr() + bytes = bytes[:-endLength] + bytesStr = bytesToString(bytes) + mac = 
self._readState.macContext.copy() + mac.update(seqnumStr) + mac.update(chr(recordType)) + if self.version == (3,0): + mac.update( chr( int(len(bytes)/256) ) ) + mac.update( chr( int(len(bytes)%256) ) ) + elif self.version in ((3,1), (3,2)): + mac.update(chr(self.version[0])) + mac.update(chr(self.version[1])) + mac.update( chr( int(len(bytes)/256) ) ) + mac.update( chr( int(len(bytes)%256) ) ) + else: + raise AssertionError() + mac.update(bytesStr) + macString = mac.digest() + macBytes = stringToBytes(macString) + + #Compare MACs + if macBytes != checkBytes: + macGood = False + + if not (paddingGood and macGood): + for result in self._sendError(AlertDescription.bad_record_mac, + "MAC failure (or padding failure)"): + yield result + + yield bytes + + def _handshakeStart(self, client): + self._client = client + self._handshake_md5 = md5.md5() + self._handshake_sha = sha.sha() + self._handshakeBuffer = [] + self.allegedSharedKeyUsername = None + self.allegedSrpUsername = None + self._refCount = 1 + + def _handshakeDone(self, resumed): + self.resumed = resumed + self.closed = False + + def _calcPendingStates(self, clientRandom, serverRandom, implementations): + if self.session.cipherSuite in CipherSuite.aes128Suites: + macLength = 20 + keyLength = 16 + ivLength = 16 + createCipherFunc = createAES + elif self.session.cipherSuite in CipherSuite.aes256Suites: + macLength = 20 + keyLength = 32 + ivLength = 16 + createCipherFunc = createAES + elif self.session.cipherSuite in CipherSuite.rc4Suites: + macLength = 20 + keyLength = 16 + ivLength = 0 + createCipherFunc = createRC4 + elif self.session.cipherSuite in CipherSuite.tripleDESSuites: + macLength = 20 + keyLength = 24 + ivLength = 8 + createCipherFunc = createTripleDES + else: + raise AssertionError() + + if self.version == (3,0): + createMACFunc = MAC_SSL + elif self.version in ((3,1), (3,2)): + createMACFunc = hmac.HMAC + + outputLength = (macLength*2) + (keyLength*2) + (ivLength*2) + + #Calculate Keying Material from Master Secret + if self.version == (3,0): + keyBlock = PRF_SSL(self.session.masterSecret, + concatArrays(serverRandom, clientRandom), + outputLength) + elif self.version in ((3,1), (3,2)): + keyBlock = PRF(self.session.masterSecret, + "key expansion", + concatArrays(serverRandom,clientRandom), + outputLength) + else: + raise AssertionError() + + #Slice up Keying Material + clientPendingState = _ConnectionState() + serverPendingState = _ConnectionState() + p = Parser(keyBlock) + clientMACBlock = bytesToString(p.getFixBytes(macLength)) + serverMACBlock = bytesToString(p.getFixBytes(macLength)) + clientKeyBlock = bytesToString(p.getFixBytes(keyLength)) + serverKeyBlock = bytesToString(p.getFixBytes(keyLength)) + clientIVBlock = bytesToString(p.getFixBytes(ivLength)) + serverIVBlock = bytesToString(p.getFixBytes(ivLength)) + clientPendingState.macContext = createMACFunc(clientMACBlock, + digestmod=sha) + serverPendingState.macContext = createMACFunc(serverMACBlock, + digestmod=sha) + clientPendingState.encContext = createCipherFunc(clientKeyBlock, + clientIVBlock, + implementations) + serverPendingState.encContext = createCipherFunc(serverKeyBlock, + serverIVBlock, + implementations) + + #Assign new connection states to pending states + if self._client: + self._pendingWriteState = clientPendingState + self._pendingReadState = serverPendingState + else: + self._pendingWriteState = serverPendingState + self._pendingReadState = clientPendingState + + if self.version == (3,2) and ivLength: + #Choose fixedIVBlock for TLS 1.1 (this is 
encrypted with the CBC + #residue to create the IV for each sent block) + self.fixedIVBlock = getRandomBytes(ivLength) + + def _changeWriteState(self): + self._writeState = self._pendingWriteState + self._pendingWriteState = _ConnectionState() + + def _changeReadState(self): + self._readState = self._pendingReadState + self._pendingReadState = _ConnectionState() + + def _sendFinished(self): + #Send ChangeCipherSpec + for result in self._sendMsg(ChangeCipherSpec()): + yield result + + #Switch to pending write state + self._changeWriteState() + + #Calculate verification data + verifyData = self._calcFinished(True) + if self.fault == Fault.badFinished: + verifyData[0] = (verifyData[0]+1)%256 + + #Send Finished message under new state + finished = Finished(self.version).create(verifyData) + for result in self._sendMsg(finished): + yield result + + def _getFinished(self): + #Get and check ChangeCipherSpec + for result in self._getMsg(ContentType.change_cipher_spec): + if result in (0,1): + yield result + changeCipherSpec = result + + if changeCipherSpec.type != 1: + for result in self._sendError(AlertDescription.illegal_parameter, + "ChangeCipherSpec type incorrect"): + yield result + + #Switch to pending read state + self._changeReadState() + + #Calculate verification data + verifyData = self._calcFinished(False) + + #Get and check Finished message under new state + for result in self._getMsg(ContentType.handshake, + HandshakeType.finished): + if result in (0,1): + yield result + finished = result + if finished.verify_data != verifyData: + for result in self._sendError(AlertDescription.decrypt_error, + "Finished message is incorrect"): + yield result + + def _calcFinished(self, send=True): + if self.version == (3,0): + if (self._client and send) or (not self._client and not send): + senderStr = "\x43\x4C\x4E\x54" + else: + senderStr = "\x53\x52\x56\x52" + + verifyData = self._calcSSLHandshakeHash(self.session.masterSecret, + senderStr) + return verifyData + + elif self.version in ((3,1), (3,2)): + if (self._client and send) or (not self._client and not send): + label = "client finished" + else: + label = "server finished" + + handshakeHashes = stringToBytes(self._handshake_md5.digest() + \ + self._handshake_sha.digest()) + verifyData = PRF(self.session.masterSecret, label, handshakeHashes, + 12) + return verifyData + else: + raise AssertionError() + + #Used for Finished messages and CertificateVerify messages in SSL v3 + def _calcSSLHandshakeHash(self, masterSecret, label): + masterSecretStr = bytesToString(masterSecret) + + imac_md5 = self._handshake_md5.copy() + imac_sha = self._handshake_sha.copy() + + imac_md5.update(label + masterSecretStr + '\x36'*48) + imac_sha.update(label + masterSecretStr + '\x36'*40) + + md5Str = md5.md5(masterSecretStr + ('\x5c'*48) + \ + imac_md5.digest()).digest() + shaStr = sha.sha(masterSecretStr + ('\x5c'*40) + \ + imac_sha.digest()).digest() + + return stringToBytes(md5Str + shaStr) + diff --git a/gam/gdata/tlslite/VerifierDB.py b/gam/gdata/tlslite/VerifierDB.py new file mode 100755 index 00000000000..f706b179672 --- /dev/null +++ b/gam/gdata/tlslite/VerifierDB.py @@ -0,0 +1,90 @@ +"""Class for storing SRP password verifiers.""" + +from utils.cryptomath import * +from utils.compat import * +import mathtls +from BaseDB import BaseDB + +class VerifierDB(BaseDB): + """This class represent an in-memory or on-disk database of SRP + password verifiers. + + A VerifierDB can be passed to a server handshake to authenticate + a client based on one of the verifiers. 
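+
+    A typical setup sketch (illustrative only; the filename, username and
+    password below are made-up values)::
+
+        verifierDB = VerifierDB("./srp_verifiers")
+        verifierDB.create()
+        entry = VerifierDB.makeVerifier("alice", "password", 2048)
+        verifierDB["alice"] = entry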
+ + This class is thread-safe. + """ + def __init__(self, filename=None): + """Create a new VerifierDB instance. + + @type filename: str + @param filename: Filename for an on-disk database, or None for + an in-memory database. If the filename already exists, follow + this with a call to open(). To create a new on-disk database, + follow this with a call to create(). + """ + BaseDB.__init__(self, filename, "verifier") + + def _getItem(self, username, valueStr): + (N, g, salt, verifier) = valueStr.split(" ") + N = base64ToNumber(N) + g = base64ToNumber(g) + salt = base64ToString(salt) + verifier = base64ToNumber(verifier) + return (N, g, salt, verifier) + + def __setitem__(self, username, verifierEntry): + """Add a verifier entry to the database. + + @type username: str + @param username: The username to associate the verifier with. + Must be less than 256 characters in length. Must not already + be in the database. + + @type verifierEntry: tuple + @param verifierEntry: The verifier entry to add. Use + L{tlslite.VerifierDB.VerifierDB.makeVerifier} to create a + verifier entry. + """ + BaseDB.__setitem__(self, username, verifierEntry) + + + def _setItem(self, username, value): + if len(username)>=256: + raise ValueError("username too long") + N, g, salt, verifier = value + N = numberToBase64(N) + g = numberToBase64(g) + salt = stringToBase64(salt) + verifier = numberToBase64(verifier) + valueStr = " ".join( (N, g, salt, verifier) ) + return valueStr + + def _checkItem(self, value, username, param): + (N, g, salt, verifier) = value + x = mathtls.makeX(salt, username, param) + v = powMod(g, x, N) + return (verifier == v) + + + def makeVerifier(username, password, bits): + """Create a verifier entry which can be stored in a VerifierDB. + + @type username: str + @param username: The username for this verifier. Must be less + than 256 characters in length. + + @type password: str + @param password: The password for this verifier. + + @type bits: int + @param bits: This values specifies which SRP group parameters + to use. It must be one of (1024, 1536, 2048, 3072, 4096, 6144, + 8192). Larger values are more secure but slower. 2048 is a + good compromise between safety and speed. + + @rtype: tuple + @return: A tuple which may be stored in a VerifierDB. + """ + return mathtls.makeVerifier(username, password, bits) + makeVerifier = staticmethod(makeVerifier) \ No newline at end of file diff --git a/gam/gdata/tlslite/X509.py b/gam/gdata/tlslite/X509.py new file mode 100755 index 00000000000..a47ddcfa2a2 --- /dev/null +++ b/gam/gdata/tlslite/X509.py @@ -0,0 +1,133 @@ +"""Class representing an X.509 certificate.""" + +from utils.ASN1Parser import ASN1Parser +from utils.cryptomath import * +from utils.keyfactory import _createPublicRSAKey + + +class X509: + """This class represents an X.509 certificate. + + @type bytes: L{array.array} of unsigned bytes + @ivar bytes: The DER-encoded ASN.1 certificate + + @type publicKey: L{tlslite.utils.RSAKey.RSAKey} + @ivar publicKey: The subject public key from the certificate. + """ + + def __init__(self): + self.bytes = createByteArraySequence([]) + self.publicKey = None + + def parse(self, s): + """Parse a PEM-encoded X.509 certificate. + + @type s: str + @param s: A PEM-encoded X.509 certificate (i.e. a base64-encoded + certificate wrapped with "-----BEGIN CERTIFICATE-----" and + "-----END CERTIFICATE-----" tags). 
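+
+        A small usage sketch (the certificate path is hypothetical)::
+
+            x509 = X509()
+            x509.parse(open("servercert.pem").read())
+            print x509.getFingerprint()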
+ """ + + start = s.find("-----BEGIN CERTIFICATE-----") + end = s.find("-----END CERTIFICATE-----") + if start == -1: + raise SyntaxError("Missing PEM prefix") + if end == -1: + raise SyntaxError("Missing PEM postfix") + s = s[start+len("-----BEGIN CERTIFICATE-----") : end] + + bytes = base64ToBytes(s) + self.parseBinary(bytes) + return self + + def parseBinary(self, bytes): + """Parse a DER-encoded X.509 certificate. + + @type bytes: str or L{array.array} of unsigned bytes + @param bytes: A DER-encoded X.509 certificate. + """ + + if isinstance(bytes, type("")): + bytes = stringToBytes(bytes) + + self.bytes = bytes + p = ASN1Parser(bytes) + + #Get the tbsCertificate + tbsCertificateP = p.getChild(0) + + #Is the optional version field present? + #This determines which index the key is at. + if tbsCertificateP.value[0]==0xA0: + subjectPublicKeyInfoIndex = 6 + else: + subjectPublicKeyInfoIndex = 5 + + #Get the subjectPublicKeyInfo + subjectPublicKeyInfoP = tbsCertificateP.getChild(\ + subjectPublicKeyInfoIndex) + + #Get the algorithm + algorithmP = subjectPublicKeyInfoP.getChild(0) + rsaOID = algorithmP.value + if list(rsaOID) != [6, 9, 42, 134, 72, 134, 247, 13, 1, 1, 1, 5, 0]: + raise SyntaxError("Unrecognized AlgorithmIdentifier") + + #Get the subjectPublicKey + subjectPublicKeyP = subjectPublicKeyInfoP.getChild(1) + + #Adjust for BIT STRING encapsulation + if (subjectPublicKeyP.value[0] !=0): + raise SyntaxError() + subjectPublicKeyP = ASN1Parser(subjectPublicKeyP.value[1:]) + + #Get the modulus and exponent + modulusP = subjectPublicKeyP.getChild(0) + publicExponentP = subjectPublicKeyP.getChild(1) + + #Decode them into numbers + n = bytesToNumber(modulusP.value) + e = bytesToNumber(publicExponentP.value) + + #Create a public key instance + self.publicKey = _createPublicRSAKey(n, e) + + def getFingerprint(self): + """Get the hex-encoded fingerprint of this certificate. + + @rtype: str + @return: A hex-encoded fingerprint. + """ + return sha.sha(self.bytes).hexdigest() + + def getCommonName(self): + """Get the Subject's Common Name from the certificate. + + The cryptlib_py module must be installed in order to use this + function. + + @rtype: str or None + @return: The CN component of the certificate's subject DN, if + present. + """ + import cryptlib_py + import array + c = cryptlib_py.cryptImportCert(self.bytes, cryptlib_py.CRYPT_UNUSED) + name = cryptlib_py.CRYPT_CERTINFO_COMMONNAME + try: + try: + length = cryptlib_py.cryptGetAttributeString(c, name, None) + returnVal = array.array('B', [0] * length) + cryptlib_py.cryptGetAttributeString(c, name, returnVal) + returnVal = returnVal.tostring() + except cryptlib_py.CryptException, e: + if e[0] == cryptlib_py.CRYPT_ERROR_NOTFOUND: + returnVal = None + return returnVal + finally: + cryptlib_py.cryptDestroyCert(c) + + def writeBytes(self): + return self.bytes + + diff --git a/gam/gdata/tlslite/X509CertChain.py b/gam/gdata/tlslite/X509CertChain.py new file mode 100755 index 00000000000..d5f0b4d42a4 --- /dev/null +++ b/gam/gdata/tlslite/X509CertChain.py @@ -0,0 +1,181 @@ +"""Class representing an X.509 certificate chain.""" + +from utils import cryptomath + +class X509CertChain: + """This class represents a chain of X.509 certificates. + + @type x509List: list + @ivar x509List: A list of L{tlslite.X509.X509} instances, + starting with the end-entity certificate and with every + subsequent certificate certifying the previous. + """ + + def __init__(self, x509List=None): + """Create a new X509CertChain. 
+ + @type x509List: list + @param x509List: A list of L{tlslite.X509.X509} instances, + starting with the end-entity certificate and with every + subsequent certificate certifying the previous. + """ + if x509List: + self.x509List = x509List + else: + self.x509List = [] + + def getNumCerts(self): + """Get the number of certificates in this chain. + + @rtype: int + """ + return len(self.x509List) + + def getEndEntityPublicKey(self): + """Get the public key from the end-entity certificate. + + @rtype: L{tlslite.utils.RSAKey.RSAKey} + """ + if self.getNumCerts() == 0: + raise AssertionError() + return self.x509List[0].publicKey + + def getFingerprint(self): + """Get the hex-encoded fingerprint of the end-entity certificate. + + @rtype: str + @return: A hex-encoded fingerprint. + """ + if self.getNumCerts() == 0: + raise AssertionError() + return self.x509List[0].getFingerprint() + + def getCommonName(self): + """Get the Subject's Common Name from the end-entity certificate. + + The cryptlib_py module must be installed in order to use this + function. + + @rtype: str or None + @return: The CN component of the certificate's subject DN, if + present. + """ + if self.getNumCerts() == 0: + raise AssertionError() + return self.x509List[0].getCommonName() + + def validate(self, x509TrustList): + """Check the validity of the certificate chain. + + This checks that every certificate in the chain validates with + the subsequent one, until some certificate validates with (or + is identical to) one of the passed-in root certificates. + + The cryptlib_py module must be installed in order to use this + function. + + @type x509TrustList: list of L{tlslite.X509.X509} + @param x509TrustList: A list of trusted root certificates. The + certificate chain must extend to one of these certificates to + be considered valid. + """ + + import cryptlib_py + c1 = None + c2 = None + lastC = None + rootC = None + + try: + rootFingerprints = [c.getFingerprint() for c in x509TrustList] + + #Check that every certificate in the chain validates with the + #next one + for cert1, cert2 in zip(self.x509List, self.x509List[1:]): + + #If we come upon a root certificate, we're done. + if cert1.getFingerprint() in rootFingerprints: + return True + + c1 = cryptlib_py.cryptImportCert(cert1.writeBytes(), + cryptlib_py.CRYPT_UNUSED) + c2 = cryptlib_py.cryptImportCert(cert2.writeBytes(), + cryptlib_py.CRYPT_UNUSED) + try: + cryptlib_py.cryptCheckCert(c1, c2) + except: + return False + cryptlib_py.cryptDestroyCert(c1) + c1 = None + cryptlib_py.cryptDestroyCert(c2) + c2 = None + + #If the last certificate is one of the root certificates, we're + #done. + if self.x509List[-1].getFingerprint() in rootFingerprints: + return True + + #Otherwise, find a root certificate that the last certificate + #chains to, and validate them. 
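+            #(_checkChaining() below does a cheap comparison of the last
+            # certificate's issuer DN against each candidate root's subject
+            # DN, so that cryptCheckCert() is only invoked against a root
+            # whose name actually matches.)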
+ lastC = cryptlib_py.cryptImportCert(self.x509List[-1].writeBytes(), + cryptlib_py.CRYPT_UNUSED) + for rootCert in x509TrustList: + rootC = cryptlib_py.cryptImportCert(rootCert.writeBytes(), + cryptlib_py.CRYPT_UNUSED) + if self._checkChaining(lastC, rootC): + try: + cryptlib_py.cryptCheckCert(lastC, rootC) + return True + except: + return False + return False + finally: + if not (c1 is None): + cryptlib_py.cryptDestroyCert(c1) + if not (c2 is None): + cryptlib_py.cryptDestroyCert(c2) + if not (lastC is None): + cryptlib_py.cryptDestroyCert(lastC) + if not (rootC is None): + cryptlib_py.cryptDestroyCert(rootC) + + + + def _checkChaining(self, lastC, rootC): + import cryptlib_py + import array + def compareNames(name): + try: + length = cryptlib_py.cryptGetAttributeString(lastC, name, None) + lastName = array.array('B', [0] * length) + cryptlib_py.cryptGetAttributeString(lastC, name, lastName) + lastName = lastName.tostring() + except cryptlib_py.CryptException, e: + if e[0] == cryptlib_py.CRYPT_ERROR_NOTFOUND: + lastName = None + try: + length = cryptlib_py.cryptGetAttributeString(rootC, name, None) + rootName = array.array('B', [0] * length) + cryptlib_py.cryptGetAttributeString(rootC, name, rootName) + rootName = rootName.tostring() + except cryptlib_py.CryptException, e: + if e[0] == cryptlib_py.CRYPT_ERROR_NOTFOUND: + rootName = None + + return lastName == rootName + + cryptlib_py.cryptSetAttribute(lastC, + cryptlib_py.CRYPT_CERTINFO_ISSUERNAME, + cryptlib_py.CRYPT_UNUSED) + + if not compareNames(cryptlib_py.CRYPT_CERTINFO_COUNTRYNAME): + return False + if not compareNames(cryptlib_py.CRYPT_CERTINFO_LOCALITYNAME): + return False + if not compareNames(cryptlib_py.CRYPT_CERTINFO_ORGANIZATIONNAME): + return False + if not compareNames(cryptlib_py.CRYPT_CERTINFO_ORGANIZATIONALUNITNAME): + return False + if not compareNames(cryptlib_py.CRYPT_CERTINFO_COMMONNAME): + return False + return True \ No newline at end of file diff --git a/gam/gdata/tlslite/__init__.py b/gam/gdata/tlslite/__init__.py new file mode 100755 index 00000000000..47cfd1c6f18 --- /dev/null +++ b/gam/gdata/tlslite/__init__.py @@ -0,0 +1,39 @@ +""" +TLS Lite is a free python library that implements SSL v3, TLS v1, and +TLS v1.1. TLS Lite supports non-traditional authentication methods +such as SRP, shared keys, and cryptoIDs, in addition to X.509 +certificates. TLS Lite is pure python, however it can access OpenSSL, +cryptlib, pycrypto, and GMPY for faster crypto operations. TLS Lite +integrates with httplib, xmlrpclib, poplib, imaplib, smtplib, +SocketServer, asyncore, and Twisted. + +To use, do:: + + from tlslite.api import * + +Then use the L{tlslite.TLSConnection.TLSConnection} class with a socket, +or use one of the integration classes in L{tlslite.integration}. 
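+
+For example, a minimal blocking client might look like the sketch below.
+This is illustrative only: the host and port are made up, and the
+handshakeClientCert() call (a certificate-based handshake with no client
+authentication) comes from TLSConnection, which is not shown here::
+
+    import socket
+    from tlslite.api import *
+
+    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+    sock.connect(("example.com", 443))
+    connection = TLSConnection(sock)
+    connection.handshakeClientCert()
+    connection.write("GET / HTTP/1.0\r\n\r\n")
+    print connection.read(max=4096)
+    connection.close()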
+ +@version: 0.3.8 +""" +__version__ = "0.3.8" + +__all__ = ["api", + "BaseDB", + "Checker", + "constants", + "errors", + "FileObject", + "HandshakeSettings", + "mathtls", + "messages", + "Session", + "SessionCache", + "SharedKeyDB", + "TLSConnection", + "TLSRecordLayer", + "VerifierDB", + "X509", + "X509CertChain", + "integration", + "utils"] diff --git a/gam/gdata/tlslite/__init__.pyc b/gam/gdata/tlslite/__init__.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cbdf0a89aa3232b3ed48b40091c69ef7fc29d765 GIT binary patch literal 1238 zcmah|QEwbY44x!y+HRYcHo&tV4|LgUs3MgJp{f@_5eJES0hPS0COf{{X?AB;Groiq z>I1)uAI2Zxo7qH7pV-sR+8+C}{n@wm?@lpV{c}>Q>t`3=S9s$w4x`i=&JA_8q0FW- zTgu!~=B_f^%G^`tzA_J#DU^Ar%p+xXlzFVo6J6J7KN%vEaIweNCW@v$(UlJ0~@;F zb<7#44(ne0Pe)L#BW(sW5>M~SgXiBCH3zYagvs%C6=JozL;4?oIr)PYKF~r(LHKY-zUPp;xSg}GPIw7BN4m1oP$SZ3 z%s;y}^z{Jm2b1meAjEEPD(1evV3~73nMfYjuJ(b4GET27(wnjDZR}s#K^t^mgzAWNhtYQ{r3li#i(GASB2G$hCJ-G`281aR@#{DRA+FZTa~8Jz9~cP~TuFu|gL-XA$d`F(Y+a8uTvr z;n892ddb7Z913cZN4U6_r@C%f9wtLx#h-bNdUl|H!jn86EN>Y@GBdqKp{btVIba(( zAm$F#cUaLJoP;TSri}(M3H}y8lDJrjZR4+_L`E*Us*JCzD*IN|HG9w#HC3hCwyH)D z)yE^gfjapug6PSG4d?60k-u`S*CtMuy4(AvJUG}J{^WZx)HK@vYO*w1^yGRmCRJsf fm8vS?mB{+8^JdlZ_s<~5lxTZndvj~!|9}4h=iF#1 literal 0 HcmV?d00001 diff --git a/gam/gdata/tlslite/api.py b/gam/gdata/tlslite/api.py new file mode 100755 index 00000000000..eebfbc6091e --- /dev/null +++ b/gam/gdata/tlslite/api.py @@ -0,0 +1,75 @@ +"""Import this module for easy access to TLS Lite objects. + +The TLS Lite API consists of classes, functions, and variables spread +throughout this package. Instead of importing them individually with:: + + from tlslite.TLSConnection import TLSConnection + from tlslite.HandshakeSettings import HandshakeSettings + from tlslite.errors import * + . + . + +It's easier to do:: + + from tlslite.api import * + +This imports all the important objects (TLSConnection, Checker, +HandshakeSettings, etc.) into the global namespace. 
In particular, it +imports:: + + from constants import AlertLevel, AlertDescription, Fault + from errors import * + from Checker import Checker + from HandshakeSettings import HandshakeSettings + from Session import Session + from SessionCache import SessionCache + from SharedKeyDB import SharedKeyDB + from TLSConnection import TLSConnection + from VerifierDB import VerifierDB + from X509 import X509 + from X509CertChain import X509CertChain + + from integration.HTTPTLSConnection import HTTPTLSConnection + from integration.POP3_TLS import POP3_TLS + from integration.IMAP4_TLS import IMAP4_TLS + from integration.SMTP_TLS import SMTP_TLS + from integration.XMLRPCTransport import XMLRPCTransport + from integration.TLSSocketServerMixIn import TLSSocketServerMixIn + from integration.TLSAsyncDispatcherMixIn import TLSAsyncDispatcherMixIn + from integration.TLSTwistedProtocolWrapper import TLSTwistedProtocolWrapper + from utils.cryptomath import cryptlibpyLoaded, m2cryptoLoaded, + gmpyLoaded, pycryptoLoaded, prngName + from utils.keyfactory import generateRSAKey, parsePEMKey, parseXMLKey, + parseAsPublicKey, parsePrivateKey +""" + +from constants import AlertLevel, AlertDescription, Fault +from errors import * +from Checker import Checker +from HandshakeSettings import HandshakeSettings +from Session import Session +from SessionCache import SessionCache +from SharedKeyDB import SharedKeyDB +from TLSConnection import TLSConnection +from VerifierDB import VerifierDB +from X509 import X509 +from X509CertChain import X509CertChain + +from integration.HTTPTLSConnection import HTTPTLSConnection +from integration.TLSSocketServerMixIn import TLSSocketServerMixIn +from integration.TLSAsyncDispatcherMixIn import TLSAsyncDispatcherMixIn +from integration.POP3_TLS import POP3_TLS +from integration.IMAP4_TLS import IMAP4_TLS +from integration.SMTP_TLS import SMTP_TLS +from integration.XMLRPCTransport import XMLRPCTransport +try: + import twisted + del(twisted) + from integration.TLSTwistedProtocolWrapper import TLSTwistedProtocolWrapper +except ImportError: + pass + +from utils.cryptomath import cryptlibpyLoaded, m2cryptoLoaded, gmpyLoaded, \ + pycryptoLoaded, prngName +from utils.keyfactory import generateRSAKey, parsePEMKey, parseXMLKey, \ + parseAsPublicKey, parsePrivateKey diff --git a/gam/gdata/tlslite/constants.py b/gam/gdata/tlslite/constants.py new file mode 100755 index 00000000000..8f2d5590e94 --- /dev/null +++ b/gam/gdata/tlslite/constants.py @@ -0,0 +1,225 @@ +"""Constants used in various places.""" + +class CertificateType: + x509 = 0 + openpgp = 1 + cryptoID = 2 + +class HandshakeType: + hello_request = 0 + client_hello = 1 + server_hello = 2 + certificate = 11 + server_key_exchange = 12 + certificate_request = 13 + server_hello_done = 14 + certificate_verify = 15 + client_key_exchange = 16 + finished = 20 + +class ContentType: + change_cipher_spec = 20 + alert = 21 + handshake = 22 + application_data = 23 + all = (20,21,22,23) + +class AlertLevel: + warning = 1 + fatal = 2 + +class AlertDescription: + """ + @cvar bad_record_mac: A TLS record failed to decrypt properly. + + If this occurs during a shared-key or SRP handshake it most likely + indicates a bad password. It may also indicate an implementation + error, or some tampering with the data in transit. + + This alert will be signalled by the server if the SRP password is bad. It + may also be signalled by the server if the SRP username is unknown to the + server, but it doesn't wish to reveal that fact. 
+ + This alert will be signalled by the client if the shared-key username is + bad. + + @cvar handshake_failure: A problem occurred while handshaking. + + This typically indicates a lack of common ciphersuites between client and + server, or some other disagreement (about SRP parameters or key sizes, + for example). + + @cvar protocol_version: The other party's SSL/TLS version was unacceptable. + + This indicates that the client and server couldn't agree on which version + of SSL or TLS to use. + + @cvar user_canceled: The handshake is being cancelled for some reason. + + """ + + close_notify = 0 + unexpected_message = 10 + bad_record_mac = 20 + decryption_failed = 21 + record_overflow = 22 + decompression_failure = 30 + handshake_failure = 40 + no_certificate = 41 #SSLv3 + bad_certificate = 42 + unsupported_certificate = 43 + certificate_revoked = 44 + certificate_expired = 45 + certificate_unknown = 46 + illegal_parameter = 47 + unknown_ca = 48 + access_denied = 49 + decode_error = 50 + decrypt_error = 51 + export_restriction = 60 + protocol_version = 70 + insufficient_security = 71 + internal_error = 80 + user_canceled = 90 + no_renegotiation = 100 + unknown_srp_username = 120 + missing_srp_username = 121 + untrusted_srp_parameters = 122 + +class CipherSuite: + TLS_SRP_SHA_WITH_3DES_EDE_CBC_SHA = 0x0050 + TLS_SRP_SHA_WITH_AES_128_CBC_SHA = 0x0053 + TLS_SRP_SHA_WITH_AES_256_CBC_SHA = 0x0056 + + TLS_SRP_SHA_RSA_WITH_3DES_EDE_CBC_SHA = 0x0051 + TLS_SRP_SHA_RSA_WITH_AES_128_CBC_SHA = 0x0054 + TLS_SRP_SHA_RSA_WITH_AES_256_CBC_SHA = 0x0057 + + TLS_RSA_WITH_3DES_EDE_CBC_SHA = 0x000A + TLS_RSA_WITH_AES_128_CBC_SHA = 0x002F + TLS_RSA_WITH_AES_256_CBC_SHA = 0x0035 + TLS_RSA_WITH_RC4_128_SHA = 0x0005 + + srpSuites = [] + srpSuites.append(TLS_SRP_SHA_WITH_3DES_EDE_CBC_SHA) + srpSuites.append(TLS_SRP_SHA_WITH_AES_128_CBC_SHA) + srpSuites.append(TLS_SRP_SHA_WITH_AES_256_CBC_SHA) + def getSrpSuites(ciphers): + suites = [] + for cipher in ciphers: + if cipher == "aes128": + suites.append(CipherSuite.TLS_SRP_SHA_WITH_AES_128_CBC_SHA) + elif cipher == "aes256": + suites.append(CipherSuite.TLS_SRP_SHA_WITH_AES_256_CBC_SHA) + elif cipher == "3des": + suites.append(CipherSuite.TLS_SRP_SHA_WITH_3DES_EDE_CBC_SHA) + return suites + getSrpSuites = staticmethod(getSrpSuites) + + srpRsaSuites = [] + srpRsaSuites.append(TLS_SRP_SHA_RSA_WITH_3DES_EDE_CBC_SHA) + srpRsaSuites.append(TLS_SRP_SHA_RSA_WITH_AES_128_CBC_SHA) + srpRsaSuites.append(TLS_SRP_SHA_RSA_WITH_AES_256_CBC_SHA) + def getSrpRsaSuites(ciphers): + suites = [] + for cipher in ciphers: + if cipher == "aes128": + suites.append(CipherSuite.TLS_SRP_SHA_RSA_WITH_AES_128_CBC_SHA) + elif cipher == "aes256": + suites.append(CipherSuite.TLS_SRP_SHA_RSA_WITH_AES_256_CBC_SHA) + elif cipher == "3des": + suites.append(CipherSuite.TLS_SRP_SHA_RSA_WITH_3DES_EDE_CBC_SHA) + return suites + getSrpRsaSuites = staticmethod(getSrpRsaSuites) + + rsaSuites = [] + rsaSuites.append(TLS_RSA_WITH_3DES_EDE_CBC_SHA) + rsaSuites.append(TLS_RSA_WITH_AES_128_CBC_SHA) + rsaSuites.append(TLS_RSA_WITH_AES_256_CBC_SHA) + rsaSuites.append(TLS_RSA_WITH_RC4_128_SHA) + def getRsaSuites(ciphers): + suites = [] + for cipher in ciphers: + if cipher == "aes128": + suites.append(CipherSuite.TLS_RSA_WITH_AES_128_CBC_SHA) + elif cipher == "aes256": + suites.append(CipherSuite.TLS_RSA_WITH_AES_256_CBC_SHA) + elif cipher == "rc4": + suites.append(CipherSuite.TLS_RSA_WITH_RC4_128_SHA) + elif cipher == "3des": + suites.append(CipherSuite.TLS_RSA_WITH_3DES_EDE_CBC_SHA) + return suites + getRsaSuites = 
staticmethod(getRsaSuites) + + tripleDESSuites = [] + tripleDESSuites.append(TLS_SRP_SHA_WITH_3DES_EDE_CBC_SHA) + tripleDESSuites.append(TLS_SRP_SHA_RSA_WITH_3DES_EDE_CBC_SHA) + tripleDESSuites.append(TLS_RSA_WITH_3DES_EDE_CBC_SHA) + + aes128Suites = [] + aes128Suites.append(TLS_SRP_SHA_WITH_AES_128_CBC_SHA) + aes128Suites.append(TLS_SRP_SHA_RSA_WITH_AES_128_CBC_SHA) + aes128Suites.append(TLS_RSA_WITH_AES_128_CBC_SHA) + + aes256Suites = [] + aes256Suites.append(TLS_SRP_SHA_WITH_AES_256_CBC_SHA) + aes256Suites.append(TLS_SRP_SHA_RSA_WITH_AES_256_CBC_SHA) + aes256Suites.append(TLS_RSA_WITH_AES_256_CBC_SHA) + + rc4Suites = [] + rc4Suites.append(TLS_RSA_WITH_RC4_128_SHA) + + +class Fault: + badUsername = 101 + badPassword = 102 + badA = 103 + clientSrpFaults = range(101,104) + + badVerifyMessage = 601 + clientCertFaults = range(601,602) + + badPremasterPadding = 501 + shortPremasterSecret = 502 + clientNoAuthFaults = range(501,503) + + badIdentifier = 401 + badSharedKey = 402 + clientSharedKeyFaults = range(401,403) + + badB = 201 + serverFaults = range(201,202) + + badFinished = 300 + badMAC = 301 + badPadding = 302 + genericFaults = range(300,303) + + faultAlerts = {\ + badUsername: (AlertDescription.unknown_srp_username, \ + AlertDescription.bad_record_mac),\ + badPassword: (AlertDescription.bad_record_mac,),\ + badA: (AlertDescription.illegal_parameter,),\ + badIdentifier: (AlertDescription.handshake_failure,),\ + badSharedKey: (AlertDescription.bad_record_mac,),\ + badPremasterPadding: (AlertDescription.bad_record_mac,),\ + shortPremasterSecret: (AlertDescription.bad_record_mac,),\ + badVerifyMessage: (AlertDescription.decrypt_error,),\ + badFinished: (AlertDescription.decrypt_error,),\ + badMAC: (AlertDescription.bad_record_mac,),\ + badPadding: (AlertDescription.bad_record_mac,) + } + + faultNames = {\ + badUsername: "bad username",\ + badPassword: "bad password",\ + badA: "bad A",\ + badIdentifier: "bad identifier",\ + badSharedKey: "bad sharedkey",\ + badPremasterPadding: "bad premaster padding",\ + shortPremasterSecret: "short premaster secret",\ + badVerifyMessage: "bad verify message",\ + badFinished: "bad finished message",\ + badMAC: "bad MAC",\ + badPadding: "bad padding" + } diff --git a/gam/gdata/tlslite/errors.py b/gam/gdata/tlslite/errors.py new file mode 100755 index 00000000000..c7f7ba81d4d --- /dev/null +++ b/gam/gdata/tlslite/errors.py @@ -0,0 +1,149 @@ +"""Exception classes. +@sort: TLSError, TLSAbruptCloseError, TLSAlert, TLSLocalAlert, TLSRemoteAlert, +TLSAuthenticationError, TLSNoAuthenticationError, TLSAuthenticationTypeError, +TLSFingerprintError, TLSAuthorizationError, TLSValidationError, TLSFaultError +""" + +from constants import AlertDescription, AlertLevel + +class TLSError(Exception): + """Base class for all TLS Lite exceptions.""" + pass + +class TLSAbruptCloseError(TLSError): + """The socket was closed without a proper TLS shutdown. + + The TLS specification mandates that an alert of some sort + must be sent before the underlying socket is closed. If the socket + is closed without this, it could signify that an attacker is trying + to truncate the connection. It could also signify a misbehaving + TLS implementation, or a random network failure. 
+ """ + pass + +class TLSAlert(TLSError): + """A TLS alert has been signalled.""" + pass + + _descriptionStr = {\ + AlertDescription.close_notify: "close_notify",\ + AlertDescription.unexpected_message: "unexpected_message",\ + AlertDescription.bad_record_mac: "bad_record_mac",\ + AlertDescription.decryption_failed: "decryption_failed",\ + AlertDescription.record_overflow: "record_overflow",\ + AlertDescription.decompression_failure: "decompression_failure",\ + AlertDescription.handshake_failure: "handshake_failure",\ + AlertDescription.no_certificate: "no certificate",\ + AlertDescription.bad_certificate: "bad_certificate",\ + AlertDescription.unsupported_certificate: "unsupported_certificate",\ + AlertDescription.certificate_revoked: "certificate_revoked",\ + AlertDescription.certificate_expired: "certificate_expired",\ + AlertDescription.certificate_unknown: "certificate_unknown",\ + AlertDescription.illegal_parameter: "illegal_parameter",\ + AlertDescription.unknown_ca: "unknown_ca",\ + AlertDescription.access_denied: "access_denied",\ + AlertDescription.decode_error: "decode_error",\ + AlertDescription.decrypt_error: "decrypt_error",\ + AlertDescription.export_restriction: "export_restriction",\ + AlertDescription.protocol_version: "protocol_version",\ + AlertDescription.insufficient_security: "insufficient_security",\ + AlertDescription.internal_error: "internal_error",\ + AlertDescription.user_canceled: "user_canceled",\ + AlertDescription.no_renegotiation: "no_renegotiation",\ + AlertDescription.unknown_srp_username: "unknown_srp_username",\ + AlertDescription.missing_srp_username: "missing_srp_username"} + +class TLSLocalAlert(TLSAlert): + """A TLS alert has been signalled by the local implementation. + + @type description: int + @ivar description: Set to one of the constants in + L{tlslite.constants.AlertDescription} + + @type level: int + @ivar level: Set to one of the constants in + L{tlslite.constants.AlertLevel} + + @type message: str + @ivar message: Description of what went wrong. + """ + def __init__(self, alert, message=None): + self.description = alert.description + self.level = alert.level + self.message = message + + def __str__(self): + alertStr = TLSAlert._descriptionStr.get(self.description) + if alertStr == None: + alertStr = str(self.description) + if self.message: + return alertStr + ": " + self.message + else: + return alertStr + +class TLSRemoteAlert(TLSAlert): + """A TLS alert has been signalled by the remote implementation. + + @type description: int + @ivar description: Set to one of the constants in + L{tlslite.constants.AlertDescription} + + @type level: int + @ivar level: Set to one of the constants in + L{tlslite.constants.AlertLevel} + """ + def __init__(self, alert): + self.description = alert.description + self.level = alert.level + + def __str__(self): + alertStr = TLSAlert._descriptionStr.get(self.description) + if alertStr == None: + alertStr = str(self.description) + return alertStr + +class TLSAuthenticationError(TLSError): + """The handshake succeeded, but the other party's authentication + was inadequate. + + This exception will only be raised when a + L{tlslite.Checker.Checker} has been passed to a handshake function. + The Checker will be invoked once the handshake completes, and if + the Checker objects to how the other party authenticated, a + subclass of this exception will be raised. 
+ """ + pass + +class TLSNoAuthenticationError(TLSAuthenticationError): + """The Checker was expecting the other party to authenticate with a + certificate chain, but this did not occur.""" + pass + +class TLSAuthenticationTypeError(TLSAuthenticationError): + """The Checker was expecting the other party to authenticate with a + different type of certificate chain.""" + pass + +class TLSFingerprintError(TLSAuthenticationError): + """The Checker was expecting the other party to authenticate with a + certificate chain that matches a different fingerprint.""" + pass + +class TLSAuthorizationError(TLSAuthenticationError): + """The Checker was expecting the other party to authenticate with a + certificate chain that has a different authorization.""" + pass + +class TLSValidationError(TLSAuthenticationError): + """The Checker has determined that the other party's certificate + chain is invalid.""" + pass + +class TLSFaultError(TLSError): + """The other party responded incorrectly to an induced fault. + + This exception will only occur during fault testing, when a + TLSConnection's fault variable is set to induce some sort of + faulty behavior, and the other party doesn't respond appropriately. + """ + pass diff --git a/gam/gdata/tlslite/integration/AsyncStateMachine.py b/gam/gdata/tlslite/integration/AsyncStateMachine.py new file mode 100755 index 00000000000..abed604321a --- /dev/null +++ b/gam/gdata/tlslite/integration/AsyncStateMachine.py @@ -0,0 +1,235 @@ +""" +A state machine for using TLS Lite with asynchronous I/O. +""" + +class AsyncStateMachine: + """ + This is an abstract class that's used to integrate TLS Lite with + asyncore and Twisted. + + This class signals wantsReadsEvent() and wantsWriteEvent(). When + the underlying socket has become readable or writeable, the event + should be passed to this class by calling inReadEvent() or + inWriteEvent(). This class will then try to read or write through + the socket, and will update its state appropriately. + + This class will forward higher-level events to its subclass. For + example, when a complete TLS record has been received, + outReadEvent() will be called with the decrypted data. + """ + + def __init__(self): + self._clear() + + def _clear(self): + #These store the various asynchronous operations (i.e. + #generators). Only one of them, at most, is ever active at a + #time. + self.handshaker = None + self.closer = None + self.reader = None + self.writer = None + + #This stores the result from the last call to the + #currently active operation. If 0 it indicates that the + #operation wants to read, if 1 it indicates that the + #operation wants to write. If None, there is no active + #operation. + self.result = None + + def _checkAssert(self, maxActive=1): + #This checks that only one operation, at most, is + #active, and that self.result is set appropriately. + activeOps = 0 + if self.handshaker: + activeOps += 1 + if self.closer: + activeOps += 1 + if self.reader: + activeOps += 1 + if self.writer: + activeOps += 1 + + if self.result == None: + if activeOps != 0: + raise AssertionError() + elif self.result in (0,1): + if activeOps != 1: + raise AssertionError() + else: + raise AssertionError() + if activeOps > maxActive: + raise AssertionError() + + def wantsReadEvent(self): + """If the state machine wants to read. + + If an operation is active, this returns whether or not the + operation wants to read from the socket. If an operation is + not active, this returns None. 
+ + @rtype: bool or None + @return: If the state machine wants to read. + """ + if self.result != None: + return self.result == 0 + return None + + def wantsWriteEvent(self): + """If the state machine wants to write. + + If an operation is active, this returns whether or not the + operation wants to write to the socket. If an operation is + not active, this returns None. + + @rtype: bool or None + @return: If the state machine wants to write. + """ + if self.result != None: + return self.result == 1 + return None + + def outConnectEvent(self): + """Called when a handshake operation completes. + + May be overridden in subclass. + """ + pass + + def outCloseEvent(self): + """Called when a close operation completes. + + May be overridden in subclass. + """ + pass + + def outReadEvent(self, readBuffer): + """Called when a read operation completes. + + May be overridden in subclass.""" + pass + + def outWriteEvent(self): + """Called when a write operation completes. + + May be overridden in subclass.""" + pass + + def inReadEvent(self): + """Tell the state machine it can read from the socket.""" + try: + self._checkAssert() + if self.handshaker: + self._doHandshakeOp() + elif self.closer: + self._doCloseOp() + elif self.reader: + self._doReadOp() + elif self.writer: + self._doWriteOp() + else: + self.reader = self.tlsConnection.readAsync(16384) + self._doReadOp() + except: + self._clear() + raise + + def inWriteEvent(self): + """Tell the state machine it can write to the socket.""" + try: + self._checkAssert() + if self.handshaker: + self._doHandshakeOp() + elif self.closer: + self._doCloseOp() + elif self.reader: + self._doReadOp() + elif self.writer: + self._doWriteOp() + else: + self.outWriteEvent() + except: + self._clear() + raise + + def _doHandshakeOp(self): + try: + self.result = self.handshaker.next() + except StopIteration: + self.handshaker = None + self.result = None + self.outConnectEvent() + + def _doCloseOp(self): + try: + self.result = self.closer.next() + except StopIteration: + self.closer = None + self.result = None + self.outCloseEvent() + + def _doReadOp(self): + self.result = self.reader.next() + if not self.result in (0,1): + readBuffer = self.result + self.reader = None + self.result = None + self.outReadEvent(readBuffer) + + def _doWriteOp(self): + try: + self.result = self.writer.next() + except StopIteration: + self.writer = None + self.result = None + + def setHandshakeOp(self, handshaker): + """Start a handshake operation. + + @type handshaker: generator + @param handshaker: A generator created by using one of the + asynchronous handshake functions (i.e. handshakeServerAsync, or + handshakeClientxxx(..., async=True). + """ + try: + self._checkAssert(0) + self.handshaker = handshaker + self._doHandshakeOp() + except: + self._clear() + raise + + def setServerHandshakeOp(self, **args): + """Start a handshake operation. + + The arguments passed to this function will be forwarded to + L{tlslite.TLSConnection.TLSConnection.handshakeServerAsync}. + """ + handshaker = self.tlsConnection.handshakeServerAsync(**args) + self.setHandshakeOp(handshaker) + + def setCloseOp(self): + """Start a close operation. + """ + try: + self._checkAssert(0) + self.closer = self.tlsConnection.closeAsync() + self._doCloseOp() + except: + self._clear() + raise + + def setWriteOp(self, writeBuffer): + """Start a write operation. + + @type writeBuffer: str + @param writeBuffer: The string to transmit. 
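# Illustrative sketch (not part of the tlslite sources): one way to drive
# the state machine above from a plain select() loop, as described in the
# class docstring.  The EchoStateMachine subclass, the non-blocking
# server-side socket, and the certChain/privateKey objects are placeholders.
import select
from gdata.tlslite.TLSConnection import TLSConnection
from gdata.tlslite.integration.AsyncStateMachine import AsyncStateMachine

class EchoStateMachine(AsyncStateMachine):
    def __init__(self, sock, certChain, privateKey):
        AsyncStateMachine.__init__(self)
        self.sock = sock                           # assumed non-blocking
        self.tlsConnection = TLSConnection(sock)   # operations run over this
        self.setServerHandshakeOp(certChain=certChain,
                                  privateKey=privateKey)

    def outReadEvent(self, readBuffer):
        self.setWriteOp(readBuffer)                # echo decrypted data back

def runLoop(machine):
    while 1:
        # wantsReadEvent()/wantsWriteEvent() return True or False while an
        # operation is active and None when the machine is idle; an idle
        # machine can still accept inReadEvent() to start a new read.
        readers, writers = [], []
        if machine.wantsReadEvent() != False:
            readers.append(machine.sock)
        if machine.wantsWriteEvent() == True:
            writers.append(machine.sock)
        r, w, x = select.select(readers, writers, [], 1.0)
        if r:
            machine.inReadEvent()
        if w:
            machine.inWriteEvent()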
+ """ + try: + self._checkAssert(0) + self.writer = self.tlsConnection.writeAsync(writeBuffer) + self._doWriteOp() + except: + self._clear() + raise + diff --git a/gam/gdata/tlslite/integration/ClientHelper.py b/gam/gdata/tlslite/integration/ClientHelper.py new file mode 100755 index 00000000000..58e0152f9f5 --- /dev/null +++ b/gam/gdata/tlslite/integration/ClientHelper.py @@ -0,0 +1,163 @@ +""" +A helper class for using TLS Lite with stdlib clients +(httplib, xmlrpclib, imaplib, poplib). +""" + +from gdata.tlslite.Checker import Checker + +class ClientHelper: + """This is a helper class used to integrate TLS Lite with various + TLS clients (e.g. poplib, smtplib, httplib, etc.)""" + + def __init__(self, + username=None, password=None, sharedKey=None, + certChain=None, privateKey=None, + cryptoID=None, protocol=None, + x509Fingerprint=None, + x509TrustList=None, x509CommonName=None, + settings = None): + """ + For client authentication, use one of these argument + combinations: + - username, password (SRP) + - username, sharedKey (shared-key) + - certChain, privateKey (certificate) + + For server authentication, you can either rely on the + implicit mutual authentication performed by SRP or + shared-keys, or you can do certificate-based server + authentication with one of these argument combinations: + - cryptoID[, protocol] (requires cryptoIDlib) + - x509Fingerprint + - x509TrustList[, x509CommonName] (requires cryptlib_py) + + Certificate-based server authentication is compatible with + SRP or certificate-based client authentication. It is + not compatible with shared-keys. + + The constructor does not perform the TLS handshake itself, but + simply stores these arguments for later. The handshake is + performed only when this class needs to connect with the + server. Then you should be prepared to handle TLS-specific + exceptions. See the client handshake functions in + L{tlslite.TLSConnection.TLSConnection} for details on which + exceptions might be raised. + + @type username: str + @param username: SRP or shared-key username. Requires the + 'password' or 'sharedKey' argument. + + @type password: str + @param password: SRP password for mutual authentication. + Requires the 'username' argument. + + @type sharedKey: str + @param sharedKey: Shared key for mutual authentication. + Requires the 'username' argument. + + @type certChain: L{tlslite.X509CertChain.X509CertChain} or + L{cryptoIDlib.CertChain.CertChain} + @param certChain: Certificate chain for client authentication. + Requires the 'privateKey' argument. Excludes the SRP or + shared-key related arguments. + + @type privateKey: L{tlslite.utils.RSAKey.RSAKey} + @param privateKey: Private key for client authentication. + Requires the 'certChain' argument. Excludes the SRP or + shared-key related arguments. + + @type cryptoID: str + @param cryptoID: cryptoID for server authentication. Mutually + exclusive with the 'x509...' arguments. + + @type protocol: str + @param protocol: cryptoID protocol URI for server + authentication. Requires the 'cryptoID' argument. + + @type x509Fingerprint: str + @param x509Fingerprint: Hex-encoded X.509 fingerprint for + server authentication. Mutually exclusive with the 'cryptoID' + and 'x509TrustList' arguments. + + @type x509TrustList: list of L{tlslite.X509.X509} + @param x509TrustList: A list of trusted root certificates. The + other party must present a certificate chain which extends to + one of these root certificates. The cryptlib_py module must be + installed to use this parameter. 
Mutually exclusive with the + 'cryptoID' and 'x509Fingerprint' arguments. + + @type x509CommonName: str + @param x509CommonName: The end-entity certificate's 'CN' field + must match this value. For a web server, this is typically a + server name such as 'www.amazon.com'. Mutually exclusive with + the 'cryptoID' and 'x509Fingerprint' arguments. Requires the + 'x509TrustList' argument. + + @type settings: L{tlslite.HandshakeSettings.HandshakeSettings} + @param settings: Various settings which can be used to control + the ciphersuites, certificate types, and SSL/TLS versions + offered by the client. + """ + + self.username = None + self.password = None + self.sharedKey = None + self.certChain = None + self.privateKey = None + self.checker = None + + #SRP Authentication + if username and password and not \ + (sharedKey or certChain or privateKey): + self.username = username + self.password = password + + #Shared Key Authentication + elif username and sharedKey and not \ + (password or certChain or privateKey): + self.username = username + self.sharedKey = sharedKey + + #Certificate Chain Authentication + elif certChain and privateKey and not \ + (username or password or sharedKey): + self.certChain = certChain + self.privateKey = privateKey + + #No Authentication + elif not password and not username and not \ + sharedKey and not certChain and not privateKey: + pass + + else: + raise ValueError("Bad parameters") + + #Authenticate the server based on its cryptoID or fingerprint + if sharedKey and (cryptoID or protocol or x509Fingerprint): + raise ValueError("Can't use shared keys with other forms of"\ + "authentication") + + self.checker = Checker(cryptoID, protocol, x509Fingerprint, + x509TrustList, x509CommonName) + self.settings = settings + + self.tlsSession = None + + def _handshake(self, tlsConnection): + if self.username and self.password: + tlsConnection.handshakeClientSRP(username=self.username, + password=self.password, + checker=self.checker, + settings=self.settings, + session=self.tlsSession) + elif self.username and self.sharedKey: + tlsConnection.handshakeClientSharedKey(username=self.username, + sharedKey=self.sharedKey, + settings=self.settings) + else: + tlsConnection.handshakeClientCert(certChain=self.certChain, + privateKey=self.privateKey, + checker=self.checker, + settings=self.settings, + session=self.tlsSession) + self.tlsSession = tlsConnection.session diff --git a/gam/gdata/tlslite/integration/HTTPTLSConnection.py b/gam/gdata/tlslite/integration/HTTPTLSConnection.py new file mode 100755 index 00000000000..58e31a10805 --- /dev/null +++ b/gam/gdata/tlslite/integration/HTTPTLSConnection.py @@ -0,0 +1,169 @@ +"""TLS Lite + httplib.""" + +import socket +import httplib +from gdata.tlslite.TLSConnection import TLSConnection +from gdata.tlslite.integration.ClientHelper import ClientHelper + + +class HTTPBaseTLSConnection(httplib.HTTPConnection): + """This abstract class provides a framework for adding TLS support + to httplib.""" + + default_port = 443 + + def __init__(self, host, port=None, strict=None): + if strict == None: + #Python 2.2 doesn't support strict + httplib.HTTPConnection.__init__(self, host, port) + else: + httplib.HTTPConnection.__init__(self, host, port, strict) + + def connect(self): + sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) + if hasattr(sock, 'settimeout'): + sock.settimeout(10) + sock.connect((self.host, self.port)) + + #Use a TLSConnection to emulate a socket + self.sock = TLSConnection(sock) + + #When httplib closes this, close the socket 
+ self.sock.closeSocket = True + self._handshake(self.sock) + + def _handshake(self, tlsConnection): + """Called to perform some sort of handshake. + + This method must be overridden in a subclass to do some type of + handshake. This method will be called after the socket has + been connected but before any data has been sent. If this + method does not raise an exception, the TLS connection will be + considered valid. + + This method may (or may not) be called every time an HTTP + request is performed, depending on whether the underlying HTTP + connection is persistent. + + @type tlsConnection: L{tlslite.TLSConnection.TLSConnection} + @param tlsConnection: The connection to perform the handshake + on. + """ + raise NotImplementedError() + + +class HTTPTLSConnection(HTTPBaseTLSConnection, ClientHelper): + """This class extends L{HTTPBaseTLSConnection} to support the + common types of handshaking.""" + + def __init__(self, host, port=None, + username=None, password=None, sharedKey=None, + certChain=None, privateKey=None, + cryptoID=None, protocol=None, + x509Fingerprint=None, + x509TrustList=None, x509CommonName=None, + settings = None): + """Create a new HTTPTLSConnection. + + For client authentication, use one of these argument + combinations: + - username, password (SRP) + - username, sharedKey (shared-key) + - certChain, privateKey (certificate) + + For server authentication, you can either rely on the + implicit mutual authentication performed by SRP or + shared-keys, or you can do certificate-based server + authentication with one of these argument combinations: + - cryptoID[, protocol] (requires cryptoIDlib) + - x509Fingerprint + - x509TrustList[, x509CommonName] (requires cryptlib_py) + + Certificate-based server authentication is compatible with + SRP or certificate-based client authentication. It is + not compatible with shared-keys. + + The constructor does not perform the TLS handshake itself, but + simply stores these arguments for later. The handshake is + performed only when this class needs to connect with the + server. Thus you should be prepared to handle TLS-specific + exceptions when calling methods inherited from + L{httplib.HTTPConnection} such as request(), connect(), and + send(). See the client handshake functions in + L{tlslite.TLSConnection.TLSConnection} for details on which + exceptions might be raised. + + @type host: str + @param host: Server to connect to. + + @type port: int + @param port: Port to connect to. + + @type username: str + @param username: SRP or shared-key username. Requires the + 'password' or 'sharedKey' argument. + + @type password: str + @param password: SRP password for mutual authentication. + Requires the 'username' argument. + + @type sharedKey: str + @param sharedKey: Shared key for mutual authentication. + Requires the 'username' argument. + + @type certChain: L{tlslite.X509CertChain.X509CertChain} or + L{cryptoIDlib.CertChain.CertChain} + @param certChain: Certificate chain for client authentication. + Requires the 'privateKey' argument. Excludes the SRP or + shared-key related arguments. + + @type privateKey: L{tlslite.utils.RSAKey.RSAKey} + @param privateKey: Private key for client authentication. + Requires the 'certChain' argument. Excludes the SRP or + shared-key related arguments. + + @type cryptoID: str + @param cryptoID: cryptoID for server authentication. Mutually + exclusive with the 'x509...' arguments. + + @type protocol: str + @param protocol: cryptoID protocol URI for server + authentication. 
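# Illustrative sketch (not part of the tlslite sources): the class above is
# used like an ordinary httplib.HTTPConnection; the host and SRP
# credentials are placeholders.
from gdata.tlslite.integration.HTTPTLSConnection import HTTPTLSConnection
from gdata.tlslite.errors import TLSError

conn = HTTPTLSConnection("www.example.com", 443,
                         username="alice", password="abra123")
try:
    conn.request("GET", "/index.html")    # the TLS handshake runs on connect
    response = conn.getresponse()
    print response.status, response.reason
    body = response.read()
except TLSError, error:
    print "TLS failure:", str(error)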
Requires the 'cryptoID' argument. + + @type x509Fingerprint: str + @param x509Fingerprint: Hex-encoded X.509 fingerprint for + server authentication. Mutually exclusive with the 'cryptoID' + and 'x509TrustList' arguments. + + @type x509TrustList: list of L{tlslite.X509.X509} + @param x509TrustList: A list of trusted root certificates. The + other party must present a certificate chain which extends to + one of these root certificates. The cryptlib_py module must be + installed to use this parameter. Mutually exclusive with the + 'cryptoID' and 'x509Fingerprint' arguments. + + @type x509CommonName: str + @param x509CommonName: The end-entity certificate's 'CN' field + must match this value. For a web server, this is typically a + server name such as 'www.amazon.com'. Mutually exclusive with + the 'cryptoID' and 'x509Fingerprint' arguments. Requires the + 'x509TrustList' argument. + + @type settings: L{tlslite.HandshakeSettings.HandshakeSettings} + @param settings: Various settings which can be used to control + the ciphersuites, certificate types, and SSL/TLS versions + offered by the client. + """ + + HTTPBaseTLSConnection.__init__(self, host, port) + + ClientHelper.__init__(self, + username, password, sharedKey, + certChain, privateKey, + cryptoID, protocol, + x509Fingerprint, + x509TrustList, x509CommonName, + settings) + + def _handshake(self, tlsConnection): + ClientHelper._handshake(self, tlsConnection) diff --git a/gam/gdata/tlslite/integration/IMAP4_TLS.py b/gam/gdata/tlslite/integration/IMAP4_TLS.py new file mode 100755 index 00000000000..e47076ccc8a --- /dev/null +++ b/gam/gdata/tlslite/integration/IMAP4_TLS.py @@ -0,0 +1,132 @@ +"""TLS Lite + imaplib.""" + +import socket +from imaplib import IMAP4 +from gdata.tlslite.TLSConnection import TLSConnection +from gdata.tlslite.integration.ClientHelper import ClientHelper + +# IMAP TLS PORT +IMAP4_TLS_PORT = 993 + +class IMAP4_TLS(IMAP4, ClientHelper): + """This class extends L{imaplib.IMAP4} with TLS support.""" + + def __init__(self, host = '', port = IMAP4_TLS_PORT, + username=None, password=None, sharedKey=None, + certChain=None, privateKey=None, + cryptoID=None, protocol=None, + x509Fingerprint=None, + x509TrustList=None, x509CommonName=None, + settings=None): + """Create a new IMAP4_TLS. + + For client authentication, use one of these argument + combinations: + - username, password (SRP) + - username, sharedKey (shared-key) + - certChain, privateKey (certificate) + + For server authentication, you can either rely on the + implicit mutual authentication performed by SRP or + shared-keys, or you can do certificate-based server + authentication with one of these argument combinations: + - cryptoID[, protocol] (requires cryptoIDlib) + - x509Fingerprint + - x509TrustList[, x509CommonName] (requires cryptlib_py) + + Certificate-based server authentication is compatible with + SRP or certificate-based client authentication. It is + not compatible with shared-keys. + + The caller should be prepared to handle TLS-specific + exceptions. See the client handshake functions in + L{tlslite.TLSConnection.TLSConnection} for details on which + exceptions might be raised. + + @type host: str + @param host: Server to connect to. + + @type port: int + @param port: Port to connect to. + + @type username: str + @param username: SRP or shared-key username. Requires the + 'password' or 'sharedKey' argument. + + @type password: str + @param password: SRP password for mutual authentication. + Requires the 'username' argument. 
+ + @type sharedKey: str + @param sharedKey: Shared key for mutual authentication. + Requires the 'username' argument. + + @type certChain: L{tlslite.X509CertChain.X509CertChain} or + L{cryptoIDlib.CertChain.CertChain} + @param certChain: Certificate chain for client authentication. + Requires the 'privateKey' argument. Excludes the SRP or + shared-key related arguments. + + @type privateKey: L{tlslite.utils.RSAKey.RSAKey} + @param privateKey: Private key for client authentication. + Requires the 'certChain' argument. Excludes the SRP or + shared-key related arguments. + + @type cryptoID: str + @param cryptoID: cryptoID for server authentication. Mutually + exclusive with the 'x509...' arguments. + + @type protocol: str + @param protocol: cryptoID protocol URI for server + authentication. Requires the 'cryptoID' argument. + + @type x509Fingerprint: str + @param x509Fingerprint: Hex-encoded X.509 fingerprint for + server authentication. Mutually exclusive with the 'cryptoID' + and 'x509TrustList' arguments. + + @type x509TrustList: list of L{tlslite.X509.X509} + @param x509TrustList: A list of trusted root certificates. The + other party must present a certificate chain which extends to + one of these root certificates. The cryptlib_py module must be + installed to use this parameter. Mutually exclusive with the + 'cryptoID' and 'x509Fingerprint' arguments. + + @type x509CommonName: str + @param x509CommonName: The end-entity certificate's 'CN' field + must match this value. For a web server, this is typically a + server name such as 'www.amazon.com'. Mutually exclusive with + the 'cryptoID' and 'x509Fingerprint' arguments. Requires the + 'x509TrustList' argument. + + @type settings: L{tlslite.HandshakeSettings.HandshakeSettings} + @param settings: Various settings which can be used to control + the ciphersuites, certificate types, and SSL/TLS versions + offered by the client. + """ + + ClientHelper.__init__(self, + username, password, sharedKey, + certChain, privateKey, + cryptoID, protocol, + x509Fingerprint, + x509TrustList, x509CommonName, + settings) + + IMAP4.__init__(self, host, port) + + + def open(self, host = '', port = IMAP4_TLS_PORT): + """Setup connection to remote server on "host:port". + + This connection will be used by the routines: + read, readline, send, shutdown. 
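# Illustrative sketch (not part of the tlslite sources): the class above is
# a drop-in replacement for imaplib.IMAP4; the host, credentials, and
# fingerprint are placeholders.
from gdata.tlslite.integration.IMAP4_TLS import IMAP4_TLS

imap = IMAP4_TLS("mail.example.com",
                 x509Fingerprint="0123456789abcdef0123456789abcdef01234567")
imap.login("alice@example.com", "mailbox-password")   # ordinary imaplib calls
imap.select("INBOX")
typ, data = imap.search(None, "ALL")
imap.logout()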
+ """ + self.host = host + self.port = port + self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) + self.sock.connect((host, port)) + self.sock = TLSConnection(self.sock) + self.sock.closeSocket = True + ClientHelper._handshake(self, self.sock) + self.file = self.sock.makefile('rb') diff --git a/gam/gdata/tlslite/integration/IntegrationHelper.py b/gam/gdata/tlslite/integration/IntegrationHelper.py new file mode 100755 index 00000000000..af5193b480e --- /dev/null +++ b/gam/gdata/tlslite/integration/IntegrationHelper.py @@ -0,0 +1,52 @@ + +class IntegrationHelper: + + def __init__(self, + username=None, password=None, sharedKey=None, + certChain=None, privateKey=None, + cryptoID=None, protocol=None, + x509Fingerprint=None, + x509TrustList=None, x509CommonName=None, + settings = None): + + self.username = None + self.password = None + self.sharedKey = None + self.certChain = None + self.privateKey = None + self.checker = None + + #SRP Authentication + if username and password and not \ + (sharedKey or certChain or privateKey): + self.username = username + self.password = password + + #Shared Key Authentication + elif username and sharedKey and not \ + (password or certChain or privateKey): + self.username = username + self.sharedKey = sharedKey + + #Certificate Chain Authentication + elif certChain and privateKey and not \ + (username or password or sharedKey): + self.certChain = certChain + self.privateKey = privateKey + + #No Authentication + elif not password and not username and not \ + sharedKey and not certChain and not privateKey: + pass + + else: + raise ValueError("Bad parameters") + + #Authenticate the server based on its cryptoID or fingerprint + if sharedKey and (cryptoID or protocol or x509Fingerprint): + raise ValueError("Can't use shared keys with other forms of"\ + "authentication") + + self.checker = Checker(cryptoID, protocol, x509Fingerprint, + x509TrustList, x509CommonName) + self.settings = settings \ No newline at end of file diff --git a/gam/gdata/tlslite/integration/POP3_TLS.py b/gam/gdata/tlslite/integration/POP3_TLS.py new file mode 100755 index 00000000000..26b37fdd84c --- /dev/null +++ b/gam/gdata/tlslite/integration/POP3_TLS.py @@ -0,0 +1,142 @@ +"""TLS Lite + poplib.""" + +import socket +from poplib import POP3 +from gdata.tlslite.TLSConnection import TLSConnection +from gdata.tlslite.integration.ClientHelper import ClientHelper + +# POP TLS PORT +POP3_TLS_PORT = 995 + +class POP3_TLS(POP3, ClientHelper): + """This class extends L{poplib.POP3} with TLS support.""" + + def __init__(self, host, port = POP3_TLS_PORT, + username=None, password=None, sharedKey=None, + certChain=None, privateKey=None, + cryptoID=None, protocol=None, + x509Fingerprint=None, + x509TrustList=None, x509CommonName=None, + settings=None): + """Create a new POP3_TLS. + + For client authentication, use one of these argument + combinations: + - username, password (SRP) + - username, sharedKey (shared-key) + - certChain, privateKey (certificate) + + For server authentication, you can either rely on the + implicit mutual authentication performed by SRP or + shared-keys, or you can do certificate-based server + authentication with one of these argument combinations: + - cryptoID[, protocol] (requires cryptoIDlib) + - x509Fingerprint + - x509TrustList[, x509CommonName] (requires cryptlib_py) + + Certificate-based server authentication is compatible with + SRP or certificate-based client authentication. It is + not compatible with shared-keys. 
+ + The caller should be prepared to handle TLS-specific + exceptions. See the client handshake functions in + L{tlslite.TLSConnection.TLSConnection} for details on which + exceptions might be raised. + + @type host: str + @param host: Server to connect to. + + @type port: int + @param port: Port to connect to. + + @type username: str + @param username: SRP or shared-key username. Requires the + 'password' or 'sharedKey' argument. + + @type password: str + @param password: SRP password for mutual authentication. + Requires the 'username' argument. + + @type sharedKey: str + @param sharedKey: Shared key for mutual authentication. + Requires the 'username' argument. + + @type certChain: L{tlslite.X509CertChain.X509CertChain} or + L{cryptoIDlib.CertChain.CertChain} + @param certChain: Certificate chain for client authentication. + Requires the 'privateKey' argument. Excludes the SRP or + shared-key related arguments. + + @type privateKey: L{tlslite.utils.RSAKey.RSAKey} + @param privateKey: Private key for client authentication. + Requires the 'certChain' argument. Excludes the SRP or + shared-key related arguments. + + @type cryptoID: str + @param cryptoID: cryptoID for server authentication. Mutually + exclusive with the 'x509...' arguments. + + @type protocol: str + @param protocol: cryptoID protocol URI for server + authentication. Requires the 'cryptoID' argument. + + @type x509Fingerprint: str + @param x509Fingerprint: Hex-encoded X.509 fingerprint for + server authentication. Mutually exclusive with the 'cryptoID' + and 'x509TrustList' arguments. + + @type x509TrustList: list of L{tlslite.X509.X509} + @param x509TrustList: A list of trusted root certificates. The + other party must present a certificate chain which extends to + one of these root certificates. The cryptlib_py module must be + installed to use this parameter. Mutually exclusive with the + 'cryptoID' and 'x509Fingerprint' arguments. + + @type x509CommonName: str + @param x509CommonName: The end-entity certificate's 'CN' field + must match this value. For a web server, this is typically a + server name such as 'www.amazon.com'. Mutually exclusive with + the 'cryptoID' and 'x509Fingerprint' arguments. Requires the + 'x509TrustList' argument. + + @type settings: L{tlslite.HandshakeSettings.HandshakeSettings} + @param settings: Various settings which can be used to control + the ciphersuites, certificate types, and SSL/TLS versions + offered by the client. 
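# Illustrative sketch (not part of the tlslite sources): the class above
# mirrors poplib.POP3; the host, credentials, and fingerprint are
# placeholders.
from gdata.tlslite.integration.POP3_TLS import POP3_TLS

pop = POP3_TLS("mail.example.com",
               x509Fingerprint="0123456789abcdef0123456789abcdef01234567")
pop.user("alice@example.com")        # ordinary poplib calls follow the handshake
pop.pass_("mailbox-password")
numMessages, totalSize = pop.stat()
pop.quit()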
+ """ + + self.host = host + self.port = port + msg = "getaddrinfo returns an empty list" + self.sock = None + for res in socket.getaddrinfo(self.host, self.port, 0, socket.SOCK_STREAM): + af, socktype, proto, canonname, sa = res + try: + self.sock = socket.socket(af, socktype, proto) + self.sock.connect(sa) + except socket.error, msg: + if self.sock: + self.sock.close() + self.sock = None + continue + break + if not self.sock: + raise socket.error, msg + + ### New code below (all else copied from poplib) + ClientHelper.__init__(self, + username, password, sharedKey, + certChain, privateKey, + cryptoID, protocol, + x509Fingerprint, + x509TrustList, x509CommonName, + settings) + + self.sock = TLSConnection(self.sock) + self.sock.closeSocket = True + ClientHelper._handshake(self, self.sock) + ### + + self.file = self.sock.makefile('rb') + self._debugging = 0 + self.welcome = self._getresp() diff --git a/gam/gdata/tlslite/integration/SMTP_TLS.py b/gam/gdata/tlslite/integration/SMTP_TLS.py new file mode 100755 index 00000000000..67e0febed61 --- /dev/null +++ b/gam/gdata/tlslite/integration/SMTP_TLS.py @@ -0,0 +1,114 @@ +"""TLS Lite + smtplib.""" + +from smtplib import SMTP +from gdata.tlslite.TLSConnection import TLSConnection +from gdata.tlslite.integration.ClientHelper import ClientHelper + +class SMTP_TLS(SMTP): + """This class extends L{smtplib.SMTP} with TLS support.""" + + def starttls(self, + username=None, password=None, sharedKey=None, + certChain=None, privateKey=None, + cryptoID=None, protocol=None, + x509Fingerprint=None, + x509TrustList=None, x509CommonName=None, + settings=None): + """Puts the connection to the SMTP server into TLS mode. + + If the server supports TLS, this will encrypt the rest of the SMTP + session. + + For client authentication, use one of these argument + combinations: + - username, password (SRP) + - username, sharedKey (shared-key) + - certChain, privateKey (certificate) + + For server authentication, you can either rely on the + implicit mutual authentication performed by SRP or + shared-keys, or you can do certificate-based server + authentication with one of these argument combinations: + - cryptoID[, protocol] (requires cryptoIDlib) + - x509Fingerprint + - x509TrustList[, x509CommonName] (requires cryptlib_py) + + Certificate-based server authentication is compatible with + SRP or certificate-based client authentication. It is + not compatible with shared-keys. + + The caller should be prepared to handle TLS-specific + exceptions. See the client handshake functions in + L{tlslite.TLSConnection.TLSConnection} for details on which + exceptions might be raised. + + @type username: str + @param username: SRP or shared-key username. Requires the + 'password' or 'sharedKey' argument. + + @type password: str + @param password: SRP password for mutual authentication. + Requires the 'username' argument. + + @type sharedKey: str + @param sharedKey: Shared key for mutual authentication. + Requires the 'username' argument. + + @type certChain: L{tlslite.X509CertChain.X509CertChain} or + L{cryptoIDlib.CertChain.CertChain} + @param certChain: Certificate chain for client authentication. + Requires the 'privateKey' argument. Excludes the SRP or + shared-key related arguments. + + @type privateKey: L{tlslite.utils.RSAKey.RSAKey} + @param privateKey: Private key for client authentication. + Requires the 'certChain' argument. Excludes the SRP or + shared-key related arguments. + + @type cryptoID: str + @param cryptoID: cryptoID for server authentication. 
Mutually + exclusive with the 'x509...' arguments. + + @type protocol: str + @param protocol: cryptoID protocol URI for server + authentication. Requires the 'cryptoID' argument. + + @type x509Fingerprint: str + @param x509Fingerprint: Hex-encoded X.509 fingerprint for + server authentication. Mutually exclusive with the 'cryptoID' + and 'x509TrustList' arguments. + + @type x509TrustList: list of L{tlslite.X509.X509} + @param x509TrustList: A list of trusted root certificates. The + other party must present a certificate chain which extends to + one of these root certificates. The cryptlib_py module must be + installed to use this parameter. Mutually exclusive with the + 'cryptoID' and 'x509Fingerprint' arguments. + + @type x509CommonName: str + @param x509CommonName: The end-entity certificate's 'CN' field + must match this value. For a web server, this is typically a + server name such as 'www.amazon.com'. Mutually exclusive with + the 'cryptoID' and 'x509Fingerprint' arguments. Requires the + 'x509TrustList' argument. + + @type settings: L{tlslite.HandshakeSettings.HandshakeSettings} + @param settings: Various settings which can be used to control + the ciphersuites, certificate types, and SSL/TLS versions + offered by the client. + """ + (resp, reply) = self.docmd("STARTTLS") + if resp == 220: + helper = ClientHelper( + username, password, sharedKey, + certChain, privateKey, + cryptoID, protocol, + x509Fingerprint, + x509TrustList, x509CommonName, + settings) + conn = TLSConnection(self.sock) + conn.closeSocket = True + helper._handshake(conn) + self.sock = conn + self.file = conn.makefile('rb') + return (resp, reply) diff --git a/gam/gdata/tlslite/integration/TLSAsyncDispatcherMixIn.py b/gam/gdata/tlslite/integration/TLSAsyncDispatcherMixIn.py new file mode 100755 index 00000000000..f732f62e663 --- /dev/null +++ b/gam/gdata/tlslite/integration/TLSAsyncDispatcherMixIn.py @@ -0,0 +1,139 @@ +"""TLS Lite + asyncore.""" + + +import asyncore +from gdata.tlslite.TLSConnection import TLSConnection +from AsyncStateMachine import AsyncStateMachine + + +class TLSAsyncDispatcherMixIn(AsyncStateMachine): + """This class can be "mixed in" with an + L{asyncore.dispatcher} to add TLS support. + + This class essentially sits between the dispatcher and the select + loop, intercepting events and only calling the dispatcher when + applicable. + + In the case of handle_read(), a read operation will be activated, + and when it completes, the bytes will be placed in a buffer where + the dispatcher can retrieve them by calling recv(), and the + dispatcher's handle_read() will be called. + + In the case of handle_write(), the dispatcher's handle_write() will + be called, and when it calls send(), a write operation will be + activated. + + To use this class, you must combine it with an asyncore.dispatcher, + and pass in a handshake operation with setServerHandshakeOp(). + + Below is an example of using this class with medusa. This class is + mixed in with http_channel to create http_tls_channel. Note: + 1. the mix-in is listed first in the inheritance list + + 2. the input buffer size must be at least 16K, otherwise the + dispatcher might not read all the bytes from the TLS layer, + leaving some bytes in limbo. + + 3. IE seems to have a problem receiving a whole HTTP response in a + single TLS record, so HTML pages containing '\\r\\n\\r\\n' won't + be displayed on IE. 
+ + Add the following text into 'start_medusa.py', in the 'HTTP Server' + section:: + + from tlslite.api import * + s = open("./serverX509Cert.pem").read() + x509 = X509() + x509.parse(s) + certChain = X509CertChain([x509]) + + s = open("./serverX509Key.pem").read() + privateKey = parsePEMKey(s, private=True) + + class http_tls_channel(TLSAsyncDispatcherMixIn, + http_server.http_channel): + ac_in_buffer_size = 16384 + + def __init__ (self, server, conn, addr): + http_server.http_channel.__init__(self, server, conn, addr) + TLSAsyncDispatcherMixIn.__init__(self, conn) + self.tlsConnection.ignoreAbruptClose = True + self.setServerHandshakeOp(certChain=certChain, + privateKey=privateKey) + + hs.channel_class = http_tls_channel + + If the TLS layer raises an exception, the exception will be caught + in asyncore.dispatcher, which will call close() on this class. The + TLS layer always closes the TLS connection before raising an + exception, so the close operation will complete right away, causing + asyncore.dispatcher.close() to be called, which closes the socket + and removes this instance from the asyncore loop. + + """ + + + def __init__(self, sock=None): + AsyncStateMachine.__init__(self) + + if sock: + self.tlsConnection = TLSConnection(sock) + + #Calculate the sibling I'm being mixed in with. + #This is necessary since we override functions + #like readable(), handle_read(), etc., but we + #also want to call the sibling's versions. + for cl in self.__class__.__bases__: + if cl != TLSAsyncDispatcherMixIn and cl != AsyncStateMachine: + self.siblingClass = cl + break + else: + raise AssertionError() + + def readable(self): + result = self.wantsReadEvent() + if result != None: + return result + return self.siblingClass.readable(self) + + def writable(self): + result = self.wantsWriteEvent() + if result != None: + return result + return self.siblingClass.writable(self) + + def handle_read(self): + self.inReadEvent() + + def handle_write(self): + self.inWriteEvent() + + def outConnectEvent(self): + self.siblingClass.handle_connect(self) + + def outCloseEvent(self): + asyncore.dispatcher.close(self) + + def outReadEvent(self, readBuffer): + self.readBuffer = readBuffer + self.siblingClass.handle_read(self) + + def outWriteEvent(self): + self.siblingClass.handle_write(self) + + def recv(self, bufferSize=16384): + if bufferSize < 16384 or self.readBuffer == None: + raise AssertionError() + returnValue = self.readBuffer + self.readBuffer = None + return returnValue + + def send(self, writeBuffer): + self.setWriteOp(writeBuffer) + return len(writeBuffer) + + def close(self): + if hasattr(self, "tlsConnection"): + self.setCloseOp() + else: + asyncore.dispatcher.close(self) diff --git a/gam/gdata/tlslite/integration/TLSSocketServerMixIn.py b/gam/gdata/tlslite/integration/TLSSocketServerMixIn.py new file mode 100755 index 00000000000..10224b688b5 --- /dev/null +++ b/gam/gdata/tlslite/integration/TLSSocketServerMixIn.py @@ -0,0 +1,59 @@ +"""TLS Lite + SocketServer.""" + +from gdata.tlslite.TLSConnection import TLSConnection + +class TLSSocketServerMixIn: + """ + This class can be mixed in with any L{SocketServer.TCPServer} to + add TLS support. + + To use this class, define a new class that inherits from it and + some L{SocketServer.TCPServer} (with the mix-in first). Then + implement the handshake() method, doing some sort of server + handshake on the connection argument. If the handshake method + returns True, the RequestHandler will be triggered. 
Below is a + complete example of a threaded HTTPS server:: + + from SocketServer import * + from BaseHTTPServer import * + from SimpleHTTPServer import * + from tlslite.api import * + + s = open("./serverX509Cert.pem").read() + x509 = X509() + x509.parse(s) + certChain = X509CertChain([x509]) + + s = open("./serverX509Key.pem").read() + privateKey = parsePEMKey(s, private=True) + + sessionCache = SessionCache() + + class MyHTTPServer(ThreadingMixIn, TLSSocketServerMixIn, + HTTPServer): + def handshake(self, tlsConnection): + try: + tlsConnection.handshakeServer(certChain=certChain, + privateKey=privateKey, + sessionCache=sessionCache) + tlsConnection.ignoreAbruptClose = True + return True + except TLSError, error: + print "Handshake failure:", str(error) + return False + + httpd = MyHTTPServer(('localhost', 443), SimpleHTTPRequestHandler) + httpd.serve_forever() + """ + + + def finish_request(self, sock, client_address): + tlsConnection = TLSConnection(sock) + if self.handshake(tlsConnection) == True: + self.RequestHandlerClass(tlsConnection, client_address, self) + tlsConnection.close() + + #Implement this method to do some form of handshaking. Return True + #if the handshake finishes properly and the request is authorized. + def handshake(self, tlsConnection): + raise NotImplementedError() diff --git a/gam/gdata/tlslite/integration/TLSTwistedProtocolWrapper.py b/gam/gdata/tlslite/integration/TLSTwistedProtocolWrapper.py new file mode 100755 index 00000000000..c88703cacf3 --- /dev/null +++ b/gam/gdata/tlslite/integration/TLSTwistedProtocolWrapper.py @@ -0,0 +1,196 @@ +"""TLS Lite + Twisted.""" + +from twisted.protocols.policies import ProtocolWrapper, WrappingFactory +from twisted.python.failure import Failure + +from AsyncStateMachine import AsyncStateMachine +from gdata.tlslite.TLSConnection import TLSConnection +from gdata.tlslite.errors import * + +import socket +import errno + + +#The TLSConnection is created around a "fake socket" that +#plugs it into the underlying Twisted transport +class _FakeSocket: + def __init__(self, wrapper): + self.wrapper = wrapper + self.data = "" + + def send(self, data): + ProtocolWrapper.write(self.wrapper, data) + return len(data) + + def recv(self, numBytes): + if self.data == "": + raise socket.error, (errno.EWOULDBLOCK, "") + returnData = self.data[:numBytes] + self.data = self.data[numBytes:] + return returnData + +class TLSTwistedProtocolWrapper(ProtocolWrapper, AsyncStateMachine): + """This class can wrap Twisted protocols to add TLS support. + + Below is a complete example of using TLS Lite with a Twisted echo + server. + + There are two server implementations below. Echo is the original + protocol, which is oblivious to TLS. Echo1 subclasses Echo and + negotiates TLS when the client connects. 
Echo2 subclasses Echo and + negotiates TLS when the client sends "STARTTLS":: + + from twisted.internet.protocol import Protocol, Factory + from twisted.internet import reactor + from twisted.protocols.policies import WrappingFactory + from twisted.protocols.basic import LineReceiver + from twisted.python import log + from twisted.python.failure import Failure + import sys + from tlslite.api import * + + s = open("./serverX509Cert.pem").read() + x509 = X509() + x509.parse(s) + certChain = X509CertChain([x509]) + + s = open("./serverX509Key.pem").read() + privateKey = parsePEMKey(s, private=True) + + verifierDB = VerifierDB("verifierDB") + verifierDB.open() + + class Echo(LineReceiver): + def connectionMade(self): + self.transport.write("Welcome to the echo server!\\r\\n") + + def lineReceived(self, line): + self.transport.write(line + "\\r\\n") + + class Echo1(Echo): + def connectionMade(self): + if not self.transport.tlsStarted: + self.transport.setServerHandshakeOp(certChain=certChain, + privateKey=privateKey, + verifierDB=verifierDB) + else: + Echo.connectionMade(self) + + def connectionLost(self, reason): + pass #Handle any TLS exceptions here + + class Echo2(Echo): + def lineReceived(self, data): + if data == "STARTTLS": + self.transport.setServerHandshakeOp(certChain=certChain, + privateKey=privateKey, + verifierDB=verifierDB) + else: + Echo.lineReceived(self, data) + + def connectionLost(self, reason): + pass #Handle any TLS exceptions here + + factory = Factory() + factory.protocol = Echo1 + #factory.protocol = Echo2 + + wrappingFactory = WrappingFactory(factory) + wrappingFactory.protocol = TLSTwistedProtocolWrapper + + log.startLogging(sys.stdout) + reactor.listenTCP(1079, wrappingFactory) + reactor.run() + + This class works as follows: + + Data comes in and is given to the AsyncStateMachine for handling. + AsyncStateMachine will forward events to this class, and we'll + pass them on to the ProtocolHandler, which will proxy them to the + wrapped protocol. The wrapped protocol may then call back into + this class, and these calls will be proxied into the + AsyncStateMachine. 
+ + The call graph looks like this: + - self.dataReceived + - AsyncStateMachine.inReadEvent + - self.out(Connect|Close|Read)Event + - ProtocolWrapper.(connectionMade|loseConnection|dataReceived) + - self.(loseConnection|write|writeSequence) + - AsyncStateMachine.(setCloseOp|setWriteOp) + """ + + #WARNING: IF YOU COPY-AND-PASTE THE ABOVE CODE, BE SURE TO REMOVE + #THE EXTRA ESCAPING AROUND "\\r\\n" + + def __init__(self, factory, wrappedProtocol): + ProtocolWrapper.__init__(self, factory, wrappedProtocol) + AsyncStateMachine.__init__(self) + self.fakeSocket = _FakeSocket(self) + self.tlsConnection = TLSConnection(self.fakeSocket) + self.tlsStarted = False + self.connectionLostCalled = False + + def connectionMade(self): + try: + ProtocolWrapper.connectionMade(self) + except TLSError, e: + self.connectionLost(Failure(e)) + ProtocolWrapper.loseConnection(self) + + def dataReceived(self, data): + try: + if not self.tlsStarted: + ProtocolWrapper.dataReceived(self, data) + else: + self.fakeSocket.data += data + while self.fakeSocket.data: + AsyncStateMachine.inReadEvent(self) + except TLSError, e: + self.connectionLost(Failure(e)) + ProtocolWrapper.loseConnection(self) + + def connectionLost(self, reason): + if not self.connectionLostCalled: + ProtocolWrapper.connectionLost(self, reason) + self.connectionLostCalled = True + + + def outConnectEvent(self): + ProtocolWrapper.connectionMade(self) + + def outCloseEvent(self): + ProtocolWrapper.loseConnection(self) + + def outReadEvent(self, data): + if data == "": + ProtocolWrapper.loseConnection(self) + else: + ProtocolWrapper.dataReceived(self, data) + + + def setServerHandshakeOp(self, **args): + self.tlsStarted = True + AsyncStateMachine.setServerHandshakeOp(self, **args) + + def loseConnection(self): + if not self.tlsStarted: + ProtocolWrapper.loseConnection(self) + else: + AsyncStateMachine.setCloseOp(self) + + def write(self, data): + if not self.tlsStarted: + ProtocolWrapper.write(self, data) + else: + #Because of the FakeSocket, write operations are guaranteed to + #terminate immediately. + AsyncStateMachine.setWriteOp(self, data) + + def writeSequence(self, seq): + if not self.tlsStarted: + ProtocolWrapper.writeSequence(self, seq) + else: + #Because of the FakeSocket, write operations are guaranteed to + #terminate immediately. + AsyncStateMachine.setWriteOp(self, "".join(seq)) diff --git a/gam/gdata/tlslite/integration/XMLRPCTransport.py b/gam/gdata/tlslite/integration/XMLRPCTransport.py new file mode 100755 index 00000000000..3f025e46e7b --- /dev/null +++ b/gam/gdata/tlslite/integration/XMLRPCTransport.py @@ -0,0 +1,137 @@ +"""TLS Lite + xmlrpclib.""" + +import xmlrpclib +import httplib +from gdata.tlslite.integration.HTTPTLSConnection import HTTPTLSConnection +from gdata.tlslite.integration.ClientHelper import ClientHelper + + +class XMLRPCTransport(xmlrpclib.Transport, ClientHelper): + """Handles an HTTPS transaction to an XML-RPC server.""" + + def __init__(self, + username=None, password=None, sharedKey=None, + certChain=None, privateKey=None, + cryptoID=None, protocol=None, + x509Fingerprint=None, + x509TrustList=None, x509CommonName=None, + settings=None): + """Create a new XMLRPCTransport. 
+ + An instance of this class can be passed to L{xmlrpclib.ServerProxy} + to use TLS with XML-RPC calls:: + + from tlslite.api import XMLRPCTransport + from xmlrpclib import ServerProxy + + transport = XMLRPCTransport(user="alice", password="abra123") + server = ServerProxy("https://localhost", transport) + + For client authentication, use one of these argument + combinations: + - username, password (SRP) + - username, sharedKey (shared-key) + - certChain, privateKey (certificate) + + For server authentication, you can either rely on the + implicit mutual authentication performed by SRP or + shared-keys, or you can do certificate-based server + authentication with one of these argument combinations: + - cryptoID[, protocol] (requires cryptoIDlib) + - x509Fingerprint + - x509TrustList[, x509CommonName] (requires cryptlib_py) + + Certificate-based server authentication is compatible with + SRP or certificate-based client authentication. It is + not compatible with shared-keys. + + The constructor does not perform the TLS handshake itself, but + simply stores these arguments for later. The handshake is + performed only when this class needs to connect with the + server. Thus you should be prepared to handle TLS-specific + exceptions when calling methods of L{xmlrpclib.ServerProxy}. See the + client handshake functions in + L{tlslite.TLSConnection.TLSConnection} for details on which + exceptions might be raised. + + @type username: str + @param username: SRP or shared-key username. Requires the + 'password' or 'sharedKey' argument. + + @type password: str + @param password: SRP password for mutual authentication. + Requires the 'username' argument. + + @type sharedKey: str + @param sharedKey: Shared key for mutual authentication. + Requires the 'username' argument. + + @type certChain: L{tlslite.X509CertChain.X509CertChain} or + L{cryptoIDlib.CertChain.CertChain} + @param certChain: Certificate chain for client authentication. + Requires the 'privateKey' argument. Excludes the SRP or + shared-key related arguments. + + @type privateKey: L{tlslite.utils.RSAKey.RSAKey} + @param privateKey: Private key for client authentication. + Requires the 'certChain' argument. Excludes the SRP or + shared-key related arguments. + + @type cryptoID: str + @param cryptoID: cryptoID for server authentication. Mutually + exclusive with the 'x509...' arguments. + + @type protocol: str + @param protocol: cryptoID protocol URI for server + authentication. Requires the 'cryptoID' argument. + + @type x509Fingerprint: str + @param x509Fingerprint: Hex-encoded X.509 fingerprint for + server authentication. Mutually exclusive with the 'cryptoID' + and 'x509TrustList' arguments. + + @type x509TrustList: list of L{tlslite.X509.X509} + @param x509TrustList: A list of trusted root certificates. The + other party must present a certificate chain which extends to + one of these root certificates. The cryptlib_py module must be + installed to use this parameter. Mutually exclusive with the + 'cryptoID' and 'x509Fingerprint' arguments. + + @type x509CommonName: str + @param x509CommonName: The end-entity certificate's 'CN' field + must match this value. For a web server, this is typically a + server name such as 'www.amazon.com'. Mutually exclusive with + the 'cryptoID' and 'x509Fingerprint' arguments. Requires the + 'x509TrustList' argument. 
+ + @type settings: L{tlslite.HandshakeSettings.HandshakeSettings} + @param settings: Various settings which can be used to control + the ciphersuites, certificate types, and SSL/TLS versions + offered by the client. + """ + + ClientHelper.__init__(self, + username, password, sharedKey, + certChain, privateKey, + cryptoID, protocol, + x509Fingerprint, + x509TrustList, x509CommonName, + settings) + + + def make_connection(self, host): + # create a HTTPS connection object from a host descriptor + host, extra_headers, x509 = self.get_host_info(host) + http = HTTPTLSConnection(host, None, + self.username, self.password, + self.sharedKey, + self.certChain, self.privateKey, + self.checker.cryptoID, + self.checker.protocol, + self.checker.x509Fingerprint, + self.checker.x509TrustList, + self.checker.x509CommonName, + self.settings) + http2 = httplib.HTTP() + http2._setup(http) + return http2 diff --git a/gam/gdata/tlslite/integration/__init__.py b/gam/gdata/tlslite/integration/__init__.py new file mode 100755 index 00000000000..960f4065f26 --- /dev/null +++ b/gam/gdata/tlslite/integration/__init__.py @@ -0,0 +1,17 @@ +"""Classes for integrating TLS Lite with other packages.""" + +__all__ = ["AsyncStateMachine", + "HTTPTLSConnection", + "POP3_TLS", + "IMAP4_TLS", + "SMTP_TLS", + "XMLRPCTransport", + "TLSSocketServerMixIn", + "TLSAsyncDispatcherMixIn", + "TLSTwistedProtocolWrapper"] + +try: + import twisted + del twisted +except ImportError: + del __all__[__all__.index("TLSTwistedProtocolWrapper")] diff --git a/gam/gdata/tlslite/mathtls.py b/gam/gdata/tlslite/mathtls.py new file mode 100755 index 00000000000..3b8ede60126 --- /dev/null +++ b/gam/gdata/tlslite/mathtls.py @@ -0,0 +1,170 @@ +"""Miscellaneous helper functions.""" + +from utils.compat import * +from utils.cryptomath import * + +import hmac +import md5 +import sha + +#1024, 1536, 2048, 3072, 4096, 6144, and 8192 bit groups] +goodGroupParameters = [(2,0xEEAF0AB9ADB38DD69C33F80AFA8FC5E86072618775FF3C0B9EA2314C9C256576D674DF7496EA81D3383B4813D692C6E0E0D5D8E250B98BE48E495C1D6089DAD15DC7D7B46154D6B6CE8EF4AD69B15D4982559B297BCF1885C529F566660E57EC68EDBC3C05726CC02FD4CBF4976EAA9AFD5138FE8376435B9FC61D2FC0EB06E3),\ + (2,0x9DEF3CAFB939277AB1F12A8617A47BBBDBA51DF499AC4C80BEEEA9614B19CC4D5F4F5F556E27CBDE51C6A94BE4607A291558903BA0D0F84380B655BB9A22E8DCDF028A7CEC67F0D08134B1C8B97989149B609E0BE3BAB63D47548381DBC5B1FC764E3F4B53DD9DA1158BFD3E2B9C8CF56EDF019539349627DB2FD53D24B7C48665772E437D6C7F8CE442734AF7CCB7AE837C264AE3A9BEB87F8A2FE9B8B5292E5A021FFF5E91479E8CE7A28C2442C6F315180F93499A234DCF76E3FED135F9BB),\ + (2,0xAC6BDB41324A9A9BF166DE5E1389582FAF72B6651987EE07FC3192943DB56050A37329CBB4A099ED8193E0757767A13DD52312AB4B03310DCD7F48A9DA04FD50E8083969EDB767B0CF6095179A163AB3661A05FBD5FAAAE82918A9962F0B93B855F97993EC975EEAA80D740ADBF4FF747359D041D5C33EA71D281E446B14773BCA97B43A23FB801676BD207A436C6481F1D2B9078717461A5B9D32E688F87748544523B524B0D57D5EA77A2775D2ECFA032CFBDBF52FB3786160279004E57AE6AF874E7303CE53299CCC041C7BC308D82A5698F3A8D0C38271AE35F8E9DBFBB694B5C803D89F7AE435DE236D525F54759B65E372FCD68EF20FA7111F9E4AFF73),\ + 
(2,0xFFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF6955817183995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200CBBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFCE0FD108E4B82D120A93AD2CAFFFFFFFFFFFFFFFF),\ + (5,0xFFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF6955817183995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200CBBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D788719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA993B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934063199FFFFFFFFFFFFFFFF),\ + (5,0xFFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF6955817183995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200CBBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D788719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA993B4EA988D8FDDC186FFB7DC90A6C08F4DF435C93402849236C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BDF8FF9406AD9E530EE5DB382F413001AEB06A53ED9027D831179727B0865A8918DA3EDBEBCF9B14ED44CE6CBACED4BB1BDB7F1447E6CC254B332051512BD7AF426FB8F401378CD2BF5983CA01C64B92ECF032EA15D1721D03F482D7CE6E74FEF6D55E702F46980C82B5A84031900B1C9E59E7C97FBEC7E8F323A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AACC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE32806A1D58BB7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55CDA56C9EC2EF29632387FE8D76E3C0468043E8F663F4860EE12BF2D5B0B7474D6E694F91E6DCC4024FFFFFFFFFFFFFFFF),\ + 
(5,0xFFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF6955817183995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200CBBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D788719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA993B4EA988D8FDDC186FFB7DC90A6C08F4DF435C93402849236C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BDF8FF9406AD9E530EE5DB382F413001AEB06A53ED9027D831179727B0865A8918DA3EDBEBCF9B14ED44CE6CBACED4BB1BDB7F1447E6CC254B332051512BD7AF426FB8F401378CD2BF5983CA01C64B92ECF032EA15D1721D03F482D7CE6E74FEF6D55E702F46980C82B5A84031900B1C9E59E7C97FBEC7E8F323A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AACC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE32806A1D58BB7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55CDA56C9EC2EF29632387FE8D76E3C0468043E8F663F4860EE12BF2D5B0B7474D6E694F91E6DBE115974A3926F12FEE5E438777CB6A932DF8CD8BEC4D073B931BA3BC832B68D9DD300741FA7BF8AFC47ED2576F6936BA424663AAB639C5AE4F5683423B4742BF1C978238F16CBE39D652DE3FDB8BEFC848AD922222E04A4037C0713EB57A81A23F0C73473FC646CEA306B4BCBC8862F8385DDFA9D4B7FA2C087E879683303ED5BDD3A062B3CF5B3A278A66D2A13F83F44F82DDF310EE074AB6A364597E899A0255DC164F31CC50846851DF9AB48195DED7EA1B1D510BD7EE74D73FAF36BC31ECFA268359046F4EB879F924009438B481C6CD7889A002ED5EE382BC9190DA6FC026E479558E4475677E9AA9E3050E2765694DFC81F56E880B96E7160C980DD98EDD3DFFFFFFFFFFFFFFFFF)] + +def P_hash(hashModule, secret, seed, length): + bytes = createByteArrayZeros(length) + secret = bytesToString(secret) + seed = bytesToString(seed) + A = seed + index = 0 + while 1: + A = hmac.HMAC(secret, A, hashModule).digest() + output = hmac.HMAC(secret, A+seed, hashModule).digest() + for c in output: + if index >= length: + return bytes + bytes[index] = ord(c) + index += 1 + return bytes + +def PRF(secret, label, seed, length): + #Split the secret into left and right halves + S1 = secret[ : int(math.ceil(len(secret)/2.0))] + S2 = secret[ int(math.floor(len(secret)/2.0)) : ] + + #Run the left half through P_MD5 and the right half through P_SHA1 + p_md5 = P_hash(md5, S1, concatArrays(stringToBytes(label), seed), length) + p_sha1 = P_hash(sha, S2, concatArrays(stringToBytes(label), seed), length) + + #XOR the output values and return the result + for x in range(length): + p_md5[x] ^= p_sha1[x] + return p_md5 + + +def PRF_SSL(secret, seed, length): + secretStr = bytesToString(secret) + seedStr = bytesToString(seed) + bytes = createByteArrayZeros(length) + index = 0 + for x in range(26): + A = chr(ord('A')+x) * (x+1) # 'A', 'BB', 'CCC', etc.. 
+ input = secretStr + sha.sha(A + secretStr + seedStr).digest() + output = md5.md5(input).digest() + for c in output: + if index >= length: + return bytes + bytes[index] = ord(c) + index += 1 + return bytes + +def makeX(salt, username, password): + if len(username)>=256: + raise ValueError("username too long") + if len(salt)>=256: + raise ValueError("salt too long") + return stringToNumber(sha.sha(salt + sha.sha(username + ":" + password)\ + .digest()).digest()) + +#This function is used by VerifierDB.makeVerifier +def makeVerifier(username, password, bits): + bitsIndex = {1024:0, 1536:1, 2048:2, 3072:3, 4096:4, 6144:5, 8192:6}[bits] + g,N = goodGroupParameters[bitsIndex] + salt = bytesToString(getRandomBytes(16)) + x = makeX(salt, username, password) + verifier = powMod(g, x, N) + return N, g, salt, verifier + +def PAD(n, x): + nLength = len(numberToString(n)) + s = numberToString(x) + if len(s) < nLength: + s = ("\0" * (nLength-len(s))) + s + return s + +def makeU(N, A, B): + return stringToNumber(sha.sha(PAD(N, A) + PAD(N, B)).digest()) + +def makeK(N, g): + return stringToNumber(sha.sha(numberToString(N) + PAD(N, g)).digest()) + + +""" +MAC_SSL +Modified from Python HMAC by Trevor +""" + +class MAC_SSL: + """MAC_SSL class. + + This supports the API for Cryptographic Hash Functions (PEP 247). + """ + + def __init__(self, key, msg = None, digestmod = None): + """Create a new MAC_SSL object. + + key: key for the keyed hash object. + msg: Initial input for the hash, if provided. + digestmod: A module supporting PEP 247. Defaults to the md5 module. + """ + if digestmod is None: + import md5 + digestmod = md5 + + if key == None: #TREVNEW - for faster copying + return #TREVNEW + + self.digestmod = digestmod + self.outer = digestmod.new() + self.inner = digestmod.new() + self.digest_size = digestmod.digest_size + + ipad = "\x36" * 40 + opad = "\x5C" * 40 + + self.inner.update(key) + self.inner.update(ipad) + self.outer.update(key) + self.outer.update(opad) + if msg is not None: + self.update(msg) + + + def update(self, msg): + """Update this hashing object with the string msg. + """ + self.inner.update(msg) + + def copy(self): + """Return a separate copy of this hashing object. + + An update to this copy won't affect the original object. + """ + other = MAC_SSL(None) #TREVNEW - for faster copying + other.digest_size = self.digest_size #TREVNEW + other.digestmod = self.digestmod + other.inner = self.inner.copy() + other.outer = self.outer.copy() + return other + + def digest(self): + """Return the hash value of this hashing object. + + This returns a string containing 8-bit data. The object is + not altered in any way by this function; you can continue + updating the object after calling this function. + """ + h = self.outer.copy() + h.update(self.inner.digest()) + return h.digest() + + def hexdigest(self): + """Like digest(), but returns a string of hexadecimal digits instead. 
+ """ + return "".join([hex(ord(x))[2:].zfill(2) + for x in tuple(self.digest())]) diff --git a/gam/gdata/tlslite/messages.py b/gam/gdata/tlslite/messages.py new file mode 100755 index 00000000000..afccc793a56 --- /dev/null +++ b/gam/gdata/tlslite/messages.py @@ -0,0 +1,561 @@ +"""Classes representing TLS messages.""" + +from utils.compat import * +from utils.cryptomath import * +from errors import * +from utils.codec import * +from constants import * +from X509 import X509 +from X509CertChain import X509CertChain + +import sha +import md5 + +class RecordHeader3: + def __init__(self): + self.type = 0 + self.version = (0,0) + self.length = 0 + self.ssl2 = False + + def create(self, version, type, length): + self.type = type + self.version = version + self.length = length + return self + + def write(self): + w = Writer(5) + w.add(self.type, 1) + w.add(self.version[0], 1) + w.add(self.version[1], 1) + w.add(self.length, 2) + return w.bytes + + def parse(self, p): + self.type = p.get(1) + self.version = (p.get(1), p.get(1)) + self.length = p.get(2) + self.ssl2 = False + return self + +class RecordHeader2: + def __init__(self): + self.type = 0 + self.version = (0,0) + self.length = 0 + self.ssl2 = True + + def parse(self, p): + if p.get(1)!=128: + raise SyntaxError() + self.type = ContentType.handshake + self.version = (2,0) + #We don't support 2-byte-length-headers; could be a problem + self.length = p.get(1) + return self + + +class Msg: + def preWrite(self, trial): + if trial: + w = Writer() + else: + length = self.write(True) + w = Writer(length) + return w + + def postWrite(self, w, trial): + if trial: + return w.index + else: + return w.bytes + +class Alert(Msg): + def __init__(self): + self.contentType = ContentType.alert + self.level = 0 + self.description = 0 + + def create(self, description, level=AlertLevel.fatal): + self.level = level + self.description = description + return self + + def parse(self, p): + p.setLengthCheck(2) + self.level = p.get(1) + self.description = p.get(1) + p.stopLengthCheck() + return self + + def write(self): + w = Writer(2) + w.add(self.level, 1) + w.add(self.description, 1) + return w.bytes + + +class HandshakeMsg(Msg): + def preWrite(self, handshakeType, trial): + if trial: + w = Writer() + w.add(handshakeType, 1) + w.add(0, 3) + else: + length = self.write(True) + w = Writer(length) + w.add(handshakeType, 1) + w.add(length-4, 3) + return w + + +class ClientHello(HandshakeMsg): + def __init__(self, ssl2=False): + self.contentType = ContentType.handshake + self.ssl2 = ssl2 + self.client_version = (0,0) + self.random = createByteArrayZeros(32) + self.session_id = createByteArraySequence([]) + self.cipher_suites = [] # a list of 16-bit values + self.certificate_types = [CertificateType.x509] + self.compression_methods = [] # a list of 8-bit values + self.srp_username = None # a string + + def create(self, version, random, session_id, cipher_suites, + certificate_types=None, srp_username=None): + self.client_version = version + self.random = random + self.session_id = session_id + self.cipher_suites = cipher_suites + self.certificate_types = certificate_types + self.compression_methods = [0] + self.srp_username = srp_username + return self + + def parse(self, p): + if self.ssl2: + self.client_version = (p.get(1), p.get(1)) + cipherSpecsLength = p.get(2) + sessionIDLength = p.get(2) + randomLength = p.get(2) + self.cipher_suites = p.getFixList(3, int(cipherSpecsLength/3)) + self.session_id = p.getFixBytes(sessionIDLength) + self.random = 
p.getFixBytes(randomLength) + if len(self.random) < 32: + zeroBytes = 32-len(self.random) + self.random = createByteArrayZeros(zeroBytes) + self.random + self.compression_methods = [0]#Fake this value + + #We're not doing a stopLengthCheck() for SSLv2, oh well.. + else: + p.startLengthCheck(3) + self.client_version = (p.get(1), p.get(1)) + self.random = p.getFixBytes(32) + self.session_id = p.getVarBytes(1) + self.cipher_suites = p.getVarList(2, 2) + self.compression_methods = p.getVarList(1, 1) + if not p.atLengthCheck(): + totalExtLength = p.get(2) + soFar = 0 + while soFar != totalExtLength: + extType = p.get(2) + extLength = p.get(2) + if extType == 6: + self.srp_username = bytesToString(p.getVarBytes(1)) + elif extType == 7: + self.certificate_types = p.getVarList(1, 1) + else: + p.getFixBytes(extLength) + soFar += 4 + extLength + p.stopLengthCheck() + return self + + def write(self, trial=False): + w = HandshakeMsg.preWrite(self, HandshakeType.client_hello, trial) + w.add(self.client_version[0], 1) + w.add(self.client_version[1], 1) + w.addFixSeq(self.random, 1) + w.addVarSeq(self.session_id, 1, 1) + w.addVarSeq(self.cipher_suites, 2, 2) + w.addVarSeq(self.compression_methods, 1, 1) + + extLength = 0 + if self.certificate_types and self.certificate_types != \ + [CertificateType.x509]: + extLength += 5 + len(self.certificate_types) + if self.srp_username: + extLength += 5 + len(self.srp_username) + if extLength > 0: + w.add(extLength, 2) + + if self.certificate_types and self.certificate_types != \ + [CertificateType.x509]: + w.add(7, 2) + w.add(len(self.certificate_types)+1, 2) + w.addVarSeq(self.certificate_types, 1, 1) + if self.srp_username: + w.add(6, 2) + w.add(len(self.srp_username)+1, 2) + w.addVarSeq(stringToBytes(self.srp_username), 1, 1) + + return HandshakeMsg.postWrite(self, w, trial) + + +class ServerHello(HandshakeMsg): + def __init__(self): + self.contentType = ContentType.handshake + self.server_version = (0,0) + self.random = createByteArrayZeros(32) + self.session_id = createByteArraySequence([]) + self.cipher_suite = 0 + self.certificate_type = CertificateType.x509 + self.compression_method = 0 + + def create(self, version, random, session_id, cipher_suite, + certificate_type): + self.server_version = version + self.random = random + self.session_id = session_id + self.cipher_suite = cipher_suite + self.certificate_type = certificate_type + self.compression_method = 0 + return self + + def parse(self, p): + p.startLengthCheck(3) + self.server_version = (p.get(1), p.get(1)) + self.random = p.getFixBytes(32) + self.session_id = p.getVarBytes(1) + self.cipher_suite = p.get(2) + self.compression_method = p.get(1) + if not p.atLengthCheck(): + totalExtLength = p.get(2) + soFar = 0 + while soFar != totalExtLength: + extType = p.get(2) + extLength = p.get(2) + if extType == 7: + self.certificate_type = p.get(1) + else: + p.getFixBytes(extLength) + soFar += 4 + extLength + p.stopLengthCheck() + return self + + def write(self, trial=False): + w = HandshakeMsg.preWrite(self, HandshakeType.server_hello, trial) + w.add(self.server_version[0], 1) + w.add(self.server_version[1], 1) + w.addFixSeq(self.random, 1) + w.addVarSeq(self.session_id, 1, 1) + w.add(self.cipher_suite, 2) + w.add(self.compression_method, 1) + + extLength = 0 + if self.certificate_type and self.certificate_type != \ + CertificateType.x509: + extLength += 5 + + if extLength != 0: + w.add(extLength, 2) + + if self.certificate_type and self.certificate_type != \ + CertificateType.x509: + w.add(7, 2) + w.add(1, 
2) + w.add(self.certificate_type, 1) + + return HandshakeMsg.postWrite(self, w, trial) + +class Certificate(HandshakeMsg): + def __init__(self, certificateType): + self.certificateType = certificateType + self.contentType = ContentType.handshake + self.certChain = None + + def create(self, certChain): + self.certChain = certChain + return self + + def parse(self, p): + p.startLengthCheck(3) + if self.certificateType == CertificateType.x509: + chainLength = p.get(3) + index = 0 + certificate_list = [] + while index != chainLength: + certBytes = p.getVarBytes(3) + x509 = X509() + x509.parseBinary(certBytes) + certificate_list.append(x509) + index += len(certBytes)+3 + if certificate_list: + self.certChain = X509CertChain(certificate_list) + elif self.certificateType == CertificateType.cryptoID: + s = bytesToString(p.getVarBytes(2)) + if s: + try: + import cryptoIDlib.CertChain + except ImportError: + raise SyntaxError(\ + "cryptoID cert chain received, cryptoIDlib not present") + self.certChain = cryptoIDlib.CertChain.CertChain().parse(s) + else: + raise AssertionError() + + p.stopLengthCheck() + return self + + def write(self, trial=False): + w = HandshakeMsg.preWrite(self, HandshakeType.certificate, trial) + if self.certificateType == CertificateType.x509: + chainLength = 0 + if self.certChain: + certificate_list = self.certChain.x509List + else: + certificate_list = [] + #determine length + for cert in certificate_list: + bytes = cert.writeBytes() + chainLength += len(bytes)+3 + #add bytes + w.add(chainLength, 3) + for cert in certificate_list: + bytes = cert.writeBytes() + w.addVarSeq(bytes, 1, 3) + elif self.certificateType == CertificateType.cryptoID: + if self.certChain: + bytes = stringToBytes(self.certChain.write()) + else: + bytes = createByteArraySequence([]) + w.addVarSeq(bytes, 1, 2) + else: + raise AssertionError() + return HandshakeMsg.postWrite(self, w, trial) + +class CertificateRequest(HandshakeMsg): + def __init__(self): + self.contentType = ContentType.handshake + self.certificate_types = [] + #treat as opaque bytes for now + self.certificate_authorities = createByteArraySequence([]) + + def create(self, certificate_types, certificate_authorities): + self.certificate_types = certificate_types + self.certificate_authorities = certificate_authorities + return self + + def parse(self, p): + p.startLengthCheck(3) + self.certificate_types = p.getVarList(1, 1) + self.certificate_authorities = p.getVarBytes(2) + p.stopLengthCheck() + return self + + def write(self, trial=False): + w = HandshakeMsg.preWrite(self, HandshakeType.certificate_request, + trial) + w.addVarSeq(self.certificate_types, 1, 1) + w.addVarSeq(self.certificate_authorities, 1, 2) + return HandshakeMsg.postWrite(self, w, trial) + +class ServerKeyExchange(HandshakeMsg): + def __init__(self, cipherSuite): + self.cipherSuite = cipherSuite + self.contentType = ContentType.handshake + self.srp_N = 0L + self.srp_g = 0L + self.srp_s = createByteArraySequence([]) + self.srp_B = 0L + self.signature = createByteArraySequence([]) + + def createSRP(self, srp_N, srp_g, srp_s, srp_B): + self.srp_N = srp_N + self.srp_g = srp_g + self.srp_s = srp_s + self.srp_B = srp_B + return self + + def parse(self, p): + p.startLengthCheck(3) + self.srp_N = bytesToNumber(p.getVarBytes(2)) + self.srp_g = bytesToNumber(p.getVarBytes(2)) + self.srp_s = p.getVarBytes(1) + self.srp_B = bytesToNumber(p.getVarBytes(2)) + if self.cipherSuite in CipherSuite.srpRsaSuites: + self.signature = p.getVarBytes(2) + p.stopLengthCheck() + return self + + def 
write(self, trial=False): + w = HandshakeMsg.preWrite(self, HandshakeType.server_key_exchange, + trial) + w.addVarSeq(numberToBytes(self.srp_N), 1, 2) + w.addVarSeq(numberToBytes(self.srp_g), 1, 2) + w.addVarSeq(self.srp_s, 1, 1) + w.addVarSeq(numberToBytes(self.srp_B), 1, 2) + if self.cipherSuite in CipherSuite.srpRsaSuites: + w.addVarSeq(self.signature, 1, 2) + return HandshakeMsg.postWrite(self, w, trial) + + def hash(self, clientRandom, serverRandom): + oldCipherSuite = self.cipherSuite + self.cipherSuite = None + try: + bytes = clientRandom + serverRandom + self.write()[4:] + s = bytesToString(bytes) + return stringToBytes(md5.md5(s).digest() + sha.sha(s).digest()) + finally: + self.cipherSuite = oldCipherSuite + +class ServerHelloDone(HandshakeMsg): + def __init__(self): + self.contentType = ContentType.handshake + + def create(self): + return self + + def parse(self, p): + p.startLengthCheck(3) + p.stopLengthCheck() + return self + + def write(self, trial=False): + w = HandshakeMsg.preWrite(self, HandshakeType.server_hello_done, trial) + return HandshakeMsg.postWrite(self, w, trial) + +class ClientKeyExchange(HandshakeMsg): + def __init__(self, cipherSuite, version=None): + self.cipherSuite = cipherSuite + self.version = version + self.contentType = ContentType.handshake + self.srp_A = 0 + self.encryptedPreMasterSecret = createByteArraySequence([]) + + def createSRP(self, srp_A): + self.srp_A = srp_A + return self + + def createRSA(self, encryptedPreMasterSecret): + self.encryptedPreMasterSecret = encryptedPreMasterSecret + return self + + def parse(self, p): + p.startLengthCheck(3) + if self.cipherSuite in CipherSuite.srpSuites + \ + CipherSuite.srpRsaSuites: + self.srp_A = bytesToNumber(p.getVarBytes(2)) + elif self.cipherSuite in CipherSuite.rsaSuites: + if self.version in ((3,1), (3,2)): + self.encryptedPreMasterSecret = p.getVarBytes(2) + elif self.version == (3,0): + self.encryptedPreMasterSecret = \ + p.getFixBytes(len(p.bytes)-p.index) + else: + raise AssertionError() + else: + raise AssertionError() + p.stopLengthCheck() + return self + + def write(self, trial=False): + w = HandshakeMsg.preWrite(self, HandshakeType.client_key_exchange, + trial) + if self.cipherSuite in CipherSuite.srpSuites + \ + CipherSuite.srpRsaSuites: + w.addVarSeq(numberToBytes(self.srp_A), 1, 2) + elif self.cipherSuite in CipherSuite.rsaSuites: + if self.version in ((3,1), (3,2)): + w.addVarSeq(self.encryptedPreMasterSecret, 1, 2) + elif self.version == (3,0): + w.addFixSeq(self.encryptedPreMasterSecret, 1) + else: + raise AssertionError() + else: + raise AssertionError() + return HandshakeMsg.postWrite(self, w, trial) + +class CertificateVerify(HandshakeMsg): + def __init__(self): + self.contentType = ContentType.handshake + self.signature = createByteArraySequence([]) + + def create(self, signature): + self.signature = signature + return self + + def parse(self, p): + p.startLengthCheck(3) + self.signature = p.getVarBytes(2) + p.stopLengthCheck() + return self + + def write(self, trial=False): + w = HandshakeMsg.preWrite(self, HandshakeType.certificate_verify, + trial) + w.addVarSeq(self.signature, 1, 2) + return HandshakeMsg.postWrite(self, w, trial) + +class ChangeCipherSpec(Msg): + def __init__(self): + self.contentType = ContentType.change_cipher_spec + self.type = 1 + + def create(self): + self.type = 1 + return self + + def parse(self, p): + p.setLengthCheck(1) + self.type = p.get(1) + p.stopLengthCheck() + return self + + def write(self, trial=False): + w = Msg.preWrite(self, trial) + 
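#Note: the change_cipher_spec body is a single byte whose value is always 1,
#which is why create() and parse() above only ever deal with self.type.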
w.add(self.type,1) + return Msg.postWrite(self, w, trial) + + +class Finished(HandshakeMsg): + def __init__(self, version): + self.contentType = ContentType.handshake + self.version = version + self.verify_data = createByteArraySequence([]) + + def create(self, verify_data): + self.verify_data = verify_data + return self + + def parse(self, p): + p.startLengthCheck(3) + if self.version == (3,0): + self.verify_data = p.getFixBytes(36) + elif self.version in ((3,1), (3,2)): + self.verify_data = p.getFixBytes(12) + else: + raise AssertionError() + p.stopLengthCheck() + return self + + def write(self, trial=False): + w = HandshakeMsg.preWrite(self, HandshakeType.finished, trial) + w.addFixSeq(self.verify_data, 1) + return HandshakeMsg.postWrite(self, w, trial) + +class ApplicationData(Msg): + def __init__(self): + self.contentType = ContentType.application_data + self.bytes = createByteArraySequence([]) + + def create(self, bytes): + self.bytes = bytes + return self + + def parse(self, p): + self.bytes = p.bytes + return self + + def write(self): + return self.bytes \ No newline at end of file diff --git a/gam/gdata/tlslite/utils/AES.py b/gam/gdata/tlslite/utils/AES.py new file mode 100755 index 00000000000..8413f4c1093 --- /dev/null +++ b/gam/gdata/tlslite/utils/AES.py @@ -0,0 +1,31 @@ +"""Abstract class for AES.""" + +class AES: + def __init__(self, key, mode, IV, implementation): + if len(key) not in (16, 24, 32): + raise AssertionError() + if mode != 2: + raise AssertionError() + if len(IV) != 16: + raise AssertionError() + self.isBlockCipher = True + self.block_size = 16 + self.implementation = implementation + if len(key)==16: + self.name = "aes128" + elif len(key)==24: + self.name = "aes192" + elif len(key)==32: + self.name = "aes256" + else: + raise AssertionError() + + #CBC-Mode encryption, returns ciphertext + #WARNING: *MAY* modify the input as well + def encrypt(self, plaintext): + assert(len(plaintext) % 16 == 0) + + #CBC-Mode decryption, returns plaintext + #WARNING: *MAY* modify the input as well + def decrypt(self, ciphertext): + assert(len(ciphertext) % 16 == 0) \ No newline at end of file diff --git a/gam/gdata/tlslite/utils/ASN1Parser.py b/gam/gdata/tlslite/utils/ASN1Parser.py new file mode 100755 index 00000000000..16b50f29cde --- /dev/null +++ b/gam/gdata/tlslite/utils/ASN1Parser.py @@ -0,0 +1,34 @@ +"""Class for parsing ASN.1""" +from compat import * +from codec import * + +#Takes a byte array which has a DER TLV field at its head +class ASN1Parser: + def __init__(self, bytes): + p = Parser(bytes) + p.get(1) #skip Type + + #Get Length + self.length = self._getASN1Length(p) + + #Get Value + self.value = p.getFixBytes(self.length) + + #Assuming this is a sequence... 
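The constructor above reads one DER TLV (type-length-value) field: it skips the type octet, decodes the definite length, and keeps the value bytes; getChild below then re-parses that value as a sequence of nested TLVs. For context, DER encodes lengths up to 127 in a single octet and longer lengths as 0x80|n followed by n length octets. A small stand-alone sketch of that rule; the helper name derLength is illustrative, not part of this patch, and it assumes indexing the input yields integers, as array('B') does:

def derLength(data, index):
    #Returns (length, index of first value byte) for a DER definite length
    first = data[index]
    if first <= 0x7F:
        return first, index + 1
    numLengthBytes = first & 0x7F
    value = 0
    for b in data[index + 1 : index + 1 + numLengthBytes]:
        value = (value << 8) | b
    return value, index + 1 + numLengthBytes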
+ def getChild(self, which): + p = Parser(self.value) + for x in range(which+1): + markIndex = p.index + p.get(1) #skip Type + length = self._getASN1Length(p) + p.getFixBytes(length) + return ASN1Parser(p.bytes[markIndex : p.index]) + + #Decode the ASN.1 DER length field + def _getASN1Length(self, p): + firstLength = p.get(1) + if firstLength<=127: + return firstLength + else: + lengthLength = firstLength & 0x7F + return p.get(lengthLength) diff --git a/gam/gdata/tlslite/utils/ASN1Parser.pyc b/gam/gdata/tlslite/utils/ASN1Parser.pyc new file mode 100644 index 0000000000000000000000000000000000000000..567303d78bd3b83cbe488126cf80159429b07c14 GIT binary patch literal 1548 zcmcgsQEL-H5T3nD(k5+`_N6a^5noOrIZ`Nyh}57L^B*TMt$r?#`}~kga4EHb8ZR+ODtR2rR+Iqg3XW21@9`M2gWJ4I z^N}4(R#v$iRn}|0IJmT_v2~8WM%g?S}|6G@+ zm;O<4k!MAe7=IK^ySu%;z3%MNj*Gl&%DCh8pZKFBvXO5ylWCj!bE`As-;&+q>~%gg z40Wz;7)tLjL26UA-ECL+#7%)t_%!T*PiE*3rHi*@nMGlBhe$I#ky7Y*b{-M9g5CXI#0AAs%jB@53jOsi|7XdmF z0CNEF0f-E+Ory>QCJ=3MJD>ZL&T&cO5xMOmpgAcf-L5o1jrm8tD<)JYv;pWja4MX`8 z3BzfT%rg!H01vicaHCt8ch^YK7{q2^8mhU`Y#mpH0fA+WXOk6#7OAO(Z^8P{n*e0wjcJBajfm3FmxhEuquJw@EIw@7&u> zBMV1JB>$3s0Y3nqXU=w76!8U%Bs-bg-J5yl*_qi3|6E&{{Qk#YU#34H{(pnveuhXz zdXPraKn6w*rdm*Gp$tM9hSCeg*qW5jb!SZmbs09K*ASD|#RhVO_ZxC0aw+n>wszwpyC*ySlcuGBSs%byf^jCX#p z<7%4V@$7Hua%FV7VIE);YfmxUIfO-kK&O%3AdOrtOD|yffR8xSP?egbHC5_@5V_Qt zhKTSGTe>V)MtaMVHg&Zrc13JcE-~2z!qy6WXBg5a9F7LY4u<;a({L>$oeScy%9~{Nc|%NlF|NIEFAM< z;bGla-&OhiTZuFL^Aaunwcg4Go8>7|cu`TlGxPH7$KCx|q@Q7OR{uD-(vDcj6G6R%g~N(ZcE%9)y2m(&CJ8hJMAzp z>^LGx3&IpVhON&r9G#14nr3jvw0MzC?{wa_ajcCfj)z5hHo#-V{EgTrqW37+*P|_p zk0=OEE&Ekyj^Bg*p~W{}dJS8(|MbzJ%&7wcX_&1>Gt}SJ=IUzLY;>yT%_5KEwCGQx z&~h(^3EC+$zWNB&D^UCNXk2aBEt1rxo|&|xpSaue4A#?9?~4pa`TnY6_-m$$V{DCJ NC0H@*K_gfX{staQT)zMS literal 0 HcmV?d00001 diff --git a/gam/gdata/tlslite/utils/PyCrypto_TripleDES.py b/gam/gdata/tlslite/utils/PyCrypto_TripleDES.py new file mode 100755 index 00000000000..8c22bb80a57 --- /dev/null +++ b/gam/gdata/tlslite/utils/PyCrypto_TripleDES.py @@ -0,0 +1,22 @@ +"""PyCrypto 3DES implementation.""" + +from cryptomath import * +from TripleDES import * + +if pycryptoLoaded: + import Crypto.Cipher.DES3 + + def new(key, mode, IV): + return PyCrypto_TripleDES(key, mode, IV) + + class PyCrypto_TripleDES(TripleDES): + + def __init__(self, key, mode, IV): + TripleDES.__init__(self, key, mode, IV, "pycrypto") + self.context = Crypto.Cipher.DES3.new(key, mode, IV) + + def encrypt(self, plaintext): + return self.context.encrypt(plaintext) + + def decrypt(self, ciphertext): + return self.context.decrypt(ciphertext) \ No newline at end of file diff --git a/gam/gdata/tlslite/utils/Python_AES.py b/gam/gdata/tlslite/utils/Python_AES.py new file mode 100755 index 00000000000..657152f8921 --- /dev/null +++ b/gam/gdata/tlslite/utils/Python_AES.py @@ -0,0 +1,68 @@ +"""Pure-Python AES implementation.""" + +from cryptomath import * + +from AES import * +from rijndael import rijndael + +def new(key, mode, IV): + return Python_AES(key, mode, IV) + +class Python_AES(AES): + def __init__(self, key, mode, IV): + AES.__init__(self, key, mode, IV, "python") + self.rijndael = rijndael(key, 16) + self.IV = IV + + def encrypt(self, plaintext): + AES.encrypt(self, plaintext) + + plaintextBytes = stringToBytes(plaintext) + chainBytes = stringToBytes(self.IV) + + #CBC Mode: For each block... 
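#(CBC chaining: each plaintext block is XORed with the previous ciphertext
# block -- the IV for the first block -- before it goes through rijndael, and
# the chaining block saved into self.IV at the end lets a later encrypt()
# call continue the same chain.)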
+ for x in range(len(plaintextBytes)/16): + + #XOR with the chaining block + blockBytes = plaintextBytes[x*16 : (x*16)+16] + for y in range(16): + blockBytes[y] ^= chainBytes[y] + blockString = bytesToString(blockBytes) + + #Encrypt it + encryptedBytes = stringToBytes(self.rijndael.encrypt(blockString)) + + #Overwrite the input with the output + for y in range(16): + plaintextBytes[(x*16)+y] = encryptedBytes[y] + + #Set the next chaining block + chainBytes = encryptedBytes + + self.IV = bytesToString(chainBytes) + return bytesToString(plaintextBytes) + + def decrypt(self, ciphertext): + AES.decrypt(self, ciphertext) + + ciphertextBytes = stringToBytes(ciphertext) + chainBytes = stringToBytes(self.IV) + + #CBC Mode: For each block... + for x in range(len(ciphertextBytes)/16): + + #Decrypt it + blockBytes = ciphertextBytes[x*16 : (x*16)+16] + blockString = bytesToString(blockBytes) + decryptedBytes = stringToBytes(self.rijndael.decrypt(blockString)) + + #XOR with the chaining block and overwrite the input with output + for y in range(16): + decryptedBytes[y] ^= chainBytes[y] + ciphertextBytes[(x*16)+y] = decryptedBytes[y] + + #Set the next chaining block + chainBytes = blockBytes + + self.IV = bytesToString(chainBytes) + return bytesToString(ciphertextBytes) diff --git a/gam/gdata/tlslite/utils/Python_RC4.py b/gam/gdata/tlslite/utils/Python_RC4.py new file mode 100755 index 00000000000..56ce5fb2fc2 --- /dev/null +++ b/gam/gdata/tlslite/utils/Python_RC4.py @@ -0,0 +1,39 @@ +"""Pure-Python RC4 implementation.""" + +from RC4 import RC4 +from cryptomath import * + +def new(key): + return Python_RC4(key) + +class Python_RC4(RC4): + def __init__(self, key): + RC4.__init__(self, key, "python") + keyBytes = stringToBytes(key) + S = [i for i in range(256)] + j = 0 + for i in range(256): + j = (j + S[i] + keyBytes[i % len(keyBytes)]) % 256 + S[i], S[j] = S[j], S[i] + + self.S = S + self.i = 0 + self.j = 0 + + def encrypt(self, plaintext): + plaintextBytes = stringToBytes(plaintext) + S = self.S + i = self.i + j = self.j + for x in range(len(plaintextBytes)): + i = (i + 1) % 256 + j = (j + S[i]) % 256 + S[i], S[j] = S[j], S[i] + t = (S[i] + S[j]) % 256 + plaintextBytes[x] ^= S[t] + self.i = i + self.j = j + return bytesToString(plaintextBytes) + + def decrypt(self, ciphertext): + return self.encrypt(ciphertext) diff --git a/gam/gdata/tlslite/utils/Python_RSAKey.py b/gam/gdata/tlslite/utils/Python_RSAKey.py new file mode 100755 index 00000000000..2c469b572c7 --- /dev/null +++ b/gam/gdata/tlslite/utils/Python_RSAKey.py @@ -0,0 +1,209 @@ +"""Pure-Python RSA implementation.""" + +from cryptomath import * +import xmltools +from ASN1Parser import ASN1Parser +from RSAKey import * + +class Python_RSAKey(RSAKey): + def __init__(self, n=0, e=0, d=0, p=0, q=0, dP=0, dQ=0, qInv=0): + if (n and not e) or (e and not n): + raise AssertionError() + self.n = n + self.e = e + self.d = d + self.p = p + self.q = q + self.dP = dP + self.dQ = dQ + self.qInv = qInv + self.blinder = 0 + self.unblinder = 0 + + def hasPrivateKey(self): + return self.d != 0 + + def hash(self): + s = self.writeXMLPublicKey('\t\t') + return hashAndBase64(s.strip()) + + def _rawPrivateKeyOp(self, m): + #Create blinding values, on the first pass: + if not self.blinder: + self.unblinder = getRandomNumber(2, self.n) + self.blinder = powMod(invMod(self.unblinder, self.n), self.e, + self.n) + + #Blind the input + m = (m * self.blinder) % self.n + + #Perform the RSA operation + c = self._rawPrivateKeyOpHelper(m) + + #Unblind the output + c = (c * 
self.unblinder) % self.n + + #Update blinding values + self.blinder = (self.blinder * self.blinder) % self.n + self.unblinder = (self.unblinder * self.unblinder) % self.n + + #Return the output + return c + + + def _rawPrivateKeyOpHelper(self, m): + #Non-CRT version + #c = powMod(m, self.d, self.n) + + #CRT version (~3x faster) + s1 = powMod(m, self.dP, self.p) + s2 = powMod(m, self.dQ, self.q) + h = ((s1 - s2) * self.qInv) % self.p + c = s2 + self.q * h + return c + + def _rawPublicKeyOp(self, c): + m = powMod(c, self.e, self.n) + return m + + def acceptsPassword(self): return False + + def write(self, indent=''): + if self.d: + s = indent+'\n' + else: + s = indent+'\n' + s += indent+'\t%s\n' % numberToBase64(self.n) + s += indent+'\t%s\n' % numberToBase64(self.e) + if self.d: + s += indent+'\t%s\n' % numberToBase64(self.d) + s += indent+'\t

%s\n' % numberToBase64(self.p) + s += indent+'\t%s\n' % numberToBase64(self.q) + s += indent+'\t%s\n' % numberToBase64(self.dP) + s += indent+'\t%s\n' % numberToBase64(self.dQ) + s += indent+'\t%s\n' % numberToBase64(self.qInv) + s += indent+'
    ' + else: + s += indent+'' + #Only add \n if part of a larger structure + if indent != '': + s += '\n' + return s + + def writeXMLPublicKey(self, indent=''): + return Python_RSAKey(self.n, self.e).write(indent) + + def generate(bits): + key = Python_RSAKey() + p = getRandomPrime(bits/2, False) + q = getRandomPrime(bits/2, False) + t = lcm(p-1, q-1) + key.n = p * q + key.e = 3L #Needed to be long, for Java + key.d = invMod(key.e, t) + key.p = p + key.q = q + key.dP = key.d % (p-1) + key.dQ = key.d % (q-1) + key.qInv = invMod(q, p) + return key + generate = staticmethod(generate) + + def parsePEM(s, passwordCallback=None): + """Parse a string containing a or , or + PEM-encoded key.""" + + start = s.find("-----BEGIN PRIVATE KEY-----") + if start != -1: + end = s.find("-----END PRIVATE KEY-----") + if end == -1: + raise SyntaxError("Missing PEM Postfix") + s = s[start+len("-----BEGIN PRIVATE KEY -----") : end] + bytes = base64ToBytes(s) + return Python_RSAKey._parsePKCS8(bytes) + else: + start = s.find("-----BEGIN RSA PRIVATE KEY-----") + if start != -1: + end = s.find("-----END RSA PRIVATE KEY-----") + if end == -1: + raise SyntaxError("Missing PEM Postfix") + s = s[start+len("-----BEGIN RSA PRIVATE KEY -----") : end] + bytes = base64ToBytes(s) + return Python_RSAKey._parseSSLeay(bytes) + raise SyntaxError("Missing PEM Prefix") + parsePEM = staticmethod(parsePEM) + + def parseXML(s): + element = xmltools.parseAndStripWhitespace(s) + return Python_RSAKey._parseXML(element) + parseXML = staticmethod(parseXML) + + def _parsePKCS8(bytes): + p = ASN1Parser(bytes) + + version = p.getChild(0).value[0] + if version != 0: + raise SyntaxError("Unrecognized PKCS8 version") + + rsaOID = p.getChild(1).value + if list(rsaOID) != [6, 9, 42, 134, 72, 134, 247, 13, 1, 1, 1, 5, 0]: + raise SyntaxError("Unrecognized AlgorithmIdentifier") + + #Get the privateKey + privateKeyP = p.getChild(2) + + #Adjust for OCTET STRING encapsulation + privateKeyP = ASN1Parser(privateKeyP.value) + + return Python_RSAKey._parseASN1PrivateKey(privateKeyP) + _parsePKCS8 = staticmethod(_parsePKCS8) + + def _parseSSLeay(bytes): + privateKeyP = ASN1Parser(bytes) + return Python_RSAKey._parseASN1PrivateKey(privateKeyP) + _parseSSLeay = staticmethod(_parseSSLeay) + + def _parseASN1PrivateKey(privateKeyP): + version = privateKeyP.getChild(0).value[0] + if version != 0: + raise SyntaxError("Unrecognized RSAPrivateKey version") + n = bytesToNumber(privateKeyP.getChild(1).value) + e = bytesToNumber(privateKeyP.getChild(2).value) + d = bytesToNumber(privateKeyP.getChild(3).value) + p = bytesToNumber(privateKeyP.getChild(4).value) + q = bytesToNumber(privateKeyP.getChild(5).value) + dP = bytesToNumber(privateKeyP.getChild(6).value) + dQ = bytesToNumber(privateKeyP.getChild(7).value) + qInv = bytesToNumber(privateKeyP.getChild(8).value) + return Python_RSAKey(n, e, d, p, q, dP, dQ, qInv) + _parseASN1PrivateKey = staticmethod(_parseASN1PrivateKey) + + def _parseXML(element): + try: + xmltools.checkName(element, "privateKey") + except SyntaxError: + xmltools.checkName(element, "publicKey") + + #Parse attributes + xmltools.getReqAttribute(element, "xmlns", "http://trevp.net/rsa\Z") + xmltools.checkNoMoreAttributes(element) + + #Parse public values ( and ) + n = base64ToNumber(xmltools.getText(xmltools.getChild(element, 0, "n"), xmltools.base64RegEx)) + e = base64ToNumber(xmltools.getText(xmltools.getChild(element, 1, "e"), xmltools.base64RegEx)) + d = 0 + p = 0 + q = 0 + dP = 0 + dQ = 0 + qInv = 0 + #Parse private values, if present + if 
element.childNodes.length>=3: + d = base64ToNumber(xmltools.getText(xmltools.getChild(element, 2, "d"), xmltools.base64RegEx)) + p = base64ToNumber(xmltools.getText(xmltools.getChild(element, 3, "p"), xmltools.base64RegEx)) + q = base64ToNumber(xmltools.getText(xmltools.getChild(element, 4, "q"), xmltools.base64RegEx)) + dP = base64ToNumber(xmltools.getText(xmltools.getChild(element, 5, "dP"), xmltools.base64RegEx)) + dQ = base64ToNumber(xmltools.getText(xmltools.getChild(element, 6, "dQ"), xmltools.base64RegEx)) + qInv = base64ToNumber(xmltools.getText(xmltools.getLastChild(element, 7, "qInv"), xmltools.base64RegEx)) + return Python_RSAKey(n, e, d, p, q, dP, dQ, qInv) + _parseXML = staticmethod(_parseXML) diff --git a/gam/gdata/tlslite/utils/Python_RSAKey.pyc b/gam/gdata/tlslite/utils/Python_RSAKey.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0d2b59529b0f47b4266cffbb4a33f85884ce09ed GIT binary patch literal 8410 zcmd5>OK%(36~04CvPhA7M{=CBrClUojL3q6=2=;Y;;4z?m|;dvlL#$XiZhnRB8T+c zk!=<6rU{B}y6(E0{)3`GcI`juw%aZWbkly{cV|ewjI1O?)SS8Z-1m9kGsS;b%k_i5 zH9IEzE8_h=9@_*6jM)bCOi(ag&)jFJXu1WnT@ZH6bc?1}G}}evWxdAGYutom<^lRn zm~V~w#+a|R$2oATeu*>x1fY44ge%R%bgv)R{H={z)EjieUKpqCH0sAUqW|Hq?xAB! zaJ}KmWt2Cz8aJEm#D+;)4jp^#zb6!ecrl5}$EAxA9n<+I;Pqq-wq{n2No^SMn-R6}(ch@^_wz)$40xS{{3_ z09aH3a7+cjaTNe3Q~7u&cZ6-pU&{-h6XqAW>bh zNvBr7`P%Ak&`#T{Y1ek6G+aGMqpn>&Ce1f6fUCAzQ5>Z$PzgM`3}Cz~Ud1b&`gN!) ze0Y3ulo|p8Ka>*hQv;8XmOXVqDVI+oU%-jDrM=U)!&(mK)*TIC z3vh*;HxW-&rzrasfOH06H%$F@9Q1pQgWgV<|H-|6pkfq1Mp4?BE}^NF zw4aQG=hMN*VRsNF;`@k!J+ZNK0c@N-V@p2<&MpF&LdkokP{AyYhcC*?}$7`Zax!nVR0{W!1#z!FuHj+nBm3cXON3=aDkfMsLPg7 z+ngDbRg*0fXJpXSg@(YVSjv9do9^|7NV@00gog8{tN$XFc?*x_4$b&wkH5Jy6y#+z zkEnEIop>*W)KbfRoLYy0>PX`;=O=bfwJ0r+V^N|hUW^u%v zkHpJ#{i5Bq;e{G+YHqvJ2?wcdwyk~APl8RLEFSIHsqgYmX_bntPz@E<6KjbRw`?9v(9_j@bSEl{KB!}6L}N#XE@v~ zdMR%jKh-puH%**sD&4u8UE533!F#K#X%apj+=#<;HL>k$>t*|zy5}PHeZ8strL}nd z6}z??W3;P>s)xgRpn5Q@4^$rv>yK1_ggOm*X)S2F0nHKSGl%&M7&__F8sjoIj0HDx zZFO|cbxW54On#Jg|EEA$DnVIcn?e|ch_PJB{l4?=N20jr25_AREv^^y3P+m$+^v^%0)) zEpezU79%CMhHz7wPRnO=RXB31&f_!`_(T*{+@2ZO)u|Zbg9LUM1R(D~zU)M))fD^T;ppBX*PWg}b9G(W4dXCDX#X4R33o{RL6{ftTZRBL z9^%Di5x;pS&c~SlUvzig0t_C;qhJAz<3ZjeC%`C3oY==Lb(3NNFM%Q14OPYyLW8?I zkjvfW zC_`#!ckO)uWjJc>8gD1@ZmrWtW(2oD6x+2UUSPe}Pf#D)#@A6V*RsFn=Dn3L?(~B& zs6iZVScYyZ{QF?@qdSdS)4%h}jr*ImyPIDLtE7u%;HBzjeSRVp_TMG(t$ajlk3|b<&$vfGketX9&VWveX^zI(dpP8lMq{MDETBM zd4LQ+N>Xnff~-$nLP2^5^G^-k(e)6HRVf%gzdvR z#aON*<&|KNMMt;W-R@4iv!7ix+es?z46y+Y*x>>fq?c~1vy4); zaH4+3eVlxTadx*9suAwv&v7?028dG9e!pu|hThr%Zr@wHa(}Uh`?(#oJE0>mvm1_} zxU@UBL`STgP3Uri7fC2=;tyDUbTg*`C(9Tu+-A!^06>b&oWLo;sX(7)ws5mX;l<35 zc(Pu%Xxm{_MkdM)kTJsmm=;JU>=Zh{lEH2ve~LUKGmL=9ks&*V8QedTiJsYYLF1n^ zbLG^`Rbpu9uuIrFEzRngpgPava&0o!pT|kq>F>tTZ(%hg>b1wnY9a3?KNj(Z z7?D>I1JdaC1RoRphk(ixkxI4(aBQxP?rt9`nZ4c}23gUAC`?>lK*1TQn|Y7)_FmL= z>hZYUJqZ09Y@uDt21Pd!wA6geTj+B0L1gpWgXUx<3jro98cA z{!MJ@k9b^Kz^fFNsCtEY?_F;Oseze7hZJCd#z*o+Lwr=e;4HvGNEw)#hjlbsK1Yfy z{`)97)e#9e%>|t84J`9#JejUkoY~;LopPeH%wmt3hjscPMR}3QCza^kZ_<(+)eopk zHpZz)9uSQ15@9GE#oR@Kx)M5P*fK&b;GT)NrzhF06##{QL zB#=u5IVi26lno_~`g6EA@=>(M1>=1YUo~Tpul=$~-Zq}+L9@Up$QLCJ9b(UXNt}2| z8w=UOS1qEzJ%x-O3@7W55J0dJgJq=Akr+seGqLcAr+dnfpX{md6rziHV3-N@vFRBM zKgt*asBa|?OXeZWf645Ro8+Ho?L?oPPR@^wo!$vN34FzgJH=XwNCpcEJRTr+Q8@ErRF^cEeroPAHc&>jg+eBc*7M zth_$>3?pSMXN*!PmPzHavM9gWj?m&%LcULW;WUq`GrE#V5%qyc+NkuZR8pUcA~`t5 zkM8#O4occgXD{sRH`=)PNkQ|hc=%`|MS!$(kcKG_;=1+k^^)yoZ2j@zR`>lA5`XUBoAJk${z10#yTCH9`IOwA4 zlLJ1Zcb|0kUnZaz_G<*!2wo*%3d866qMPM6>rOAkx9mV+N~0f#{@d)cMnKo^(?`mr z`8>eLDXVmDxfY5k9fGBRO?X$QOU2Sy>2hfbuuxg56id?;z6+%yp84!u#rtJ_Es#jH 
zT0y^)5v!9N4$^+Fo$mP*V}%})Bls(PVsm1Hh9f6DufvIxYmRs841x5eU;^p0iZ|<( G3;zcjIC{ 256: + raise ValueError() + self.isBlockCipher = False + self.name = "rc4" + self.implementation = implementation + + def encrypt(self, plaintext): + raise NotImplementedError() + + def decrypt(self, ciphertext): + raise NotImplementedError() \ No newline at end of file diff --git a/gam/gdata/tlslite/utils/RSAKey.py b/gam/gdata/tlslite/utils/RSAKey.py new file mode 100755 index 00000000000..2f5d28680a8 --- /dev/null +++ b/gam/gdata/tlslite/utils/RSAKey.py @@ -0,0 +1,264 @@ +"""Abstract class for RSA.""" + +from cryptomath import * + + +class RSAKey: + """This is an abstract base class for RSA keys. + + Particular implementations of RSA keys, such as + L{OpenSSL_RSAKey.OpenSSL_RSAKey}, + L{Python_RSAKey.Python_RSAKey}, and + L{PyCrypto_RSAKey.PyCrypto_RSAKey}, + inherit from this. + + To create or parse an RSA key, don't use one of these classes + directly. Instead, use the factory functions in + L{tlslite.utils.keyfactory}. + """ + + def __init__(self, n=0, e=0): + """Create a new RSA key. + + If n and e are passed in, the new key will be initialized. + + @type n: int + @param n: RSA modulus. + + @type e: int + @param e: RSA public exponent. + """ + raise NotImplementedError() + + def __len__(self): + """Return the length of this key in bits. + + @rtype: int + """ + return numBits(self.n) + + def hasPrivateKey(self): + """Return whether or not this key has a private component. + + @rtype: bool + """ + raise NotImplementedError() + + def hash(self): + """Return the cryptoID value corresponding to this + key. + + @rtype: str + """ + raise NotImplementedError() + + def getSigningAlgorithm(self): + """Return the cryptoID sigAlgo value corresponding to this key. + + @rtype: str + """ + return "pkcs1-sha1" + + def hashAndSign(self, bytes): + """Hash and sign the passed-in bytes. + + This requires the key to have a private component. It performs + a PKCS1-SHA1 signature on the passed-in data. + + @type bytes: str or L{array.array} of unsigned bytes + @param bytes: The value which will be hashed and signed. + + @rtype: L{array.array} of unsigned bytes. + @return: A PKCS1-SHA1 signature on the passed-in data. + """ + if not isinstance(bytes, type("")): + bytes = bytesToString(bytes) + hashBytes = stringToBytes(sha1(bytes).digest()) + prefixedHashBytes = self._addPKCS1SHA1Prefix(hashBytes) + sigBytes = self.sign(prefixedHashBytes) + return sigBytes + + def hashAndVerify(self, sigBytes, bytes): + """Hash and verify the passed-in bytes with the signature. + + This verifies a PKCS1-SHA1 signature on the passed-in data. + + @type sigBytes: L{array.array} of unsigned bytes + @param sigBytes: A PKCS1-SHA1 signature. + + @type bytes: str or L{array.array} of unsigned bytes + @param bytes: The value which will be hashed and verified. + + @rtype: bool + @return: Whether the signature matches the passed-in data. + """ + if not isinstance(bytes, type("")): + bytes = bytesToString(bytes) + hashBytes = stringToBytes(sha1(bytes).digest()) + prefixedHashBytes = self._addPKCS1SHA1Prefix(hashBytes) + return self.verify(sigBytes, prefixedHashBytes) + + def sign(self, bytes): + """Sign the passed-in bytes. + + This requires the key to have a private component. It performs + a PKCS1 signature on the passed-in data. + + @type bytes: L{array.array} of unsigned bytes + @param bytes: The value which will be signed. + + @rtype: L{array.array} of unsigned bytes. + @return: A PKCS1 signature on the passed-in data. 
+ """ + if not self.hasPrivateKey(): + raise AssertionError() + paddedBytes = self._addPKCS1Padding(bytes, 1) + m = bytesToNumber(paddedBytes) + if m >= self.n: + raise ValueError() + c = self._rawPrivateKeyOp(m) + sigBytes = numberToBytes(c) + return sigBytes + + def verify(self, sigBytes, bytes): + """Verify the passed-in bytes with the signature. + + This verifies a PKCS1 signature on the passed-in data. + + @type sigBytes: L{array.array} of unsigned bytes + @param sigBytes: A PKCS1 signature. + + @type bytes: L{array.array} of unsigned bytes + @param bytes: The value which will be verified. + + @rtype: bool + @return: Whether the signature matches the passed-in data. + """ + paddedBytes = self._addPKCS1Padding(bytes, 1) + c = bytesToNumber(sigBytes) + if c >= self.n: + return False + m = self._rawPublicKeyOp(c) + checkBytes = numberToBytes(m) + return checkBytes == paddedBytes + + def encrypt(self, bytes): + """Encrypt the passed-in bytes. + + This performs PKCS1 encryption of the passed-in data. + + @type bytes: L{array.array} of unsigned bytes + @param bytes: The value which will be encrypted. + + @rtype: L{array.array} of unsigned bytes. + @return: A PKCS1 encryption of the passed-in data. + """ + paddedBytes = self._addPKCS1Padding(bytes, 2) + m = bytesToNumber(paddedBytes) + if m >= self.n: + raise ValueError() + c = self._rawPublicKeyOp(m) + encBytes = numberToBytes(c) + return encBytes + + def decrypt(self, encBytes): + """Decrypt the passed-in bytes. + + This requires the key to have a private component. It performs + PKCS1 decryption of the passed-in data. + + @type encBytes: L{array.array} of unsigned bytes + @param encBytes: The value which will be decrypted. + + @rtype: L{array.array} of unsigned bytes or None. + @return: A PKCS1 decryption of the passed-in data or None if + the data is not properly formatted. + """ + if not self.hasPrivateKey(): + raise AssertionError() + c = bytesToNumber(encBytes) + if c >= self.n: + return None + m = self._rawPrivateKeyOp(c) + decBytes = numberToBytes(m) + if (len(decBytes) != numBytes(self.n)-1): #Check first byte + return None + if decBytes[0] != 2: #Check second byte + return None + for x in range(len(decBytes)-1): #Scan through for zero separator + if decBytes[x]== 0: + break + else: + return None + return decBytes[x+1:] #Return everything after the separator + + def _rawPrivateKeyOp(self, m): + raise NotImplementedError() + + def _rawPublicKeyOp(self, c): + raise NotImplementedError() + + def acceptsPassword(self): + """Return True if the write() method accepts a password for use + in encrypting the private key. + + @rtype: bool + """ + raise NotImplementedError() + + def write(self, password=None): + """Return a string containing the key. + + @rtype: str + @return: A string describing the key, in whichever format (PEM + or XML) is native to the implementation. + """ + raise NotImplementedError() + + def writeXMLPublicKey(self, indent=''): + """Return a string containing the key. + + @rtype: str + @return: A string describing the public key, in XML format. + """ + return Python_RSAKey(self.n, self.e).write(indent) + + def generate(bits): + """Generate a new key with the specified bit length. 
+ + @rtype: L{tlslite.utils.RSAKey.RSAKey} + """ + raise NotImplementedError() + generate = staticmethod(generate) + + + # ************************************************************************** + # Helper Functions for RSA Keys + # ************************************************************************** + + def _addPKCS1SHA1Prefix(self, bytes): + prefixBytes = createByteArraySequence(\ + [48,33,48,9,6,5,43,14,3,2,26,5,0,4,20]) + prefixedBytes = prefixBytes + bytes + return prefixedBytes + + def _addPKCS1Padding(self, bytes, blockType): + padLength = (numBytes(self.n) - (len(bytes)+3)) + if blockType == 1: #Signature padding + pad = [0xFF] * padLength + elif blockType == 2: #Encryption padding + pad = createByteArraySequence([]) + while len(pad) < padLength: + padBytes = getRandomBytes(padLength * 2) + pad = [b for b in padBytes if b != 0] + pad = pad[:padLength] + else: + raise AssertionError() + + #NOTE: To be proper, we should add [0,blockType]. However, + #the zero is lost when the returned padding is converted + #to a number, so we don't even bother with it. Also, + #adding it would cause a misalignment in verify() + padding = createByteArraySequence([blockType] + pad + [0]) + paddedBytes = padding + bytes + return paddedBytes diff --git a/gam/gdata/tlslite/utils/RSAKey.pyc b/gam/gdata/tlslite/utils/RSAKey.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1723c6b5fe97fcae5f592fdc4fc99a4680ad18b8 GIT binary patch literal 10231 zcmc&)&u`qu6`tMIZ%dYA%dwL-HPfWYI+dlWMUut{0?R*|7UmT}Hd) znw+()#=6Amp+GM^_mV>j^w4|JTY&-v+CzZ=Jr(Gs|3D8#&*}HQA-UW{j-tftO5$iZ zoZ-y8`M&qwykYG>b2Ih+@7H~m{nYUP0>qwpJanmNLDvflX$*y{EnBV&Q_c_Zew*R{1iX+3Mv>T&zT9NcyLYc4t0DH z#px$VG&)AFNYMi_GJ+4Q>LvzIWA8V|vSHjD=MY$eflWweQU#NenNq=&WTsUxEtwe= z%t+>l3XVu-Rt2+?nNz`>WR9xfsAT3 zUSdMO-|-SB?Djgkt0Uu?Fpg3u-lBY^@}iUW{kG$!(&qa6H+nj1G_E(Tn3slc_ZG8S z>$|2MM|q{8yS+t>F(~R?N_KlD9@HGV&)yeCZJmV1*-GNBV=&Qd1{<;CC)zXG0U&x_ zg4tsF*~k~2Ada3jP9LRlglh{;buJKX=M#j9_DyGZ$#JemsnK4rC=F24*#h9=WY^j1 zN4^zH7-hpYowO4gz0@~hCtZR}v$}grQX#~mD4&q$MQC2amChkCg76OndyRUP#ee5> zI}Oi?^iDp3Y?556t6L71H3}R|FwvN5n(Dv_qeV_h+F%UMPT1)h(7}q3`HBJxDn+Rsw9h zW|?^c(_4$p)f{3vxRNAsQm1Il7zLm^Tf*N+Qe8)xk|e#3-13i8outd#VY0cqd^z5U zIoM9C;r|`mNlLBfs7m!c6 z0_up*HR8qF@><b(m2*O1Aim+{=yrk_M2`HqfSrfs1G?r{;q$k`0dkPH(Faae@1 z@`m~?>US@qzDr3+A1(zmN*S=%W)nkiHlIY+ieS7D!9j3+6KygP?6fuTmymr?Y=oI2 zgSH1fB_mZ?X6gj5UwMq40Tvb zn?~4*uy~vb%u&1hB8qdk5VhmDC=k&g?;-j!nN2mkUGokKQ<+diblj?3wZc%?Mm-(+D0pd*T0|ad~lQj|NsEu}wE~_JP z6cT-_57U>57K7x8$F_G{?@L?QALI0N0=KJc%_h~m=d545)L1#!cx`pXVZR>KB2BHl zbI?Eaad~0IqR61mJHR)U$MyHUB=L5aB;BJr?MHkgY@AdqIFOmEtn~)yX1TS~4&kHZ z_K%=NQ`$@WmU8zQvkvNVV8$*aqVLZ;tB0B}MZ_OK0P2i(#}K|@8p6-;B43N`qc9BB zkW6VKZkPlBlM=`V*^T%j*UuVCfL>hOAZ($?5S2|Y2xJ|&4C{&B3h&su5MP~m6mA&i zqzf~R90g=w`v~rO(n<#ekuBwDAvw9REJJ~M@YeVU-paA8MnMDPeTALOva8vXmHEnP zT=NxiqlSp}Q&b?as{0Yk(xqA=mXr*LWtjwG6-)xpK&%QW1%hUp?5|~3-G=wM1&#o3 z16+$ZfjQWJJ|NPB0VS{r)ueFX>+*ct>L=F=RG8}8?wnrPm>_`l)`QU zc829*d1#BOv@ozd1M^~eF{ZZ1Rq}>{!We2;;tRZ1G6gnjT;1g^ouf!^gD>RpXO=;* zR6^JTHa-K^9nS6?r2P+RaLE1Pz1G*ZG6&Br$96~$$YPti-4NTW*dJnZA4js)G6Z>H zDAr*yavLMdvk|ZLyPGB(6Z3?93)*DnsV;#R!cZt;sMvex zM+Xd$Hg;Yzuov7`YTGIGEt zgF&!fnmAjQJtW5pdDGy%Jg`InU%&gV|dS??;ZWU_lPX#sI(KZ4FGo zVSztrnHi5k-z8QL*^R??k(rzOIdc!uN?0Zdl%D`5+;1_VKQHdnkGxK*4UI;Ve2}Qe zHNAVI=RS!tiB*cPRk9S{<_n%?@)Q!G7EG1DEuXmjy^ftU^BLF@U0XIChC8d|GqhvDg-!2|ahCIZD_W$X$H2|$6O zguu}0s4*zPt!#3?LgyBWM}|}}C&aZGi-H%i&8_x3=;r z`$)Ekx_*FqSwwy#L8Lj9lP+fmF-l!O>vR#Mj}Z>@ecdw>n8)$aPMidCj*8>40mP%i z*fK=Ta7Pxy-#<9}|Bc?$70V{Ip3Uelh4dy4!et@7KMbVjF(8S(5n}Z?3dJc=CWZZH 
zkGh5AiWtRe{)#}SeiCk$Ua`otA&FFKIJKFIaq8<=UN2q)E%)~8*UySN!ci>Zv2sp; zCCyLI}thmL3vQG-RPfDnz5Pl601iDm_Pzp4^a4aPhaj!~K zY&Tii@!HQH%35S6mRMC;5=cB-jDcM1)Zwzv9Xh*~-GEEI?fw9Xgg$J5m2ONJ1yJ#$ z6gUG_B5;hk*n#{5g;u}qb3QYyj0?~sLZn{RkxqC(XHT6Wj?!MVr~Pm%WB?xL?)I3o zK62lysD`nnFjdY>NLXL^LkYM z8oW8C_!tE4AU*)Wryuy}1D|`~Ll1o7ff9V*fiFAeRBK)~`_>bMqqhejBPvkmerqlK z9@6khCUg%%!3IGSewPXBgd~Re5h3?#)~(>E!UZH^FbzYD_VWgs|5culG;nMKM@tJl ztp#OB(#BgPjBh;!u6iEUmy!_*T4JXEMu#aSt}3<43zZogw&(0QDG~EOkts9)9O=TJ zr=xtD0)+IfJN**ZpanfqQww`zD!MDr5SxUn{X05SEqh2drtWz19@%oD_BI@<-^y{C ztl@$U&Y(7Y^~>*^&Z09|NP78ujMThaRk!vOI>ELTFL02%wV~dM;P~euQ1pkPaX4e@ z6xt$piZ8~lLe33N?O|Ing~jJOui2F)%EM2Y(ASaWwNBn5Zmkv}7TlMRNW>1Oi!KhV zV(S`;M_H%%N{h_f%}(rZZ}8BFcL=dvmoE^+zrycNi-RSSt>7r3z0FOlBkN&Np#<&o<2#Nz=8!(#!Q#^Ez4 z>m&)5UCv@4CCNSTvoDtj;lxsoYhreyIzKl(iQg$)C#DysAI0y9=`+(c{7%hI&rVHG t%uY^Etl5E4eYjVU*r1YpbASy?TV6ZrvCG1}Y?ta4&LbtsC#o~G{{gms&YJ)L literal 0 HcmV?d00001 diff --git a/gam/gdata/tlslite/utils/TripleDES.py b/gam/gdata/tlslite/utils/TripleDES.py new file mode 100755 index 00000000000..2db45888bde --- /dev/null +++ b/gam/gdata/tlslite/utils/TripleDES.py @@ -0,0 +1,26 @@ +"""Abstract class for 3DES.""" + +from compat import * #For True + +class TripleDES: + def __init__(self, key, mode, IV, implementation): + if len(key) != 24: + raise ValueError() + if mode != 2: + raise ValueError() + if len(IV) != 8: + raise ValueError() + self.isBlockCipher = True + self.block_size = 8 + self.implementation = implementation + self.name = "3des" + + #CBC-Mode encryption, returns ciphertext + #WARNING: *MAY* modify the input as well + def encrypt(self, plaintext): + assert(len(plaintext) % 8 == 0) + + #CBC-Mode decryption, returns plaintext + #WARNING: *MAY* modify the input as well + def decrypt(self, ciphertext): + assert(len(ciphertext) % 8 == 0) diff --git a/gam/gdata/tlslite/utils/__init__.py b/gam/gdata/tlslite/utils/__init__.py new file mode 100755 index 00000000000..e96b4bef8a5 --- /dev/null +++ b/gam/gdata/tlslite/utils/__init__.py @@ -0,0 +1,31 @@ +"""Toolkit for crypto and other stuff.""" + +__all__ = ["AES", + "ASN1Parser", + "cipherfactory", + "codec", + "Cryptlib_AES", + "Cryptlib_RC4", + "Cryptlib_TripleDES", + "cryptomath: cryptomath module", + "dateFuncs", + "hmac", + "JCE_RSAKey", + "compat", + "keyfactory", + "OpenSSL_AES", + "OpenSSL_RC4", + "OpenSSL_RSAKey", + "OpenSSL_TripleDES", + "PyCrypto_AES", + "PyCrypto_RC4", + "PyCrypto_RSAKey", + "PyCrypto_TripleDES", + "Python_AES", + "Python_RC4", + "Python_RSAKey", + "RC4", + "rijndael", + "RSAKey", + "TripleDES", + "xmltools"] diff --git a/gam/gdata/tlslite/utils/__init__.pyc b/gam/gdata/tlslite/utils/__init__.pyc new file mode 100644 index 0000000000000000000000000000000000000000..527741ed68af2ac0a4d1e4627462ade990f2f7d9 GIT binary patch literal 849 zcmbVJO>fgc5S=7#+NMoQ%lAj9B`)ERSVi)9kGYp>-U3;fIY zVf+DR9g~6Jd0PX`G0CoWn0gnLh0UiUM0Nw|D0QeB_5#VFMCxA}@p8-Axd;!>lc@J)x`6UGX zQ+MGjHaEuTWklZ0WL}sRDH%^D=own5%(IlwWxxX%q&&$SL8ltzNRV(f-j}AdhCQ9 z*76nEaSd8aEnAuvWtV2%;~H>OM|^Grtc1C~*Vu;4$)6UqFiA;O3muVr>MC1Fax-z6 zUEZ_kdxBEwvgs=CUTMSbs(!|rc-VH*Z*oLCY=l4)3Q_q|Ya#j!!^YSNhyFJnxAm7% zc31dU=0~E9MC;FGJa{uaKOdx}?t#rhuYdO1pYwF2Pul8;l%G?ituI8BL?pzJYn_;P St6{!xux~Hew41HQ|E)hs`OusI literal 0 HcmV?d00001 diff --git a/gam/gdata/tlslite/utils/cipherfactory.py b/gam/gdata/tlslite/utils/cipherfactory.py new file mode 100755 index 00000000000..ccbb6b5ff9b --- /dev/null +++ b/gam/gdata/tlslite/utils/cipherfactory.py @@ -0,0 +1,111 @@ +"""Factory functions for symmetric cryptography.""" + +import os + +import Python_AES +import 
Python_RC4 + +import cryptomath + +tripleDESPresent = False + +if cryptomath.m2cryptoLoaded: + import OpenSSL_AES + import OpenSSL_RC4 + import OpenSSL_TripleDES + tripleDESPresent = True + +if cryptomath.cryptlibpyLoaded: + import Cryptlib_AES + import Cryptlib_RC4 + import Cryptlib_TripleDES + tripleDESPresent = True + +if cryptomath.pycryptoLoaded: + import PyCrypto_AES + import PyCrypto_RC4 + import PyCrypto_TripleDES + tripleDESPresent = True + +# ************************************************************************** +# Factory Functions for AES +# ************************************************************************** + +def createAES(key, IV, implList=None): + """Create a new AES object. + + @type key: str + @param key: A 16, 24, or 32 byte string. + + @type IV: str + @param IV: A 16 byte string + + @rtype: L{tlslite.utils.AES} + @return: An AES object. + """ + if implList == None: + implList = ["cryptlib", "openssl", "pycrypto", "python"] + + for impl in implList: + if impl == "cryptlib" and cryptomath.cryptlibpyLoaded: + return Cryptlib_AES.new(key, 2, IV) + elif impl == "openssl" and cryptomath.m2cryptoLoaded: + return OpenSSL_AES.new(key, 2, IV) + elif impl == "pycrypto" and cryptomath.pycryptoLoaded: + return PyCrypto_AES.new(key, 2, IV) + elif impl == "python": + return Python_AES.new(key, 2, IV) + raise NotImplementedError() + +def createRC4(key, IV, implList=None): + """Create a new RC4 object. + + @type key: str + @param key: A 16 to 32 byte string. + + @type IV: object + @param IV: Ignored, whatever it is. + + @rtype: L{tlslite.utils.RC4} + @return: An RC4 object. + """ + if implList == None: + implList = ["cryptlib", "openssl", "pycrypto", "python"] + + if len(IV) != 0: + raise AssertionError() + for impl in implList: + if impl == "cryptlib" and cryptomath.cryptlibpyLoaded: + return Cryptlib_RC4.new(key) + elif impl == "openssl" and cryptomath.m2cryptoLoaded: + return OpenSSL_RC4.new(key) + elif impl == "pycrypto" and cryptomath.pycryptoLoaded: + return PyCrypto_RC4.new(key) + elif impl == "python": + return Python_RC4.new(key) + raise NotImplementedError() + +#Create a new TripleDES instance +def createTripleDES(key, IV, implList=None): + """Create a new 3DES object. + + @type key: str + @param key: A 24 byte string. + + @type IV: str + @param IV: An 8 byte string + + @rtype: L{tlslite.utils.TripleDES} + @return: A 3DES object. 
+ """ + if implList == None: + implList = ["cryptlib", "openssl", "pycrypto"] + + for impl in implList: + if impl == "cryptlib" and cryptomath.cryptlibpyLoaded: + return Cryptlib_TripleDES.new(key, 2, IV) + elif impl == "openssl" and cryptomath.m2cryptoLoaded: + return OpenSSL_TripleDES.new(key, 2, IV) + elif impl == "pycrypto" and cryptomath.pycryptoLoaded: + return PyCrypto_TripleDES.new(key, 2, IV) + raise NotImplementedError() \ No newline at end of file diff --git a/gam/gdata/tlslite/utils/codec.py b/gam/gdata/tlslite/utils/codec.py new file mode 100755 index 00000000000..13022a0b932 --- /dev/null +++ b/gam/gdata/tlslite/utils/codec.py @@ -0,0 +1,94 @@ +"""Classes for reading/writing binary data (such as TLS records).""" + +from compat import * + +class Writer: + def __init__(self, length=0): + #If length is zero, then this is just a "trial run" to determine length + self.index = 0 + self.bytes = createByteArrayZeros(length) + + def add(self, x, length): + if self.bytes: + newIndex = self.index+length-1 + while newIndex >= self.index: + self.bytes[newIndex] = x & 0xFF + x >>= 8 + newIndex -= 1 + self.index += length + + def addFixSeq(self, seq, length): + if self.bytes: + for e in seq: + self.add(e, length) + else: + self.index += len(seq)*length + + def addVarSeq(self, seq, length, lengthLength): + if self.bytes: + self.add(len(seq)*length, lengthLength) + for e in seq: + self.add(e, length) + else: + self.index += lengthLength + (len(seq)*length) + + +class Parser: + def __init__(self, bytes): + self.bytes = bytes + self.index = 0 + + def get(self, length): + if self.index + length > len(self.bytes): + raise SyntaxError() + x = 0 + for count in range(length): + x <<= 8 + x |= self.bytes[self.index] + self.index += 1 + return x + + def getFixBytes(self, lengthBytes): + bytes = self.bytes[self.index : self.index+lengthBytes] + self.index += lengthBytes + return bytes + + def getVarBytes(self, lengthLength): + lengthBytes = self.get(lengthLength) + return self.getFixBytes(lengthBytes) + + def getFixList(self, length, lengthList): + l = [0] * lengthList + for x in range(lengthList): + l[x] = self.get(length) + return l + + def getVarList(self, length, lengthLength): + lengthList = self.get(lengthLength) + if lengthList % length != 0: + raise SyntaxError() + lengthList = int(lengthList/length) + l = [0] * lengthList + for x in range(lengthList): + l[x] = self.get(length) + return l + + def startLengthCheck(self, lengthLength): + self.lengthCheck = self.get(lengthLength) + self.indexCheck = self.index + + def setLengthCheck(self, length): + self.lengthCheck = length + self.indexCheck = self.index + + def stopLengthCheck(self): + if (self.index - self.indexCheck) != self.lengthCheck: + raise SyntaxError() + + def atLengthCheck(self): + if (self.index - self.indexCheck) < self.lengthCheck: + return False + elif (self.index - self.indexCheck) == self.lengthCheck: + return True + else: + raise SyntaxError() \ No newline at end of file diff --git a/gam/gdata/tlslite/utils/codec.pyc b/gam/gdata/tlslite/utils/codec.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8e863584070e16136b6aa85cd16f2c51f5da9340 GIT binary patch literal 4693 zcmcgvZEq7t5S}}~Bo3s6*APmCp+y*~V6_MpL4+Wrpq3~_=TK-QWL@r#JRG=pwBaVc5Fz$#U$Ff-MPKpoo8pB85jPYoT%;m^CFV`uYl(xG;<#!7FmV# zB=)7{$x2+1cwFM5d=xnl`E%8mxFm5!;&NYwDPz*8O>m8e5Kmg6F^Q?Z?PS#~3FEZA zUjLA#7SHNh+77ebY8=|IS~EM*Mm03mm8AwIMx87+H*cq(@K^J&j-|L|%aE_|d6M;y 
z>_fGHX6g_Lj*Ro_sXuzc6Z$;cD$Z5VPBAxNwIo5KR>C(annYZt*PftnEj9{%z7Itm&ILpG_)gkoqWX?aTc#d$;v~9_iQ`zW*-YE1Z8nLc;r8NJyokr=fKRo36CD`- zP9%G+9T@J*G~l?nUy$wt*(>Dg&5~p*0&IZuBlij3%~Mg3rG0z{J8k1~0c=%y5LR>0XAGkbCyNzU5Fd#BKqxS%(IoUWQjo{4ObiwZ4-#o9%EbX*Mk< zG@DzUc&CM7Fot2E{y@JBrXUW_tG(mqk$|`oJiY6(cd}HeRK}FTUW6IwD#APc*Vt{_u=`7vbuvwKS=e4r4w*?KEEwl( z0vmO9+IDonDl@Jp_9nJ6j0*8CdndiJcNUa=4pMP0K;OrC4g-e?rGps~m|yKdq(eLVx;#Zt9GkEVFzQ|-w3yq$VvwSd`r-mG+AK)+152rc9(6E{ z>g)5ke9X;(EC9%&0;!+lodQ`Javh+!R|1eP0c3F)NFGNq3|Jp%Sf*_o5L(S{&X;)b zKpl!%FraIGB|PC;#B%{V<~(uAn*vizApt9{^n_m1xVZ+!VBDaOI+hwG(j&?bx{$iE z^O+|bNwn$eH(qMW;#GC7k?G9U*=Ipe?O1XTD5t& zL}f$hYXGD!&&vS zaNSB_gO$RkfTJCat=8tc+Q59gmE%6K{_vi6zd{}|sI<~3`xE}zEB^sG0F^rc literal 0 HcmV?d00001 diff --git a/gam/gdata/tlslite/utils/compat.py b/gam/gdata/tlslite/utils/compat.py new file mode 100755 index 00000000000..7d2d9250d84 --- /dev/null +++ b/gam/gdata/tlslite/utils/compat.py @@ -0,0 +1,140 @@ +"""Miscellaneous functions to mask Python version differences.""" + +import sys +import os + +if sys.version_info < (2,2): + raise AssertionError("Python 2.2 or later required") + +if sys.version_info < (2,3): + + def enumerate(collection): + return zip(range(len(collection)), collection) + + class Set: + def __init__(self, seq=None): + self.values = {} + if seq: + for e in seq: + self.values[e] = None + + def add(self, e): + self.values[e] = None + + def discard(self, e): + if e in self.values.keys(): + del(self.values[e]) + + def union(self, s): + ret = Set() + for e in self.values.keys(): + ret.values[e] = None + for e in s.values.keys(): + ret.values[e] = None + return ret + + def issubset(self, other): + for e in self.values.keys(): + if e not in other.values.keys(): + return False + return True + + def __nonzero__( self): + return len(self.values.keys()) + + def __contains__(self, e): + return e in self.values.keys() + + def __iter__(self): + return iter(set.values.keys()) + + +if os.name != "java": + + import array + def createByteArraySequence(seq): + return array.array('B', seq) + def createByteArrayZeros(howMany): + return array.array('B', [0] * howMany) + def concatArrays(a1, a2): + return a1+a2 + + def bytesToString(bytes): + return bytes.tostring() + def stringToBytes(s): + bytes = createByteArrayZeros(0) + bytes.fromstring(s) + return bytes + + import math + def numBits(n): + if n==0: + return 0 + s = "%x" % n + return ((len(s)-1)*4) + \ + {'0':0, '1':1, '2':2, '3':2, + '4':3, '5':3, '6':3, '7':3, + '8':4, '9':4, 'a':4, 'b':4, + 'c':4, 'd':4, 'e':4, 'f':4, + }[s[0]] + return int(math.floor(math.log(n, 2))+1) + + BaseException = Exception + import sys + import traceback + def formatExceptionTrace(e): + newStr = "".join(traceback.format_exception(sys.exc_type, sys.exc_value, sys.exc_traceback)) + return newStr + +else: + #Jython 2.1 is missing lots of python 2.3 stuff, + #which we have to emulate here: + #NOTE: JYTHON SUPPORT NO LONGER WORKS, DUE TO USE OF GENERATORS. + #THIS CODE IS LEFT IN SO THAT ONE JYTHON UPDATES TO 2.2, IT HAS A + #CHANCE OF WORKING AGAIN. 
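#(For context: on CPython the helpers above are thin wrappers around
# array.array('B') -- stringToBytes("ab") yields array('B', [97, 98]) and
# bytesToString() reverses it -- while this Jython branch emulates the same
# API with jarray arrays of shorts.)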
+ + import java + import jarray + + def createByteArraySequence(seq): + if isinstance(seq, type("")): #If it's a string, convert + seq = [ord(c) for c in seq] + return jarray.array(seq, 'h') #use short instead of bytes, cause bytes are signed + def createByteArrayZeros(howMany): + return jarray.zeros(howMany, 'h') #use short instead of bytes, cause bytes are signed + def concatArrays(a1, a2): + l = list(a1)+list(a2) + return createByteArraySequence(l) + + #WAY TOO SLOW - MUST BE REPLACED------------ + def bytesToString(bytes): + return "".join([chr(b) for b in bytes]) + + def stringToBytes(s): + bytes = createByteArrayZeros(len(s)) + for count, c in enumerate(s): + bytes[count] = ord(c) + return bytes + #WAY TOO SLOW - MUST BE REPLACED------------ + + def numBits(n): + if n==0: + return 0 + n= 1L * n; #convert to long, if it isn't already + return n.__tojava__(java.math.BigInteger).bitLength() + + #Adjust the string to an array of bytes + def stringToJavaByteArray(s): + bytes = jarray.zeros(len(s), 'b') + for count, c in enumerate(s): + x = ord(c) + if x >= 128: x -= 256 + bytes[count] = x + return bytes + + BaseException = java.lang.Exception + import sys + import traceback + def formatExceptionTrace(e): + newStr = "".join(traceback.format_exception(sys.exc_type, sys.exc_value, sys.exc_traceback)) + return newStr \ No newline at end of file diff --git a/gam/gdata/tlslite/utils/compat.pyc b/gam/gdata/tlslite/utils/compat.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a85043d79ccfe6f5dd6c6f88b62eb9fecb3ccdb2 GIT binary patch literal 7208 zcmcgxdvny*6+hbbYmHy{iBlT3l*g717)XF*LQ`W*5;9?%(UQ_`OEt12+p^aZ*1ZdM zCQLhpzxsvxA^K@L{Q&LnckXHz2+1EEHnw$ibnlgP9>4Rra`ivcla1~_zmBB$Q^k25 zhdK5{Vv#neCvinOp0vR@z9Q|4B%bV5C0><%k>a)_BNC6u3s3&;Nham&dpf%(@tC~C z(?%s8m-uHAPw?`E$RFEdy1F8Jn5fBqRf>O?ubGhcgv672XHDW0%1lZ;rOXM5r)iPM0l6_Oe{FDDB0PUy}Bc#3ywPWSCb5 zGAt+q85Wg+3`@#DhGk`DC0w8BbbC7~k}OKhmGr;(X{HrEiqGpUy?*6- zJum8=&?ZH_NDjMck;Eze9>L|pV+6-t9A*{7;?EOXk?-IzkAGw+B$qZDywj3=n?A9l zI2U2Iljuk%$#jvO#Bh{%I*Cf(z?CR5Ux8cu#z6YD{j}IRUfax{Wt}{X&Du_QaQXU` z8#gXLQ57$nBC0p8{c3F|4sEz*JEoJ`WUXt{j#-QHgD0WAf|WHqFv+?HNdf&%;KERU z@l>8xVk2Jhhr=5X41ilzW{fB&lmXXKWpI}RH{da@ zBa`B{8k8jBTWFiUML)zvDl}mL>Pc}`j@~D;`FKH|S117H_CPCg`*2wvA(Svy8k^6v zV<1n%PB$@@&u!*e;=I5slXSLKS(6;XZ_sU+=AOU{f*{RO8w9g_I|sz8d1KzRr`nem zxXam-AUzEMBTsI>p_)IA1wQp_@IIeU^9i0|>OlX5F~S!D@~tuuoR=#21u&4;xn>Vh zAhJIu#o5wXp8V$fj#5R`Mf=II`3W_m6=Nihu!v8AHN=H@OHMCd@KC+}1O@>Pb%4F2 zbE*!~dW9p3NJN5S5juZY1tM4F5tm`jk1ku6M*wh%TE5O_YEX;#)8r^)Lnr8DMXFKU z7Kt6Y%PC7YL&7`_#!P__v&-H|J^E2w#kiX37MP=}uepEo@fq3-TGAK@SqF-7Ddw0g zTZeP<2>QHc0s>pL*F5=I*fEL1<6+Tt5v+jM$=vQH#nA0Q)2GICw;;$l@V)R|2397# zemUqz!2*^5EsTQF`zQycinNxmVcXYJv%e}e%m8MZ4}vVuzDsy`+tX?g&CK2%jHAz|6@nVv?wxlNnzJZ@nFAT~rp9X3+DL7p#&cDTyca$V`+>p;P!DlA0tIOSeFVZq z+a>z1sfljGbw$iVVNry~&L?ab1{%F{Wh~7W6$$#HjboeK;fq^n|JW=tzm*|V!i_jm zWq4m-8MH$?s?wtBM2(yN`=DrQAU<#Q&y<{HlHbjrJqWYoVQRLB*Sw)w8>zs2jPX(} zTE9G0tqBakHAn_4W$0kUH6BRdaLqM$x;{+N337ye6k2tN_WY&Dr6+h+k=Y3``D(!~ zh32eAl+BGT(rjk{t}Xc43{!ema(&6eyru8{6a%k{|C+^rPDwQ+P&J+dMMz*SJUsDm z(b5*F>5#-b{j{s5dhOd#9(L{`a*YT3EktWoWd?*G1V2=M%=^*HW4?uIi7H*8cx_|o^Yor zcbaym8Jw^)nRTZ*cRJ}#^X|0ZPK)lelZz{c|^dqGoD-D%yDUFoI zT5y%!ZfN63GixzRGpmXBAhf&MDQ$Q1+|{{GzM}}z`!b&x874fK+_2N!NUiw{tYH=+ zFh;ydG?Hh$)A~F47MT1R3*iOCF+@cThhp#m>8R2%d4Z~4#z-$LfF9_>m%74}!xAhR zwg{tSD~$FntIl>_z%oH{6eUmirNA`+$3`;*Mgx2NByqcBT<9*+GKzJoe3IL$Fz$4( znI+E<-NQ5`P3I)PR|@`+!hD1A-tOh%z>fXO^f@Keh=UzrP?!t=E^f(=4sJf4XPN=N zUD*AlB8R8tIlPA3)Z~m+Af7uFSIO4-L?U}vVW+t#HE79fh%%{$R|9f&Uc_piJ+0xc 
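A minimal sketch of the byte-array helpers defined in compat.py above (illustrative only, not part of the patch; assumes Python 2 and the CPython branch, i.e. os.name != "java"):

# Hypothetical example.
from compat import stringToBytes, bytesToString, numBits

b = stringToBytes("abc")            # array('B', [97, 98, 99])
assert bytesToString(b) == "abc"
assert numBits(255) == 8 and numBits(256) == 9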
z>S=PKSA3S#A)3su58cuIGdb7dc*S(XN(rj)Vie+laR+ytkQ#P->^ z68;O$;&9}h_ad6u;iP`8piGpyFL@mo&b4qV8TIZ8c3f}|HeH&YsB6=OJTcKpja9p- zGyPv;$Un#Da9wAZ>YT?5ShjjtT^?YyUm7l8+Qt0q;M6h*Z3xb3rrdIIlt&tai1%$__mJ9A%m2IZBfph1@=R1 zntXl<)?Boa?%dC8vV)x+v&>d%ze=(lyX&Hs-PbS?@moCq2uIH#uGB>oo-ps@Viy;{ z1$sNCUIl{7Jw(TS#X%1ppS*$&v>>kNfzi*d`=e6)Uiqqg^n8R^#6$_6^F8qJtxb)E z^lxBOe!zAu1M`~yI}*Ce9{@@liawzH7H65RHBTH3(STOJF>0gyF#i(%?Qg{Xir1SN z5b;g{W>emjOnWnLNhAMP>)@~B!P+|bcfk1@B*d=2Npg>bEsXy;h|4f6{{9!d#4)T4 zb03fX2abM;*LQ0-spOSQ28$DgUo>@N83b`2AwsAFfQEu>EVom=|ApYI^mcA((>n(L zU|=(JuPE}u0f89Ma?Y<1TD!G!xCC?_lRn?(GX-e$`NY1)T)#unzG)*gNq@u{mRh;6 zRRsQlHxhjKC(h2mn##TN!m4fFZTB(dQwFA?ug59?RAttit;|+x)#VGbxUN<3Tg5R7 YehS|<_b9MgpPav1Nq&g-T(jq literal 0 HcmV?d00001 diff --git a/gam/gdata/tlslite/utils/cryptomath.py b/gam/gdata/tlslite/utils/cryptomath.py new file mode 100755 index 00000000000..92fb77437bd --- /dev/null +++ b/gam/gdata/tlslite/utils/cryptomath.py @@ -0,0 +1,404 @@ +"""cryptomath module + +This module has basic math/crypto code.""" + +import os +import sys +import math +import base64 +import binascii +if sys.version_info[:2] <= (2, 4): + from sha import sha as sha1 +else: + from hashlib import sha1 + +from compat import * + + +# ************************************************************************** +# Load Optional Modules +# ************************************************************************** + +# Try to load M2Crypto/OpenSSL +try: + from M2Crypto import m2 + m2cryptoLoaded = True + +except ImportError: + m2cryptoLoaded = False + + +# Try to load cryptlib +try: + import cryptlib_py + try: + cryptlib_py.cryptInit() + except cryptlib_py.CryptException, e: + #If tlslite and cryptoIDlib are both present, + #they might each try to re-initialize this, + #so we're tolerant of that. 
+ if e[0] != cryptlib_py.CRYPT_ERROR_INITED: + raise + cryptlibpyLoaded = True + +except ImportError: + cryptlibpyLoaded = False + +#Try to load GMPY +try: + import gmpy + gmpyLoaded = True +except ImportError: + gmpyLoaded = False + +#Try to load pycrypto +try: + import Crypto.Cipher.AES + pycryptoLoaded = True +except ImportError: + pycryptoLoaded = False + + +# ************************************************************************** +# PRNG Functions +# ************************************************************************** + +# Get os.urandom PRNG +try: + os.urandom(1) + def getRandomBytes(howMany): + return stringToBytes(os.urandom(howMany)) + prngName = "os.urandom" + +except: + # Else get cryptlib PRNG + if cryptlibpyLoaded: + def getRandomBytes(howMany): + randomKey = cryptlib_py.cryptCreateContext(cryptlib_py.CRYPT_UNUSED, + cryptlib_py.CRYPT_ALGO_AES) + cryptlib_py.cryptSetAttribute(randomKey, + cryptlib_py.CRYPT_CTXINFO_MODE, + cryptlib_py.CRYPT_MODE_OFB) + cryptlib_py.cryptGenerateKey(randomKey) + bytes = createByteArrayZeros(howMany) + cryptlib_py.cryptEncrypt(randomKey, bytes) + return bytes + prngName = "cryptlib" + + else: + #Else get UNIX /dev/urandom PRNG + try: + devRandomFile = open("/dev/urandom", "rb") + def getRandomBytes(howMany): + return stringToBytes(devRandomFile.read(howMany)) + prngName = "/dev/urandom" + except IOError: + #Else get Win32 CryptoAPI PRNG + try: + import win32prng + def getRandomBytes(howMany): + s = win32prng.getRandomBytes(howMany) + if len(s) != howMany: + raise AssertionError() + return stringToBytes(s) + prngName ="CryptoAPI" + except ImportError: + #Else no PRNG :-( + def getRandomBytes(howMany): + raise NotImplementedError("No Random Number Generator "\ + "available.") + prngName = "None" + +# ************************************************************************** +# Converter Functions +# ************************************************************************** + +def bytesToNumber(bytes): + total = 0L + multiplier = 1L + for count in range(len(bytes)-1, -1, -1): + byte = bytes[count] + total += multiplier * byte + multiplier *= 256 + return total + +def numberToBytes(n): + howManyBytes = numBytes(n) + bytes = createByteArrayZeros(howManyBytes) + for count in range(howManyBytes-1, -1, -1): + bytes[count] = int(n % 256) + n >>= 8 + return bytes + +def bytesToBase64(bytes): + s = bytesToString(bytes) + return stringToBase64(s) + +def base64ToBytes(s): + s = base64ToString(s) + return stringToBytes(s) + +def numberToBase64(n): + bytes = numberToBytes(n) + return bytesToBase64(bytes) + +def base64ToNumber(s): + bytes = base64ToBytes(s) + return bytesToNumber(bytes) + +def stringToNumber(s): + bytes = stringToBytes(s) + return bytesToNumber(bytes) + +def numberToString(s): + bytes = numberToBytes(s) + return bytesToString(bytes) + +def base64ToString(s): + try: + return base64.decodestring(s) + except binascii.Error, e: + raise SyntaxError(e) + except binascii.Incomplete, e: + raise SyntaxError(e) + +def stringToBase64(s): + return base64.encodestring(s).replace("\n", "") + +def mpiToNumber(mpi): #mpi is an openssl-format bignum string + if (ord(mpi[4]) & 0x80) !=0: #Make sure this is a positive number + raise AssertionError() + bytes = stringToBytes(mpi[4:]) + return bytesToNumber(bytes) + +def numberToMPI(n): + bytes = numberToBytes(n) + ext = 0 + #If the high-order bit is going to be set, + #add an extra byte of zeros + if (numBits(n) & 0x7)==0: + ext = 1 + length = numBytes(n) + ext + bytes = 
concatArrays(createByteArrayZeros(4+ext), bytes) + bytes[0] = (length >> 24) & 0xFF + bytes[1] = (length >> 16) & 0xFF + bytes[2] = (length >> 8) & 0xFF + bytes[3] = length & 0xFF + return bytesToString(bytes) + + + +# ************************************************************************** +# Misc. Utility Functions +# ************************************************************************** + +def numBytes(n): + if n==0: + return 0 + bits = numBits(n) + return int(math.ceil(bits / 8.0)) + +def hashAndBase64(s): + return stringToBase64(sha1(s).digest()) + +def getBase64Nonce(numChars=22): #defaults to an 132 bit nonce + bytes = getRandomBytes(numChars) + bytesStr = "".join([chr(b) for b in bytes]) + return stringToBase64(bytesStr)[:numChars] + + +# ************************************************************************** +# Big Number Math +# ************************************************************************** + +def getRandomNumber(low, high): + if low >= high: + raise AssertionError() + howManyBits = numBits(high) + howManyBytes = numBytes(high) + lastBits = howManyBits % 8 + while 1: + bytes = getRandomBytes(howManyBytes) + if lastBits: + bytes[0] = bytes[0] % (1 << lastBits) + n = bytesToNumber(bytes) + if n >= low and n < high: + return n + +def gcd(a,b): + a, b = max(a,b), min(a,b) + while b: + a, b = b, a % b + return a + +def lcm(a, b): + #This will break when python division changes, but we can't use // cause + #of Jython + return (a * b) / gcd(a, b) + +#Returns inverse of a mod b, zero if none +#Uses Extended Euclidean Algorithm +def invMod(a, b): + c, d = a, b + uc, ud = 1, 0 + while c != 0: + #This will break when python division changes, but we can't use // + #cause of Jython + q = d / c + c, d = d-(q*c), c + uc, ud = ud - (q * uc), uc + if d == 1: + return ud % b + return 0 + + +if gmpyLoaded: + def powMod(base, power, modulus): + base = gmpy.mpz(base) + power = gmpy.mpz(power) + modulus = gmpy.mpz(modulus) + result = pow(base, power, modulus) + return long(result) + +else: + #Copied from Bryan G. Olson's post to comp.lang.python + #Does left-to-right instead of pow()'s right-to-left, + #thus about 30% faster than the python built-in with small bases + def powMod(base, power, modulus): + nBitScan = 5 + + """ Return base**power mod modulus, using multi bit scanning + with nBitScan bits at a time.""" + + #TREV - Added support for negative exponents + negativeResult = False + if (power < 0): + power *= -1 + negativeResult = True + + exp2 = 2**nBitScan + mask = exp2 - 1 + + # Break power into a list of digits of nBitScan bits. + # The list is recursive so easy to read in reverse direction. 
+ nibbles = None + while power: + nibbles = int(power & mask), nibbles + power = power >> nBitScan + + # Make a table of powers of base up to 2**nBitScan - 1 + lowPowers = [1] + for i in xrange(1, exp2): + lowPowers.append((lowPowers[i-1] * base) % modulus) + + # To exponentiate by the first nibble, look it up in the table + nib, nibbles = nibbles + prod = lowPowers[nib] + + # For the rest, square nBitScan times, then multiply by + # base^nibble + while nibbles: + nib, nibbles = nibbles + for i in xrange(nBitScan): + prod = (prod * prod) % modulus + if nib: prod = (prod * lowPowers[nib]) % modulus + + #TREV - Added support for negative exponents + if negativeResult: + prodInv = invMod(prod, modulus) + #Check to make sure the inverse is correct + if (prod * prodInv) % modulus != 1: + raise AssertionError() + return prodInv + return prod + + +#Pre-calculate a sieve of the ~100 primes < 1000: +def makeSieve(n): + sieve = range(n) + for count in range(2, int(math.sqrt(n))): + if sieve[count] == 0: + continue + x = sieve[count] * 2 + while x < len(sieve): + sieve[x] = 0 + x += sieve[count] + sieve = [x for x in sieve[2:] if x] + return sieve + +sieve = makeSieve(1000) + +def isPrime(n, iterations=5, display=False): + #Trial division with sieve + for x in sieve: + if x >= n: return True + if n % x == 0: return False + #Passed trial division, proceed to Rabin-Miller + #Rabin-Miller implemented per Ferguson & Schneier + #Compute s, t for Rabin-Miller + if display: print "*", + s, t = n-1, 0 + while s % 2 == 0: + s, t = s/2, t+1 + #Repeat Rabin-Miller x times + a = 2 #Use 2 as a base for first iteration speedup, per HAC + for count in range(iterations): + v = powMod(a, s, n) + if v==1: + continue + i = 0 + while v != n-1: + if i == t-1: + return False + else: + v, i = powMod(v, 2, n), i+1 + a = getRandomNumber(2, n) + return True + +def getRandomPrime(bits, display=False): + if bits < 10: + raise AssertionError() + #The 1.5 ensures the 2 MSBs are set + #Thus, when used for p,q in RSA, n will have its MSB set + # + #Since 30 is lcm(2,3,5), we'll set our test numbers to + #29 % 30 and keep them there + low = (2L ** (bits-1)) * 3/2 + high = 2L ** bits - 30 + p = getRandomNumber(low, high) + p += 29 - (p % 30) + while 1: + if display: print ".", + p += 30 + if p >= high: + p = getRandomNumber(low, high) + p += 29 - (p % 30) + if isPrime(p, display=display): + return p + +#Unused at the moment... 
+def getRandomSafePrime(bits, display=False): + if bits < 10: + raise AssertionError() + #The 1.5 ensures the 2 MSBs are set + #Thus, when used for p,q in RSA, n will have its MSB set + # + #Since 30 is lcm(2,3,5), we'll set our test numbers to + #29 % 30 and keep them there + low = (2 ** (bits-2)) * 3/2 + high = (2 ** (bits-1)) - 30 + q = getRandomNumber(low, high) + q += 29 - (q % 30) + while 1: + if display: print ".", + q += 30 + if (q >= high): + q = getRandomNumber(low, high) + q += 29 - (q % 30) + #Ideas from Tom Wu's SRP code + #Do trial division on p and q before Rabin-Miller + if isPrime(q, 0, display=display): + p = (2 * q) + 1 + if isPrime(p, display=display): + if isPrime(q, display=display): + return p diff --git a/gam/gdata/tlslite/utils/cryptomath.pyc b/gam/gdata/tlslite/utils/cryptomath.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2fef351911b34e6109f7eddec0146403645dac76 GIT binary patch literal 11770 zcmc&)-ESM&bw5K=6iLgHY<*br$C|d+UfNzuYwd@#S#M%nk~eDBv^$h7hly89amLbE zhdC5dBwyRwfMUjUTSOi7UmjXo#6e!S#qAz`G`cS0L{R{fg=B?;cfdJ|6ckT=& zZ5#waBSj)F@7(WuK7Qxib0_^jM+>E`e|%7v_-_Eex9|;@(jtyX6{#goN*b0_kumv{ zR8vVlE!A|A&qy_sX2COQK=5gihn{f?ucv_B=`rhEXy5bv2V%a-lJp*$E12p zdc)!#lV`_eN5oq3XVDFhW7;!gS=Dh2IWA6Krdd8An`sFKtj|O`0~Uwle3Sd;yNKy@ zzDYCRWHR3gsh$wFpW4^{VbXr;EA6MHdQ#h;+SmSzr2Q$=z9_CG&rXX|kR3}b=ZJg` z#+(rc1bGELf2{kTl;9_pb2Oxnc8psh$_-c+z~#On0(R`0H%$jK}pPmNOAg`&geQwd#U6Cp7b_I8&OrD9*HI zE{SteGp~trN;4&KPHX0}IA=6-MVwbOb5)$Pnt5HEqGqm%b51ie;+)satT-1m^M*K> z?xK7SxxFsVB`us2=QYi|DNac`@U)}oDKv*m0x+(Kb_ z!wchw;zlhjuGK=XUSyM5)4Eu1Iqr=23;dP66bA7!NJ2$AXV;xtRGW<&VZ)2u*{#THgfUG>o*A?r#-QtNWb5p@szsE9 zT$xy50ck6oi5I84=2l22-StPvVAszqO;ehtsU|N>Nk+C!WG zu(G(Ido%^NKm6eS>g~l!bS&vwaiiN2n6kDNxjLX}IKTXOseJeT>b?65i+lR>arOS) zJJD#;{{z=|15EyHw-=3TA(uiz-VTCVuj&S1Fju4tU-b1y<-sPGqdXGBdX7Nru}0B2 zV7A*iO!OhXA!#Vq5dQO4)+$&>ksBpfc1ay_`LmAud^VN?mD^xV#o;$p0-K_vnt<_) z(B1VKZp30R(NXzV${Yp-zK2<3QP<5->iJdKzJZL9ZKKWUx3>I9cSd+QVBJ#vRv24u8TNxp64KNNKqhNl0MHEMUnxxL_9^^bJfQKe#(`2Q zVx;!XSOLL;DKG#ZrSq$#Gn5LQz=27~Bk7!$n=Hu4hcDoY&Vt6YCZt9AD|GcJ_a!bt zVb9-c623#5Ho`8lIE}>fBh!Yw@fFfz6Kz_P2lfRdhe2Gd5lR)i5!jz7MjISIV~r=I zr9SpGb`lRSC2ay%Y`K?p74=mVrTT75Ep*>`%K(&a_%!+)#)e~Qx})=dfk{L1iyKxM zB2RwBMzNEw{{04ft{>`_Q%(FF-};k1_N9ZT;r_v#)UZ*N_+{kv9qt z2hAjsvoDpps~@7||Gk}AuKywuvmNr~(6*CEhk>vk{Wi8kq5WNLC(&WEoiBeI+fiY= zijjT?+)0GZ=qmzrZ&(DQW8%96~yUeWay1RI*MZvzMkg`ocET zF4NYsSvlmI3upu(VS)V?5~Ylb9rh{1vryo+8@0M?9KSt=)o;LErxp?aEHy_t!%|TAf8Jo)qAYsNYgir=hG72k%pir$GecNpPN4 z;Mk||tHx&+8r0=zw!K3EJD#gCSK_Sw4IbmOz)3v~D&q-V{LZtMO$=eocCm4u-xJ+Z zGFh^jk^rHjH0ykHP#U2FJh2nTWrMcCL*p}xK`@ORz;}ah{7ok-w@`+$Z?a@?w}ieO zN^-j;c~cViD9BBySHb{puWVvB6f@)faVy>wlV335aYp?>6CWaHRGrnHYJdiKbH|H9 z`x2_mfxhL}YmuJvV_#8`kOSL&v#We$P=c_4Gv9i&anP2S+fBgn-h-vT!(&L_kIqsy zJ&AMvs5K3B=6~#h6Jh$iWvIaJv7=WHUyTPo)E zZRG9Okm&h?fhWz>U9Yk0q-fC^2ze7imD-j29X#x1CX@>n z5ex$}7oHqfEryz6vzB~>v*|Q5@$m|w4ps1Ce9*Vw!(fKRIS$K-p0#jJB5CRyfl|jH z9qS>J^Bc7wG^bUrY|R)%9=2e`518?KjzOf;PzvY!y8CZfLrBG>YLu)xDMy6h`seWt ze~bjG0NQL0NH9gp?er1f0G0thkW96J2Y$7IlQ{5qAE_Bovb_KtWq5nDJwTX36AJI4r|FcTbUF!A+)y!IN=K=R6_>yDl9E zHnzCdHNT#(paozMso7u zaQ87E4{HaFdh=(P&%jgqmvya+O0)J9+yy8SROEZeWCXg?T~NJBspqxI;11Mm;sw+M zIix8WpF$G%gdVXFk+vx$DKkXpG#HCfjaw2?*}(kC3bZlt(MTROU`ih2C!i>1Ftx|- zlT7(v*VKVI#05^>{5V?HIcT9EYB2^q*;bgsuOBqhtY=2*W+cC~G zTK>8*1Z>OGk)D1~jqoJts#keip<22z35`-oBufJa1*?)>)Xwb z(1=bhwhsW&U@ioQsC<=T1X+ETUO!r7=<3p`P0JWjA9z0?QxH`L9S5a*MUkfypAaCY zw`f%d-*#hE%9Sv!I$c%zD^5pc$0?&66JGwn26||4T-ySKh zI%wkF)GSrRQVuzln!B|I-9T)^L9j>KzGe=>y7fsCw_I z-22{|c4!AJM>{aH;~ 
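A few illustrative sanity checks for the cryptomath helpers above (not part of the patch; assumes Python 2 with the tlslite utils directory on sys.path so its "from compat import *" works):

# Hypothetical example.
import cryptomath

n = 0x1234567890
assert cryptomath.bytesToNumber(cryptomath.numberToBytes(n)) == n
assert cryptomath.numBytes(n) == 5
assert cryptomath.stringToBase64("abc") == "YWJj"

assert cryptomath.invMod(3, 7) == 5          # 3*5 == 15 == 1 (mod 7)
assert cryptomath.powMod(3, 100, 101) == 1   # 101 is prime (Fermat)
assert cryptomath.isPrime(97) and not cryptomath.isPrime(91)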
zzGO;=^-C@g^~rfe1&GuzzEKf5l@|Lha>}Hz6GY}Ng)*gnj>=i+xe$X3Y(~S-Jz(ak zFg0tNZiVx{PcDvQTDm>5%@ih?1YxfaGPyXi6 z-(v-&QS4_-icF|m_Cq8k(%Xm$b9k`=_y4%SHScjaUWh1G&;C9eokpU<6B);$FHFyE zFzCP?M_N`dd=Pj|_uG^w#=9g1CqXjSsN^zP8ANbz%$iPR6$j{4GdF5wQs?liP8ubP ziZPFG_!A_Q4Wq034@A^k!>}>Xd>mju0T?i)V~FKU(RivK0r{hC0NWr^uz5(cGC6ev zCEbsyyg`$}9Th~aV+~rNALaP3fBi5JVLC48tnZWTSXO|6Gm!L;&FP7pD|<}!t~C`$ zg`ihrB0#S&cLs^_Z^nkB6bCT%ctt_5KV=!|V}FYYo7lh4#73h1+6N69!*Xx5x6*%ds@ssa-`Am*YGwaQ)AGuaWHk4w_r9(>EpbV zzpC)4j`{oN%HIz8p73`-_Vf2|U&i14!|d`G>Tg(_vI0k?jk1?PfDx}8K?DE*eG&Ni zFA2r}vO%TubLlwzI6V06ipFj%(3}POGGYHA2&Z1kua*na_GNNmud?DZBnK@Ue6^=9 z8J$TX)-pX`62m?s?N|(YEi~ zt4rmjWxOfYf2|7(Yxm4tc8wh<6+GX>^B=>bS?A2W*WPf08NO9Uo|{sgxZTr*#A~)y zB61vz310^)*P6zYZNXKwPq2!VS-NjF#p@M~-0a@!+Ik`S1}m;JnPb9;tzO%hc;qcU zE;6~qH@-Y)NYd>I4jozos?J)Tq$*zQI?qJ?PBr$H9cR9a7o`-*l zB%5NS$NZ03W2wD!_adGK~MEDWaJ%K$@1NS;p7b3cES + + +typedef BOOL (WINAPI *CRYPTACQUIRECONTEXTA)(HCRYPTPROV *phProv,\ + LPCSTR pszContainer, LPCSTR pszProvider, DWORD dwProvType,\ + DWORD dwFlags ); +typedef BOOL (WINAPI *CRYPTGENRANDOM)(HCRYPTPROV hProv, DWORD dwLen,\ + BYTE *pbBuffer ); +typedef BOOL (WINAPI *CRYPTRELEASECONTEXT)(HCRYPTPROV hProv,\ + DWORD dwFlags); + + +static PyObject* entropy(PyObject *self, PyObject *args) +{ + int howMany = 0; + HINSTANCE hAdvAPI32 = NULL; + CRYPTACQUIRECONTEXTA pCryptAcquireContextA = NULL; + CRYPTGENRANDOM pCryptGenRandom = NULL; + CRYPTRELEASECONTEXT pCryptReleaseContext = NULL; + HCRYPTPROV hCryptProv = 0; + unsigned char* bytes = NULL; + PyObject* returnVal = NULL; + + + /* Read arguments */ + if (!PyArg_ParseTuple(args, "i", &howMany)) + return(NULL); + + /* Obtain handle to the DLL containing CryptoAPI + This should not fail */ + if( (hAdvAPI32 = GetModuleHandle("advapi32.dll")) == NULL) { + PyErr_Format(PyExc_SystemError, + "Advapi32.dll not found"); + return NULL; + } + + /* Obtain pointers to the CryptoAPI functions + This will fail on some early version of Win95 */ + pCryptAcquireContextA = (CRYPTACQUIRECONTEXTA)GetProcAddress(hAdvAPI32,\ + "CryptAcquireContextA"); + pCryptGenRandom = (CRYPTGENRANDOM)GetProcAddress(hAdvAPI32,\ + "CryptGenRandom"); + pCryptReleaseContext = (CRYPTRELEASECONTEXT) GetProcAddress(hAdvAPI32,\ + "CryptReleaseContext"); + if (pCryptAcquireContextA == NULL || pCryptGenRandom == NULL || + pCryptReleaseContext == NULL) { + PyErr_Format(PyExc_NotImplementedError, + "CryptoAPI not available on this version of Windows"); + return NULL; + } + + /* Allocate bytes */ + if ((bytes = (unsigned char*)PyMem_Malloc(howMany)) == NULL) + return PyErr_NoMemory(); + + + /* Acquire context */ + if(!pCryptAcquireContextA(&hCryptProv, NULL, NULL, PROV_RSA_FULL, + CRYPT_VERIFYCONTEXT)) { + PyErr_Format(PyExc_SystemError, + "CryptAcquireContext failed, error %d", GetLastError()); + PyMem_Free(bytes); + return NULL; + } + + /* Get random data */ + if(!pCryptGenRandom(hCryptProv, howMany, bytes)) { + PyErr_Format(PyExc_SystemError, + "CryptGenRandom failed, error %d", GetLastError()); + PyMem_Free(bytes); + CryptReleaseContext(hCryptProv, 0); + return NULL; + } + + /* Build return value */ + returnVal = Py_BuildValue("s#", bytes, howMany); + PyMem_Free(bytes); + + /* Release context */ + if (!pCryptReleaseContext(hCryptProv, 0)) { + PyErr_Format(PyExc_SystemError, + "CryptReleaseContext failed, error %d", GetLastError()); + return NULL; + } + + return returnVal; +} + +#elif defined(HAVE_UNISTD_H) && defined(HAVE_FCNTL_H) + +#include +#include + +static PyObject* entropy(PyObject *self, PyObject *args) +{ + int howMany; + int fd; + 
unsigned char* bytes = NULL; + PyObject* returnVal = NULL; + + + /* Read arguments */ + if (!PyArg_ParseTuple(args, "i", &howMany)) + return(NULL); + + /* Allocate bytes */ + if ((bytes = (unsigned char*)PyMem_Malloc(howMany)) == NULL) + return PyErr_NoMemory(); + + /* Open device */ + if ((fd = open("/dev/urandom", O_RDONLY, 0)) == -1) { + PyErr_Format(PyExc_NotImplementedError, + "No entropy source found"); + PyMem_Free(bytes); + return NULL; + } + + /* Get random data */ + if (read(fd, bytes, howMany) < howMany) { + PyErr_Format(PyExc_SystemError, + "Reading from /dev/urandom failed"); + PyMem_Free(bytes); + close(fd); + return NULL; + } + + /* Build return value */ + returnVal = Py_BuildValue("s#", bytes, howMany); + PyMem_Free(bytes); + + /* Close device */ + close(fd); + + return returnVal; +} + +#else + +static PyObject* entropy(PyObject *self, PyObject *args) +{ + PyErr_Format(PyExc_NotImplementedError, + "Function not supported"); + return NULL; +} + +#endif + + + +/* List of functions exported by this module */ + +static struct PyMethodDef entropy_functions[] = { + {"entropy", (PyCFunction)entropy, METH_VARARGS, "Return a string of random bytes produced by a platform-specific\nentropy source."}, + {NULL, NULL} /* Sentinel */ +}; + + +/* Initialize this module. */ + +PyMODINIT_FUNC initentropy(void) +{ + Py_InitModule("entropy", entropy_functions); +} \ No newline at end of file diff --git a/gam/gdata/tlslite/utils/hmac.py b/gam/gdata/tlslite/utils/hmac.py new file mode 100755 index 00000000000..fe8feec219c --- /dev/null +++ b/gam/gdata/tlslite/utils/hmac.py @@ -0,0 +1,104 @@ +"""HMAC (Keyed-Hashing for Message Authentication) Python module. + +Implements the HMAC algorithm as described by RFC 2104. + +(This file is modified from the standard library version to do faster +copying) +""" + +def _strxor(s1, s2): + """Utility method. XOR the two strings s1 and s2 (must have same length). + """ + return "".join(map(lambda x, y: chr(ord(x) ^ ord(y)), s1, s2)) + +# The size of the digests returned by HMAC depends on the underlying +# hashing module used. +digest_size = None + +class HMAC: + """RFC2104 HMAC class. + + This supports the API for Cryptographic Hash Functions (PEP 247). + """ + + def __init__(self, key, msg = None, digestmod = None): + """Create a new HMAC object. + + key: key for the keyed hash object. + msg: Initial input for the hash, if provided. + digestmod: A module supporting PEP 247. Defaults to the md5 module. + """ + if digestmod is None: + import md5 + digestmod = md5 + + if key == None: #TREVNEW - for faster copying + return #TREVNEW + + self.digestmod = digestmod + self.outer = digestmod.new() + self.inner = digestmod.new() + self.digest_size = digestmod.digest_size + + blocksize = 64 + ipad = "\x36" * blocksize + opad = "\x5C" * blocksize + + if len(key) > blocksize: + key = digestmod.new(key).digest() + + key = key + chr(0) * (blocksize - len(key)) + self.outer.update(_strxor(key, opad)) + self.inner.update(_strxor(key, ipad)) + if msg is not None: + self.update(msg) + +## def clear(self): +## raise NotImplementedError, "clear() method not available in HMAC." + + def update(self, msg): + """Update this hashing object with the string msg. + """ + self.inner.update(msg) + + def copy(self): + """Return a separate copy of this hashing object. + + An update to this copy won't affect the original object. 
+ """ + other = HMAC(None) #TREVNEW - for faster copying + other.digest_size = self.digest_size #TREVNEW + other.digestmod = self.digestmod + other.inner = self.inner.copy() + other.outer = self.outer.copy() + return other + + def digest(self): + """Return the hash value of this hashing object. + + This returns a string containing 8-bit data. The object is + not altered in any way by this function; you can continue + updating the object after calling this function. + """ + h = self.outer.copy() + h.update(self.inner.digest()) + return h.digest() + + def hexdigest(self): + """Like digest(), but returns a string of hexadecimal digits instead. + """ + return "".join([hex(ord(x))[2:].zfill(2) + for x in tuple(self.digest())]) + +def new(key, msg = None, digestmod = None): + """Create a new hashing object and return it. + + key: The starting key for the hash. + msg: if available, will immediately be hashed into the object's starting + state. + + You can now feed arbitrary strings into the object using its update() + method, and can ask for the hash value at any time by calling its digest() + method. + """ + return HMAC(key, msg, digestmod) diff --git a/gam/gdata/tlslite/utils/jython_compat.py b/gam/gdata/tlslite/utils/jython_compat.py new file mode 100755 index 00000000000..1245183a99c --- /dev/null +++ b/gam/gdata/tlslite/utils/jython_compat.py @@ -0,0 +1,195 @@ +"""Miscellaneous functions to mask Python/Jython differences.""" + +import os +import sha + +if os.name != "java": + BaseException = Exception + + from sets import Set + import array + import math + + def createByteArraySequence(seq): + return array.array('B', seq) + def createByteArrayZeros(howMany): + return array.array('B', [0] * howMany) + def concatArrays(a1, a2): + return a1+a2 + + def bytesToString(bytes): + return bytes.tostring() + + def stringToBytes(s): + bytes = createByteArrayZeros(0) + bytes.fromstring(s) + return bytes + + def numBits(n): + if n==0: + return 0 + return int(math.floor(math.log(n, 2))+1) + + class CertChainBase: pass + class SelfTestBase: pass + class ReportFuncBase: pass + + #Helper functions for working with sets (from Python 2.3) + def iterSet(set): + return iter(set) + + def getListFromSet(set): + return list(set) + + #Factory function for getting a SHA1 object + def getSHA1(s): + return sha.sha(s) + + import sys + import traceback + + def formatExceptionTrace(e): + newStr = "".join(traceback.format_exception(sys.exc_type, sys.exc_value, sys.exc_traceback)) + return newStr + +else: + #Jython 2.1 is missing lots of python 2.3 stuff, + #which we have to emulate here: + import java + import jarray + + BaseException = java.lang.Exception + + def createByteArraySequence(seq): + if isinstance(seq, type("")): #If it's a string, convert + seq = [ord(c) for c in seq] + return jarray.array(seq, 'h') #use short instead of bytes, cause bytes are signed + def createByteArrayZeros(howMany): + return jarray.zeros(howMany, 'h') #use short instead of bytes, cause bytes are signed + def concatArrays(a1, a2): + l = list(a1)+list(a2) + return createByteArraySequence(l) + + #WAY TOO SLOW - MUST BE REPLACED------------ + def bytesToString(bytes): + return "".join([chr(b) for b in bytes]) + + def stringToBytes(s): + bytes = createByteArrayZeros(len(s)) + for count, c in enumerate(s): + bytes[count] = ord(c) + return bytes + #WAY TOO SLOW - MUST BE REPLACED------------ + + def numBits(n): + if n==0: + return 0 + n= 1L * n; #convert to long, if it isn't already + return n.__tojava__(java.math.BigInteger).bitLength() + + #This 
properly creates static methods for Jython + class staticmethod: + def __init__(self, anycallable): self.__call__ = anycallable + + #Properties are not supported for Jython + class property: + def __init__(self, anycallable): pass + + #True and False have to be specially defined + False = 0 + True = 1 + + class StopIteration(Exception): pass + + def enumerate(collection): + return zip(range(len(collection)), collection) + + class Set: + def __init__(self, seq=None): + self.values = {} + if seq: + for e in seq: + self.values[e] = None + + def add(self, e): + self.values[e] = None + + def discard(self, e): + if e in self.values.keys(): + del(self.values[e]) + + def union(self, s): + ret = Set() + for e in self.values.keys(): + ret.values[e] = None + for e in s.values.keys(): + ret.values[e] = None + return ret + + def issubset(self, other): + for e in self.values.keys(): + if e not in other.values.keys(): + return False + return True + + def __nonzero__( self): + return len(self.values.keys()) + + def __contains__(self, e): + return e in self.values.keys() + + def iterSet(set): + return set.values.keys() + + def getListFromSet(set): + return set.values.keys() + + """ + class JCE_SHA1: + def __init__(self, s=None): + self.md = java.security.MessageDigest.getInstance("SHA1") + if s: + self.update(s) + + def update(self, s): + self.md.update(s) + + def copy(self): + sha1 = JCE_SHA1() + sha1.md = self.md.clone() + return sha1 + + def digest(self): + digest = self.md.digest() + bytes = jarray.zeros(20, 'h') + for count in xrange(20): + x = digest[count] + if x < 0: x += 256 + bytes[count] = x + return bytes + """ + + #Factory function for getting a SHA1 object + #The JCE_SHA1 class is way too slow... + #the sha.sha object we use instead is broken in the jython 2.1 + #release, and needs to be patched + def getSHA1(s): + #return JCE_SHA1(s) + return sha.sha(s) + + + #Adjust the string to an array of bytes + def stringToJavaByteArray(s): + bytes = jarray.zeros(len(s), 'b') + for count, c in enumerate(s): + x = ord(c) + if x >= 128: x -= 256 + bytes[count] = x + return bytes + + import sys + import traceback + + def formatExceptionTrace(e): + newStr = "".join(traceback.format_exception(sys.exc_type, sys.exc_value, sys.exc_traceback)) + return newStr diff --git a/gam/gdata/tlslite/utils/keyfactory.py b/gam/gdata/tlslite/utils/keyfactory.py new file mode 100755 index 00000000000..5005af7f5b7 --- /dev/null +++ b/gam/gdata/tlslite/utils/keyfactory.py @@ -0,0 +1,243 @@ +"""Factory functions for asymmetric cryptography. +@sort: generateRSAKey, parseXMLKey, parsePEMKey, parseAsPublicKey, +parseAsPrivateKey +""" + +from compat import * + +from RSAKey import RSAKey +from Python_RSAKey import Python_RSAKey +import cryptomath + +if cryptomath.m2cryptoLoaded: + from OpenSSL_RSAKey import OpenSSL_RSAKey + +if cryptomath.pycryptoLoaded: + from PyCrypto_RSAKey import PyCrypto_RSAKey + +# ************************************************************************** +# Factory Functions for RSA Keys +# ************************************************************************** + +def generateRSAKey(bits, implementations=["openssl", "python"]): + """Generate an RSA key with the specified bit length. + + @type bits: int + @param bits: Desired bit length of the new key's modulus. + + @rtype: L{tlslite.utils.RSAKey.RSAKey} + @return: A new RSA private key. 
+ """ + for implementation in implementations: + if implementation == "openssl" and cryptomath.m2cryptoLoaded: + return OpenSSL_RSAKey.generate(bits) + elif implementation == "python": + return Python_RSAKey.generate(bits) + raise ValueError("No acceptable implementations") + +def parseXMLKey(s, private=False, public=False, implementations=["python"]): + """Parse an XML-format key. + + The XML format used here is specific to tlslite and cryptoIDlib. The + format can store the public component of a key, or the public and + private components. For example:: + + + 4a5yzB8oGNlHo866CAspAC47M4Fvx58zwK8pou... + Aw== + + + + 4a5yzB8oGNlHo866CAspAC47M4Fvx58zwK8pou... + Aw== + JZ0TIgUxWXmL8KJ0VqyG1V0J3ern9pqIoB0xmy... +

    5PreIj6z6ldIGL1V4+1C36dQFHNCQHJvW52GXc... + /E/wDit8YXPCxx126zTq2ilQ3IcW54NJYyNjiZ... + mKc+wX8inDowEH45Qp4slRo1YveBgExKPROu6... + qDVKtBz9lk0shL5PR3ickXDgkwS576zbl2ztB... + j6E8EA7dNsTImaXexAmLA1DoeArsYeFAInr... + + + @type s: str + @param s: A string containing an XML public or private key. + + @type private: bool + @param private: If True, a L{SyntaxError} will be raised if the private + key component is not present. + + @type public: bool + @param public: If True, the private key component (if present) will be + discarded, so this function will always return a public key. + + @rtype: L{tlslite.utils.RSAKey.RSAKey} + @return: An RSA key. + + @raise SyntaxError: If the key is not properly formatted. + """ + for implementation in implementations: + if implementation == "python": + key = Python_RSAKey.parseXML(s) + break + else: + raise ValueError("No acceptable implementations") + + return _parseKeyHelper(key, private, public) + +#Parse as an OpenSSL or Python key +def parsePEMKey(s, private=False, public=False, passwordCallback=None, + implementations=["openssl", "python"]): + """Parse a PEM-format key. + + The PEM format is used by OpenSSL and other tools. The + format is typically used to store both the public and private + components of a key. For example:: + + -----BEGIN RSA PRIVATE KEY----- + MIICXQIBAAKBgQDYscuoMzsGmW0pAYsmyHltxB2TdwHS0dImfjCMfaSDkfLdZY5+ + dOWORVns9etWnr194mSGA1F0Pls/VJW8+cX9+3vtJV8zSdANPYUoQf0TP7VlJxkH + dSRkUbEoz5bAAs/+970uos7n7iXQIni+3erUTdYEk2iWnMBjTljfgbK/dQIDAQAB + AoGAJHoJZk75aKr7DSQNYIHuruOMdv5ZeDuJvKERWxTrVJqE32/xBKh42/IgqRrc + esBN9ZregRCd7YtxoL+EVUNWaJNVx2mNmezEznrc9zhcYUrgeaVdFO2yBF1889zO + gCOVwrO8uDgeyj6IKa25H6c1N13ih/o7ZzEgWbGG+ylU1yECQQDv4ZSJ4EjSh/Fl + aHdz3wbBa/HKGTjC8iRy476Cyg2Fm8MZUe9Yy3udOrb5ZnS2MTpIXt5AF3h2TfYV + VoFXIorjAkEA50FcJmzT8sNMrPaV8vn+9W2Lu4U7C+K/O2g1iXMaZms5PC5zV5aV + CKXZWUX1fq2RaOzlbQrpgiolhXpeh8FjxwJBAOFHzSQfSsTNfttp3KUpU0LbiVvv + i+spVSnA0O4rq79KpVNmK44Mq67hsW1P11QzrzTAQ6GVaUBRv0YS061td1kCQHnP + wtN2tboFR6lABkJDjxoGRvlSt4SOPr7zKGgrWjeiuTZLHXSAnCY+/hr5L9Q3ZwXG + 6x6iBdgLjVIe4BZQNtcCQQDXGv/gWinCNTN3MPWfTW/RGzuMYVmyBFais0/VrgdH + h1dLpztmpQqfyH/zrBXQ9qL/zR4ojS6XYneO/U18WpEe + -----END RSA PRIVATE KEY----- + + To generate a key like this with OpenSSL, run:: + + openssl genrsa 2048 > key.pem + + This format also supports password-encrypted private keys. TLS + Lite can only handle password-encrypted private keys when OpenSSL + and M2Crypto are installed. In this case, passwordCallback will be + invoked to query the user for the password. + + @type s: str + @param s: A string containing a PEM-encoded public or private key. + + @type private: bool + @param private: If True, a L{SyntaxError} will be raised if the + private key component is not present. + + @type public: bool + @param public: If True, the private key component (if present) will + be discarded, so this function will always return a public key. + + @type passwordCallback: callable + @param passwordCallback: This function will be called, with no + arguments, if the PEM-encoded private key is password-encrypted. + The callback should return the password string. If the password is + incorrect, SyntaxError will be raised. If no callback is passed + and the key is password-encrypted, a prompt will be displayed at + the console. + + @rtype: L{tlslite.utils.RSAKey.RSAKey} + @return: An RSA key. + + @raise SyntaxError: If the key is not properly formatted. 
+ """ + for implementation in implementations: + if implementation == "openssl" and cryptomath.m2cryptoLoaded: + key = OpenSSL_RSAKey.parse(s, passwordCallback) + break + elif implementation == "python": + key = Python_RSAKey.parsePEM(s) + break + else: + raise ValueError("No acceptable implementations") + + return _parseKeyHelper(key, private, public) + + +def _parseKeyHelper(key, private, public): + if private: + if not key.hasPrivateKey(): + raise SyntaxError("Not a private key!") + + if public: + return _createPublicKey(key) + + if private: + if hasattr(key, "d"): + return _createPrivateKey(key) + else: + return key + + return key + +def parseAsPublicKey(s): + """Parse an XML or PEM-formatted public key. + + @type s: str + @param s: A string containing an XML or PEM-encoded public or private key. + + @rtype: L{tlslite.utils.RSAKey.RSAKey} + @return: An RSA public key. + + @raise SyntaxError: If the key is not properly formatted. + """ + try: + return parsePEMKey(s, public=True) + except: + return parseXMLKey(s, public=True) + +def parsePrivateKey(s): + """Parse an XML or PEM-formatted private key. + + @type s: str + @param s: A string containing an XML or PEM-encoded private key. + + @rtype: L{tlslite.utils.RSAKey.RSAKey} + @return: An RSA private key. + + @raise SyntaxError: If the key is not properly formatted. + """ + try: + return parsePEMKey(s, private=True) + except: + return parseXMLKey(s, private=True) + +def _createPublicKey(key): + """ + Create a new public key. Discard any private component, + and return the most efficient key possible. + """ + if not isinstance(key, RSAKey): + raise AssertionError() + return _createPublicRSAKey(key.n, key.e) + +def _createPrivateKey(key): + """ + Create a new private key. Return the most efficient key possible. 
+ """ + if not isinstance(key, RSAKey): + raise AssertionError() + if not key.hasPrivateKey(): + raise AssertionError() + return _createPrivateRSAKey(key.n, key.e, key.d, key.p, key.q, key.dP, + key.dQ, key.qInv) + +def _createPublicRSAKey(n, e, implementations = ["openssl", "pycrypto", + "python"]): + for implementation in implementations: + if implementation == "openssl" and cryptomath.m2cryptoLoaded: + return OpenSSL_RSAKey(n, e) + elif implementation == "pycrypto" and cryptomath.pycryptoLoaded: + return PyCrypto_RSAKey(n, e) + elif implementation == "python": + return Python_RSAKey(n, e) + raise ValueError("No acceptable implementations") + +def _createPrivateRSAKey(n, e, d, p, q, dP, dQ, qInv, + implementations = ["pycrypto", "python"]): + for implementation in implementations: + if implementation == "pycrypto" and cryptomath.pycryptoLoaded: + return PyCrypto_RSAKey(n, e, d, p, q, dP, dQ, qInv) + elif implementation == "python": + return Python_RSAKey(n, e, d, p, q, dP, dQ, qInv) + raise ValueError("No acceptable implementations") diff --git a/gam/gdata/tlslite/utils/keyfactory.pyc b/gam/gdata/tlslite/utils/keyfactory.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e83a7119d80dc1bc50bdba6ebcb51c766e0a1a17 GIT binary patch literal 9307 zcmd5?TXWk;c5YI#EXmjLcv6+!N-cJFHlta|qDYYxrShoEn<$DDMS>J>m01@EfFwxV zga*aS+No4LeO|Hb{4agiOM|-6f98zTGAICNVZicawcnHi@!D24SLX zlkY>MyG7J({D+M&-97R>%yrLiyhE1#@00Ig<_AQ1K)wgY9}?vut2`pgBUX7#l*g>{ z8BsoCl_zXClt6rqD4(+#KPL)r0Un;RmZwCyL+U$U;JyH9awf^H<;;ct#FSmlGO5tF z96_RU!%$sElLgtC+paZmBzrjD-+nNuBQrw&YN?eOX?-Dntfwwo=AD0^$~H6BJMK%D<~?KAjz_7yHZbA1(HYI(c<{ig!c33YIChm`m zitx^SjP%=WJJ5@;f>xmxxu&W^BV*0!%_BK$YU;oMjSM8?S!_R%c*Zs78Fge~C;Clf zph&J1@%Amk_ALTC(dX8+54~|Q0n^NE{sLO)eJIG*gU}bD2ci3+&q6Y?jhhkZVi{g$ z$K=%-=;Ck5>vi&K-Pc9XG^UGO1z+DGRZupjZ%i4vzPX@@cTkl|xWn_?Y|}pn&iJ{Q-WMP*A3!>VWL1ub8snT7tJ|prd%sqL|aQ-o79J z1xCrEAj3=)oQ=w~&}JS%kS)WuOi%>WP6?;q6TktX_Io{mE~DZrSTKUZ6lc&^ok^%o zCnwx8&^n&;u_Z93nW3BX+b@T%YoA0SuA|=C`=;tf94dWz$_V2C{CRGk#-+r3kx5yF zihgdTlF4kE+Uack=rVqGJ4>V%(^AT|Cj0wb!}%~6sGg>$-+uc%qAm$Oj|427uJRRl zz5gr5*h-bti|#?AIJlm*+D185x;SXw%nQ-x!NsBKn8)@_(aIdmjQM*jwNDdOM=g$$ zi=?g;3*~4tz8lRRCY9RRc_mvrzqoBBVuiN+-oTsFNIo*nX>O|1u4ZSmXe_yC+{84! zc36~KiFoCrGp~%a?t253>ZwtZcc<-?W`Z5&&*O=j9jCfzMLV}@W{{tis^Zlo`ToG# z=}oR#ax;r#eSAQND*vHAhbd;vFTRGzqU%CW6&y)J zbJlo3@CGtkFx!evf~lL93#_OVOw^APoDF8bAK=1k513%4ybKO2)OTQhkMuV_Ym5m+ zqq5|HzuyxmIG7=@;o}qDPtvE-oC-V&!7@Epv9eWvN-PT^u)k2YO~T3!o(QlkTo7(k zusMJz933DObCj;Cc=7MYg}xnh^dgI57}pp&0=Fw-EM%eUC2K0F^K(^)iFl+$oJ1L% z@r1|W4BLAVV~U7y30yxR!0N}2ZTDarpK)OHui=#kwy!a$Jqm4x?}ufC1Bk!yZx(w@ zp_nNc&T`*X2}CalQy$1l`5+4_KFERq_$EwVZ?GVTU02Z-3}!IQ`^;kC#u9VEPQBV# zje)r7^dGl^7$^W<{cs3`4}J&)fwL&sn+qP`WI@jYg@A|!)`$_Q@pjOIF<>s5Ea{*j zY&3*S9tC?a@SQ;C8$GjvWuOaU;~$N9&+sRcFBB^bj8(;AGu_AwrF@5dVf4`RUlxnm zcCDC6r%RbZE!Uy)#JXJ2g3&s#(;aHe&vkc}i8YkzdHp~s8vRlBvM<$h<9=D`b`rY* zA?2!dB{ofZthz1Ji5|y|dLbP>JE-b3(!6M;cIEc*?%}O_(M&DsO1e_*TwAsNL8E%q z)Gub^^Ch9WIKJ-Xtwo}jPSeQl@zKG=qDSVD27H>@E(E0OhSJH8V_M6+%#0fPs6XhH zB1)~8OV`qwfKb{hq%Y2`i|+U+A(fn?T)kH56wfElLR}|9r<`M zlsnhXK$V)x*;Q(x&oH|#jNp7IdN6BJIiY3mIalg9-s6LFGap#Uwlg^dJ;k1{LBIl(-W0Xy4;yiwo%+3d~ zGb44`y;hGq^TUa9<@6F=vmU!_*u}P+NS_@JV~u{N8F19J&e}!G8KuYhbmHJlzAzSz z6s=r3RjHY}HFu9&vGOE-eU#lTMXq9lsMfxex&}>Dvx!ABAuS1IOYLs!x*hG`#6;<8 zq4#QzJTEQdUS!f&XXlH0tzV~&N+00YVd>hwJ}CFJ=Iw33k+w_iX5CC5 zT*aN6qvMj@tQe(u{PHGwG^DL)H5#ogoJAvDOBR~abw<2B=+qCAQCEqM0dAPpfY8*f z#N3{BCMNZCW_*zw&8&iWtJmFl{i^C5ElP!f(;BJTq|q&(x9e#$+u4l_okaP#cG#V^ z3jv|zEU9IbL3z|Hs_{&>R&iyNOuKL!8MHJrTWM4dFRQJ7qZJVgi^*lDX@KYz>^#TGC~9V|`AY7? 
zX3UsuSi$)lLLWvVT^l2)24BODj-GSc6P$^;ii*9HFOCNEE5r`sDd7|e?AxlrL284= zT8{W79fB{N*ft#bsbEW#PAx}yrkdh-vtSe9VT_bi$Zu6grdopD@zn$j!!!apL>!#ehW)z=B+gb zX2I>=OjO8pVE6_{?65=!zWBrasL$ZdkH>8+bik%qfLDAV@+sjP+pQH*tN^S4p?n(n z#yA0V`7^*b4lzBI^W}O1TBYllKVUPx;CR{>-?A51VlScPD+3#gh?1FzX>lcU1{2Ie z&^;duBMu0Jxq@0@EB%R<7QlfXM0`e^sU96#6J7C<>8g5pm>;#*gWcZp07x=sG|(Z( zQDt{;6>MQX?5)y!; z3AJ?9H;VrUc={*glXehFi-h0mW1Vgt7&b>4z0DKbcoF?aMNw7vw|xLE(mV!z)*0r?;2D+I|Sz{$|| z73Nw2ZSx$_%~u3+*^nS|;t06?lqQVn;voXw0c`{NcIueh^gk0)$5HTy<&-Lnp$eQA zQNvhv7uE3BEUBxol&6cQlA*Mc{B`l{uyVp;d;L;&RLBii*>MI|%MLm)OmbbvMcMcr zf-^T`fC*wLwvQ|lrY`>g%utj+32lG`o`g3-+weSs@@`nhQyM-&E-wNv^QU;Y!7UA; z4Bmu7^_KaRghC;{^ULNoY`T}O`vs7xuB7WQM~1E{ICJ`u2R7U{*`FB4SQ^l$8@NCw z0&HE8s6WAr_o2dl13HE~Syz(80^7ApkHe>I!PVugMyw=fSZE zwneE1@QdS~<(-X288|m(RYZ5k?mE&GFg9N9u+*cA7V3L#o~2@UAS^bRtW>$3eu7{s z;(gS=!b34dNA5fV&mCALj?$tNf`S-Hi^ zZC2i6<$dyI(-$P(^?X2(D+VG6BD;EHAn%7ogz;AV4HhUE@hdET35AQ>?8}3b=hu89 zChx?rvG@h{!(lcnZ$K3xXjS>28uPK3D*tq1`xVUpZ}9Nk;yQ{Af4>f~8$o-{Zwlo# z=z;+Ng`36he+$KIjPNG+olIT>o`DPU58dX1m;fiCU11NNU#ziI4V?*LM*!DXtcCpyQ;u#j7Ofo3>|AdEI4wKBUz;b>QV*e3@!RwbV6-$2klHDU< zV31tyGSKMXIZ27jg`+aEgVh;ii#Xh3j-di~x*|d`hLXi!W9>^U(DjOF$V^*AWNfLH ztoCuP-#1~R=j{JGsHZ40igRs*x5JM^k3;vvTcOYK*?98!i|AJ9Nq*}w``y~Q2Vnl* F{{R<~p-cb( literal 0 HcmV?d00001 diff --git a/gam/gdata/tlslite/utils/rijndael.py b/gam/gdata/tlslite/utils/rijndael.py new file mode 100755 index 00000000000..cb2f547346d --- /dev/null +++ b/gam/gdata/tlslite/utils/rijndael.py @@ -0,0 +1,392 @@ +""" +A pure python (slow) implementation of rijndael with a decent interface + +To include - + +from rijndael import rijndael + +To do a key setup - + +r = rijndael(key, block_size = 16) + +key must be a string of length 16, 24, or 32 +blocksize must be 16, 24, or 32. Default is 16 + +To use - + +ciphertext = r.encrypt(plaintext) +plaintext = r.decrypt(ciphertext) + +If any strings are of the wrong length a ValueError is thrown +""" + +# ported from the Java reference code by Bram Cohen, bram@gawth.com, April 2001 +# this code is public domain, unless someone makes +# an intellectual property claim against the reference +# code, in which case it can be made public domain by +# deleting all the comments and renaming all the variables + +import copy +import string + + + +#----------------------- +#TREV - ADDED BECAUSE THERE'S WARNINGS ABOUT INT OVERFLOW BEHAVIOR CHANGING IN +#2.4..... 
+import os +if os.name != "java": + import exceptions + if hasattr(exceptions, "FutureWarning"): + import warnings + warnings.filterwarnings("ignore", category=FutureWarning, append=1) +#----------------------- + + + +shifts = [[[0, 0], [1, 3], [2, 2], [3, 1]], + [[0, 0], [1, 5], [2, 4], [3, 3]], + [[0, 0], [1, 7], [3, 5], [4, 4]]] + +# [keysize][block_size] +num_rounds = {16: {16: 10, 24: 12, 32: 14}, 24: {16: 12, 24: 12, 32: 14}, 32: {16: 14, 24: 14, 32: 14}} + +A = [[1, 1, 1, 1, 1, 0, 0, 0], + [0, 1, 1, 1, 1, 1, 0, 0], + [0, 0, 1, 1, 1, 1, 1, 0], + [0, 0, 0, 1, 1, 1, 1, 1], + [1, 0, 0, 0, 1, 1, 1, 1], + [1, 1, 0, 0, 0, 1, 1, 1], + [1, 1, 1, 0, 0, 0, 1, 1], + [1, 1, 1, 1, 0, 0, 0, 1]] + +# produce log and alog tables, needed for multiplying in the +# field GF(2^m) (generator = 3) +alog = [1] +for i in xrange(255): + j = (alog[-1] << 1) ^ alog[-1] + if j & 0x100 != 0: + j ^= 0x11B + alog.append(j) + +log = [0] * 256 +for i in xrange(1, 255): + log[alog[i]] = i + +# multiply two elements of GF(2^m) +def mul(a, b): + if a == 0 or b == 0: + return 0 + return alog[(log[a & 0xFF] + log[b & 0xFF]) % 255] + +# substitution box based on F^{-1}(x) +box = [[0] * 8 for i in xrange(256)] +box[1][7] = 1 +for i in xrange(2, 256): + j = alog[255 - log[i]] + for t in xrange(8): + box[i][t] = (j >> (7 - t)) & 0x01 + +B = [0, 1, 1, 0, 0, 0, 1, 1] + +# affine transform: box[i] <- B + A*box[i] +cox = [[0] * 8 for i in xrange(256)] +for i in xrange(256): + for t in xrange(8): + cox[i][t] = B[t] + for j in xrange(8): + cox[i][t] ^= A[t][j] * box[i][j] + +# S-boxes and inverse S-boxes +S = [0] * 256 +Si = [0] * 256 +for i in xrange(256): + S[i] = cox[i][0] << 7 + for t in xrange(1, 8): + S[i] ^= cox[i][t] << (7-t) + Si[S[i] & 0xFF] = i + +# T-boxes +G = [[2, 1, 1, 3], + [3, 2, 1, 1], + [1, 3, 2, 1], + [1, 1, 3, 2]] + +AA = [[0] * 8 for i in xrange(4)] + +for i in xrange(4): + for j in xrange(4): + AA[i][j] = G[i][j] + AA[i][i+4] = 1 + +for i in xrange(4): + pivot = AA[i][i] + if pivot == 0: + t = i + 1 + while AA[t][i] == 0 and t < 4: + t += 1 + assert t != 4, 'G matrix must be invertible' + for j in xrange(8): + AA[i][j], AA[t][j] = AA[t][j], AA[i][j] + pivot = AA[i][i] + for j in xrange(8): + if AA[i][j] != 0: + AA[i][j] = alog[(255 + log[AA[i][j] & 0xFF] - log[pivot & 0xFF]) % 255] + for t in xrange(4): + if i != t: + for j in xrange(i+1, 8): + AA[t][j] ^= mul(AA[i][j], AA[t][i]) + AA[t][i] = 0 + +iG = [[0] * 4 for i in xrange(4)] + +for i in xrange(4): + for j in xrange(4): + iG[i][j] = AA[i][j + 4] + +def mul4(a, bs): + if a == 0: + return 0 + r = 0 + for b in bs: + r <<= 8 + if b != 0: + r = r | mul(a, b) + return r + +T1 = [] +T2 = [] +T3 = [] +T4 = [] +T5 = [] +T6 = [] +T7 = [] +T8 = [] +U1 = [] +U2 = [] +U3 = [] +U4 = [] + +for t in xrange(256): + s = S[t] + T1.append(mul4(s, G[0])) + T2.append(mul4(s, G[1])) + T3.append(mul4(s, G[2])) + T4.append(mul4(s, G[3])) + + s = Si[t] + T5.append(mul4(s, iG[0])) + T6.append(mul4(s, iG[1])) + T7.append(mul4(s, iG[2])) + T8.append(mul4(s, iG[3])) + + U1.append(mul4(t, iG[0])) + U2.append(mul4(t, iG[1])) + U3.append(mul4(t, iG[2])) + U4.append(mul4(t, iG[3])) + +# round constants +rcon = [1] +r = 1 +for t in xrange(1, 30): + r = mul(2, r) + rcon.append(r) + +del A +del AA +del pivot +del B +del G +del box +del log +del alog +del i +del j +del r +del s +del t +del mul +del mul4 +del cox +del iG + +class rijndael: + def __init__(self, key, block_size = 16): + if block_size != 16 and block_size != 24 and block_size != 32: + raise ValueError('Invalid block size: ' + 
str(block_size)) + if len(key) != 16 and len(key) != 24 and len(key) != 32: + raise ValueError('Invalid key size: ' + str(len(key))) + self.block_size = block_size + + ROUNDS = num_rounds[len(key)][block_size] + BC = block_size / 4 + # encryption round keys + Ke = [[0] * BC for i in xrange(ROUNDS + 1)] + # decryption round keys + Kd = [[0] * BC for i in xrange(ROUNDS + 1)] + ROUND_KEY_COUNT = (ROUNDS + 1) * BC + KC = len(key) / 4 + + # copy user material bytes into temporary ints + tk = [] + for i in xrange(0, KC): + tk.append((ord(key[i * 4]) << 24) | (ord(key[i * 4 + 1]) << 16) | + (ord(key[i * 4 + 2]) << 8) | ord(key[i * 4 + 3])) + + # copy values into round key arrays + t = 0 + j = 0 + while j < KC and t < ROUND_KEY_COUNT: + Ke[t / BC][t % BC] = tk[j] + Kd[ROUNDS - (t / BC)][t % BC] = tk[j] + j += 1 + t += 1 + tt = 0 + rconpointer = 0 + while t < ROUND_KEY_COUNT: + # extrapolate using phi (the round key evolution function) + tt = tk[KC - 1] + tk[0] ^= (S[(tt >> 16) & 0xFF] & 0xFF) << 24 ^ \ + (S[(tt >> 8) & 0xFF] & 0xFF) << 16 ^ \ + (S[ tt & 0xFF] & 0xFF) << 8 ^ \ + (S[(tt >> 24) & 0xFF] & 0xFF) ^ \ + (rcon[rconpointer] & 0xFF) << 24 + rconpointer += 1 + if KC != 8: + for i in xrange(1, KC): + tk[i] ^= tk[i-1] + else: + for i in xrange(1, KC / 2): + tk[i] ^= tk[i-1] + tt = tk[KC / 2 - 1] + tk[KC / 2] ^= (S[ tt & 0xFF] & 0xFF) ^ \ + (S[(tt >> 8) & 0xFF] & 0xFF) << 8 ^ \ + (S[(tt >> 16) & 0xFF] & 0xFF) << 16 ^ \ + (S[(tt >> 24) & 0xFF] & 0xFF) << 24 + for i in xrange(KC / 2 + 1, KC): + tk[i] ^= tk[i-1] + # copy values into round key arrays + j = 0 + while j < KC and t < ROUND_KEY_COUNT: + Ke[t / BC][t % BC] = tk[j] + Kd[ROUNDS - (t / BC)][t % BC] = tk[j] + j += 1 + t += 1 + # inverse MixColumn where needed + for r in xrange(1, ROUNDS): + for j in xrange(BC): + tt = Kd[r][j] + Kd[r][j] = U1[(tt >> 24) & 0xFF] ^ \ + U2[(tt >> 16) & 0xFF] ^ \ + U3[(tt >> 8) & 0xFF] ^ \ + U4[ tt & 0xFF] + self.Ke = Ke + self.Kd = Kd + + def encrypt(self, plaintext): + if len(plaintext) != self.block_size: + raise ValueError('wrong block length, expected ' + str(self.block_size) + ' got ' + str(len(plaintext))) + Ke = self.Ke + + BC = self.block_size / 4 + ROUNDS = len(Ke) - 1 + if BC == 4: + SC = 0 + elif BC == 6: + SC = 1 + else: + SC = 2 + s1 = shifts[SC][1][0] + s2 = shifts[SC][2][0] + s3 = shifts[SC][3][0] + a = [0] * BC + # temporary work array + t = [] + # plaintext to ints + key + for i in xrange(BC): + t.append((ord(plaintext[i * 4 ]) << 24 | + ord(plaintext[i * 4 + 1]) << 16 | + ord(plaintext[i * 4 + 2]) << 8 | + ord(plaintext[i * 4 + 3]) ) ^ Ke[0][i]) + # apply round transforms + for r in xrange(1, ROUNDS): + for i in xrange(BC): + a[i] = (T1[(t[ i ] >> 24) & 0xFF] ^ + T2[(t[(i + s1) % BC] >> 16) & 0xFF] ^ + T3[(t[(i + s2) % BC] >> 8) & 0xFF] ^ + T4[ t[(i + s3) % BC] & 0xFF] ) ^ Ke[r][i] + t = copy.copy(a) + # last round is special + result = [] + for i in xrange(BC): + tt = Ke[ROUNDS][i] + result.append((S[(t[ i ] >> 24) & 0xFF] ^ (tt >> 24)) & 0xFF) + result.append((S[(t[(i + s1) % BC] >> 16) & 0xFF] ^ (tt >> 16)) & 0xFF) + result.append((S[(t[(i + s2) % BC] >> 8) & 0xFF] ^ (tt >> 8)) & 0xFF) + result.append((S[ t[(i + s3) % BC] & 0xFF] ^ tt ) & 0xFF) + return string.join(map(chr, result), '') + + def decrypt(self, ciphertext): + if len(ciphertext) != self.block_size: + raise ValueError('wrong block length, expected ' + str(self.block_size) + ' got ' + str(len(plaintext))) + Kd = self.Kd + + BC = self.block_size / 4 + ROUNDS = len(Kd) - 1 + if BC == 4: + SC = 0 + elif BC == 6: + SC = 1 + else: + 
SC = 2 + s1 = shifts[SC][1][1] + s2 = shifts[SC][2][1] + s3 = shifts[SC][3][1] + a = [0] * BC + # temporary work array + t = [0] * BC + # ciphertext to ints + key + for i in xrange(BC): + t[i] = (ord(ciphertext[i * 4 ]) << 24 | + ord(ciphertext[i * 4 + 1]) << 16 | + ord(ciphertext[i * 4 + 2]) << 8 | + ord(ciphertext[i * 4 + 3]) ) ^ Kd[0][i] + # apply round transforms + for r in xrange(1, ROUNDS): + for i in xrange(BC): + a[i] = (T5[(t[ i ] >> 24) & 0xFF] ^ + T6[(t[(i + s1) % BC] >> 16) & 0xFF] ^ + T7[(t[(i + s2) % BC] >> 8) & 0xFF] ^ + T8[ t[(i + s3) % BC] & 0xFF] ) ^ Kd[r][i] + t = copy.copy(a) + # last round is special + result = [] + for i in xrange(BC): + tt = Kd[ROUNDS][i] + result.append((Si[(t[ i ] >> 24) & 0xFF] ^ (tt >> 24)) & 0xFF) + result.append((Si[(t[(i + s1) % BC] >> 16) & 0xFF] ^ (tt >> 16)) & 0xFF) + result.append((Si[(t[(i + s2) % BC] >> 8) & 0xFF] ^ (tt >> 8)) & 0xFF) + result.append((Si[ t[(i + s3) % BC] & 0xFF] ^ tt ) & 0xFF) + return string.join(map(chr, result), '') + +def encrypt(key, block): + return rijndael(key, len(block)).encrypt(block) + +def decrypt(key, block): + return rijndael(key, len(block)).decrypt(block) + +def test(): + def t(kl, bl): + b = 'b' * bl + r = rijndael('a' * kl, bl) + assert r.decrypt(r.encrypt(b)) == b + t(16, 16) + t(16, 24) + t(16, 32) + t(24, 16) + t(24, 24) + t(24, 32) + t(32, 16) + t(32, 24) + t(32, 32) + diff --git a/gam/gdata/tlslite/utils/win32prng.c b/gam/gdata/tlslite/utils/win32prng.c new file mode 100755 index 00000000000..de08b3b3b90 --- /dev/null +++ b/gam/gdata/tlslite/utils/win32prng.c @@ -0,0 +1,63 @@ + +#include "Python.h" +#define _WIN32_WINNT 0x0400 /* Needed for CryptoAPI on some systems */ +#include + + +static PyObject* getRandomBytes(PyObject *self, PyObject *args) +{ + int howMany; + HCRYPTPROV hCryptProv; + unsigned char* bytes = NULL; + PyObject* returnVal = NULL; + + + /* Read Arguments */ + if (!PyArg_ParseTuple(args, "i", &howMany)) + return(NULL); + + /* Get Context */ + if(CryptAcquireContext( + &hCryptProv, + NULL, + NULL, + PROV_RSA_FULL, + CRYPT_VERIFYCONTEXT) == 0) + return Py_BuildValue("s#", NULL, 0); + + + /* Allocate bytes */ + bytes = malloc(howMany); + + + /* Get random data */ + if(CryptGenRandom( + hCryptProv, + howMany, + bytes) == 0) + returnVal = Py_BuildValue("s#", NULL, 0); + else + returnVal = Py_BuildValue("s#", bytes, howMany); + + free(bytes); + CryptReleaseContext(hCryptProv, 0); + + return returnVal; +} + + + +/* List of functions exported by this module */ + +static struct PyMethodDef win32prng_functions[] = { + {"getRandomBytes", (PyCFunction)getRandomBytes, METH_VARARGS}, + {NULL, NULL} /* Sentinel */ +}; + + +/* Initialize this module. */ + +DL_EXPORT(void) initwin32prng(void) +{ + Py_InitModule("win32prng", win32prng_functions); +} diff --git a/gam/gdata/tlslite/utils/xmltools.py b/gam/gdata/tlslite/utils/xmltools.py new file mode 100755 index 00000000000..c1e8c4d950a --- /dev/null +++ b/gam/gdata/tlslite/utils/xmltools.py @@ -0,0 +1,202 @@ +"""Helper functions for XML. + +This module has misc. 
helper functions for working with XML DOM nodes.""" + +from compat import * +import os +import re + +if os.name == "java": + # Only for Jython + from javax.xml.parsers import * + import java + + builder = DocumentBuilderFactory.newInstance().newDocumentBuilder() + + def parseDocument(s): + stream = java.io.ByteArrayInputStream(java.lang.String(s).getBytes()) + return builder.parse(stream) +else: + from xml.dom import minidom + from xml.sax import saxutils + + def parseDocument(s): + return minidom.parseString(s) + +def parseAndStripWhitespace(s): + try: + element = parseDocument(s).documentElement + except BaseException, e: + raise SyntaxError(str(e)) + stripWhitespace(element) + return element + +#Goes through a DOM tree and removes whitespace besides child elements, +#as long as this whitespace is correctly tab-ified +def stripWhitespace(element, tab=0): + element.normalize() + + lastSpacer = "\n" + ("\t"*tab) + spacer = lastSpacer + "\t" + + #Zero children aren't allowed (i.e. ) + #This makes writing output simpler, and matches Canonical XML + if element.childNodes.length==0: #DON'T DO len(element.childNodes) - doesn't work in Jython + raise SyntaxError("Empty XML elements not allowed") + + #If there's a single child, it must be text context + if element.childNodes.length==1: + if element.firstChild.nodeType == element.firstChild.TEXT_NODE: + #If it's an empty element, remove + if element.firstChild.data == lastSpacer: + element.removeChild(element.firstChild) + return + #If not text content, give an error + elif element.firstChild.nodeType == element.firstChild.ELEMENT_NODE: + raise SyntaxError("Bad whitespace under '%s'" % element.tagName) + else: + raise SyntaxError("Unexpected node type in XML document") + + #Otherwise there's multiple child element + child = element.firstChild + while child: + if child.nodeType == child.ELEMENT_NODE: + stripWhitespace(child, tab+1) + child = child.nextSibling + elif child.nodeType == child.TEXT_NODE: + if child == element.lastChild: + if child.data != lastSpacer: + raise SyntaxError("Bad whitespace under '%s'" % element.tagName) + elif child.data != spacer: + raise SyntaxError("Bad whitespace under '%s'" % element.tagName) + next = child.nextSibling + element.removeChild(child) + child = next + else: + raise SyntaxError("Unexpected node type in XML document") + + +def checkName(element, name): + if element.nodeType != element.ELEMENT_NODE: + raise SyntaxError("Missing element: '%s'" % name) + + if name == None: + return + + if element.tagName != name: + raise SyntaxError("Wrong element name: should be '%s', is '%s'" % (name, element.tagName)) + +def getChild(element, index, name=None): + if element.nodeType != element.ELEMENT_NODE: + raise SyntaxError("Wrong node type in getChild()") + + child = element.childNodes.item(index) + if child == None: + raise SyntaxError("Missing child: '%s'" % name) + checkName(child, name) + return child + +def getChildIter(element, index): + class ChildIter: + def __init__(self, element, index): + self.element = element + self.index = index + + def next(self): + if self.index < len(self.element.childNodes): + retVal = self.element.childNodes.item(self.index) + self.index += 1 + else: + retVal = None + return retVal + + def checkEnd(self): + if self.index != len(self.element.childNodes): + raise SyntaxError("Too many elements under: '%s'" % self.element.tagName) + return ChildIter(element, index) + +def getChildOrNone(element, index): + if element.nodeType != element.ELEMENT_NODE: + raise SyntaxError("Wrong node type 
in getChild()") + child = element.childNodes.item(index) + return child + +def getLastChild(element, index, name=None): + if element.nodeType != element.ELEMENT_NODE: + raise SyntaxError("Wrong node type in getLastChild()") + + child = element.childNodes.item(index) + if child == None: + raise SyntaxError("Missing child: '%s'" % name) + if child != element.lastChild: + raise SyntaxError("Too many elements under: '%s'" % element.tagName) + checkName(child, name) + return child + +#Regular expressions for syntax-checking attribute and element content +nsRegEx = "http://trevp.net/cryptoID\Z" +cryptoIDRegEx = "([a-km-z3-9]{5}\.){3}[a-km-z3-9]{5}\Z" +urlRegEx = "http(s)?://.{1,100}\Z" +sha1Base64RegEx = "[A-Za-z0-9+/]{27}=\Z" +base64RegEx = "[A-Za-z0-9+/]+={0,4}\Z" +certsListRegEx = "(0)?(1)?(2)?(3)?(4)?(5)?(6)?(7)?(8)?(9)?\Z" +keyRegEx = "[A-Z]\Z" +keysListRegEx = "(A)?(B)?(C)?(D)?(E)?(F)?(G)?(H)?(I)?(J)?(K)?(L)?(M)?(N)?(O)?(P)?(Q)?(R)?(S)?(T)?(U)?(V)?(W)?(X)?(Y)?(Z)?\Z" +dateTimeRegEx = "\d\d\d\d-\d\d-\d\dT\d\d:\d\d:\d\dZ\Z" +shortStringRegEx = ".{1,100}\Z" +exprRegEx = "[a-zA-Z0-9 ,()]{1,200}\Z" +notAfterDeltaRegEx = "0|([1-9][0-9]{0,8})\Z" #A number from 0 to (1 billion)-1 +booleanRegEx = "(true)|(false)" + +def getReqAttribute(element, attrName, regEx=""): + if element.nodeType != element.ELEMENT_NODE: + raise SyntaxError("Wrong node type in getReqAttribute()") + + value = element.getAttribute(attrName) + if not value: + raise SyntaxError("Missing Attribute: " + attrName) + if not re.match(regEx, value): + raise SyntaxError("Bad Attribute Value for '%s': '%s' " % (attrName, value)) + element.removeAttribute(attrName) + return str(value) #de-unicode it; this is needed for bsddb, for example + +def getAttribute(element, attrName, regEx=""): + if element.nodeType != element.ELEMENT_NODE: + raise SyntaxError("Wrong node type in getAttribute()") + + value = element.getAttribute(attrName) + if value: + if not re.match(regEx, value): + raise SyntaxError("Bad Attribute Value for '%s': '%s' " % (attrName, value)) + element.removeAttribute(attrName) + return str(value) #de-unicode it; this is needed for bsddb, for example + +def checkNoMoreAttributes(element): + if element.nodeType != element.ELEMENT_NODE: + raise SyntaxError("Wrong node type in checkNoMoreAttributes()") + + if element.attributes.length!=0: + raise SyntaxError("Extra attributes on '%s'" % element.tagName) + +def getText(element, regEx=""): + textNode = element.firstChild + if textNode == None: + raise SyntaxError("Empty element '%s'" % element.tagName) + if textNode.nodeType != textNode.TEXT_NODE: + raise SyntaxError("Non-text node: '%s'" % element.tagName) + if not re.match(regEx, textNode.data): + raise SyntaxError("Bad Text Value for '%s': '%s' " % (element.tagName, textNode.data)) + return str(textNode.data) #de-unicode it; this is needed for bsddb, for example + +#Function for adding tabs to a string +def indent(s, steps, ch="\t"): + tabs = ch*steps + if s[-1] != "\n": + s = tabs + s.replace("\n", "\n"+tabs) + else: + s = tabs + s.replace("\n", "\n"+tabs) + s = s[ : -len(tabs)] + return s + +def escape(s): + return saxutils.escape(s) diff --git a/gam/gdata/tlslite/utils/xmltools.pyc b/gam/gdata/tlslite/utils/xmltools.pyc new file mode 100644 index 0000000000000000000000000000000000000000..da795af08f03571a2decc563433495e30e472fc5 GIT binary patch literal 8653 zcmcgxO>-2-8SdF#B<-?7LIUw+!eE1086@#(Y+@S&l4IFuaYnMii>b|MXM`5*&Mea{ 
[remainder of the binary delta for gam/gdata/tlslite/utils/xmltools.pyc omitted: compiled bytecode, not human-readable. The readable patch text resumes mid-docstring, apparently in gam/gdata/webmastertools/__init__.py.]
This is the text) + extension_elements: list (optional) A list of ExtensionElement instances + which are children of this element. + extension_attributes: dict (optional) A dictionary of strings which are + the values for additional XML attributes of this element.
+ """ + + self.start_index = start_index + self.category = category or [] + self.id = atom_id + self.link = link or [] + self.title = title + self.updated = updated + self.entry = entry or [] + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + +def SitesFeedFromString(xml_string): + return atom.CreateClassFromXMLString(SitesFeed, xml_string) + + +class SitemapsEntry(atom.Entry, LinkFinder): + """A Google Webmaster Tools meta Sitemaps Entry flavor of an Atom Entry """ + + _tag = atom.Entry._tag + _namespace = atom.Entry._namespace + _children = atom.Entry._children.copy() + _attributes = atom.Entry._attributes.copy() + _children['{%s}sitemap-type' % GWEBMASTERTOOLS_NAMESPACE] = ( + 'sitemap_type', SitemapType) + _children['{%s}sitemap-status' % GWEBMASTERTOOLS_NAMESPACE] = ( + 'sitemap_status', SitemapStatus) + _children['{%s}sitemap-last-downloaded' % GWEBMASTERTOOLS_NAMESPACE] = ( + 'sitemap_last_downloaded', SitemapLastDownloaded) + _children['{%s}sitemap-url-count' % GWEBMASTERTOOLS_NAMESPACE] = ( + 'sitemap_url_count', SitemapUrlCount) + _children['{%s}sitemap-mobile-markup-language' % GWEBMASTERTOOLS_NAMESPACE] \ + = ('sitemap_mobile_markup_language', SitemapMobileMarkupLanguage) + _children['{%s}sitemap-news-publication-label' % GWEBMASTERTOOLS_NAMESPACE] \ + = ('sitemap_news_publication_label', SitemapNewsPublicationLabel) + + def __GetId(self): + return self.__id + + # This method was created to strip the unwanted whitespace from the id's + # text node. + def __SetId(self, id): + self.__id = id + if id is not None and id.text is not None: + self.__id.text = id.text.strip() + + id = property(__GetId, __SetId) + + def __init__(self, category=None, content=None, + atom_id=None, link=None, title=None, updated=None, + sitemap_type=None, sitemap_status=None, sitemap_last_downloaded=None, + sitemap_url_count=None, sitemap_mobile_markup_language=None, + sitemap_news_publication_label=None, + extension_elements=None, extension_attributes=None, text=None): + atom.Entry.__init__(self, category=category, + content=content, atom_id=atom_id, link=link, + title=title, updated=updated, text=text) + + self.sitemap_type = sitemap_type + self.sitemap_status = sitemap_status + self.sitemap_last_downloaded = sitemap_last_downloaded + self.sitemap_url_count = sitemap_url_count + self.sitemap_mobile_markup_language = sitemap_mobile_markup_language + self.sitemap_news_publication_label = sitemap_news_publication_label + + +def SitemapsEntryFromString(xml_string): + return atom.CreateClassFromXMLString(SitemapsEntry, xml_string) + + +class SitemapsFeed(atom.Feed, LinkFinder): + """A Google Webmaster Tools meta Sitemaps feed flavor of an Atom Feed""" + + _tag = atom.Feed._tag + _namespace = atom.Feed._namespace + _children = atom.Feed._children.copy() + _attributes = atom.Feed._attributes.copy() + _children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry', [SitemapsEntry]) + _children['{%s}sitemap-mobile' % GWEBMASTERTOOLS_NAMESPACE] = ( + 'sitemap_mobile', SitemapMobile) + _children['{%s}sitemap-news' % GWEBMASTERTOOLS_NAMESPACE] = ( + 'sitemap_news', SitemapNews) + del _children['{%s}generator' % atom.ATOM_NAMESPACE] + del _children['{%s}author' % atom.ATOM_NAMESPACE] + del _children['{%s}contributor' % atom.ATOM_NAMESPACE] + del _children['{%s}logo' % atom.ATOM_NAMESPACE] + del _children['{%s}icon' % atom.ATOM_NAMESPACE] + del _children['{%s}rights' % atom.ATOM_NAMESPACE] + del _children['{%s}subtitle' % atom.ATOM_NAMESPACE] + 
+ def __GetId(self): + return self.__id + + def __SetId(self, id): + self.__id = id + if id is not None and id.text is not None: + self.__id.text = id.text.strip() + + id = property(__GetId, __SetId) + + def __init__(self, category=None, content=None, + atom_id=None, link=None, title=None, updated=None, + entry=None, sitemap_mobile=None, sitemap_news=None, + extension_elements=None, extension_attributes=None, text=None): + + self.category = category or [] + self.id = atom_id + self.link = link or [] + self.title = title + self.updated = updated + self.entry = entry or [] + self.text = text + self.sitemap_mobile = sitemap_mobile + self.sitemap_news = sitemap_news + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + +def SitemapsFeedFromString(xml_string): + return atom.CreateClassFromXMLString(SitemapsFeed, xml_string) diff --git a/gam/gdata/webmastertools/data.py b/gam/gdata/webmastertools/data.py new file mode 100755 index 00000000000..8b50a47a89e --- /dev/null +++ b/gam/gdata/webmastertools/data.py @@ -0,0 +1,217 @@ +#!/usr/bin/python +# +# Copyright (C) 2009 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +"""Contains the data classes of the Google Webmaster Tools Data API""" + + +__author__ = 'j.s@google.com (Jeff Scudder)' + + +import atom.core +import atom.data +import gdata.data +import gdata.opensearch.data + + +WT_TEMPLATE = '{http://schemas.google.com/webmaster/tools/2007/}%s' + + +class CrawlIssueCrawlType(atom.core.XmlElement): + """Type of crawl of the crawl issue""" + _qname = WT_TEMPLATE % 'crawl-type' + + +class CrawlIssueDateDetected(atom.core.XmlElement): + """Detection date for the issue""" + _qname = WT_TEMPLATE % 'date-detected' + + +class CrawlIssueDetail(atom.core.XmlElement): + """Detail of the crawl issue""" + _qname = WT_TEMPLATE % 'detail' + + +class CrawlIssueIssueType(atom.core.XmlElement): + """Type of crawl issue""" + _qname = WT_TEMPLATE % 'issue-type' + + +class CrawlIssueLinkedFromUrl(atom.core.XmlElement): + """Source URL that links to the issue URL""" + _qname = WT_TEMPLATE % 'linked-from' + + +class CrawlIssueUrl(atom.core.XmlElement): + """URL affected by the crawl issue""" + _qname = WT_TEMPLATE % 'url' + + +class CrawlIssueEntry(gdata.data.GDEntry): + """Describes a crawl issue entry""" + date_detected = CrawlIssueDateDetected + url = CrawlIssueUrl + detail = CrawlIssueDetail + issue_type = CrawlIssueIssueType + crawl_type = CrawlIssueCrawlType + linked_from = [CrawlIssueLinkedFromUrl] + + +class CrawlIssuesFeed(gdata.data.GDFeed): + """Feed of crawl issues for a particular site""" + entry = [CrawlIssueEntry] + + +class Indexed(atom.core.XmlElement): + """Describes the indexing status of a site""" + _qname = WT_TEMPLATE % 'indexed' + + +class Keyword(atom.core.XmlElement): + """A keyword in a site or in a link to a site""" + _qname = WT_TEMPLATE % 'keyword' + source = 'source' + + +class KeywordEntry(gdata.data.GDEntry): + """Describes a keyword entry""" + + +class 
KeywordsFeed(gdata.data.GDFeed): + """Feed of keywords for a particular site""" + entry = [KeywordEntry] + keyword = [Keyword] + + +class LastCrawled(atom.core.XmlElement): + """Describes the last crawled date of a site""" + _qname = WT_TEMPLATE % 'last-crawled' + + +class MessageBody(atom.core.XmlElement): + """Message body""" + _qname = WT_TEMPLATE % 'body' + + +class MessageDate(atom.core.XmlElement): + """Message date""" + _qname = WT_TEMPLATE % 'date' + + +class MessageLanguage(atom.core.XmlElement): + """Message language""" + _qname = WT_TEMPLATE % 'language' + + +class MessageRead(atom.core.XmlElement): + """Indicates if the message has already been read""" + _qname = WT_TEMPLATE % 'read' + + +class MessageSubject(atom.core.XmlElement): + """Message subject""" + _qname = WT_TEMPLATE % 'subject' + + +class SiteId(atom.core.XmlElement): + """Site URL""" + _qname = WT_TEMPLATE % 'id' + + +class MessageEntry(gdata.data.GDEntry): + """Describes a message entry""" + wt_id = SiteId + subject = MessageSubject + date = MessageDate + body = MessageBody + language = MessageLanguage + read = MessageRead + + +class MessagesFeed(gdata.data.GDFeed): + """Describes a messages feed""" + entry = [MessageEntry] + + +class SitemapEntry(gdata.data.GDEntry): + """Describes a sitemap entry""" + indexed = Indexed + wt_id = SiteId + + +class SitemapMobileMarkupLanguage(atom.core.XmlElement): + """Describes a markup language for URLs in this sitemap""" + _qname = WT_TEMPLATE % 'sitemap-mobile-markup-language' + + +class SitemapMobile(atom.core.XmlElement): + """Lists acceptable mobile markup languages for URLs in this sitemap""" + _qname = WT_TEMPLATE % 'sitemap-mobile' + sitemap_mobile_markup_language = [SitemapMobileMarkupLanguage] + + +class SitemapNewsPublicationLabel(atom.core.XmlElement): + """Specifies the publication label for this sitemap""" + _qname = WT_TEMPLATE % 'sitemap-news-publication-label' + + +class SitemapNews(atom.core.XmlElement): + """Lists publication labels for this sitemap""" + _qname = WT_TEMPLATE % 'sitemap-news' + sitemap_news_publication_label = [SitemapNewsPublicationLabel] + + +class SitemapType(atom.core.XmlElement): + """Indicates the type of sitemap. 
Not used for News or Mobile Sitemaps""" + _qname = WT_TEMPLATE % 'sitemap-type' + + +class SitemapUrlCount(atom.core.XmlElement): + """Indicates the number of URLs contained in the sitemap""" + _qname = WT_TEMPLATE % 'sitemap-url-count' + + +class SitemapsFeed(gdata.data.GDFeed): + """Describes a sitemaps feed""" + entry = [SitemapEntry] + + +class VerificationMethod(atom.core.XmlElement): + """Describes a verification method that may be used for a site""" + _qname = WT_TEMPLATE % 'verification-method' + in_use = 'in-use' + type = 'type' + + +class Verified(atom.core.XmlElement): + """Describes the verification status of a site""" + _qname = WT_TEMPLATE % 'verified' + + +class SiteEntry(gdata.data.GDEntry): + """Describes a site entry""" + indexed = Indexed + wt_id = SiteId + verified = Verified + last_crawled = LastCrawled + verification_method = [VerificationMethod] + + +class SitesFeed(gdata.data.GDFeed): + """Describes a sites feed""" + entry = [SiteEntry] + + diff --git a/gam/gdata/webmastertools/service.py b/gam/gdata/webmastertools/service.py new file mode 100755 index 00000000000..8c3286db40b --- /dev/null +++ b/gam/gdata/webmastertools/service.py @@ -0,0 +1,516 @@ +#!/usr/bin/python +# +# Copyright (C) 2008 Yu-Jie Lin +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""GWebmasterToolsService extends the GDataService to streamline +Google Webmaster Tools operations. + + GWebmasterToolsService: Provides methods to query feeds and manipulate items. + Extends GDataService. +""" + +__author__ = 'livibetter (Yu-Jie Lin)' + +import urllib +import gdata +import atom.service +import gdata.service +import gdata.webmastertools as webmastertools +import atom + + +FEED_BASE = 'https://www.google.com/webmasters/tools/feeds/' +SITES_FEED = FEED_BASE + 'sites/' +SITE_TEMPLATE = SITES_FEED + '%s' +SITEMAPS_FEED_TEMPLATE = FEED_BASE + '%(site_id)s/sitemaps/' +SITEMAP_TEMPLATE = SITEMAPS_FEED_TEMPLATE + '%(sitemap_id)s' + + +class Error(Exception): + pass + + +class RequestError(Error): + pass + + +class GWebmasterToolsService(gdata.service.GDataService): + """Client for the Google Webmaster Tools service.""" + + def __init__(self, email=None, password=None, source=None, + server='www.google.com', **kwargs): + """Creates a client for the Google Webmaster Tools service. + + Args: + email: string (optional) The user's email address, used for + authentication. + password: string (optional) The user's password. + source: string (optional) The name of the user's application. + server: string (optional) The name of the server to which a connection + will be opened. Default value: 'www.google.com'. + **kwargs: The other parameters to pass to gdata.service.GDataService + constructor. + """ + gdata.service.GDataService.__init__( + self, email=email, password=password, service='sitemaps', source=source, + server=server, **kwargs) + + def GetSitesFeed(self, uri=SITES_FEED, + converter=webmastertools.SitesFeedFromString): + """Gets sites feed. + + Args: + uri: str (optional) URI to retrieve sites feed. 
+ converter: func (optional) Function which is executed on the server's + response before it is returned. Usually this is a function like + SitesFeedFromString which will parse the response and turn it into + an object. + + Returns: + If converter is defined, the results of running converter on the server's + response. Otherwise, it will be a SitesFeed object. + """ + return self.Get(uri, converter=converter) + + def AddSite(self, site_uri, uri=SITES_FEED, + url_params=None, escape_params=True, converter=None): + """Adds a site to Google Webmaster Tools. + + Args: + site_uri: str URI of which site to add. + uri: str (optional) URI to add a site. + url_params: dict (optional) Additional URL parameters to be included + in the insertion request. + escape_params: boolean (optional) If true, the url_parameters will be + escaped before they are included in the request. + converter: func (optional) Function which is executed on the server's + response before it is returned. Usually this is a function like + SitesEntryFromString which will parse the response and turn it into + an object. + + Returns: + If converter is defined, the results of running converter on the server's + response. Otherwise, it will be a SitesEntry object. + """ + + site_entry = webmastertools.SitesEntry() + site_entry.content = atom.Content(src=site_uri) + response = self.Post(site_entry, uri, + url_params=url_params, + escape_params=escape_params, converter=converter) + if not converter and isinstance(response, atom.Entry): + return webmastertools.SitesEntryFromString(response.ToString()) + return response + + def DeleteSite(self, site_uri, uri=SITE_TEMPLATE, + url_params=None, escape_params=True): + """Removes a site from Google Webmaster Tools. + + Args: + site_uri: str URI of which site to remove. + uri: str (optional) A URI template to send DELETE request. + Default SITE_TEMPLATE. + url_params: dict (optional) Additional URL parameters to be included + in the insertion request. + escape_params: boolean (optional) If true, the url_parameters will be + escaped before they are included in the request. + + Returns: + True if the delete succeeded. + """ + + return self.Delete( + uri % urllib.quote_plus(site_uri), + url_params=url_params, escape_params=escape_params) + + def VerifySite(self, site_uri, verification_method, uri=SITE_TEMPLATE, + url_params=None, escape_params=True, converter=None): + """Requests a verification of a site. + + Args: + site_uri: str URI of which site to add sitemap for. + verification_method: str The method to verify a site. Valid values are + 'htmlpage', and 'metatag'. + uri: str (optional) URI template to update a site. + Default SITE_TEMPLATE. + url_params: dict (optional) Additional URL parameters to be included + in the insertion request. + escape_params: boolean (optional) If true, the url_parameters will be + escaped before they are included in the request. + converter: func (optional) Function which is executed on the server's + response before it is returned. Usually this is a function like + SitemapsEntryFromString which will parse the response and turn it into + an object. + + Returns: + If converter is defined, the results of running converter on the server's + response. Otherwise, it will be a SitesEntry object. 
+ """ + + site_entry = webmastertools.SitesEntry( + atom_id=atom.Id(text=site_uri), + category=atom.Category( + scheme='http://schemas.google.com/g/2005#kind', + term='http://schemas.google.com/webmasters/tools/2007#sites-info'), + verification_method=webmastertools.VerificationMethod( + type=verification_method, in_use='true') + ) + response = self.Put( + site_entry, + uri % urllib.quote_plus(site_uri), + url_params=url_params, + escape_params=escape_params, converter=converter) + if not converter and isinstance(response, atom.Entry): + return webmastertools.SitesEntryFromString(response.ToString()) + return response + + + def UpdateGeoLocation(self, site_uri, geolocation, uri=SITE_TEMPLATE, + url_params=None, escape_params=True, converter=None): + """Updates geolocation setting of a site. + + Args: + site_uri: str URI of which site to add sitemap for. + geolocation: str The geographic location. Valid values are listed in + http://en.wikipedia.org/wiki/ISO_3166-1_alpha-2 + uri: str (optional) URI template to update a site. + Default SITE_TEMPLATE. + url_params: dict (optional) Additional URL parameters to be included + in the insertion request. + escape_params: boolean (optional) If true, the url_parameters will be + escaped before they are included in the request. + converter: func (optional) Function which is executed on the server's + response before it is returned. Usually this is a function like + SitemapsEntryFromString which will parse the response and turn it into + an object. + + Returns: + If converter is defined, the results of running converter on the server's + response. Otherwise, it will be a SitesEntry object. + """ + + site_entry = webmastertools.SitesEntry( + atom_id=atom.Id(text=site_uri), + category=atom.Category( + scheme='http://schemas.google.com/g/2005#kind', + term='http://schemas.google.com/webmasters/tools/2007#sites-info'), + geolocation=webmastertools.GeoLocation(text=geolocation) + ) + response = self.Put( + site_entry, + uri % urllib.quote_plus(site_uri), + url_params=url_params, + escape_params=escape_params, converter=converter) + if not converter and isinstance(response, atom.Entry): + return webmastertools.SitesEntryFromString(response.ToString()) + return response + + def UpdateCrawlRate(self, site_uri, crawl_rate, uri=SITE_TEMPLATE, + url_params=None, escape_params=True, converter=None): + """Updates crawl rate setting of a site. + + Args: + site_uri: str URI of which site to add sitemap for. + crawl_rate: str The crawl rate for a site. Valid values are 'slower', + 'normal', and 'faster'. + uri: str (optional) URI template to update a site. + Default SITE_TEMPLATE. + url_params: dict (optional) Additional URL parameters to be included + in the insertion request. + escape_params: boolean (optional) If true, the url_parameters will be + escaped before they are included in the request. + converter: func (optional) Function which is executed on the server's + response before it is returned. Usually this is a function like + SitemapsEntryFromString which will parse the response and turn it into + an object. + + Returns: + If converter is defined, the results of running converter on the server's + response. Otherwise, it will be a SitesEntry object. 
+ """ + + site_entry = webmastertools.SitesEntry( + atom_id=atom.Id(text=site_uri), + category=atom.Category( + scheme='http://schemas.google.com/g/2005#kind', + term='http://schemas.google.com/webmasters/tools/2007#sites-info'), + crawl_rate=webmastertools.CrawlRate(text=crawl_rate) + ) + response = self.Put( + site_entry, + uri % urllib.quote_plus(site_uri), + url_params=url_params, + escape_params=escape_params, converter=converter) + if not converter and isinstance(response, atom.Entry): + return webmastertools.SitesEntryFromString(response.ToString()) + return response + + def UpdatePreferredDomain(self, site_uri, preferred_domain, uri=SITE_TEMPLATE, + url_params=None, escape_params=True, converter=None): + """Updates preferred domain setting of a site. + + Note that if using 'preferwww', will also need www.example.com in account to + take effect. + + Args: + site_uri: str URI of which site to add sitemap for. + preferred_domain: str The preferred domain for a site. Valid values are 'none', + 'preferwww', and 'prefernowww'. + uri: str (optional) URI template to update a site. + Default SITE_TEMPLATE. + url_params: dict (optional) Additional URL parameters to be included + in the insertion request. + escape_params: boolean (optional) If true, the url_parameters will be + escaped before they are included in the request. + converter: func (optional) Function which is executed on the server's + response before it is returned. Usually this is a function like + SitemapsEntryFromString which will parse the response and turn it into + an object. + + Returns: + If converter is defined, the results of running converter on the server's + response. Otherwise, it will be a SitesEntry object. + """ + + site_entry = webmastertools.SitesEntry( + atom_id=atom.Id(text=site_uri), + category=atom.Category( + scheme='http://schemas.google.com/g/2005#kind', + term='http://schemas.google.com/webmasters/tools/2007#sites-info'), + preferred_domain=webmastertools.PreferredDomain(text=preferred_domain) + ) + response = self.Put( + site_entry, + uri % urllib.quote_plus(site_uri), + url_params=url_params, + escape_params=escape_params, converter=converter) + if not converter and isinstance(response, atom.Entry): + return webmastertools.SitesEntryFromString(response.ToString()) + return response + + def UpdateEnhancedImageSearch(self, site_uri, enhanced_image_search, + uri=SITE_TEMPLATE, url_params=None, escape_params=True, converter=None): + """Updates enhanced image search setting of a site. + + Args: + site_uri: str URI of which site to add sitemap for. + enhanced_image_search: str The enhanced image search setting for a site. + Valid values are 'true', and 'false'. + uri: str (optional) URI template to update a site. + Default SITE_TEMPLATE. + url_params: dict (optional) Additional URL parameters to be included + in the insertion request. + escape_params: boolean (optional) If true, the url_parameters will be + escaped before they are included in the request. + converter: func (optional) Function which is executed on the server's + response before it is returned. Usually this is a function like + SitemapsEntryFromString which will parse the response and turn it into + an object. + + Returns: + If converter is defined, the results of running converter on the server's + response. Otherwise, it will be a SitesEntry object. 
+ """ + + site_entry = webmastertools.SitesEntry( + atom_id=atom.Id(text=site_uri), + category=atom.Category( + scheme='http://schemas.google.com/g/2005#kind', + term='http://schemas.google.com/webmasters/tools/2007#sites-info'), + enhanced_image_search=webmastertools.EnhancedImageSearch( + text=enhanced_image_search) + ) + response = self.Put( + site_entry, + uri % urllib.quote_plus(site_uri), + url_params=url_params, + escape_params=escape_params, converter=converter) + if not converter and isinstance(response, atom.Entry): + return webmastertools.SitesEntryFromString(response.ToString()) + return response + + def GetSitemapsFeed(self, site_uri, uri=SITEMAPS_FEED_TEMPLATE, + converter=webmastertools.SitemapsFeedFromString): + """Gets sitemaps feed of a site. + + Args: + site_uri: str (optional) URI of which site to retrieve its sitemaps feed. + uri: str (optional) URI to retrieve sites feed. + converter: func (optional) Function which is executed on the server's + response before it is returned. Usually this is a function like + SitemapsFeedFromString which will parse the response and turn it into + an object. + + Returns: + If converter is defined, the results of running converter on the server's + response. Otherwise, it will be a SitemapsFeed object. + """ + return self.Get(uri % {'site_id': urllib.quote_plus(site_uri)}, + converter=converter) + + def AddSitemap(self, site_uri, sitemap_uri, sitemap_type='WEB', + uri=SITEMAPS_FEED_TEMPLATE, + url_params=None, escape_params=True, converter=None): + """Adds a regular sitemap to a site. + + Args: + site_uri: str URI of which site to add sitemap for. + sitemap_uri: str URI of sitemap to add to a site. + sitemap_type: str Type of added sitemap. Valid types: WEB, VIDEO, or CODE. + uri: str (optional) URI template to add a sitemap. + Default SITEMAP_FEED_TEMPLATE. + url_params: dict (optional) Additional URL parameters to be included + in the insertion request. + escape_params: boolean (optional) If true, the url_parameters will be + escaped before they are included in the request. + converter: func (optional) Function which is executed on the server's + response before it is returned. Usually this is a function like + SitemapsEntryFromString which will parse the response and turn it into + an object. + + Returns: + If converter is defined, the results of running converter on the server's + response. Otherwise, it will be a SitemapsEntry object. + """ + + sitemap_entry = webmastertools.SitemapsEntry( + atom_id=atom.Id(text=sitemap_uri), + category=atom.Category( + scheme='http://schemas.google.com/g/2005#kind', + term='http://schemas.google.com/webmasters/tools/2007#sitemap-regular'), + sitemap_type=webmastertools.SitemapType(text=sitemap_type)) + response = self.Post( + sitemap_entry, + uri % {'site_id': urllib.quote_plus(site_uri)}, + url_params=url_params, + escape_params=escape_params, converter=converter) + if not converter and isinstance(response, atom.Entry): + return webmastertools.SitemapsEntryFromString(response.ToString()) + return response + + def AddMobileSitemap(self, site_uri, sitemap_uri, + sitemap_mobile_markup_language='XHTML', uri=SITEMAPS_FEED_TEMPLATE, + url_params=None, escape_params=True, converter=None): + """Adds a mobile sitemap to a site. + + Args: + site_uri: str URI of which site to add sitemap for. + sitemap_uri: str URI of sitemap to add to a site. + sitemap_mobile_markup_language: str Format of added sitemap. Valid types: + XHTML, WML, or cHTML. + uri: str (optional) URI template to add a sitemap. 
+ Default SITEMAP_FEED_TEMPLATE. + url_params: dict (optional) Additional URL parameters to be included + in the insertion request. + escape_params: boolean (optional) If true, the url_parameters will be + escaped before they are included in the request. + converter: func (optional) Function which is executed on the server's + response before it is returned. Usually this is a function like + SitemapsEntryFromString which will parse the response and turn it into + an object. + + Returns: + If converter is defined, the results of running converter on the server's + response. Otherwise, it will be a SitemapsEntry object. + """ + # FIXME + sitemap_entry = webmastertools.SitemapsEntry( + atom_id=atom.Id(text=sitemap_uri), + category=atom.Category( + scheme='http://schemas.google.com/g/2005#kind', + term='http://schemas.google.com/webmasters/tools/2007#sitemap-mobile'), + sitemap_mobile_markup_language=\ + webmastertools.SitemapMobileMarkupLanguage( + text=sitemap_mobile_markup_language)) + print sitemap_entry + response = self.Post( + sitemap_entry, + uri % {'site_id': urllib.quote_plus(site_uri)}, + url_params=url_params, + escape_params=escape_params, converter=converter) + if not converter and isinstance(response, atom.Entry): + return webmastertools.SitemapsEntryFromString(response.ToString()) + return response + + def AddNewsSitemap(self, site_uri, sitemap_uri, + sitemap_news_publication_label, uri=SITEMAPS_FEED_TEMPLATE, + url_params=None, escape_params=True, converter=None): + """Adds a news sitemap to a site. + + Args: + site_uri: str URI of which site to add sitemap for. + sitemap_uri: str URI of sitemap to add to a site. + sitemap_news_publication_label: str, list of str Publication Labels for + sitemap. + uri: str (optional) URI template to add a sitemap. + Default SITEMAP_FEED_TEMPLATE. + url_params: dict (optional) Additional URL parameters to be included + in the insertion request. + escape_params: boolean (optional) If true, the url_parameters will be + escaped before they are included in the request. + converter: func (optional) Function which is executed on the server's + response before it is returned. Usually this is a function like + SitemapsEntryFromString which will parse the response and turn it into + an object. + + Returns: + If converter is defined, the results of running converter on the server's + response. Otherwise, it will be a SitemapsEntry object. + """ + + sitemap_entry = webmastertools.SitemapsEntry( + atom_id=atom.Id(text=sitemap_uri), + category=atom.Category( + scheme='http://schemas.google.com/g/2005#kind', + term='http://schemas.google.com/webmasters/tools/2007#sitemap-news'), + sitemap_news_publication_label=[], + ) + if isinstance(sitemap_news_publication_label, str): + sitemap_news_publication_label = [sitemap_news_publication_label] + for label in sitemap_news_publication_label: + sitemap_entry.sitemap_news_publication_label.append( + webmastertools.SitemapNewsPublicationLabel(text=label)) + print sitemap_entry + response = self.Post( + sitemap_entry, + uri % {'site_id': urllib.quote_plus(site_uri)}, + url_params=url_params, + escape_params=escape_params, converter=converter) + if not converter and isinstance(response, atom.Entry): + return webmastertools.SitemapsEntryFromString(response.ToString()) + return response + + def DeleteSitemap(self, site_uri, sitemap_uri, uri=SITEMAP_TEMPLATE, + url_params=None, escape_params=True): + """Removes a sitemap from a site. + + Args: + site_uri: str URI of which site to remove a sitemap from. 
+ sitemap_uri: str URI of sitemap to remove from a site. + uri: str (optional) A URI template to send DELETE request. + Default SITEMAP_TEMPLATE. + url_params: dict (optional) Additional URL parameters to be included + in the insertion request. + escape_params: boolean (optional) If true, the url_parameters will be + escaped before they are included in the request. + + Returns: + True if the delete succeeded. + """ + + return self.Delete( + uri % {'site_id': urllib.quote_plus(site_uri), + 'sitemap_id': urllib.quote_plus(sitemap_uri)}, + url_params=url_params, escape_params=escape_params) diff --git a/gam/gdata/youtube/__init__.py b/gam/gdata/youtube/__init__.py new file mode 100755 index 00000000000..c41aaea528e --- /dev/null +++ b/gam/gdata/youtube/__init__.py @@ -0,0 +1,684 @@ +#!/usr/bin/python +# +# Copyright (C) 2008 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +__author__ = ('api.stephaniel@gmail.com (Stephanie Liu)' + ', api.jhartmann@gmail.com (Jochen Hartmann)') + +import atom +import gdata +import gdata.media as Media +import gdata.geo as Geo + +YOUTUBE_NAMESPACE = 'http://gdata.youtube.com/schemas/2007' +YOUTUBE_FORMAT = '{http://gdata.youtube.com/schemas/2007}format' +YOUTUBE_DEVELOPER_TAG_SCHEME = '%s/%s' % (YOUTUBE_NAMESPACE, + 'developertags.cat') +YOUTUBE_SUBSCRIPTION_TYPE_SCHEME = '%s/%s' % (YOUTUBE_NAMESPACE, + 'subscriptiontypes.cat') + +class Username(atom.AtomBase): + """The YouTube Username element""" + _tag = 'username' + _namespace = YOUTUBE_NAMESPACE + +class QueryString(atom.AtomBase): + """The YouTube QueryString element""" + _tag = 'queryString' + _namespace = YOUTUBE_NAMESPACE + + +class FirstName(atom.AtomBase): + """The YouTube FirstName element""" + _tag = 'firstName' + _namespace = YOUTUBE_NAMESPACE + + +class LastName(atom.AtomBase): + """The YouTube LastName element""" + _tag = 'lastName' + _namespace = YOUTUBE_NAMESPACE + + +class Age(atom.AtomBase): + """The YouTube Age element""" + _tag = 'age' + _namespace = YOUTUBE_NAMESPACE + + +class Books(atom.AtomBase): + """The YouTube Books element""" + _tag = 'books' + _namespace = YOUTUBE_NAMESPACE + + +class Gender(atom.AtomBase): + """The YouTube Gender element""" + _tag = 'gender' + _namespace = YOUTUBE_NAMESPACE + + +class Company(atom.AtomBase): + """The YouTube Company element""" + _tag = 'company' + _namespace = YOUTUBE_NAMESPACE + + +class Hobbies(atom.AtomBase): + """The YouTube Hobbies element""" + _tag = 'hobbies' + _namespace = YOUTUBE_NAMESPACE + + +class Hometown(atom.AtomBase): + """The YouTube Hometown element""" + _tag = 'hometown' + _namespace = YOUTUBE_NAMESPACE + + +class Location(atom.AtomBase): + """The YouTube Location element""" + _tag = 'location' + _namespace = YOUTUBE_NAMESPACE + + +class Movies(atom.AtomBase): + """The YouTube Movies element""" + _tag = 'movies' + _namespace = YOUTUBE_NAMESPACE + + +class Music(atom.AtomBase): + """The YouTube Music element""" + _tag = 'music' + _namespace = YOUTUBE_NAMESPACE + + +class Occupation(atom.AtomBase): + """The YouTube 
Occupation element""" + _tag = 'occupation' + _namespace = YOUTUBE_NAMESPACE + + +class School(atom.AtomBase): + """The YouTube School element""" + _tag = 'school' + _namespace = YOUTUBE_NAMESPACE + + +class Relationship(atom.AtomBase): + """The YouTube Relationship element""" + _tag = 'relationship' + _namespace = YOUTUBE_NAMESPACE + + +class Recorded(atom.AtomBase): + """The YouTube Recorded element""" + _tag = 'recorded' + _namespace = YOUTUBE_NAMESPACE + + +class Statistics(atom.AtomBase): + """The YouTube Statistics element.""" + _tag = 'statistics' + _namespace = YOUTUBE_NAMESPACE + _attributes = atom.AtomBase._attributes.copy() + _attributes['viewCount'] = 'view_count' + _attributes['videoWatchCount'] = 'video_watch_count' + _attributes['subscriberCount'] = 'subscriber_count' + _attributes['lastWebAccess'] = 'last_web_access' + _attributes['favoriteCount'] = 'favorite_count' + + def __init__(self, view_count=None, video_watch_count=None, + favorite_count=None, subscriber_count=None, last_web_access=None, + extension_elements=None, extension_attributes=None, text=None): + + self.view_count = view_count + self.video_watch_count = video_watch_count + self.subscriber_count = subscriber_count + self.last_web_access = last_web_access + self.favorite_count = favorite_count + + atom.AtomBase.__init__(self, extension_elements=extension_elements, + extension_attributes=extension_attributes, text=text) + + +class Status(atom.AtomBase): + """The YouTube Status element""" + _tag = 'status' + _namespace = YOUTUBE_NAMESPACE + + +class Position(atom.AtomBase): + """The YouTube Position element. The position in a playlist feed.""" + _tag = 'position' + _namespace = YOUTUBE_NAMESPACE + + +class Racy(atom.AtomBase): + """The YouTube Racy element.""" + _tag = 'racy' + _namespace = YOUTUBE_NAMESPACE + +class Description(atom.AtomBase): + """The YouTube Description element.""" + _tag = 'description' + _namespace = YOUTUBE_NAMESPACE + + +class Private(atom.AtomBase): + """The YouTube Private element.""" + _tag = 'private' + _namespace = YOUTUBE_NAMESPACE + + +class NoEmbed(atom.AtomBase): + """The YouTube VideoShare element. 
Whether a video can be embedded or not.""" + _tag = 'noembed' + _namespace = YOUTUBE_NAMESPACE + + +class Comments(atom.AtomBase): + """The GData Comments element""" + _tag = 'comments' + _namespace = gdata.GDATA_NAMESPACE + _children = atom.AtomBase._children.copy() + _attributes = atom.AtomBase._attributes.copy() + _children['{%s}feedLink' % gdata.GDATA_NAMESPACE] = ('feed_link', + [gdata.FeedLink]) + + def __init__(self, feed_link=None, extension_elements=None, + extension_attributes=None, text=None): + + self.feed_link = feed_link + atom.AtomBase.__init__(self, extension_elements=extension_elements, + extension_attributes=extension_attributes, text=text) + + +class Rating(atom.AtomBase): + """The GData Rating element""" + _tag = 'rating' + _namespace = gdata.GDATA_NAMESPACE + _attributes = atom.AtomBase._attributes.copy() + _attributes['min'] = 'min' + _attributes['max'] = 'max' + _attributes['numRaters'] = 'num_raters' + _attributes['average'] = 'average' + + def __init__(self, min=None, max=None, + num_raters=None, average=None, extension_elements=None, + extension_attributes=None, text=None): + + self.min = min + self.max = max + self.num_raters = num_raters + self.average = average + + atom.AtomBase.__init__(self, extension_elements=extension_elements, + extension_attributes=extension_attributes, text=text) + + +class YouTubePlaylistVideoEntry(gdata.GDataEntry): + """Represents a YouTubeVideoEntry on a YouTubePlaylist.""" + _tag = gdata.GDataEntry._tag + _namespace = gdata.GDataEntry._namespace + _children = gdata.GDataEntry._children.copy() + _attributes = gdata.GDataEntry._attributes.copy() + _children['{%s}feedLink' % gdata.GDATA_NAMESPACE] = ('feed_link', + [gdata.FeedLink]) + _children['{%s}description' % YOUTUBE_NAMESPACE] = ('description', + Description) + _children['{%s}rating' % gdata.GDATA_NAMESPACE] = ('rating', Rating) + _children['{%s}comments' % gdata.GDATA_NAMESPACE] = ('comments', Comments) + _children['{%s}statistics' % YOUTUBE_NAMESPACE] = ('statistics', Statistics) + _children['{%s}location' % YOUTUBE_NAMESPACE] = ('location', Location) + _children['{%s}position' % YOUTUBE_NAMESPACE] = ('position', Position) + _children['{%s}group' % gdata.media.MEDIA_NAMESPACE] = ('media', Media.Group) + + def __init__(self, author=None, category=None, content=None, + atom_id=None, link=None, published=None, title=None, + updated=None, feed_link=None, description=None, + rating=None, comments=None, statistics=None, + location=None, position=None, media=None, + extension_elements=None, extension_attributes=None): + + self.feed_link = feed_link + self.description = description + self.rating = rating + self.comments = comments + self.statistics = statistics + self.location = location + self.position = position + self.media = media + + gdata.GDataEntry.__init__(self, author=author, category=category, + content=content, atom_id=atom_id, + link=link, published=published, title=title, + updated=updated, + extension_elements=extension_elements, + extension_attributes=extension_attributes) + + +class YouTubeVideoCommentEntry(gdata.GDataEntry): + """Represents a comment on YouTube.""" + _tag = gdata.GDataEntry._tag + _namespace = gdata.GDataEntry._namespace + _children = gdata.GDataEntry._children.copy() + _attributes = gdata.GDataEntry._attributes.copy() + + +class YouTubeSubscriptionEntry(gdata.GDataEntry): + """Represents a subscription entry on YouTube.""" + _tag = gdata.GDataEntry._tag + _namespace = gdata.GDataEntry._namespace + _children = gdata.GDataEntry._children.copy() + 
_attributes = gdata.GDataEntry._attributes.copy() + _children['{%s}username' % YOUTUBE_NAMESPACE] = ('username', Username) + _children['{%s}queryString' % YOUTUBE_NAMESPACE] = ( + 'query_string', QueryString) + _children['{%s}feedLink' % gdata.GDATA_NAMESPACE] = ('feed_link', + [gdata.FeedLink]) + + def __init__(self, author=None, category=None, content=None, + atom_id=None, link=None, published=None, title=None, + updated=None, username=None, query_string=None, feed_link=None, + extension_elements=None, extension_attributes=None): + + gdata.GDataEntry.__init__(self, author=author, category=category, + content=content, atom_id=atom_id, link=link, + published=published, title=title, updated=updated) + + self.username = username + self.query_string = query_string + self.feed_link = feed_link + + + def GetSubscriptionType(self): + """Retrieve the type of this subscription. + + Returns: + A string that is either 'channel, 'query' or 'favorites' + """ + for category in self.category: + if category.scheme == YOUTUBE_SUBSCRIPTION_TYPE_SCHEME: + return category.term + + +class YouTubeVideoResponseEntry(gdata.GDataEntry): + """Represents a video response. """ + _tag = gdata.GDataEntry._tag + _namespace = gdata.GDataEntry._namespace + _children = gdata.GDataEntry._children.copy() + _attributes = gdata.GDataEntry._attributes.copy() + _children['{%s}rating' % gdata.GDATA_NAMESPACE] = ('rating', Rating) + _children['{%s}noembed' % YOUTUBE_NAMESPACE] = ('noembed', NoEmbed) + _children['{%s}statistics' % YOUTUBE_NAMESPACE] = ('statistics', Statistics) + _children['{%s}racy' % YOUTUBE_NAMESPACE] = ('racy', Racy) + _children['{%s}group' % gdata.media.MEDIA_NAMESPACE] = ('media', Media.Group) + + def __init__(self, author=None, category=None, content=None, atom_id=None, + link=None, published=None, title=None, updated=None, rating=None, + noembed=None, statistics=None, racy=None, media=None, + extension_elements=None, extension_attributes=None): + + gdata.GDataEntry.__init__(self, author=author, category=category, + content=content, atom_id=atom_id, link=link, + published=published, title=title, updated=updated) + + self.rating = rating + self.noembed = noembed + self.statistics = statistics + self.racy = racy + self.media = media or Media.Group() + + +class YouTubeContactEntry(gdata.GDataEntry): + """Represents a contact entry.""" + _tag = gdata.GDataEntry._tag + _namespace = gdata.GDataEntry._namespace + _children = gdata.GDataEntry._children.copy() + _attributes = gdata.GDataEntry._attributes.copy() + _children['{%s}username' % YOUTUBE_NAMESPACE] = ('username', Username) + _children['{%s}status' % YOUTUBE_NAMESPACE] = ('status', Status) + + + def __init__(self, author=None, category=None, content=None, atom_id=None, + link=None, published=None, title=None, updated=None, + username=None, status=None, extension_elements=None, + extension_attributes=None, text=None): + + gdata.GDataEntry.__init__(self, author=author, category=category, + content=content, atom_id=atom_id, link=link, + published=published, title=title, updated=updated) + + self.username = username + self.status = status + + +class YouTubeVideoEntry(gdata.GDataEntry): + """Represents a video on YouTube.""" + _tag = gdata.GDataEntry._tag + _namespace = gdata.GDataEntry._namespace + _children = gdata.GDataEntry._children.copy() + _attributes = gdata.GDataEntry._attributes.copy() + _children['{%s}rating' % gdata.GDATA_NAMESPACE] = ('rating', Rating) + _children['{%s}comments' % gdata.GDATA_NAMESPACE] = ('comments', Comments) + 
_children['{%s}noembed' % YOUTUBE_NAMESPACE] = ('noembed', NoEmbed) + _children['{%s}statistics' % YOUTUBE_NAMESPACE] = ('statistics', Statistics) + _children['{%s}recorded' % YOUTUBE_NAMESPACE] = ('recorded', Recorded) + _children['{%s}racy' % YOUTUBE_NAMESPACE] = ('racy', Racy) + _children['{%s}group' % gdata.media.MEDIA_NAMESPACE] = ('media', Media.Group) + _children['{%s}where' % gdata.geo.GEORSS_NAMESPACE] = ('geo', Geo.Where) + + def __init__(self, author=None, category=None, content=None, atom_id=None, + link=None, published=None, title=None, updated=None, rating=None, + noembed=None, statistics=None, racy=None, media=None, geo=None, + recorded=None, comments=None, extension_elements=None, + extension_attributes=None): + + self.rating = rating + self.noembed = noembed + self.statistics = statistics + self.racy = racy + self.comments = comments + self.media = media or Media.Group() + self.geo = geo + self.recorded = recorded + + gdata.GDataEntry.__init__(self, author=author, category=category, + content=content, atom_id=atom_id, link=link, + published=published, title=title, updated=updated, + extension_elements=extension_elements, + extension_attributes=extension_attributes) + + def GetSwfUrl(self): + """Return the URL for the embeddable Video + + Returns: + URL of the embeddable video + """ + if self.media.content: + for content in self.media.content: + if content.extension_attributes[YOUTUBE_FORMAT] == '5': + return content.url + else: + return None + + def AddDeveloperTags(self, developer_tags): + """Add a developer tag for this entry. + + Developer tags can only be set during the initial upload. + + Arguments: + developer_tags: A list of developer tags as strings. + + Returns: + A list of all developer tags for this video entry. + """ + for tag_text in developer_tags: + self.media.category.append(gdata.media.Category( + text=tag_text, label=tag_text, scheme=YOUTUBE_DEVELOPER_TAG_SCHEME)) + + return self.GetDeveloperTags() + + def GetDeveloperTags(self): + """Retrieve developer tags for this video entry.""" + developer_tags = [] + for category in self.media.category: + if category.scheme == YOUTUBE_DEVELOPER_TAG_SCHEME: + developer_tags.append(category) + if len(developer_tags) > 0: + return developer_tags + + def GetYouTubeCategoryAsString(self): + """Convenience method to return the YouTube category as string. + + YouTubeVideoEntries can contain multiple Category objects with differing + schemes. This method returns only the category with the correct + scheme, ignoring developer tags. 
+ """ + for category in self.media.category: + if category.scheme != YOUTUBE_DEVELOPER_TAG_SCHEME: + return category.text + +class YouTubeUserEntry(gdata.GDataEntry): + """Represents a user on YouTube.""" + _tag = gdata.GDataEntry._tag + _namespace = gdata.GDataEntry._namespace + _children = gdata.GDataEntry._children.copy() + _attributes = gdata.GDataEntry._attributes.copy() + _children['{%s}username' % YOUTUBE_NAMESPACE] = ('username', Username) + _children['{%s}firstName' % YOUTUBE_NAMESPACE] = ('first_name', FirstName) + _children['{%s}lastName' % YOUTUBE_NAMESPACE] = ('last_name', LastName) + _children['{%s}age' % YOUTUBE_NAMESPACE] = ('age', Age) + _children['{%s}books' % YOUTUBE_NAMESPACE] = ('books', Books) + _children['{%s}gender' % YOUTUBE_NAMESPACE] = ('gender', Gender) + _children['{%s}company' % YOUTUBE_NAMESPACE] = ('company', Company) + _children['{%s}description' % YOUTUBE_NAMESPACE] = ('description', + Description) + _children['{%s}hobbies' % YOUTUBE_NAMESPACE] = ('hobbies', Hobbies) + _children['{%s}hometown' % YOUTUBE_NAMESPACE] = ('hometown', Hometown) + _children['{%s}location' % YOUTUBE_NAMESPACE] = ('location', Location) + _children['{%s}movies' % YOUTUBE_NAMESPACE] = ('movies', Movies) + _children['{%s}music' % YOUTUBE_NAMESPACE] = ('music', Music) + _children['{%s}occupation' % YOUTUBE_NAMESPACE] = ('occupation', Occupation) + _children['{%s}school' % YOUTUBE_NAMESPACE] = ('school', School) + _children['{%s}relationship' % YOUTUBE_NAMESPACE] = ('relationship', + Relationship) + _children['{%s}statistics' % YOUTUBE_NAMESPACE] = ('statistics', Statistics) + _children['{%s}feedLink' % gdata.GDATA_NAMESPACE] = ('feed_link', + [gdata.FeedLink]) + _children['{%s}thumbnail' % gdata.media.MEDIA_NAMESPACE] = ('thumbnail', + Media.Thumbnail) + + def __init__(self, author=None, category=None, content=None, atom_id=None, + link=None, published=None, title=None, updated=None, + username=None, first_name=None, last_name=None, age=None, + books=None, gender=None, company=None, description=None, + hobbies=None, hometown=None, location=None, movies=None, + music=None, occupation=None, school=None, relationship=None, + statistics=None, feed_link=None, extension_elements=None, + extension_attributes=None, text=None): + + self.username = username + self.first_name = first_name + self.last_name = last_name + self.age = age + self.books = books + self.gender = gender + self.company = company + self.description = description + self.hobbies = hobbies + self.hometown = hometown + self.location = location + self.movies = movies + self.music = music + self.occupation = occupation + self.school = school + self.relationship = relationship + self.statistics = statistics + self.feed_link = feed_link + + gdata.GDataEntry.__init__(self, author=author, category=category, + content=content, atom_id=atom_id, + link=link, published=published, + title=title, updated=updated, + extension_elements=extension_elements, + extension_attributes=extension_attributes, + text=text) + + +class YouTubeVideoFeed(gdata.GDataFeed, gdata.LinkFinder): + """Represents a video feed on YouTube.""" + _tag = gdata.GDataFeed._tag + _namespace = gdata.GDataFeed._namespace + _children = gdata.GDataFeed._children.copy() + _attributes = gdata.GDataFeed._attributes.copy() + _children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry', [YouTubeVideoEntry]) + +class YouTubePlaylistEntry(gdata.GDataEntry): + """Represents a playlist in YouTube.""" + _tag = gdata.GDataEntry._tag + _namespace = gdata.GDataEntry._namespace + _children = 
gdata.GDataEntry._children.copy() + _attributes = gdata.GDataEntry._attributes.copy() + _children['{%s}description' % YOUTUBE_NAMESPACE] = ('description', + Description) + _children['{%s}private' % YOUTUBE_NAMESPACE] = ('private', + Private) + _children['{%s}feedLink' % gdata.GDATA_NAMESPACE] = ('feed_link', + [gdata.FeedLink]) + + def __init__(self, author=None, category=None, content=None, + atom_id=None, link=None, published=None, title=None, + updated=None, private=None, feed_link=None, + description=None, extension_elements=None, + extension_attributes=None): + + self.description = description + self.private = private + self.feed_link = feed_link + + gdata.GDataEntry.__init__(self, author=author, category=category, + content=content, atom_id=atom_id, + link=link, published=published, title=title, + updated=updated, + extension_elements=extension_elements, + extension_attributes=extension_attributes) + + + +class YouTubePlaylistFeed(gdata.GDataFeed, gdata.LinkFinder): + """Represents a feed of a user's playlists """ + _tag = gdata.GDataFeed._tag + _namespace = gdata.GDataFeed._namespace + _children = gdata.GDataFeed._children.copy() + _attributes = gdata.GDataFeed._attributes.copy() + _children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry', + [YouTubePlaylistEntry]) + + +class YouTubePlaylistVideoFeed(gdata.GDataFeed, gdata.LinkFinder): + """Represents a feed of video entry on a playlist.""" + _tag = gdata.GDataFeed._tag + _namespace = gdata.GDataFeed._namespace + _children = gdata.GDataFeed._children.copy() + _attributes = gdata.GDataFeed._attributes.copy() + _children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry', + [YouTubePlaylistVideoEntry]) + + +class YouTubeContactFeed(gdata.GDataFeed, gdata.LinkFinder): + """Represents a feed of a users contacts.""" + _tag = gdata.GDataFeed._tag + _namespace = gdata.GDataFeed._namespace + _children = gdata.GDataFeed._children.copy() + _attributes = gdata.GDataFeed._attributes.copy() + _children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry', + [YouTubeContactEntry]) + + +class YouTubeSubscriptionFeed(gdata.GDataFeed, gdata.LinkFinder): + """Represents a feed of a users subscriptions.""" + _tag = gdata.GDataFeed._tag + _namespace = gdata.GDataFeed._namespace + _children = gdata.GDataFeed._children.copy() + _attributes = gdata.GDataFeed._attributes.copy() + _children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry', + [YouTubeSubscriptionEntry]) + + +class YouTubeVideoCommentFeed(gdata.GDataFeed, gdata.LinkFinder): + """Represents a feed of comments for a video.""" + _tag = gdata.GDataFeed._tag + _namespace = gdata.GDataFeed._namespace + _children = gdata.GDataFeed._children.copy() + _attributes = gdata.GDataFeed._attributes.copy() + _children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry', + [YouTubeVideoCommentEntry]) + + +class YouTubeVideoResponseFeed(gdata.GDataFeed, gdata.LinkFinder): + """Represents a feed of video responses.""" + _tag = gdata.GDataFeed._tag + _namespace = gdata.GDataFeed._namespace + _children = gdata.GDataFeed._children.copy() + _attributes = gdata.GDataFeed._attributes.copy() + _children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry', + [YouTubeVideoResponseEntry]) + + +def YouTubeVideoFeedFromString(xml_string): + return atom.CreateClassFromXMLString(YouTubeVideoFeed, xml_string) + + +def YouTubeVideoEntryFromString(xml_string): + return atom.CreateClassFromXMLString(YouTubeVideoEntry, xml_string) + + +def YouTubeContactFeedFromString(xml_string): + return atom.CreateClassFromXMLString(YouTubeContactFeed, xml_string) + + 
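As a quick, self-contained illustration of the entry classes and FromString helpers above (editorial sketch, not part of the patch): the payload below is made up and deliberately minimal, so most optional children (media:group, gd:rating, gd:comments, georss:where) are absent.

import gdata.youtube

# Hypothetical video entry; yt: maps to YOUTUBE_NAMESPACE defined above.
VIDEO_ENTRY_XML = """<?xml version='1.0' encoding='UTF-8'?>
<entry xmlns='http://www.w3.org/2005/Atom'
       xmlns:yt='http://gdata.youtube.com/schemas/2007'>
  <id>tag:youtube.com,2008:video:abc123</id>
  <title type='text'>Test video</title>
  <updated>2013-03-20T10:23:27Z</updated>
  <yt:statistics viewCount='42' favoriteCount='1'/>
</entry>"""

entry = gdata.youtube.YouTubeVideoEntryFromString(VIDEO_ENTRY_XML)
print entry.title.text             # Test video
print entry.statistics.view_count  # 42 (mapped from the viewCount attribute)
print entry.GetSwfUrl()            # None: no media:group content with format '5'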
+def YouTubeContactEntryFromString(xml_string): + return atom.CreateClassFromXMLString(YouTubeContactEntry, xml_string) + + +def YouTubeVideoCommentFeedFromString(xml_string): + return atom.CreateClassFromXMLString(YouTubeVideoCommentFeed, xml_string) + + +def YouTubeVideoCommentEntryFromString(xml_string): + return atom.CreateClassFromXMLString(YouTubeVideoCommentEntry, xml_string) + + +def YouTubeUserFeedFromString(xml_string): + return atom.CreateClassFromXMLString(YouTubeVideoFeed, xml_string) + + +def YouTubeUserEntryFromString(xml_string): + return atom.CreateClassFromXMLString(YouTubeUserEntry, xml_string) + + +def YouTubePlaylistFeedFromString(xml_string): + return atom.CreateClassFromXMLString(YouTubePlaylistFeed, xml_string) + + +def YouTubePlaylistVideoFeedFromString(xml_string): + return atom.CreateClassFromXMLString(YouTubePlaylistVideoFeed, xml_string) + + +def YouTubePlaylistEntryFromString(xml_string): + return atom.CreateClassFromXMLString(YouTubePlaylistEntry, xml_string) + + +def YouTubePlaylistVideoEntryFromString(xml_string): + return atom.CreateClassFromXMLString(YouTubePlaylistVideoEntry, xml_string) + + +def YouTubeSubscriptionFeedFromString(xml_string): + return atom.CreateClassFromXMLString(YouTubeSubscriptionFeed, xml_string) + + +def YouTubeSubscriptionEntryFromString(xml_string): + return atom.CreateClassFromXMLString(YouTubeSubscriptionEntry, xml_string) + + +def YouTubeVideoResponseFeedFromString(xml_string): + return atom.CreateClassFromXMLString(YouTubeVideoResponseFeed, xml_string) + + +def YouTubeVideoResponseEntryFromString(xml_string): + return atom.CreateClassFromXMLString(YouTubeVideoResponseEntry, xml_string) diff --git a/gam/gdata/youtube/data.py b/gam/gdata/youtube/data.py new file mode 100755 index 00000000000..b75cf79d786 --- /dev/null +++ b/gam/gdata/youtube/data.py @@ -0,0 +1,477 @@ +#!/usr/bin/python +# +# Copyright (C) 2009 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ + +"""Contains the data classes of the YouTube Data API""" + + +__author__ = 'j.s@google.com (Jeff Scudder)' + + +import atom.core +import atom.data +import gdata.data +import gdata.geo.data +import gdata.media.data +import gdata.opensearch.data +import gdata.youtube.data + + +YT_TEMPLATE = '{http://gdata.youtube.com/schemas/2007/}%s' + + +class ComplaintEntry(gdata.data.GDEntry): + """Describes a complaint about a video""" + + +class ComplaintFeed(gdata.data.GDFeed): + """Describes complaints about a video""" + entry = [ComplaintEntry] + + +class RatingEntry(gdata.data.GDEntry): + """A rating about a video""" + rating = gdata.data.Rating + + +class RatingFeed(gdata.data.GDFeed): + """Describes ratings for a video""" + entry = [RatingEntry] + + +class YouTubeMediaContent(gdata.media.data.MediaContent): + """Describes a you tube media content""" + _qname = gdata.media.data.MEDIA_TEMPLATE % 'content' + format = 'format' + + +class YtAge(atom.core.XmlElement): + """User's age""" + _qname = YT_TEMPLATE % 'age' + + +class YtBooks(atom.core.XmlElement): + """User's favorite books""" + _qname = YT_TEMPLATE % 'books' + + +class YtCompany(atom.core.XmlElement): + """User's company""" + _qname = YT_TEMPLATE % 'company' + + +class YtDescription(atom.core.XmlElement): + """Description""" + _qname = YT_TEMPLATE % 'description' + + +class YtDuration(atom.core.XmlElement): + """Video duration""" + _qname = YT_TEMPLATE % 'duration' + seconds = 'seconds' + + +class YtFirstName(atom.core.XmlElement): + """User's first name""" + _qname = YT_TEMPLATE % 'firstName' + + +class YtGender(atom.core.XmlElement): + """User's gender""" + _qname = YT_TEMPLATE % 'gender' + + +class YtHobbies(atom.core.XmlElement): + """User's hobbies""" + _qname = YT_TEMPLATE % 'hobbies' + + +class YtHometown(atom.core.XmlElement): + """User's hometown""" + _qname = YT_TEMPLATE % 'hometown' + + +class YtLastName(atom.core.XmlElement): + """User's last name""" + _qname = YT_TEMPLATE % 'lastName' + + +class YtLocation(atom.core.XmlElement): + """Location""" + _qname = YT_TEMPLATE % 'location' + + +class YtMovies(atom.core.XmlElement): + """User's favorite movies""" + _qname = YT_TEMPLATE % 'movies' + + +class YtMusic(atom.core.XmlElement): + """User's favorite music""" + _qname = YT_TEMPLATE % 'music' + + +class YtNoEmbed(atom.core.XmlElement): + """Disables embedding for the video""" + _qname = YT_TEMPLATE % 'noembed' + + +class YtOccupation(atom.core.XmlElement): + """User's occupation""" + _qname = YT_TEMPLATE % 'occupation' + + +class YtPlaylistId(atom.core.XmlElement): + """Playlist id""" + _qname = YT_TEMPLATE % 'playlistId' + + +class YtPosition(atom.core.XmlElement): + """Video position on the playlist""" + _qname = YT_TEMPLATE % 'position' + + +class YtPrivate(atom.core.XmlElement): + """Flags the entry as private""" + _qname = YT_TEMPLATE % 'private' + + +class YtQueryString(atom.core.XmlElement): + """Keywords or query string associated with a subscription""" + _qname = YT_TEMPLATE % 'queryString' + + +class YtRacy(atom.core.XmlElement): + """Mature content""" + _qname = YT_TEMPLATE % 'racy' + + +class YtRecorded(atom.core.XmlElement): + """Date when the video was recorded""" + _qname = YT_TEMPLATE % 'recorded' + + +class YtRelationship(atom.core.XmlElement): + """User's relationship status""" + _qname = YT_TEMPLATE % 'relationship' + + +class YtSchool(atom.core.XmlElement): + """User's school""" + _qname = YT_TEMPLATE % 'school' + + +class YtStatistics(atom.core.XmlElement): + """Video and user statistics""" + _qname = 
YT_TEMPLATE % 'statistics' + favorite_count = 'favoriteCount' + video_watch_count = 'videoWatchCount' + view_count = 'viewCount' + last_web_access = 'lastWebAccess' + subscriber_count = 'subscriberCount' + + +class YtStatus(atom.core.XmlElement): + """Status of a contact""" + _qname = YT_TEMPLATE % 'status' + + +class YtUserProfileStatistics(YtStatistics): + """User statistics""" + _qname = YT_TEMPLATE % 'statistics' + + +class YtUsername(atom.core.XmlElement): + """Youtube username""" + _qname = YT_TEMPLATE % 'username' + + +class FriendEntry(gdata.data.BatchEntry): + """Describes a contact in friend list""" + username = YtUsername + status = YtStatus + email = gdata.data.Email + + +class FriendFeed(gdata.data.BatchFeed): + """Describes user's friends""" + entry = [FriendEntry] + + +class YtVideoStatistics(YtStatistics): + """Video statistics""" + _qname = YT_TEMPLATE % 'statistics' + + +class ChannelEntry(gdata.data.GDEntry): + """Describes a video channel""" + + +class ChannelFeed(gdata.data.GDFeed): + """Describes channels""" + entry = [ChannelEntry] + + +class FavoriteEntry(gdata.data.BatchEntry): + """Describes a favorite video""" + + +class FavoriteFeed(gdata.data.BatchFeed): + """Describes favorite videos""" + entry = [FavoriteEntry] + + +class YouTubeMediaCredit(gdata.media.data.MediaCredit): + """Describes a you tube media credit""" + _qname = gdata.media.data.MEDIA_TEMPLATE % 'credit' + type = 'type' + + +class YouTubeMediaRating(gdata.media.data.MediaRating): + """Describes a you tube media rating""" + _qname = gdata.media.data.MEDIA_TEMPLATE % 'rating' + country = 'country' + + +class YtAboutMe(atom.core.XmlElement): + """User's self description""" + _qname = YT_TEMPLATE % 'aboutMe' + + +class UserProfileEntry(gdata.data.BatchEntry): + """Describes an user's profile""" + relationship = YtRelationship + description = YtDescription + location = YtLocation + statistics = YtUserProfileStatistics + school = YtSchool + music = YtMusic + first_name = YtFirstName + gender = YtGender + occupation = YtOccupation + hometown = YtHometown + company = YtCompany + movies = YtMovies + books = YtBooks + username = YtUsername + about_me = YtAboutMe + last_name = YtLastName + age = YtAge + thumbnail = gdata.media.data.MediaThumbnail + hobbies = YtHobbies + + +class UserProfileFeed(gdata.data.BatchFeed): + """Describes a feed of user's profile""" + entry = [UserProfileEntry] + + +class YtAspectRatio(atom.core.XmlElement): + """The aspect ratio of a media file""" + _qname = YT_TEMPLATE % 'aspectRatio' + + +class YtBasePublicationState(atom.core.XmlElement): + """Status of an unpublished entry""" + _qname = YT_TEMPLATE % 'state' + help_url = 'helpUrl' + + +class YtPublicationState(YtBasePublicationState): + """Status of an unpublished video""" + _qname = YT_TEMPLATE % 'state' + name = 'name' + reason_code = 'reasonCode' + + +class YouTubeAppControl(atom.data.Control): + """Describes a you tube app control""" + _qname = (atom.data.APP_TEMPLATE_V1 % 'control', + atom.data.APP_TEMPLATE_V2 % 'control') + state = YtPublicationState + + +class YtCaptionPublicationState(YtBasePublicationState): + """Status of an unpublished caption track""" + _qname = YT_TEMPLATE % 'state' + reason_code = 'reasonCode' + name = 'name' + + +class YouTubeCaptionAppControl(atom.data.Control): + """Describes a you tube caption app control""" + _qname = atom.data.APP_TEMPLATE_V2 % 'control' + state = YtCaptionPublicationState + + +class CaptionTrackEntry(gdata.data.GDEntry): + """Describes a caption track""" + + +class 
CaptionTrackFeed(gdata.data.GDFeed): + """Describes caption tracks""" + entry = [CaptionTrackEntry] + + +class YtCountHint(atom.core.XmlElement): + """Hint as to how many entries the linked feed contains""" + _qname = YT_TEMPLATE % 'countHint' + + +class PlaylistLinkEntry(gdata.data.BatchEntry): + """Describes a playlist""" + description = YtDescription + playlist_id = YtPlaylistId + count_hint = YtCountHint + private = YtPrivate + + +class PlaylistLinkFeed(gdata.data.BatchFeed): + """Describes list of playlists""" + entry = [PlaylistLinkEntry] + + +class YtModerationStatus(atom.core.XmlElement): + """Moderation status""" + _qname = YT_TEMPLATE % 'moderationStatus' + + +class YtPlaylistTitle(atom.core.XmlElement): + """Playlist title""" + _qname = YT_TEMPLATE % 'playlistTitle' + + +class SubscriptionEntry(gdata.data.BatchEntry): + """Describes user's channel subscritpions""" + count_hint = YtCountHint + playlist_title = YtPlaylistTitle + thumbnail = gdata.media.data.MediaThumbnail + username = YtUsername + query_string = YtQueryString + playlist_id = YtPlaylistId + + +class SubscriptionFeed(gdata.data.BatchFeed): + """Describes list of user's video subscriptions""" + entry = [SubscriptionEntry] + + +class YtSpam(atom.core.XmlElement): + """Indicates that the entry probably contains spam""" + _qname = YT_TEMPLATE % 'spam' + + +class CommentEntry(gdata.data.BatchEntry): + """Describes a comment for a video""" + spam = YtSpam + + +class CommentFeed(gdata.data.BatchFeed): + """Describes comments for a video""" + entry = [CommentEntry] + + +class YtUploaded(atom.core.XmlElement): + """Date/Time at which the video was uploaded""" + _qname = YT_TEMPLATE % 'uploaded' + + +class YtVideoId(atom.core.XmlElement): + """Video id""" + _qname = YT_TEMPLATE % 'videoid' + + +class YouTubeMediaGroup(gdata.media.data.MediaGroup): + """Describes a you tube media group""" + _qname = gdata.media.data.MEDIA_TEMPLATE % 'group' + videoid = YtVideoId + private = YtPrivate + duration = YtDuration + aspect_ratio = YtAspectRatio + uploaded = YtUploaded + + +class VideoEntryBase(gdata.data.GDEntry): + """Elements that describe or contain videos""" + group = YouTubeMediaGroup + statistics = YtVideoStatistics + racy = YtRacy + recorded = YtRecorded + where = gdata.geo.data.GeoRssWhere + rating = gdata.data.Rating + noembed = YtNoEmbed + location = YtLocation + comments = gdata.data.Comments + + +class PlaylistEntry(gdata.data.BatchEntry): + """Describes a video in a playlist""" + description = YtDescription + position = YtPosition + + +class PlaylistFeed(gdata.data.BatchFeed): + """Describes videos in a playlist""" + private = YtPrivate + group = YouTubeMediaGroup + playlist_id = YtPlaylistId + entry = [PlaylistEntry] + + +class VideoEntry(gdata.data.BatchEntry): + """Describes a video""" + + +class VideoFeed(gdata.data.BatchFeed): + """Describes a video feed""" + entry = [VideoEntry] + + +class VideoMessageEntry(gdata.data.BatchEntry): + """Describes a video message""" + description = YtDescription + + +class VideoMessageFeed(gdata.data.BatchFeed): + """Describes videos in a videoMessage""" + entry = [VideoMessageEntry] + + +class UserEventEntry(gdata.data.GDEntry): + """Describes a user event""" + playlist_id = YtPlaylistId + videoid = YtVideoId + username = YtUsername + query_string = YtQueryString + rating = gdata.data.Rating + + +class UserEventFeed(gdata.data.GDFeed): + """Describes list of events""" + entry = [UserEventEntry] + + +class VideoModerationEntry(gdata.data.GDEntry): + """Describes video moderation""" + 
moderation_status = YtModerationStatus + videoid = YtVideoId + + +class VideoModerationFeed(gdata.data.GDFeed): + """Describes a video moderation feed""" + entry = [VideoModerationEntry] + + diff --git a/gam/gdata/youtube/service.py b/gam/gdata/youtube/service.py new file mode 100755 index 00000000000..c98201b15c6 --- /dev/null +++ b/gam/gdata/youtube/service.py @@ -0,0 +1,1563 @@ +#!/usr/bin/python +# +# Copyright (C) 2008 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""YouTubeService extends GDataService to streamline YouTube operations. + + YouTubeService: Provides methods to perform CRUD operations on YouTube feeds. + Extends GDataService. +""" + +__author__ = ('api.stephaniel@gmail.com (Stephanie Liu), ' + 'api.jhartmann@gmail.com (Jochen Hartmann)') + +try: + from xml.etree import cElementTree as ElementTree +except ImportError: + try: + import cElementTree as ElementTree + except ImportError: + try: + from xml.etree import ElementTree + except ImportError: + from elementtree import ElementTree +import os +import atom +import gdata +import gdata.service +import gdata.youtube + +YOUTUBE_SERVER = 'gdata.youtube.com' +YOUTUBE_SERVICE = 'youtube' +YOUTUBE_CLIENTLOGIN_AUTHENTICATION_URL = 'https://www.google.com/youtube/accounts/ClientLogin' +YOUTUBE_SUPPORTED_UPLOAD_TYPES = ('mov', 'avi', 'wmv', 'mpg', 'quicktime', + 'flv', 'mp4', 'x-flv') +YOUTUBE_QUERY_VALID_TIME_PARAMETERS = ('today', 'this_week', 'this_month', + 'all_time') +YOUTUBE_QUERY_VALID_ORDERBY_PARAMETERS = ('published', 'viewCount', 'rating', + 'relevance') +YOUTUBE_QUERY_VALID_RACY_PARAMETERS = ('include', 'exclude') +YOUTUBE_QUERY_VALID_FORMAT_PARAMETERS = ('1', '5', '6') +YOUTUBE_STANDARDFEEDS = ('most_recent', 'recently_featured', + 'top_rated', 'most_viewed','watch_on_mobile') +YOUTUBE_UPLOAD_URI = 'http://uploads.gdata.youtube.com/feeds/api/users' +YOUTUBE_UPLOAD_TOKEN_URI = 'http://gdata.youtube.com/action/GetUploadToken' +YOUTUBE_VIDEO_URI = 'http://gdata.youtube.com/feeds/api/videos' +YOUTUBE_USER_FEED_URI = 'http://gdata.youtube.com/feeds/api/users' +YOUTUBE_PLAYLIST_FEED_URI = 'http://gdata.youtube.com/feeds/api/playlists' + +YOUTUBE_STANDARD_FEEDS = 'http://gdata.youtube.com/feeds/api/standardfeeds' +YOUTUBE_STANDARD_TOP_RATED_URI = '%s/%s' % (YOUTUBE_STANDARD_FEEDS, 'top_rated') +YOUTUBE_STANDARD_MOST_VIEWED_URI = '%s/%s' % (YOUTUBE_STANDARD_FEEDS, + 'most_viewed') +YOUTUBE_STANDARD_RECENTLY_FEATURED_URI = '%s/%s' % (YOUTUBE_STANDARD_FEEDS, + 'recently_featured') +YOUTUBE_STANDARD_WATCH_ON_MOBILE_URI = '%s/%s' % (YOUTUBE_STANDARD_FEEDS, + 'watch_on_mobile') +YOUTUBE_STANDARD_TOP_FAVORITES_URI = '%s/%s' % (YOUTUBE_STANDARD_FEEDS, + 'top_favorites') +YOUTUBE_STANDARD_MOST_RECENT_URI = '%s/%s' % (YOUTUBE_STANDARD_FEEDS, + 'most_recent') +YOUTUBE_STANDARD_MOST_DISCUSSED_URI = '%s/%s' % (YOUTUBE_STANDARD_FEEDS, + 'most_discussed') +YOUTUBE_STANDARD_MOST_LINKED_URI = '%s/%s' % (YOUTUBE_STANDARD_FEEDS, + 'most_linked') +YOUTUBE_STANDARD_MOST_RESPONDED_URI = '%s/%s' % (YOUTUBE_STANDARD_FEEDS, + 
'most_responded') +YOUTUBE_SCHEMA = 'http://gdata.youtube.com/schemas' + +YOUTUBE_RATING_LINK_REL = '%s#video.ratings' % YOUTUBE_SCHEMA + +YOUTUBE_COMPLAINT_CATEGORY_SCHEME = '%s/%s' % (YOUTUBE_SCHEMA, + 'complaint-reasons.cat') +YOUTUBE_SUBSCRIPTION_CATEGORY_SCHEME = '%s/%s' % (YOUTUBE_SCHEMA, + 'subscriptiontypes.cat') + +YOUTUBE_COMPLAINT_CATEGORY_TERMS = ('PORN', 'VIOLENCE', 'HATE', 'DANGEROUS', + 'RIGHTS', 'SPAM') +YOUTUBE_CONTACT_STATUS = ('accepted', 'rejected') +YOUTUBE_CONTACT_CATEGORY = ('Friends', 'Family') + +UNKOWN_ERROR = 1000 +YOUTUBE_BAD_REQUEST = 400 +YOUTUBE_CONFLICT = 409 +YOUTUBE_INTERNAL_SERVER_ERROR = 500 +YOUTUBE_INVALID_ARGUMENT = 601 +YOUTUBE_INVALID_CONTENT_TYPE = 602 +YOUTUBE_NOT_A_VIDEO = 603 +YOUTUBE_INVALID_KIND = 604 + + +class Error(Exception): + """Base class for errors within the YouTube service.""" + pass + +class RequestError(Error): + """Error class that is thrown in response to an invalid HTTP Request.""" + pass + +class YouTubeError(Error): + """YouTube service specific error class.""" + pass + +class YouTubeService(gdata.service.GDataService): + + """Client for the YouTube service. + + Performs all documented Google Data YouTube API functions, such as inserting, + updating and deleting videos, comments, playlist, subscriptions etc. + YouTube Service requires authentication for any write, update or delete + actions. + + Attributes: + email: An optional string identifying the user. Required only for + authenticated actions. + password: An optional string identifying the user's password. + source: An optional string identifying the name of your application. + server: An optional address of the YouTube API server. gdata.youtube.com + is provided as the default value. + additional_headers: An optional dictionary containing additional headers + to be passed along with each request. Use to store developer key. + client_id: An optional string identifying your application, required for + authenticated requests, along with a developer key. + developer_key: An optional string value. Register your application at + http://code.google.com/apis/youtube/dashboard to obtain a (free) key. + """ + + def __init__(self, email=None, password=None, source=None, + server=YOUTUBE_SERVER, additional_headers=None, client_id=None, + developer_key=None, **kwargs): + """Creates a client for the YouTube service. + + Args: + email: string (optional) The user's email address, used for + authentication. + password: string (optional) The user's password. + source: string (optional) The name of the user's application. + server: string (optional) The name of the server to which a connection + will be opened. Default value: 'gdata.youtube.com'. + client_id: string (optional) Identifies your application, required for + authenticated requests, along with a developer key. + developer_key: string (optional) Register your application at + http://code.google.com/apis/youtube/dashboard to obtain a (free) key. + **kwargs: The other parameters to pass to gdata.service.GDataService + constructor. 
+ """ + + gdata.service.GDataService.__init__( + self, email=email, password=password, service=YOUTUBE_SERVICE, + source=source, server=server, additional_headers=additional_headers, + **kwargs) + + if client_id is not None: + self.additional_headers['X-Gdata-Client'] = client_id + + if developer_key is not None: + self.additional_headers['X-GData-Key'] = 'key=%s' % developer_key + + self.auth_service_url = YOUTUBE_CLIENTLOGIN_AUTHENTICATION_URL + + def GetYouTubeVideoFeed(self, uri): + """Retrieve a YouTubeVideoFeed. + + Args: + uri: A string representing the URI of the feed that is to be retrieved. + + Returns: + A YouTubeVideoFeed if successfully retrieved. + """ + return self.Get(uri, converter=gdata.youtube.YouTubeVideoFeedFromString) + + def GetYouTubeVideoEntry(self, uri=None, video_id=None): + """Retrieve a YouTubeVideoEntry. + + Either a uri or a video_id must be provided. + + Args: + uri: An optional string representing the URI of the entry that is to + be retrieved. + video_id: An optional string representing the ID of the video. + + Returns: + A YouTubeVideoFeed if successfully retrieved. + + Raises: + YouTubeError: You must provide at least a uri or a video_id to the + GetYouTubeVideoEntry() method. + """ + if uri is None and video_id is None: + raise YouTubeError('You must provide at least a uri or a video_id ' + 'to the GetYouTubeVideoEntry() method') + elif video_id and not uri: + uri = '%s/%s' % (YOUTUBE_VIDEO_URI, video_id) + return self.Get(uri, converter=gdata.youtube.YouTubeVideoEntryFromString) + + def GetYouTubeContactFeed(self, uri=None, username='default'): + """Retrieve a YouTubeContactFeed. + + Either a uri or a username must be provided. + + Args: + uri: An optional string representing the URI of the contact feed that + is to be retrieved. + username: An optional string representing the username. Defaults to the + currently authenticated user. + + Returns: + A YouTubeContactFeed if successfully retrieved. + + Raises: + YouTubeError: You must provide at least a uri or a username to the + GetYouTubeContactFeed() method. + """ + if uri is None: + uri = '%s/%s/%s' % (YOUTUBE_USER_FEED_URI, username, 'contacts') + return self.Get(uri, converter=gdata.youtube.YouTubeContactFeedFromString) + + def GetYouTubeContactEntry(self, uri): + """Retrieve a YouTubeContactEntry. + + Args: + uri: A string representing the URI of the contact entry that is to + be retrieved. + + Returns: + A YouTubeContactEntry if successfully retrieved. + """ + return self.Get(uri, converter=gdata.youtube.YouTubeContactEntryFromString) + + def GetYouTubeVideoCommentFeed(self, uri=None, video_id=None): + """Retrieve a YouTubeVideoCommentFeed. + + Either a uri or a video_id must be provided. + + Args: + uri: An optional string representing the URI of the comment feed that + is to be retrieved. + video_id: An optional string representing the ID of the video for which + to retrieve the comment feed. + + Returns: + A YouTubeVideoCommentFeed if successfully retrieved. + + Raises: + YouTubeError: You must provide at least a uri or a video_id to the + GetYouTubeVideoCommentFeed() method. 
+ """ + if uri is None and video_id is None: + raise YouTubeError('You must provide at least a uri or a video_id ' + 'to the GetYouTubeVideoCommentFeed() method') + elif video_id and not uri: + uri = '%s/%s/%s' % (YOUTUBE_VIDEO_URI, video_id, 'comments') + return self.Get( + uri, converter=gdata.youtube.YouTubeVideoCommentFeedFromString) + + def GetYouTubeVideoCommentEntry(self, uri): + """Retrieve a YouTubeVideoCommentEntry. + + Args: + uri: A string representing the URI of the comment entry that is to + be retrieved. + + Returns: + A YouTubeCommentEntry if successfully retrieved. + """ + return self.Get( + uri, converter=gdata.youtube.YouTubeVideoCommentEntryFromString) + + def GetYouTubeUserFeed(self, uri=None, username=None): + """Retrieve a YouTubeVideoFeed of user uploaded videos + + Either a uri or a username must be provided. This will retrieve list + of videos uploaded by specified user. The uri will be of format + "http://gdata.youtube.com/feeds/api/users/{username}/uploads". + + Args: + uri: An optional string representing the URI of the user feed that is + to be retrieved. + username: An optional string representing the username. + + Returns: + A YouTubeUserFeed if successfully retrieved. + + Raises: + YouTubeError: You must provide at least a uri or a username to the + GetYouTubeUserFeed() method. + """ + if uri is None and username is None: + raise YouTubeError('You must provide at least a uri or a username ' + 'to the GetYouTubeUserFeed() method') + elif username and not uri: + uri = '%s/%s/%s' % (YOUTUBE_USER_FEED_URI, username, 'uploads') + return self.Get(uri, converter=gdata.youtube.YouTubeUserFeedFromString) + + def GetYouTubeUserEntry(self, uri=None, username=None): + """Retrieve a YouTubeUserEntry. + + Either a uri or a username must be provided. + + Args: + uri: An optional string representing the URI of the user entry that is + to be retrieved. + username: An optional string representing the username. + + Returns: + A YouTubeUserEntry if successfully retrieved. + + Raises: + YouTubeError: You must provide at least a uri or a username to the + GetYouTubeUserEntry() method. + """ + if uri is None and username is None: + raise YouTubeError('You must provide at least a uri or a username ' + 'to the GetYouTubeUserEntry() method') + elif username and not uri: + uri = '%s/%s' % (YOUTUBE_USER_FEED_URI, username) + return self.Get(uri, converter=gdata.youtube.YouTubeUserEntryFromString) + + def GetYouTubePlaylistFeed(self, uri=None, username='default'): + """Retrieve a YouTubePlaylistFeed (a feed of playlists for a user). + + Either a uri or a username must be provided. + + Args: + uri: An optional string representing the URI of the playlist feed that + is to be retrieved. + username: An optional string representing the username. Defaults to the + currently authenticated user. + + Returns: + A YouTubePlaylistFeed if successfully retrieved. + + Raises: + YouTubeError: You must provide at least a uri or a username to the + GetYouTubePlaylistFeed() method. + """ + if uri is None: + uri = '%s/%s/%s' % (YOUTUBE_USER_FEED_URI, username, 'playlists') + return self.Get(uri, converter=gdata.youtube.YouTubePlaylistFeedFromString) + + def GetYouTubePlaylistEntry(self, uri): + """Retrieve a YouTubePlaylistEntry. + + Args: + uri: A string representing the URI of the playlist feed that is to + be retrieved. + + Returns: + A YouTubePlaylistEntry if successfully retrieved. 
+ """ + return self.Get(uri, converter=gdata.youtube.YouTubePlaylistEntryFromString) + + def GetYouTubePlaylistVideoFeed(self, uri=None, playlist_id=None): + """Retrieve a YouTubePlaylistVideoFeed (a feed of videos on a playlist). + + Either a uri or a playlist_id must be provided. + + Args: + uri: An optional string representing the URI of the playlist video feed + that is to be retrieved. + playlist_id: An optional string representing the Id of the playlist whose + playlist video feed is to be retrieved. + + Returns: + A YouTubePlaylistVideoFeed if successfully retrieved. + + Raises: + YouTubeError: You must provide at least a uri or a playlist_id to the + GetYouTubePlaylistVideoFeed() method. + """ + if uri is None and playlist_id is None: + raise YouTubeError('You must provide at least a uri or a playlist_id ' + 'to the GetYouTubePlaylistVideoFeed() method') + elif playlist_id and not uri: + uri = '%s/%s' % (YOUTUBE_PLAYLIST_FEED_URI, playlist_id) + return self.Get( + uri, converter=gdata.youtube.YouTubePlaylistVideoFeedFromString) + + def GetYouTubeVideoResponseFeed(self, uri=None, video_id=None): + """Retrieve a YouTubeVideoResponseFeed. + + Either a uri or a playlist_id must be provided. + + Args: + uri: An optional string representing the URI of the video response feed + that is to be retrieved. + video_id: An optional string representing the ID of the video whose + response feed is to be retrieved. + + Returns: + A YouTubeVideoResponseFeed if successfully retrieved. + + Raises: + YouTubeError: You must provide at least a uri or a video_id to the + GetYouTubeVideoResponseFeed() method. + """ + if uri is None and video_id is None: + raise YouTubeError('You must provide at least a uri or a video_id ' + 'to the GetYouTubeVideoResponseFeed() method') + elif video_id and not uri: + uri = '%s/%s/%s' % (YOUTUBE_VIDEO_URI, video_id, 'responses') + return self.Get( + uri, converter=gdata.youtube.YouTubeVideoResponseFeedFromString) + + def GetYouTubeVideoResponseEntry(self, uri): + """Retrieve a YouTubeVideoResponseEntry. + + Args: + uri: A string representing the URI of the video response entry that + is to be retrieved. + + Returns: + A YouTubeVideoResponseEntry if successfully retrieved. + """ + return self.Get( + uri, converter=gdata.youtube.YouTubeVideoResponseEntryFromString) + + def GetYouTubeSubscriptionFeed(self, uri=None, username='default'): + """Retrieve a YouTubeSubscriptionFeed. + + Either the uri of the feed or a username must be provided. + + Args: + uri: An optional string representing the URI of the feed that is to + be retrieved. + username: An optional string representing the username whose subscription + feed is to be retrieved. Defaults to the currently authenticted user. + + Returns: + A YouTubeVideoSubscriptionFeed if successfully retrieved. + """ + if uri is None: + uri = '%s/%s/%s' % (YOUTUBE_USER_FEED_URI, username, 'subscriptions') + return self.Get( + uri, converter=gdata.youtube.YouTubeSubscriptionFeedFromString) + + def GetYouTubeSubscriptionEntry(self, uri): + """Retrieve a YouTubeSubscriptionEntry. + + Args: + uri: A string representing the URI of the entry that is to be retrieved. + + Returns: + A YouTubeVideoSubscriptionEntry if successfully retrieved. + """ + return self.Get( + uri, converter=gdata.youtube.YouTubeSubscriptionEntryFromString) + + def GetYouTubeRelatedVideoFeed(self, uri=None, video_id=None): + """Retrieve a YouTubeRelatedVideoFeed. + + Either a uri for the feed or a video_id is required. 
+ + Args: + uri: An optional string representing the URI of the feed that is to + be retrieved. + video_id: An optional string representing the ID of the video for which + to retrieve the related video feed. + + Returns: + A YouTubeRelatedVideoFeed if successfully retrieved. + + Raises: + YouTubeError: You must provide at least a uri or a video_id to the + GetYouTubeRelatedVideoFeed() method. + """ + if uri is None and video_id is None: + raise YouTubeError('You must provide at least a uri or a video_id ' + 'to the GetYouTubeRelatedVideoFeed() method') + elif video_id and not uri: + uri = '%s/%s/%s' % (YOUTUBE_VIDEO_URI, video_id, 'related') + return self.Get( + uri, converter=gdata.youtube.YouTubeVideoFeedFromString) + + def GetTopRatedVideoFeed(self): + """Retrieve the 'top_rated' standard video feed. + + Returns: + A YouTubeVideoFeed if successfully retrieved. + """ + return self.GetYouTubeVideoFeed(YOUTUBE_STANDARD_TOP_RATED_URI) + + def GetMostViewedVideoFeed(self): + """Retrieve the 'most_viewed' standard video feed. + + Returns: + A YouTubeVideoFeed if successfully retrieved. + """ + return self.GetYouTubeVideoFeed(YOUTUBE_STANDARD_MOST_VIEWED_URI) + + def GetRecentlyFeaturedVideoFeed(self): + """Retrieve the 'recently_featured' standard video feed. + + Returns: + A YouTubeVideoFeed if successfully retrieved. + """ + return self.GetYouTubeVideoFeed(YOUTUBE_STANDARD_RECENTLY_FEATURED_URI) + + def GetWatchOnMobileVideoFeed(self): + """Retrieve the 'watch_on_mobile' standard video feed. + + Returns: + A YouTubeVideoFeed if successfully retrieved. + """ + return self.GetYouTubeVideoFeed(YOUTUBE_STANDARD_WATCH_ON_MOBILE_URI) + + def GetTopFavoritesVideoFeed(self): + """Retrieve the 'top_favorites' standard video feed. + + Returns: + A YouTubeVideoFeed if successfully retrieved. + """ + return self.GetYouTubeVideoFeed(YOUTUBE_STANDARD_TOP_FAVORITES_URI) + + def GetMostRecentVideoFeed(self): + """Retrieve the 'most_recent' standard video feed. + + Returns: + A YouTubeVideoFeed if successfully retrieved. + """ + return self.GetYouTubeVideoFeed(YOUTUBE_STANDARD_MOST_RECENT_URI) + + def GetMostDiscussedVideoFeed(self): + """Retrieve the 'most_discussed' standard video feed. + + Returns: + A YouTubeVideoFeed if successfully retrieved. + """ + return self.GetYouTubeVideoFeed(YOUTUBE_STANDARD_MOST_DISCUSSED_URI) + + def GetMostLinkedVideoFeed(self): + """Retrieve the 'most_linked' standard video feed. + + Returns: + A YouTubeVideoFeed if successfully retrieved. + """ + return self.GetYouTubeVideoFeed(YOUTUBE_STANDARD_MOST_LINKED_URI) + + def GetMostRespondedVideoFeed(self): + """Retrieve the 'most_responded' standard video feed. + + Returns: + A YouTubeVideoFeed if successfully retrieved. + """ + return self.GetYouTubeVideoFeed(YOUTUBE_STANDARD_MOST_RESPONDED_URI) + + def GetUserFavoritesFeed(self, username='default'): + """Retrieve the favorites feed for a given user. + + Args: + username: An optional string representing the username whose favorites + feed is to be retrieved. Defaults to the currently authenticated user. + + Returns: + A YouTubeVideoFeed if successfully retrieved. + """ + favorites_feed_uri = '%s/%s/%s' % (YOUTUBE_USER_FEED_URI, username, + 'favorites') + return self.GetYouTubeVideoFeed(favorites_feed_uri) + + def InsertVideoEntry(self, video_entry, filename_or_handle, + youtube_username='default', + content_type='video/quicktime'): + """Upload a new video to YouTube using the direct upload mechanism. + + Needs authentication. 
+ + Args: + video_entry: The YouTubeVideoEntry to upload. + filename_or_handle: A file-like object or file name where the video + will be read from. + youtube_username: An optional string representing the username into whose + account this video is to be uploaded to. Defaults to the currently + authenticated user. + content_type: An optional string representing internet media type + (a.k.a. mime type) of the media object. Currently the YouTube API + supports these types: + o video/mpeg + o video/quicktime + o video/x-msvideo + o video/mp4 + o video/x-flv + + Returns: + The newly created YouTubeVideoEntry if successful. + + Raises: + AssertionError: video_entry must be a gdata.youtube.VideoEntry instance. + YouTubeError: An error occurred trying to read the video file provided. + gdata.service.RequestError: An error occurred trying to upload the video + to the API server. + """ + + # We need to perform a series of checks on the video_entry and on the + # file that we plan to upload, such as checking whether we have a valid + # video_entry and that the file is the correct type and readable, prior + # to performing the actual POST request. + + try: + assert(isinstance(video_entry, gdata.youtube.YouTubeVideoEntry)) + except AssertionError: + raise YouTubeError({'status':YOUTUBE_INVALID_ARGUMENT, + 'body':'`video_entry` must be a gdata.youtube.VideoEntry instance', + 'reason':'Found %s, not VideoEntry' % type(video_entry) + }) + #majtype, mintype = content_type.split('/') + # + #try: + # assert(mintype in YOUTUBE_SUPPORTED_UPLOAD_TYPES) + #except (ValueError, AssertionError): + # raise YouTubeError({'status':YOUTUBE_INVALID_CONTENT_TYPE, + # 'body':'This is not a valid content type: %s' % content_type, + # 'reason':'Accepted content types: %s' % + # ['video/%s' % (t) for t in YOUTUBE_SUPPORTED_UPLOAD_TYPES]}) + + if (isinstance(filename_or_handle, (str, unicode)) + and os.path.exists(filename_or_handle)): + mediasource = gdata.MediaSource() + mediasource.setFile(filename_or_handle, content_type) + elif hasattr(filename_or_handle, 'read'): + import StringIO + if hasattr(filename_or_handle, 'seek'): + filename_or_handle.seek(0) + file_handle = StringIO.StringIO(filename_or_handle.read()) + name = 'video' + if hasattr(filename_or_handle, 'name'): + name = filename_or_handle.name + mediasource = gdata.MediaSource(file_handle, content_type, + content_length=file_handle.len, file_name=name) + else: + raise YouTubeError({'status':YOUTUBE_INVALID_ARGUMENT, 'body': + '`filename_or_handle` must be a path name or a file-like object', + 'reason': ('Found %s, not path name or object ' + 'with a .read() method' % type(filename_or_handle))}) + upload_uri = '%s/%s/%s' % (YOUTUBE_UPLOAD_URI, youtube_username, + 'uploads') + self.additional_headers['Slug'] = mediasource.file_name + + # Using a nested try statement to retain Python 2.4 compatibility + try: + try: + return self.Post(video_entry, uri=upload_uri, media_source=mediasource, + converter=gdata.youtube.YouTubeVideoEntryFromString) + except gdata.service.RequestError, e: + raise YouTubeError(e.args[0]) + finally: + del(self.additional_headers['Slug']) + + def CheckUploadStatus(self, video_entry=None, video_id=None): + """Check upload status on a recently uploaded video entry. + + Needs authentication. Either video_entry or video_id must be provided. + + Args: + video_entry: An optional YouTubeVideoEntry whose upload status to check + video_id: An optional string representing the ID of the uploaded video + whose status is to be checked. 
+ + Returns: + A tuple containing (video_upload_state, detailed_message) or None if + no status information is found. + + Raises: + YouTubeError: You must provide at least a video_entry or a video_id to the + CheckUploadStatus() method. + """ + if video_entry is None and video_id is None: + raise YouTubeError('You must provide at least a uri or a video_id ' + 'to the CheckUploadStatus() method') + elif video_id and not video_entry: + video_entry = self.GetYouTubeVideoEntry(video_id=video_id) + + control = video_entry.control + if control is not None: + draft = control.draft + if draft is not None: + if draft.text == 'yes': + yt_state = control.extension_elements[0] + if yt_state is not None: + state_value = yt_state.attributes['name'] + message = '' + if yt_state.text is not None: + message = yt_state.text + + return (state_value, message) + + def GetFormUploadToken(self, video_entry, uri=YOUTUBE_UPLOAD_TOKEN_URI): + """Receives a YouTube Token and a YouTube PostUrl from a YouTubeVideoEntry. + + Needs authentication. + + Args: + video_entry: The YouTubeVideoEntry to upload (meta-data only). + uri: An optional string representing the URI from where to fetch the + token information. Defaults to the YOUTUBE_UPLOADTOKEN_URI. + + Returns: + A tuple containing the URL to which to post your video file, along + with the youtube token that must be included with your upload in the + form of: (post_url, youtube_token). + """ + try: + response = self.Post(video_entry, uri) + except gdata.service.RequestError, e: + raise YouTubeError(e.args[0]) + + tree = ElementTree.fromstring(response) + + for child in tree: + if child.tag == 'url': + post_url = child.text + elif child.tag == 'token': + youtube_token = child.text + return (post_url, youtube_token) + + def UpdateVideoEntry(self, video_entry): + """Updates a video entry's meta-data. + + Needs authentication. + + Args: + video_entry: The YouTubeVideoEntry to update, containing updated + meta-data. + + Returns: + An updated YouTubeVideoEntry on success or None. + """ + for link in video_entry.link: + if link.rel == 'edit': + edit_uri = link.href + return self.Put(video_entry, uri=edit_uri, + converter=gdata.youtube.YouTubeVideoEntryFromString) + + def DeleteVideoEntry(self, video_entry): + """Deletes a video entry. + + Needs authentication. + + Args: + video_entry: The YouTubeVideoEntry to be deleted. + + Returns: + True if entry was deleted successfully. + """ + for link in video_entry.link: + if link.rel == 'edit': + edit_uri = link.href + return self.Delete(edit_uri) + + def AddRating(self, rating_value, video_entry): + """Add a rating to a video entry. + + Needs authentication. + + Args: + rating_value: The integer value for the rating (between 1 and 5). + video_entry: The YouTubeVideoEntry to be rated. + + Returns: + True if the rating was added successfully. + + Raises: + YouTubeError: rating_value must be between 1 and 5 in AddRating(). + """ + if rating_value < 1 or rating_value > 5: + raise YouTubeError('rating_value must be between 1 and 5 in AddRating()') + + entry = gdata.GDataEntry() + rating = gdata.youtube.Rating(min='1', max='5') + rating.extension_attributes['name'] = 'value' + rating.extension_attributes['value'] = str(rating_value) + entry.extension_elements.append(rating) + + for link in video_entry.link: + if link.rel == YOUTUBE_RATING_LINK_REL: + rating_uri = link.href + + return self.Post(entry, uri=rating_uri) + + def AddComment(self, comment_text, video_entry): + """Add a comment to a video entry. + + Needs authentication. 
Note that each comment that is posted must contain + the video entry that it is to be posted to. + + Args: + comment_text: A string representing the text of the comment. + video_entry: The YouTubeVideoEntry to be commented on. + + Returns: + True if the comment was added successfully. + """ + content = atom.Content(text=comment_text) + comment_entry = gdata.youtube.YouTubeVideoCommentEntry(content=content) + comment_post_uri = video_entry.comments.feed_link[0].href + + return self.Post(comment_entry, uri=comment_post_uri) + + def AddVideoResponse(self, video_id_to_respond_to, video_response): + """Add a video response. + + Needs authentication. + + Args: + video_id_to_respond_to: A string representing the ID of the video to be + responded to. + video_response: YouTubeVideoEntry to be posted as a response. + + Returns: + True if video response was posted successfully. + """ + post_uri = '%s/%s/%s' % (YOUTUBE_VIDEO_URI, video_id_to_respond_to, + 'responses') + return self.Post(video_response, uri=post_uri) + + def DeleteVideoResponse(self, video_id, response_video_id): + """Delete a video response. + + Needs authentication. + + Args: + video_id: A string representing the ID of video that contains the + response. + response_video_id: A string representing the ID of the video that was + posted as a response. + + Returns: + True if video response was deleted succcessfully. + """ + delete_uri = '%s/%s/%s/%s' % (YOUTUBE_VIDEO_URI, video_id, 'responses', + response_video_id) + return self.Delete(delete_uri) + + def AddComplaint(self, complaint_text, complaint_term, video_id): + """Add a complaint for a particular video entry. + + Needs authentication. + + Args: + complaint_text: A string representing the complaint text. + complaint_term: A string representing the complaint category term. + video_id: A string representing the ID of YouTubeVideoEntry to + complain about. + + Returns: + True if posted successfully. + + Raises: + YouTubeError: Your complaint_term is not valid. + """ + if complaint_term not in YOUTUBE_COMPLAINT_CATEGORY_TERMS: + raise YouTubeError('Your complaint_term is not valid') + + content = atom.Content(text=complaint_text) + category = atom.Category(term=complaint_term, + scheme=YOUTUBE_COMPLAINT_CATEGORY_SCHEME) + + complaint_entry = gdata.GDataEntry(content=content, category=[category]) + post_uri = '%s/%s/%s' % (YOUTUBE_VIDEO_URI, video_id, 'complaints') + + return self.Post(complaint_entry, post_uri) + + def AddVideoEntryToFavorites(self, video_entry, username='default'): + """Add a video entry to a users favorite feed. + + Needs authentication. + + Args: + video_entry: The YouTubeVideoEntry to add. + username: An optional string representing the username to whose favorite + feed you wish to add the entry. Defaults to the currently + authenticated user. + Returns: + The posted YouTubeVideoEntry if successfully posted. + """ + post_uri = '%s/%s/%s' % (YOUTUBE_USER_FEED_URI, username, 'favorites') + + return self.Post(video_entry, post_uri, + converter=gdata.youtube.YouTubeVideoEntryFromString) + + def DeleteVideoEntryFromFavorites(self, video_id, username='default'): + """Delete a video entry from the users favorite feed. + + Needs authentication. + + Args: + video_id: A string representing the ID of the video that is to be removed + username: An optional string representing the username of the user's + favorite feed. Defaults to the currently authenticated user. + + Returns: + True if entry was successfully deleted. 
+ """ + edit_link = '%s/%s/%s/%s' % (YOUTUBE_USER_FEED_URI, username, 'favorites', + video_id) + return self.Delete(edit_link) + + def AddPlaylist(self, playlist_title, playlist_description, + playlist_private=None): + """Add a new playlist to the currently authenticated users account. + + Needs authentication. + + Args: + playlist_title: A string representing the title for the new playlist. + playlist_description: A string representing the description of the + playlist. + playlist_private: An optional boolean, set to True if the playlist is + to be private. + + Returns: + The YouTubePlaylistEntry if successfully posted. + """ + playlist_entry = gdata.youtube.YouTubePlaylistEntry( + title=atom.Title(text=playlist_title), + description=gdata.youtube.Description(text=playlist_description)) + if playlist_private: + playlist_entry.private = gdata.youtube.Private() + + playlist_post_uri = '%s/%s/%s' % (YOUTUBE_USER_FEED_URI, 'default', + 'playlists') + return self.Post(playlist_entry, playlist_post_uri, + converter=gdata.youtube.YouTubePlaylistEntryFromString) + + def UpdatePlaylist(self, playlist_id, new_playlist_title, + new_playlist_description, playlist_private=None, + username='default'): + """Update a playlist with new meta-data. + + Needs authentication. + + Args: + playlist_id: A string representing the ID of the playlist to be updated. + new_playlist_title: A string representing a new title for the playlist. + new_playlist_description: A string representing a new description for the + playlist. + playlist_private: An optional boolean, set to True if the playlist is + to be private. + username: An optional string representing the username whose playlist is + to be updated. Defaults to the currently authenticated user. + + Returns: + A YouTubePlaylistEntry if the update was successful. + """ + updated_playlist = gdata.youtube.YouTubePlaylistEntry( + title=atom.Title(text=new_playlist_title), + description=gdata.youtube.Description(text=new_playlist_description)) + if playlist_private: + updated_playlist.private = gdata.youtube.Private() + + playlist_put_uri = '%s/%s/playlists/%s' % (YOUTUBE_USER_FEED_URI, username, + playlist_id) + + return self.Put(updated_playlist, playlist_put_uri, + converter=gdata.youtube.YouTubePlaylistEntryFromString) + + def DeletePlaylist(self, playlist_uri): + """Delete a playlist from the currently authenticated users playlists. + + Needs authentication. + + Args: + playlist_uri: A string representing the URI of the playlist that is + to be deleted. + + Returns: + True if successfully deleted. + """ + return self.Delete(playlist_uri) + + def AddPlaylistVideoEntryToPlaylist( + self, playlist_uri, video_id, custom_video_title=None, + custom_video_description=None): + """Add a video entry to a playlist, optionally providing a custom title + and description. + + Needs authentication. + + Args: + playlist_uri: A string representing the URI of the playlist to which this + video entry is to be added. + video_id: A string representing the ID of the video entry to add. + custom_video_title: An optional string representing a custom title for + the video (only shown on the playlist). + custom_video_description: An optional string representing a custom + description for the video (only shown on the playlist). + + Returns: + A YouTubePlaylistVideoEntry if successfully posted. 
+ """ + playlist_video_entry = gdata.youtube.YouTubePlaylistVideoEntry( + atom_id=atom.Id(text=video_id)) + if custom_video_title: + playlist_video_entry.title = atom.Title(text=custom_video_title) + if custom_video_description: + playlist_video_entry.description = gdata.youtube.Description( + text=custom_video_description) + + return self.Post(playlist_video_entry, playlist_uri, + converter=gdata.youtube.YouTubePlaylistVideoEntryFromString) + + def UpdatePlaylistVideoEntryMetaData( + self, playlist_uri, playlist_entry_id, new_video_title, + new_video_description, new_video_position): + """Update the meta data for a YouTubePlaylistVideoEntry. + + Needs authentication. + + Args: + playlist_uri: A string representing the URI of the playlist that contains + the entry to be updated. + playlist_entry_id: A string representing the ID of the entry to be + updated. + new_video_title: A string representing the new title for the video entry. + new_video_description: A string representing the new description for + the video entry. + new_video_position: An integer representing the new position on the + playlist for the video. + + Returns: + A YouTubePlaylistVideoEntry if the update was successful. + """ + playlist_video_entry = gdata.youtube.YouTubePlaylistVideoEntry( + title=atom.Title(text=new_video_title), + description=gdata.youtube.Description(text=new_video_description), + position=gdata.youtube.Position(text=str(new_video_position))) + + playlist_put_uri = playlist_uri + '/' + playlist_entry_id + + return self.Put(playlist_video_entry, playlist_put_uri, + converter=gdata.youtube.YouTubePlaylistVideoEntryFromString) + + def DeletePlaylistVideoEntry(self, playlist_uri, playlist_video_entry_id): + """Delete a playlist video entry from a playlist. + + Needs authentication. + + Args: + playlist_uri: A URI representing the playlist from which the playlist + video entry is to be removed from. + playlist_video_entry_id: A string representing id of the playlist video + entry that is to be removed. + + Returns: + True if entry was successfully deleted. + """ + delete_uri = '%s/%s' % (playlist_uri, playlist_video_entry_id) + return self.Delete(delete_uri) + + def AddSubscriptionToChannel(self, username_to_subscribe_to, + my_username = 'default'): + """Add a new channel subscription to the currently authenticated users + account. + + Needs authentication. + + Args: + username_to_subscribe_to: A string representing the username of the + channel to which we want to subscribe to. + my_username: An optional string representing the name of the user which + we want to subscribe. Defaults to currently authenticated user. + + Returns: + A new YouTubeSubscriptionEntry if successfully posted. + """ + subscription_category = atom.Category( + scheme=YOUTUBE_SUBSCRIPTION_CATEGORY_SCHEME, + term='channel') + subscription_username = gdata.youtube.Username( + text=username_to_subscribe_to) + + subscription_entry = gdata.youtube.YouTubeSubscriptionEntry( + category=subscription_category, + username=subscription_username) + + post_uri = '%s/%s/%s' % (YOUTUBE_USER_FEED_URI, my_username, + 'subscriptions') + + return self.Post(subscription_entry, post_uri, + converter=gdata.youtube.YouTubeSubscriptionEntryFromString) + + def AddSubscriptionToFavorites(self, username, my_username = 'default'): + """Add a new subscription to a users favorites to the currently + authenticated user's account. + + Needs authentication + + Args: + username: A string representing the username of the user's favorite feed + to subscribe to. 
+ my_username: An optional string representing the username of the user + that is to be subscribed. Defaults to currently authenticated user. + + Returns: + A new YouTubeSubscriptionEntry if successful. + """ + subscription_category = atom.Category( + scheme=YOUTUBE_SUBSCRIPTION_CATEGORY_SCHEME, + term='favorites') + subscription_username = gdata.youtube.Username(text=username) + + subscription_entry = gdata.youtube.YouTubeSubscriptionEntry( + category=subscription_category, + username=subscription_username) + + post_uri = '%s/%s/%s' % (YOUTUBE_USER_FEED_URI, my_username, + 'subscriptions') + + return self.Post(subscription_entry, post_uri, + converter=gdata.youtube.YouTubeSubscriptionEntryFromString) + + def AddSubscriptionToQuery(self, query, my_username = 'default'): + """Add a new subscription to a specific keyword query to the currently + authenticated user's account. + + Needs authentication + + Args: + query: A string representing the keyword query to subscribe to. + my_username: An optional string representing the username of the user + that is to be subscribed. Defaults to currently authenticated user. + + Returns: + A new YouTubeSubscriptionEntry if successful. + """ + subscription_category = atom.Category( + scheme=YOUTUBE_SUBSCRIPTION_CATEGORY_SCHEME, + term='query') + subscription_query_string = gdata.youtube.QueryString(text=query) + + subscription_entry = gdata.youtube.YouTubeSubscriptionEntry( + category=subscription_category, + query_string=subscription_query_string) + + post_uri = '%s/%s/%s' % (YOUTUBE_USER_FEED_URI, my_username, + 'subscriptions') + + return self.Post(subscription_entry, post_uri, + converter=gdata.youtube.YouTubeSubscriptionEntryFromString) + + + + def DeleteSubscription(self, subscription_uri): + """Delete a subscription from the currently authenticated user's account. + + Needs authentication. + + Args: + subscription_uri: A string representing the URI of the subscription that + is to be deleted. + + Returns: + True if deleted successfully. + """ + return self.Delete(subscription_uri) + + def AddContact(self, contact_username, my_username='default'): + """Add a new contact to the currently authenticated user's contact feed. + + Needs authentication. + + Args: + contact_username: A string representing the username of the contact + that you wish to add. + my_username: An optional string representing the username to whose + contact the new contact is to be added. + + Returns: + A YouTubeContactEntry if added successfully. + """ + contact_category = atom.Category( + scheme = 'http://gdata.youtube.com/schemas/2007/contact.cat', + term = 'Friends') + contact_username = gdata.youtube.Username(text=contact_username) + contact_entry = gdata.youtube.YouTubeContactEntry( + category=contact_category, + username=contact_username) + + contact_post_uri = '%s/%s/%s' % (YOUTUBE_USER_FEED_URI, my_username, + 'contacts') + + return self.Post(contact_entry, contact_post_uri, + converter=gdata.youtube.YouTubeContactEntryFromString) + + def UpdateContact(self, contact_username, new_contact_status, + new_contact_category, my_username='default'): + """Update a contact, providing a new status and a new category. + + Needs authentication. + + Args: + contact_username: A string representing the username of the contact + that is to be updated. + new_contact_status: A string representing the new status of the contact. + This can either be set to 'accepted' or 'rejected'. 
+ new_contact_category: A string representing the new category for the + contact, either 'Friends' or 'Family'. + my_username: An optional string representing the username of the user + whose contact feed we are modifying. Defaults to the currently + authenticated user. + + Returns: + A YouTubeContactEntry if updated succesfully. + + Raises: + YouTubeError: New contact status must be within the accepted values. Or + new contact category must be within the accepted categories. + """ + if new_contact_status not in YOUTUBE_CONTACT_STATUS: + raise YouTubeError('New contact status must be one of %s' % + (' '.join(YOUTUBE_CONTACT_STATUS))) + if new_contact_category not in YOUTUBE_CONTACT_CATEGORY: + raise YouTubeError('New contact category must be one of %s' % + (' '.join(YOUTUBE_CONTACT_CATEGORY))) + + contact_category = atom.Category( + scheme='http://gdata.youtube.com/schemas/2007/contact.cat', + term=new_contact_category) + + contact_status = gdata.youtube.Status(text=new_contact_status) + contact_entry = gdata.youtube.YouTubeContactEntry( + category=contact_category, + status=contact_status) + + contact_put_uri = '%s/%s/%s/%s' % (YOUTUBE_USER_FEED_URI, my_username, + 'contacts', contact_username) + + return self.Put(contact_entry, contact_put_uri, + converter=gdata.youtube.YouTubeContactEntryFromString) + + def DeleteContact(self, contact_username, my_username='default'): + """Delete a contact from a users contact feed. + + Needs authentication. + + Args: + contact_username: A string representing the username of the contact + that is to be deleted. + my_username: An optional string representing the username of the user's + contact feed from which to delete the contact. Defaults to the + currently authenticated user. + + Returns: + True if the contact was deleted successfully + """ + contact_edit_uri = '%s/%s/%s/%s' % (YOUTUBE_USER_FEED_URI, my_username, + 'contacts', contact_username) + return self.Delete(contact_edit_uri) + + def _GetDeveloperKey(self): + """Getter for Developer Key property. + + Returns: + If the developer key has been set, a string representing the developer key + is returned or None. + """ + if 'X-GData-Key' in self.additional_headers: + return self.additional_headers['X-GData-Key'][4:] + else: + return None + + def _SetDeveloperKey(self, developer_key): + """Setter for Developer Key property. + + Sets the developer key in the 'X-GData-Key' header. The actual value that + is set is 'key=' plus the developer_key that was passed. + """ + self.additional_headers['X-GData-Key'] = 'key=' + developer_key + + developer_key = property(_GetDeveloperKey, _SetDeveloperKey, + doc="""The Developer Key property""") + + def _GetClientId(self): + """Getter for Client Id property. + + Returns: + If the client_id has been set, a string representing it is returned + or None. + """ + if 'X-Gdata-Client' in self.additional_headers: + return self.additional_headers['X-Gdata-Client'] + else: + return None + + def _SetClientId(self, client_id): + """Setter for Client Id property. + + Sets the 'X-Gdata-Client' header. + """ + self.additional_headers['X-Gdata-Client'] = client_id + + client_id = property(_GetClientId, _SetClientId, + doc="""The ClientId property""") + + def Query(self, uri): + """Performs a query and returns a resulting feed or entry. + + Args: + uri: A string representing the URI of the feed that is to be queried. 
+ + Returns: + On success, a tuple in the form: + (boolean succeeded=True, ElementTree._Element result) + On failure, a tuple in the form: + (boolean succeeded=False, {'status': HTTP status code from server, + 'reason': HTTP reason from the server, + 'body': HTTP body of the server's response}) + """ + result = self.Get(uri) + return result + + def YouTubeQuery(self, query): + """Performs a YouTube specific query and returns a resulting feed or entry. + + Args: + query: A Query object or one if its sub-classes (YouTubeVideoQuery, + YouTubeUserQuery or YouTubePlaylistQuery). + + Returns: + Depending on the type of Query object submitted returns either a + YouTubeVideoFeed, a YouTubeUserFeed, a YouTubePlaylistFeed. If the + Query object provided was not YouTube-related, a tuple is returned. + On success the tuple will be in this form: + (boolean succeeded=True, ElementTree._Element result) + On failure, the tuple will be in this form: + (boolean succeeded=False, {'status': HTTP status code from server, + 'reason': HTTP reason from the server, + 'body': HTTP body of the server response}) + """ + result = self.Query(query.ToUri()) + if isinstance(query, YouTubeVideoQuery): + return gdata.youtube.YouTubeVideoFeedFromString(result.ToString()) + elif isinstance(query, YouTubeUserQuery): + return gdata.youtube.YouTubeUserFeedFromString(result.ToString()) + elif isinstance(query, YouTubePlaylistQuery): + return gdata.youtube.YouTubePlaylistFeedFromString(result.ToString()) + else: + return result + +class YouTubeVideoQuery(gdata.service.Query): + + """Subclasses gdata.service.Query to represent a YouTube Data API query. + + Attributes are set dynamically via properties. Properties correspond to + the standard Google Data API query parameters with YouTube Data API + extensions. Please refer to the API documentation for details. + + Attributes: + vq: The vq parameter, which is only supported for video feeds, specifies a + search query term. Refer to API documentation for further details. + orderby: The orderby parameter, which is only supported for video feeds, + specifies the value that will be used to sort videos in the search + result set. Valid values for this parameter are relevance, published, + viewCount and rating. + time: The time parameter, which is only available for the top_rated, + top_favorites, most_viewed, most_discussed, most_linked and + most_responded standard feeds, restricts the search to videos uploaded + within the specified time. Valid values for this parameter are today + (1 day), this_week (7 days), this_month (1 month) and all_time. + The default value for this parameter is all_time. + format: The format parameter specifies that videos must be available in a + particular video format. Refer to the API documentation for details. + racy: The racy parameter allows a search result set to include restricted + content as well as standard content. Valid values for this parameter + are include and exclude. By default, restricted content is excluded. + lr: The lr parameter restricts the search to videos that have a title, + description or keywords in a specific language. Valid values for the lr + parameter are ISO 639-1 two-letter language codes. + restriction: The restriction parameter identifies the IP address that + should be used to filter videos that can only be played in specific + countries. + location: A string of geo coordinates. Note that this is not used when the + search is performed but rather to filter the returned videos for ones + that match to the location entered. 
+ feed: str (optional) The base URL which is the beginning of the query URL. + defaults to 'http://%s/feeds/videos' % (YOUTUBE_SERVER) + """ + + def __init__(self, video_id=None, feed_type=None, text_query=None, + params=None, categories=None, feed=None): + + if feed_type in YOUTUBE_STANDARDFEEDS and feed is None: + feed = 'http://%s/feeds/standardfeeds/%s' % (YOUTUBE_SERVER, feed_type) + elif (feed_type is 'responses' or feed_type is 'comments' and video_id + and feed is None): + feed = 'http://%s/feeds/videos/%s/%s' % (YOUTUBE_SERVER, video_id, + feed_type) + elif feed is None: + feed = 'http://%s/feeds/videos' % (YOUTUBE_SERVER) + + gdata.service.Query.__init__(self, feed, text_query=text_query, + params=params, categories=categories) + + def _GetVideoQuery(self): + if 'vq' in self: + return self['vq'] + else: + return None + + def _SetVideoQuery(self, val): + self['vq'] = val + + vq = property(_GetVideoQuery, _SetVideoQuery, + doc="""The video query (vq) query parameter""") + + def _GetOrderBy(self): + if 'orderby' in self: + return self['orderby'] + else: + return None + + def _SetOrderBy(self, val): + if val not in YOUTUBE_QUERY_VALID_ORDERBY_PARAMETERS: + if val.startswith('relevance_lang_') is False: + raise YouTubeError('OrderBy must be one of: %s ' % + ' '.join(YOUTUBE_QUERY_VALID_ORDERBY_PARAMETERS)) + self['orderby'] = val + + orderby = property(_GetOrderBy, _SetOrderBy, + doc="""The orderby query parameter""") + + def _GetTime(self): + if 'time' in self: + return self['time'] + else: + return None + + def _SetTime(self, val): + if val not in YOUTUBE_QUERY_VALID_TIME_PARAMETERS: + raise YouTubeError('Time must be one of: %s ' % + ' '.join(YOUTUBE_QUERY_VALID_TIME_PARAMETERS)) + self['time'] = val + + time = property(_GetTime, _SetTime, + doc="""The time query parameter""") + + def _GetFormat(self): + if 'format' in self: + return self['format'] + else: + return None + + def _SetFormat(self, val): + if val not in YOUTUBE_QUERY_VALID_FORMAT_PARAMETERS: + raise YouTubeError('Format must be one of: %s ' % + ' '.join(YOUTUBE_QUERY_VALID_FORMAT_PARAMETERS)) + self['format'] = val + + format = property(_GetFormat, _SetFormat, + doc="""The format query parameter""") + + def _GetRacy(self): + if 'racy' in self: + return self['racy'] + else: + return None + + def _SetRacy(self, val): + if val not in YOUTUBE_QUERY_VALID_RACY_PARAMETERS: + raise YouTubeError('Racy must be one of: %s ' % + ' '.join(YOUTUBE_QUERY_VALID_RACY_PARAMETERS)) + self['racy'] = val + + racy = property(_GetRacy, _SetRacy, + doc="""The racy query parameter""") + + def _GetLanguageRestriction(self): + if 'lr' in self: + return self['lr'] + else: + return None + + def _SetLanguageRestriction(self, val): + self['lr'] = val + + lr = property(_GetLanguageRestriction, _SetLanguageRestriction, + doc="""The lr (language restriction) query parameter""") + + def _GetIPRestriction(self): + if 'restriction' in self: + return self['restriction'] + else: + return None + + def _SetIPRestriction(self, val): + self['restriction'] = val + + restriction = property(_GetIPRestriction, _SetIPRestriction, + doc="""The restriction query parameter""") + + def _GetLocation(self): + if 'location' in self: + return self['location'] + else: + return None + + def _SetLocation(self, val): + self['location'] = val + + location = property(_GetLocation, _SetLocation, + doc="""The location query parameter""") + + + +class YouTubeUserQuery(YouTubeVideoQuery): + + """Subclasses YouTubeVideoQuery to perform user-specific queries. 
+ + Attributes are set dynamically via properties. Properties correspond to + the standard Google Data API query parameters with YouTube Data API + extensions. + """ + + def __init__(self, username=None, feed_type=None, subscription_id=None, + text_query=None, params=None, categories=None): + + uploads_favorites_playlists = ('uploads', 'favorites', 'playlists') + + if feed_type is 'subscriptions' and subscription_id and username: + feed = "http://%s/feeds/users/%s/%s/%s" % (YOUTUBE_SERVER, username, + feed_type, subscription_id) + elif feed_type is 'subscriptions' and not subscription_id and username: + feed = "http://%s/feeds/users/%s/%s" % (YOUTUBE_SERVER, username, + feed_type) + elif feed_type in uploads_favorites_playlists: + feed = "http://%s/feeds/users/%s/%s" % (YOUTUBE_SERVER, username, + feed_type) + else: + feed = "http://%s/feeds/users" % (YOUTUBE_SERVER) + + YouTubeVideoQuery.__init__(self, feed, text_query=text_query, + params=params, categories=categories) + + +class YouTubePlaylistQuery(YouTubeVideoQuery): + + """Subclasses YouTubeVideoQuery to perform playlist-specific queries. + + Attributes are set dynamically via properties. Properties correspond to + the standard Google Data API query parameters with YouTube Data API + extensions. + """ + + def __init__(self, playlist_id, text_query=None, params=None, + categories=None): + if playlist_id: + feed = "http://%s/feeds/playlists/%s" % (YOUTUBE_SERVER, playlist_id) + else: + feed = "http://%s/feeds/playlists" % (YOUTUBE_SERVER) + + YouTubeVideoQuery.__init__(self, feed, text_query=text_query, + params=params, categories=categories) diff --git a/gam/lastupdatecheck.txt b/gam/lastupdatecheck.txt new file mode 100644 index 00000000000..4d54b58403c --- /dev/null +++ b/gam/lastupdatecheck.txt @@ -0,0 +1 @@ +1363798939 \ No newline at end of file diff --git a/gam/oauth.txt b/gam/oauth.txt new file mode 100644 index 00000000000..88ddda798a8 --- /dev/null +++ b/gam/oauth.txt @@ -0,0 +1,88 @@ +puppetlabs.com +ccopy_reg +_reconstructor +p0 +(cgdata.auth +OAuthToken +p1 +c__builtin__ +object +p2 +Ntp3 +Rp4 +(dp5 +S'scopes' +p6 +(lp7 +S'https://www.googleapis.com/auth/userinfo.email' +p8 +aS'https://apps-apis.google.com/a/feeds/groups/' +p9 +aS'https://apps-apis.google.com/a/feeds/alias/' +p10 +aS'https://apps-apis.google.com/a/feeds/policies/' +p11 +aS'https://apps-apis.google.com/a/feeds/user/' +p12 +aS'https://apps-apis.google.com/a/feeds/emailsettings/2.0/' +p13 +aS'https://apps-apis.google.com/a/feeds/calendar/resource/' +p14 +aS'https://apps-apis.google.com/a/feeds/compliance/audit/' +p15 +aS'https://apps-apis.google.com/a/feeds/domain/' +p16 +aS'https://www.googleapis.com/auth/apps/reporting/audit.readonly' +p17 +aS'https://www.googleapis.com/auth/apps.groups.settings' +p18 +aS'https://www.google.com/m8/feeds' +p19 +aS'https://www.google.com/calendar/feeds/' +p20 +aS'https://www.googleapis.com/auth/calendar' +p21 +aS'https://www.google.com/hosted/services/v1.0/reports/ReportingData' +p22 +asS'secret' +p23 +S'y0MTYjHS2m_y41uIzzR__ICS' +p24 +sS'key' +p25 +S'1/fnVgchm2TBAsUORjALEe3-05ObFmCqhEKuAqvYl0wqc' +p26 +sS'oauth_input_params' +p27 +g0 +(cgdata.auth +OAuthInputParams +p28 +g2 +Ntp29 +Rp30 +(dp31 +S'_consumer' +p32 +g0 +(cgdata.oauth +OAuthConsumer +p33 +g2 +Ntp34 +Rp35 +(dp36 +g23 +S'anonymous' +p37 +sg25 +g37 +sbsS'requestor_id' +p38 +NsS'rsa_key' +p39 +NsS'_OAuthInputParams__signature_strategy' +p40 +S'HMAC_SHA1' +p41 +sbsb. 
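The oauth.txt contents just above are GAM's stored credential: the Google Apps domain on the first line, followed by a protocol-0 Python pickle of a gdata.auth.OAuthToken carrying the authorized scopes, key, and secret. A minimal inspection sketch of that layout follows; it assumes the bundled gdata package is importable (GAM of this era targets Python 2), and the relative path is only illustrative.

import pickle

# Read the domain line, then unpickle the gdata.auth.OAuthToken that follows it.
with open('gam/oauth.txt', 'rb') as f:
    domain = f.readline().strip()   # first line: the Google Apps domain
    token = pickle.load(f)          # requires gdata.auth on sys.path to reconstruct

print(domain)
print(token.scopes)                 # the OAuth scopes listed in the dump above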
\ No newline at end of file diff --git a/gam/whatsnew.txt b/gam/whatsnew.txt new file mode 100755 index 00000000000..1fd5329d3b3 --- /dev/null +++ b/gam/whatsnew.txt @@ -0,0 +1,154 @@ +GAM 2.55 + -Fix change in Google APIs broke "gam whatis" command. + -Fix change in Google APIs broke "gam info domain" command on CNAME Verification Status message. + +GAM 2.54 + -Fix a stupid bug that broke "gam print users" when used without additional attributes. + -Another fix for outbound gateway settings on "gam info domain" + -Get this whatsnew.txt doc up to date. + +GAM 2.53 + -Two new group settings, spam_moderation_level and include_in_global_address_list allow further customization of your Google Groups. + -Error reporting for mailbox delegation has been further improved, GAM does a better job of pinpointing why a delegation failed. + -Fixed updating and deleting domain and default users for calendar ACLs + -proper error handling for adding and removing group members and owners + -Fixed error on gam info domain caused by failure to retrieve outbound gateway settings. + -An EXPERIMENTAL 64-bit build of GAM for Windows is now available. I do not expect it will be any bit faster for most GAM commands since most delay + is due to network I/O. However, some GAM commands like "gam print users", when run against large domains (10,000+ users), use a large amount of memory + and resources due to result size. In these scenarios, the x64 build MIGHT prove faster. If you try the x64 build, please report how it worked back to + the mailing list. "It feels faster" is nice but hard numbers with details of what you did are better :-) Note that if you're using the Python source + build and your Python is 64-bit, you're already using 64-bit GAM :-) + +GAM 2.52 + -It's a dud! Major bug caused me to pull this version 10 minutes after release :-) + +GAM 2.51 + -New gam calendar wipe command allows clearing all data off a user's primary calendar + -create user and update user commands now support setting user's org. + -gam whatis command allows looking up an email address to determine if it's a user, alias or group. + -gam delete user no longer renames a user by default since undelete is now in CPanel. Added optional dorename parameter to force old renaming behavior. + -Fix issue that broke gam delete resource command + -GAM now offers to remember your client secret and key when entered the first time + -various other bug fixes + +GAM 2.5 + -GAM now handles and retries errors consistently and provides nice error messages. Long running GAM processes + like "gam all users" should be much stabler now. Death to the 1000/Unknown errors! + This involved some major changes to the Google API calls so if you run into problems, try + downgrading to 2.3.1 and see if they go away. Be sure to submit bug reports! + -GAM checks for updates + -New parameters for gam create user and gam update user + -New parameters for gam print group: owners, members and settings + -GAM now works for delegated admins with user read/create/update/delete API rights + -gam update group add owner now only adds the user as a group owner, not a member (Google Group member + and owner status are independant of each other) + -gam update group add member no longer revokes user's owner rights if they have them + -gam info group now shows owners who are not a member of the group + -gam now works around the group settings "Backend Error" by making an HTTP request to the groups website. 
+ This workaround may cease to work if performed on more than a few hundred groups at a time. + -moving large numbers of users to an Organization is now more reliable and is performed 25 users at a time. + -gam print users aliases now makes only 1 API call to retrieve all user aliases + -New commands "gam oauth info" and "gam oauth revoke" allow further OAuth token management + -gam info domain now shows the unique customer id + +GAM 2.3.1 + -Fixes to add calendar command + -Allow updating and removal of special Calendar ACL users domain and default + -pop commands now work without supplying all arguments (defaults to enable for all mail and keep) + -New "file" argument for signature and vacation commands allows specifying a file with message content. + -"gam create group" now only requires group name argument, rest are optional. + -special user * (everyone in domain) can now be added to a group via GAM + -print groups, print resources, print aliases and print orgs commands now output proper CSV + -Dito company information now displayed on OAuth token create + +GAM 2.3 + + -GAM is now owned by Dito (www.ditoweb.com), the Google Apps Experts! See announcement and details at http://code.google.com/p/google-apps-manager + -New user profile photo management commands can update, get and delete user profile photos + -GAM now gracefully handles cross-domain mailbox delegations by using (or giving the delegate) an alias in the mailbox's domain. + -"gam user XXXX show delegates" now has optional argument "csv" to print existing delegations in CSV format + -GAM can now properly rename and delete long usernames by ensuring that the renamed user is max 64 characters in length + -"gam print groups" now has optional arguments nousermanagedgroups and onlyusermanagedgroups allowing user managed groups to be excluded from output or print user managed groups exclusively. + +GAM 2.2 + + -Update Calendar ACLs command, update user calendar settings command and ability to set calendar settings when subscribing user + -Delete Gmail labels command + -Fixes for *nix CSV formatting + -Fixes to make Windows and *nix generated oauth.txt files compatible + -"gam info user" now shows mailbox quota and user organization + -"gam update user" can now handle change of user's domain in renames. "gam multi" commands now fully deprecated. + -Fix reply_to and a few other group settings were never getting updated. + -"gam info group" now makes 3 efficient API calls rather than one per member/owner of the group greatly increasing performance with large groups + -GAM should do a better job of always printing out full email address instead of just username. If you see GAM reporting only the username and not the full email address, please report it as a bug. + -All OAuth scopes are now selected by default. + +GAM 2.1.1 + + -Fix to prevent unnecessary call to Groups Provisioning API when viewing detailed group settings + -should be show_in_group_directory not show_in_groups_directory. + +GAM 2.1 + + -New Reporting API Support allows you to pull 5 different daily reports: accounts, activity, disk_space, email_clients and summary. + -Fix for Adding calendars to a user's list of calendars. Bug in 2.0 meant calendar was always added to the calendar list of the admin who authorized GAM, not the target user. + -GAM now looks for an environment variable called OAUTHFILE. If it exists, GAM will use that file instead of oauth.txt for authentication. This allows admins of many Google Apps domains to switch quickly between domains. 
+ -Fixes for many "gam print users" issues. Thanks to Craig Box for the patch. + +GAM 2.0 + +-Group Settings commands allow you to update Google Group settings +-Calendar commands allow you to grant access to calendars and modify user's list of calendars +-Update Admin Settings like the logo, outbound gateway, email migration and more +-OAuth is now the default authentication method. Support for username/password ClientLogin has been removed. +-Vacation/Away messages can now have a start and end date. They can also be limited to within the domain only. +-Further work to make all GAM commands multi-domain friendly. +-Lot's more bugfixes! look at the Wiki pages for details + +GAM 1.9.1 + +-"gam print postini" will print all of the Postini Batch commands necessary to "mirror" Google Apps email addresses + into a Postini standalone instance. + +-"gam version" will print details about the version of GAM you are using. + +GAM 1.9 - "Baby Steps" + +GAM 1.9 is dedicated to David, my 13 month year old son. Whose just starting to step out into the world this week. + +-whatnew.txt is new (is that an oxymoron?) + +-Share or Hide users profile from autocomplete and contacts search. + "gam user jsmith show profile" + "gam user jsmith profile share" + "gam group asked-to-be-hidden profile unshare" + + Profile modifications only work with OAuth, not ClientLogin (username/password entered into GAM). + Since the profile API uses a scope GAM was not previously making use of, you'll need to re-run + "gam oauth request" to include the Profile API scope. + +-Numerous actions can now be performed for all users in a given Organizational + Unit just like they can be for a group or all users. i.e. "gam ou Students webclips off". + +-Provisioning API OAuth scope has been subdivided into user, group, alias and ou scopes + offering finer granularity. + +-"gam all users" will now include all users across primary and secondary domains instead of just primary domain users. + +-"gam info user" will show all email aliases for a user, not just those in the primary domain. + +-"gam print users" with any extra arguments would fail, this should be fixed now. + +-"gam info group" and "gam print groups" should no longer fail for groups with custom permissions. + +GAM 1.8 + +-OAuth Support - GAM now supports OAuth Authentication. Instead of providing GAM your username and password, you grant GAM access to selected APIs from within your Google Account. This has a number of advantages: + -With OAuth GAM doesn't need to know your password. + -OAuth tokens don't expire, once you grant GAM OAuth access, GAM will have access until you revoke it within your Google account. + -OAuth has the concept of scopes, limiting the areas and services that access is granted to. This allows you to only provide GAM with the privileges it needs. + -More info about OAuth support is on it's way. But for now, you can try OAuth access by running "gam oauth request". + +-The settings filter command http://code.google.com/p/google-apps-manager/wiki/ExamplesEmailSettings#Create_a_Filter now has more actions including forward, star, trash and never send to spam. + +-Downloading Audit Exports now has partial resume support. GAM will not re-download files that already exist on the local drive. If a large export download fails you should delete the last file GAM was working on since it's incomplete and then restart the process, GAM will pick up with the last file. 
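The GAM 2.1 notes above describe the OAUTHFILE environment variable: when set, GAM reads that file instead of oauth.txt, which lets admins of several Google Apps domains switch tokens quickly. Below is a small sketch of that workflow; the wrapper function, token directory, and file naming are assumptions for illustration, and only the OAUTHFILE variable and the "gam info domain" command come from the notes above.

import os
import subprocess

def run_gam_for_domain(domain, *gam_args):
    # Point GAM at a per-domain token file (layout assumed), then invoke it.
    env = dict(os.environ)
    env['OAUTHFILE'] = os.path.expanduser('~/gam-tokens/%s-oauth.txt' % domain)
    return subprocess.call(['gam'] + list(gam_args), env=env)

run_gam_for_domain('puppetlabs.com', 'info', 'domain')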
\ No newline at end of file diff --git a/oh-my-zsh/.oh-my-zsh b/oh-my-zsh/.oh-my-zsh new file mode 120000 index 00000000000..cdec60c2447 --- /dev/null +++ b/oh-my-zsh/.oh-my-zsh @@ -0,0 +1 @@ +/Users/kirby/.oh-my-zsh \ No newline at end of file diff --git a/vim/vim.symlink/plugin/gnupg.vim b/vim/vim.symlink/plugin/gnupg.vim index 09116d6214a..97539c575d1 100644 --- a/vim/vim.symlink/plugin/gnupg.vim +++ b/vim/vim.symlink/plugin/gnupg.vim @@ -1,10 +1,13 @@ " Name: gnupg.vim -" Version: $Id: gnupg.vim 3026 2010-01-27 08:18:04Z mbr $ -" Author: Markus Braun +" Last Change: 2012 May 31 +" Maintainer: James McCoy +" Original Author: Markus Braun " Summary: Vim plugin for transparent editing of gpg encrypted files. -" Licence: This program is free software; you can redistribute it and/or -" modify it under the terms of the GNU General Public License. -" See http://www.gnu.org/copyleft/gpl.txt +" License: This program is free software; you can redistribute it and/or +" modify it under the terms of the GNU General Public License +" as published by the Free Software Foundation; either version +" 2 of the License, or (at your option) any later version. +" See http://www.gnu.org/copyleft/gpl-2.0.txt " " Section: Documentation {{{1 " @@ -15,7 +18,7 @@ " a file the content is decrypted, when opening a new file the script will " ask for the recipients of the encrypted file. The file content will be " encrypted to all recipients before it is written. The script turns off -" viminfo and swapfile to increase security. +" viminfo, swapfile, and undofile to increase security. " " Installation: {{{2 " @@ -71,7 +74,8 @@ " If set to 1 symmetric encryption is preferred for new files. Defaults to 0. " " g:GPGPreferArmor -" If set to 1 armored data is preferred for new files. Defaults to 0. +" If set to 1 armored data is preferred for new files. Defaults to 0 +" unless a "*.asc" file is being edited. " " g:GPGPreferSign " If set to 1 signed data is preferred for new files. Defaults to 0. @@ -80,9 +84,19 @@ " If set, these recipients are used as defaults when no other recipient is " defined. This variable is a Vim list. Default is unset. " +" g:GPGUsePipes +" If set to 1, use pipes instead of temporary files when interacting with +" gnupg. When set to 1, this can cause terminal-based gpg agents to not +" display correctly when prompting for passwords. Defaults to 0. +" +" g:GPGHomedir +" If set, specifies the directory that will be used for GPG's homedir. +" This corresponds to gpg's --homedir option. This variable is a Vim +" string. +" " Known Issues: {{{2 " -" In some cases gvim can't decryt files +" In some cases gvim can't decrypt files " This is caused by the fact that a running gvim has no TTY and thus gpg is " not able to ask for the passphrase by itself. This is a problem for Windows @@ -109,25 +123,28 @@ " - Erik Remmelzwaal for patch to enable windows support and patient beta " testing. " - Lars Becker for patch to make gpg2 working. -" - Thomas Arendsen Hein for patch to convert encoding of gpg output +" - Thomas Arendsen Hein for patch to convert encoding of gpg output. " - Karl-Heinz Ruskowski for patch to fix unknown recipients and trust model " and patient beta testing. " - Giel van Schijndel for patch to get GPG_TTY dynamically. " - Sebastian Luettich for patch to fix issue with symmetric encryption an set " recipients. -" - Tim Swast for patch to generate signed files +" - Tim Swast for patch to generate signed files. 
+" - James Vega for patches for better '*.asc' handling, better filename +" escaping and better handling of multiple keyrings. " " Section: Plugin header {{{1 " guard against multiple loads {{{2 -if (exists("g:loaded_gnupg") || &cp || exists("#BufReadPre#*.\(gpg\|asc\|pgp\)")) +if (exists("g:loaded_gnupg") || &cp || exists("#GnuPG")) finish endif -let g:loaded_gnupg = "$Revision: 3026 $" +let g:loaded_gnupg = '2.5' +let s:GPGInitRun = 0 " check for correct vim version {{{2 -if (v:version < 700) - echohl ErrorMsg | echo 'plugin gnupg.vim requires Vim version >= 7.0' | echohl None +if (v:version < 702) + echohl ErrorMsg | echo 'plugin gnupg.vim requires Vim version >= 7.2' | echohl None finish endif @@ -136,19 +153,17 @@ endif augroup GnuPG autocmd! - " initialize the internal variables - autocmd BufNewFile,BufReadPre,FileReadPre *.\(gpg\|asc\|pgp\) call s:GPGInit() - " force the user to edit the recipient list if he opens a new file and public - " keys are preferred - autocmd BufNewFile *.\(gpg\|asc\|pgp\) if (exists("g:GPGPreferSymmetric") && g:GPGPreferSymmetric == 0) | call s:GPGEditRecipients() | endif " do the decryption - autocmd BufReadPost,FileReadPost *.\(gpg\|asc\|pgp\) call s:GPGDecrypt() + autocmd BufReadCmd *.\(gpg\|asc\|pgp\) call s:GPGInit(1) + autocmd BufReadCmd *.\(gpg\|asc\|pgp\) call s:GPGDecrypt(1) + autocmd BufReadCmd *.\(gpg\|asc\|pgp\) call s:GPGBufReadPost() + autocmd FileReadCmd *.\(gpg\|asc\|pgp\) call s:GPGInit(0) + autocmd FileReadCmd *.\(gpg\|asc\|pgp\) call s:GPGDecrypt(0) " convert all text to encrypted text before writing - autocmd BufWritePre,FileWritePre *.\(gpg\|asc\|pgp\) call s:GPGEncrypt() - " undo the encryption so we are back in the normal text, directly - " after the file has been written. - autocmd BufWritePost,FileWritePost *.\(gpg\|asc\|pgp\) call s:GPGEncryptPost() + autocmd BufWriteCmd *.\(gpg\|asc\|pgp\) call s:GPGBufWritePre() + autocmd BufWriteCmd,FileWriteCmd *.\(gpg\|asc\|pgp\) call s:GPGInit(0) + autocmd BufWriteCmd,FileWriteCmd *.\(gpg\|asc\|pgp\) call s:GPGEncrypt() " cleanup on leaving vim autocmd VimLeave *.\(gpg\|asc\|pgp\) call s:GPGCleanup() @@ -157,6 +172,7 @@ augroup END " Section: Constants {{{1 let s:GPGMagicString = "\t \t" +let s:keyPattern = '\%(0x\)\=[[:xdigit:]]\{8,16}' " Section: Highlight setup {{{1 @@ -166,19 +182,36 @@ highlight default link GPGHighlightUnknownRecipient ErrorMsg " Section: Functions {{{1 -" Function: s:GPGInit() {{{2 +" Function: s:GPGInit(bufread) {{{2 " " initialize the plugin +" The bufread argument specifies whether this was called due to BufReadCmd " -function s:GPGInit() - call s:GPGDebug(3, ">>>>>>>> Entering s:GPGInit()") +function s:GPGInit(bufread) + call s:GPGDebug(3, printf(">>>>>>>> Entering s:GPGInit(%d)", a:bufread)) + + " For FileReadCmd, we're reading the contents into another buffer. If that + " buffer is also destined to be encrypted, then these settings will have + " already been set, otherwise don't set them since it limits the + " functionality of the cleartext buffer. + if a:bufread + " we don't want a swap file, as it writes unencrypted data to disk + setl noswapfile + + " if persistent undo is present, disable it for this buffer + if exists('+undofile') + setl noundofile + endif - " first make sure nothing is written to ~/.viminfo while editing - " an encrypted file. - set viminfo= + " first make sure nothing is written to ~/.viminfo while editing + " an encrypted file. 
+ set viminfo= + endif - " we don't want a swap file, as it writes unencrypted data to disk - set noswapfile + " the rest only has to be run once + if s:GPGInitRun + return + endif " check what gpg command to use if (!exists("g:GPGExecutable")) @@ -197,7 +230,12 @@ function s:GPGInit() " check if armored files are preferred if (!exists("g:GPGPreferArmor")) - let g:GPGPreferArmor = 0 + " .asc files should be armored as that's what the extension is used for + if expand('') =~ '\.asc$' + let g:GPGPreferArmor = 1 + else + let g:GPGPreferArmor = 0 + endif endif " check if signed files are preferred @@ -210,6 +248,16 @@ function s:GPGInit() let g:GPGDefaultRecipients = [] endif + " prefer not to use pipes since it can garble gpg agent display + if (!exists("g:GPGUsePipes")) + let g:GPGUsePipes = 0 + endif + + " allow alternate gnupg homedir + if (!exists('g:GPGHomedir')) + let g:GPGHomedir = '' + endif + " print version call s:GPGDebug(1, "gnupg.vim ". g:loaded_gnupg) @@ -230,16 +278,21 @@ function s:GPGInit() let s:GPGCommand = g:GPGExecutable . " --no-use-agent" endif - " don't use tty in gvim + " don't use tty in gvim except for windows: we get their a tty for free. " FIXME find a better way to avoid an error. " with this solution only --use-agent will work - if (has("gui_running")) + if (has("gui_running") && !has("gui_win32")) let s:GPGCommand = s:GPGCommand . " --no-tty" endif " setup shell environment for unix and windows let s:shellredirsave = &shellredir let s:shellsave = &shell + let s:shelltempsave = &shelltemp + " noshelltemp isn't currently supported on Windows, but it doesn't cause any + " errors and this future proofs us against requiring changes if Windows + " gains noshelltemp functionality + let s:shelltemp = !g:GPGUsePipes if (has("unix")) " unix specific settings let s:shellredir = ">%s 2>&1" @@ -255,6 +308,7 @@ function s:GPGInit() call s:GPGDebug(3, "shellredirsave: " . s:shellredirsave) call s:GPGDebug(3, "shellsave: " . s:shellsave) + call s:GPGDebug(3, "shelltempsave: " . s:shelltempsave) call s:GPGDebug(3, "shell: " . s:shell) call s:GPGDebug(3, "shellcmdflag: " . &shellcmdflag) @@ -265,14 +319,7 @@ function s:GPGInit() call s:GPGDebug(3, "shell implementation: " . resolve(s:shell)) " find the supported algorithms - let commandline = s:GPGCommand . " --version" - call s:GPGDebug(2, "command: ". commandline) - let &shellredir = s:shellredir - let &shell = s:shell - let output = system(commandline) - let &shellredir = s:shellredirsave - let &shell = s:shellsave - call s:GPGDebug(2, "output: ". output) + let output = s:GPGSystem({ 'level': 2, 'args': '--version' }) let s:GPGPubkey = substitute(output, ".*Pubkey: \\(.\\{-}\\)\n.*", "\\1", "") let s:GPGCipher = substitute(output, ".*Cipher: \\(.\\{-}\\)\n.*", "\\1", "") @@ -284,6 +331,7 @@ function s:GPGInit() call s:GPGDebug(2, "hashing algorithms: " . s:GPGHash) call s:GPGDebug(2, "compression algorithms: " . 
s:GPGCompress) call s:GPGDebug(3, "<<<<<<<< Leaving s:GPGInit()") + let s:GPGInitRun = 1 endfunction " Function: s:GPGCleanup() {{{2 @@ -300,34 +348,41 @@ function s:GPGCleanup() call s:GPGDebug(3, "<<<<<<<< Leaving s:GPGCleanup()") endfunction -" Function: s:GPGDecrypt() {{{2 +" Function: s:GPGDecrypt(bufread) {{{2 " " decrypt the buffer and find all recipients of the encrypted file +" The bufread argument specifies whether this was called due to BufReadCmd " -function s:GPGDecrypt() - call s:GPGDebug(3, ">>>>>>>> Entering s:GPGDecrypt()") - - " switch to binary mode to read the encrypted file - set bin +function s:GPGDecrypt(bufread) + call s:GPGDebug(3, printf(">>>>>>>> Entering s:GPGDecrypt(%d)", a:bufread)) " get the filename of the current buffer - let filename = escape(expand("%:p"), '\"') + let filename = expand(":p") - " clear GPGEncrypted, GPGRecipients and GPGOptions - let b:GPGEncrypted = 0 - let b:GPGRecipients = [] + " clear GPGRecipients and GPGOptions + let b:GPGRecipients = g:GPGDefaultRecipients let b:GPGOptions = [] + " File doesn't exist yet, so nothing to decrypt + if empty(glob(filename)) + return + endif + + " Only let this if the file actually exists, otherwise GPG functionality + " will be disabled when editing a buffer that doesn't yet have a backing + " file + let b:GPGEncrypted = 0 + " find the recipients of the file - let commandline = s:GPGCommand . " --verbose --decrypt --list-only --dry-run --batch --no-use-agent --logger-fd 1 \"" . filename . "\"" - call s:GPGDebug(3, "command: " . commandline) - let &shellredir = s:shellredir - let &shell = s:shell - let output = system(commandline) - let &shellredir = s:shellredirsave - let &shell = s:shellsave - call s:GPGDebug(3, "output: ". output) + let cmd = { 'level': 3 } + let cmd.args = '--verbose --decrypt --list-only --dry-run --batch --no-use-agent --logger-fd 1 ' . shellescape(filename) + let output = s:GPGSystem(cmd) + + " Suppress the "N more lines" message when editing a file, not when reading + " the contents of a file into a buffer + let silent = a:bufread ? 'silent ' : '' + let asymmPattern = 'gpg: public key is ' . s:keyPattern " check if the file is symmetric/asymmetric encrypted if (match(output, "gpg: encrypted with [[:digit:]]\\+ passphrase") >= 0) " file is symmetric encrypted @@ -347,7 +402,7 @@ function s:GPGDecrypt() echo echohl None endif - elseif (match(output, "gpg: public key is [[:xdigit:]]\\{8}") >= 0) + elseif (match(output, asymmPattern) >= 0) " file is asymmetric encrypted let b:GPGEncrypted = 1 call s:GPGDebug(1, "this file is asymmetric encrypted") @@ -355,10 +410,10 @@ function s:GPGDecrypt() let b:GPGOptions += ["encrypt"] " find the used public keys - let start = match(output, "gpg: public key is [[:xdigit:]]\\{8}") + let start = match(output, asymmPattern) while (start >= 0) let start = start + strlen("gpg: public key is ") - let recipient = strpart(output, start, 8) + let recipient = matchstr(output, s:keyPattern, start) call s:GPGDebug(1, "recipient is " . recipient) let name = s:GPGNameToID(recipient) if (strlen(name) > 0) @@ -370,7 +425,7 @@ function s:GPGDecrypt() echom "The recipient \"" . recipient . "\" is not in your public keyring!" echohl None end - let start = match(output, "gpg: public key is [[:xdigit:]]\\{8}", start) + let start = match(output, asymmPattern, start) endwhile else " file is not encrypted @@ -379,7 +434,7 @@ function s:GPGDecrypt() echohl GPGWarning echom "File is not encrypted, all GPG functions disabled!" 
echohl None - set nobin + exe printf('%sr %s', silent, fnameescape(filename)) call s:GPGDebug(3, "<<<<<<<< Leaving s:GPGDecrypt()") return endif @@ -394,37 +449,64 @@ function s:GPGDecrypt() " since even with the --quiet option passphrase typos will be reported, " we must redirect stderr (using shell temporarily) call s:GPGDebug(1, "decrypting file") - let commandline = "'[,']!" . s:GPGCommand . " --quiet --decrypt " . s:stderrredirnull - call s:GPGDebug(1, "command: " . commandline) - let &shellredir = s:shellredir - let &shell = s:shell - execute commandline - let &shellredir = s:shellredirsave - let &shell = s:shellsave + let cmd = { 'level': 1, 'ex': silent . 'r !' } + let cmd.args = '--quiet --decrypt ' . shellescape(filename, 1) + call s:GPGExecute(cmd) + if (v:shell_error) " message could not be decrypted - silent u echohl GPGError let blackhole = input("Message could not be decrypted! (Press ENTER)") echohl None - bwipeout - set nobin + " Only wipeout the buffer if we were creating one to start with. + " FileReadCmd just reads the content into the existing buffer + if a:bufread + silent bwipeout! + endif call s:GPGDebug(3, "<<<<<<<< Leaving s:GPGDecrypt()") return endif - " turn off binary mode - set nobin - - " call the autocommand for the file minus .gpg$ - execute ":doautocmd BufReadPost " . escape(expand("%:r"), ' *?\"'."'") - call s:GPGDebug(2, "called autocommand for " . escape(expand("%:r"), ' *?\"'."'")) - " refresh screen redraw! call s:GPGDebug(3, "<<<<<<<< Leaving s:GPGDecrypt()") endfunction +" Function: s:GPGBufReadPost() {{{2 +" +" Handle functionality specific to opening a file for reading rather than +" reading the contents of a file into a buffer +" +function s:GPGBufReadPost() + call s:GPGDebug(3, ">>>>>>>> Entering s:GPGBufReadPost()") + " In order to make :undo a no-op immediately after the buffer is read, + " we need to do this dance with 'undolevels'. Actually discarding the undo + " history requires performing a change after setting 'undolevels' to -1 and, + " luckily, we have one we need to do (delete the extra line from the :r + " command) + let levels = &undolevels + set undolevels=-1 + silent 1delete + let &undolevels = levels + " call the autocommand for the file minus .gpg$ + silent execute ':doautocmd BufReadPost ' . fnameescape(expand(':r')) + call s:GPGDebug(2, 'called autocommand for ' . fnameescape(expand(':r'))) + call s:GPGDebug(3, "<<<<<<<< Leaving s:GPGBufReadPost()") +endfunction + +" Function: s:GPGBufWritePre() {{{2 +" +" Handle functionality specific to saving an entire buffer to a file rather +" than saving a partial buffer +" +function s:GPGBufWritePre() + call s:GPGDebug(3, ">>>>>>>> Entering s:GPGBufWritePre()") + " call the autocommand for the file minus .gpg$ + silent execute ':doautocmd BufWritePre ' . fnameescape(expand(':r')) + call s:GPGDebug(2, 'called autocommand for ' . fnameescape(expand(':r'))) + call s:GPGDebug(3, "<<<<<<<< Leaving s:GPGBufWritePre()") +endfunction + " Function: s:GPGEncrypt() {{{2 " " encrypts the buffer to all previous recipients @@ -432,10 +514,6 @@ endfunction function s:GPGEncrypt() call s:GPGDebug(3, ">>>>>>>> Entering s:GPGEncrypt()") - " save window view - let s:GPGWindowView = winsaveview() - call s:GPGDebug(2, "saved window view " . string(s:GPGWindowView)) - " store encoding and switch to a safe one if (&fileencoding != &encoding) let s:GPGEncoding = &encoding @@ -446,13 +524,10 @@ function s:GPGEncrypt() call s:GPGDebug(2, "encoding and fileencoding are the same (\"" . &encoding . 
"\"), not switching") endif - " switch buffer to binary mode - set bin - " guard for unencrypted files - if (!exists("b:GPGEncrypted") || b:GPGEncrypted == 0) + if (exists("b:GPGEncrypted") && b:GPGEncrypted == 0) echohl GPGError - let blackhole = input("Message could not be encrypted! File might be empty! (Press ENTER)") + let blackhole = input("Message could not be encrypted! (Press ENTER)") echohl None call s:GPGDebug(3, "<<<<<<<< Leaving s:GPGEncrypt()") return @@ -482,6 +557,10 @@ function s:GPGEncrypt() let options = options . " --" . option . " " endfor + if (!exists('b:GPGRecipients')) + let b:GPGRecipients = [] + endif + " check here again if all recipients are available in the keyring let [ recipients, unknownrecipients ] = s:GPGCheckRecipients(b:GPGRecipients) @@ -501,57 +580,14 @@ function s:GPGEncrypt() for gpgid in recipients let options = options . " -r " . gpgid endfor - else - if (match(b:GPGOptions, "encrypt") >= 0) - echohl GPGError - echom "There are no recipients!!" - echom "Please use GPGEditRecipients to correct!!" - echo - echohl None - endif endif " encrypt the buffer - let commandline = "'[,']!" . s:GPGCommand . " --quiet --no-encrypt-to " . options . " " . s:stderrredirnull - call s:GPGDebug(1, "command: " . commandline) - let &shellredir = s:shellredir - let &shell = s:shell - silent execute commandline - let &shellredir = s:shellredirsave - let &shell = s:shellsave - if (v:shell_error) " message could not be encrypted - " delete content of the buffer to be sure no data is written unencrypted - " content will be recovered in GPGEncryptPost() - silent normal! 1GdG - - echohl GPGError - let blackhole = input("Message could not be encrypted! File might be empty! (Press ENTER)") - echohl None - call s:GPGDebug(3, "<<<<<<<< Leaving s:GPGEncrypt()") - return - endif - - call s:GPGDebug(3, "<<<<<<<< Leaving s:GPGEncrypt()") -endfunction - -" Function: s:GPGEncryptPost() {{{2 -" -" undo changes don by encrypt, after writing -" -function s:GPGEncryptPost() - call s:GPGDebug(3, ">>>>>>>> Entering s:GPGEncryptPost()") - - " guard for unencrypted files - if (exists("b:GPGEncrypted") && b:GPGEncrypted == 0) - call s:GPGDebug(3, "<<<<<<<< Leaving s:GPGEncryptPost()") - return - endif - - " undo encryption of buffer content - silent u - - " switch back from binary mode - set nobin + let destfile = tempname() + let cmd = { 'level': 1, 'ex': "'[,']w !" } + let cmd.args = '--quiet --no-encrypt-to ' . options + let cmd.redirect = '>' . shellescape(destfile, 1) + call s:GPGExecute(cmd) " restore encoding if (s:GPGEncoding != "") @@ -559,14 +595,19 @@ function s:GPGEncryptPost() call s:GPGDebug(2, "restored encoding \"" . &encoding . "\"") endif - " restore window view - call winrestview(s:GPGWindowView) - call s:GPGDebug(2, "restored window view" . string(s:GPGWindowView)) - - " refresh screen - redraw! + if (v:shell_error) " message could not be encrypted + " Command failed, so clean up the tempfile + call delete(destfile) + echohl GPGError + let blackhole = input("Message could not be encrypted! (Press ENTER)") + echohl None + call s:GPGDebug(3, "<<<<<<<< Leaving s:GPGEncrypt()") + return + endif - call s:GPGDebug(3, "<<<<<<<< Leaving s:GPGEncryptPost()") + call rename(destfile, resolve(expand(''))) + setl nomodified + call s:GPGDebug(3, "<<<<<<<< Leaving s:GPGEncrypt()") endfunction " Function: s:GPGViewRecipients() {{{2 @@ -638,7 +679,7 @@ function s:GPGEditRecipients() " check if this buffer exists if (!bufexists(editbuffername)) " create scratch buffer - execute 'silent! 
split ' . escape(editbuffername, ' *?\"'."'") + execute 'silent! split ' . fnameescape(editbuffername) " add a autocommand to regenerate the recipients after a write autocmd BufHidden,BufUnload,BufWriteCmd call s:GPGFinishRecipientsBuffer() @@ -648,14 +689,14 @@ function s:GPGEditRecipients() execute 'silent! ' . bufwinnr(editbuffername) . "wincmd w" else " split scratch buffer window - execute 'silent! sbuffer ' . escape(editbuffername, ' *?\"'."'") + execute 'silent! sbuffer ' . fnameescape(editbuffername) " add a autocommand to regenerate the recipients after a write autocmd BufHidden,BufUnload,BufWriteCmd call s:GPGFinishRecipientsBuffer() endif " empty the buffer - silent normal! 1GdG + silent %delete endif " Mark the buffer as a scratch buffer @@ -702,7 +743,7 @@ function s:GPGEditRecipients() let syntaxPattern = "\\(nonexxistinwordinthisbuffer" for name in unknownrecipients let name = "!" . name - let syntaxPattern = syntaxPattern . "\\|" . name + let syntaxPattern = syntaxPattern . "\\|" . fnameescape(name) silent put =name endfor let syntaxPattern = syntaxPattern . "\\)" @@ -720,10 +761,10 @@ function s:GPGEditRecipients() endif " delete the empty first line - silent normal! 1Gdd + silent 1delete " jump to the first recipient - silent normal! G + silent $ endif @@ -755,13 +796,13 @@ function s:GPGFinishRecipientsBuffer() " delete the autocommand autocmd! * - " get the recipients from the scratch buffer let recipients = [] let lines = getline(1,"$") for recipient in lines - " delete all text after magic string - let recipient = substitute(recipient, s:GPGMagicString . ".*$", "", "") + let matches = matchlist(recipient, '^\(.\{-}\)\%(' . s:GPGMagicString . '(ID:\s\+\(' . s:keyPattern . '\)\s\+.*\)\=$') + + let recipient = matches[2] ? matches[2] : matches[1] " delete all spaces at beginning and end of the recipient " also delete a '!' at the beginning of the recipient @@ -802,7 +843,7 @@ function s:GPGFinishRecipientsBuffer() endif " reset modified flag - set nomodified + setl nomodified call s:GPGDebug(3, "<<<<<<<< Leaving s:GPGFinishRecipientsBuffer()") endfunction @@ -860,7 +901,7 @@ function s:GPGEditOptions() " check if this buffer exists if (!bufexists(editbuffername)) " create scratch buffer - execute 'silent! split ' . escape(editbuffername, ' *?\"'."'") + execute 'silent! split ' . fnameescape(editbuffername) " add a autocommand to regenerate the options after a write autocmd BufHidden,BufUnload,BufWriteCmd call s:GPGFinishOptionsBuffer() @@ -870,14 +911,14 @@ function s:GPGEditOptions() execute 'silent! ' . bufwinnr(editbuffername) . "wincmd w" else " split scratch buffer window - execute 'silent! sbuffer ' . escape(editbuffername, ' *?\"'."'") + execute 'silent! sbuffer ' . fnameescape(editbuffername) " add a autocommand to regenerate the options after a write autocmd BufHidden,BufUnload,BufWriteCmd call s:GPGFinishOptionsBuffer() endif " empty the buffer - silent normal! 1GdG + silent %delete endif " Mark the buffer as a scratch buffer @@ -909,10 +950,10 @@ function s:GPGEditOptions() endfor " delete the empty first line - silent normal! 1Gdd + silent 1delete " jump to the first option - silent normal! 
G + silent $ " define highlight if (has("syntax") && exists("g:syntax_on")) @@ -975,7 +1016,7 @@ function s:GPGFinishOptionsBuffer() call setbufvar(b:GPGCorrespondingTo, "&mod", 1) " reset modified flag - set nomodified + setl nomodified call s:GPGDebug(3, "<<<<<<<< Leaving s:GPGFinishOptionsBuffer()") endfunction @@ -1025,14 +1066,9 @@ function s:GPGNameToID(name) call s:GPGDebug(3, ">>>>>>>> Entering s:GPGNameToID()") " ask gpg for the id for a name - let commandline = s:GPGCommand . " --quiet --with-colons --fixed-list-mode --list-keys \"" . a:name . "\"" - call s:GPGDebug(2, "command: ". commandline) - let &shellredir = s:shellredir - let &shell = s:shell - let output = system(commandline) - let &shellredir = s:shellredirsave - let &shell = s:shellsave - call s:GPGDebug(2, "output: ". output) + let cmd = { 'level': 2 } + let cmd.args = '--quiet --with-colons --fixed-list-mode --list-keys ' . shellescape(a:name) + let output = s:GPGSystem(cmd) " when called with "--with-colons" gpg encodes its output _ALWAYS_ as UTF-8, " so convert it, if necessary @@ -1045,30 +1081,41 @@ function s:GPGNameToID(name) let pubseen = 0 let counter = 0 let gpgids = [] + let duplicates = {} let choices = "The name \"" . a:name . "\" is ambiguous. Please select the correct key:\n" for line in lines - let fields = split(line, ":") - " search for the next uid - if (pubseen == 1) - if (fields[0] == "uid") - let choices = choices . " " . fields[9] . "\n" - else - let pubseen = 0 - endif - endif - " search for the next pub - if (pubseen == 0) - if (fields[0] == "pub") - let identity = fields[4] - let gpgids += [identity] - if exists("*strftime") - let choices = choices . counter . ": ID: 0x" . identity . " created at " . strftime("%c", fields[5]) . "\n" + " check if this line has already been processed + if !has_key(duplicates, line) + let duplicates[line] = 1 + + let fields = split(line, ":") + + " search for the next uid + if pubseen + if (fields[0] == "uid") + let choices = choices . " " . fields[9] . "\n" else - let choices = choices . counter . ": ID: 0x" . identity . "\n" + let pubseen = 0 + endif + " search for the next pub + else + if (fields[0] == "pub") + " Ignore keys which are not usable for encryption + if fields[11] !~? 'e' + continue + endif + + let identity = fields[4] + let gpgids += [identity] + if exists("*strftime") + let choices = choices . counter . ": ID: 0x" . identity . " created at " . strftime("%c", fields[5]) . "\n" + else + let choices = choices . counter . ": ID: 0x" . identity . "\n" + endif + let counter = counter+1 + let pubseen = 1 endif - let counter = counter+1 - let pubseen = 1 endif endif @@ -1084,7 +1131,7 @@ function s:GPGNameToID(name) endwhile endif - call s:GPGDebug(3, "<<<<<<<< Leaving s:GPGIDToName()") + call s:GPGDebug(3, "<<<<<<<< Leaving s:GPGNameToID()") return get(gpgids, answer, "") endfunction @@ -1099,14 +1146,9 @@ function s:GPGIDToName(identity) " TODO is the encryption subkey really unique? " ask gpg for the id for a name - let commandline = s:GPGCommand . " --quiet --with-colons --fixed-list-mode --list-keys " . a:identity - call s:GPGDebug(2, "command: ". commandline) - let &shellredir = s:shellredir - let &shell = s:shell - let output = system(commandline) - let &shellredir = s:shellredirsave - let &shell = s:shellsave - call s:GPGDebug(2, "output: ". output) + let cmd = { 'level': 2 } + let cmd.args = '--quiet --with-colons --fixed-list-mode --list-keys ' . 
a:identity + let output = s:GPGSystem(cmd) " when called with "--with-colons" gpg encodes its output _ALWAYS_ as UTF-8, " so convert it, if necessary @@ -1120,8 +1162,14 @@ function s:GPGIDToName(identity) let uid = "" for line in lines let fields = split(line, ":") - if (pubseen == 0) " search for the next pub + + if !pubseen " search for the next pub if (fields[0] == "pub") + " Ignore keys which are not usable for encryption + if fields[11] !~? 'e' + continue + endif + let pubseen = 1 endif else " search for the next uid @@ -1141,6 +1189,68 @@ function s:GPGIDToName(identity) return uid endfunction +function s:GPGPreCmd() + let &shellredir = s:shellredir + let &shell = s:shell + let &shelltemp = s:shelltemp +endfunction + +function s:GPGPostCmd() + let &shellredir = s:shellredirsave + let &shell = s:shellsave + let &shelltemp = s:shelltempsave +endfunction + +" Function: s:GPGSystem(dict) {{{2 +" +" run g:GPGCommand using system(), logging the commandline and output +" Recognized keys are: +" level - Debug level at which the commandline and output will be logged +" args - Arguments to be given to g:GPGCommand +" +" Returns: command output +" +function s:GPGSystem(dict) + let commandline = printf('%s %s', s:GPGCommand, a:dict.args) + if (!empty(g:GPGHomedir)) + let commandline .= ' --homedir ' . shellescape(g:GPGHomedir) + endif + let commandline .= ' ' . s:stderrredirnull + call s:GPGDebug(a:dict.level, "command: ". commandline) + + call s:GPGPreCmd() + let output = system(commandline) + call s:GPGPostCmd() + + call s:GPGDebug(a:dict.level, "output: ". output) + return output +endfunction + +" Function: s:GPGExecute(dict) {{{2 +" +" run g:GPGCommand using :execute, logging the commandline +" Recognized keys are: +" level - Debug level at which the commandline will be logged +" args - Arguments to be given to g:GPGCommand +" ex - Ex command which will be :executed +" redirect - Shell redirect to use, if needed +" +function s:GPGExecute(dict) + let commandline = printf('%s%s %s', a:dict.ex, s:GPGCommand, a:dict.args) + if (!empty(g:GPGHomedir)) + let commandline .= ' --homedir ' . shellescape(g:GPGHomedir, 1) + endif + if (has_key(a:dict, 'redirect')) + let commandline .= ' ' . a:dict.redirect + endif + let commandline .= ' ' . s:stderrredirnull + call s:GPGDebug(a:dict.level, "command: " . commandline) + + call s:GPGPreCmd() + execute commandline + call s:GPGPostCmd() +endfunction + " Function: s:GPGDebug(level, text) {{{2 " " output debug message, if this message has high enough importance @@ -1150,7 +1260,7 @@ function s:GPGDebug(level, text) if exists("g:GPGDebugLevel") && g:GPGDebugLevel >= a:level if exists("g:GPGDebugLog") execute "redir >> " . g:GPGDebugLog - echom "GnuPG: " . a:text + silent echom "GnuPG: " . a:text redir END else echom "GnuPG: " . 
a:text diff --git a/vim/vimrc.symlink b/vim/vimrc.symlink index f38a1cda1fa..924180afa18 100644 --- a/vim/vimrc.symlink +++ b/vim/vimrc.symlink @@ -86,6 +86,7 @@ set hidden " Enable 'hidden' buffers " " Backups set directory=~/.vim/swp " keep swp files under ~/.vim/swap +set backupdir=~/Library/temp " " Searching diff --git a/zsh/zshrc.symlink b/zsh/zshrc.symlink index c244f5197d9..059c3ceb2c2 100644 --- a/zsh/zshrc.symlink +++ b/zsh/zshrc.symlink @@ -10,6 +10,7 @@ ZSH_THEME="robbyrussell" # Example aliases # alias zshconfig="mate ~/.zshrc" # alias ohmyzsh="mate ~/.oh-my-zsh" +alias digs="dig +short" # Set to this to use case-sensitive completion # CASE_SENSITIVE="true" @@ -37,13 +38,14 @@ plugins=(git) source $ZSH/oh-my-zsh.sh # Customize to your needs... -[ -f /opt/boxen/env.sh ] && source /opt/boxen/env.sh +# [ -f /opt/boxen/env.sh ] && source /opt/boxen/env.sh export PATH=/usr/bin:/bin:/usr/sbin:/sbin:/usr/local/bin:/usr/local/git/bin:/usr/local/MacGPG2/bin:/opt/local/bin:/usr/bin/puppet +export RUBYOPT=rubygems # eval "$(rbenv init -)" . /sw/bin/init.sh # Added for boxen -# [ -f /opt/boxen/env.sh ] && source /opt/boxen/env.sh +[ -f /opt/boxen/env.sh ] && source /opt/boxen/env.sh source /opt/boxen/env.sh
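Looking back at the gnupg.vim changes above: the new s:GPGSystem and s:GPGExecute helpers centralize how the plugin builds its gpg command line (base command, caller arguments, optional --homedir from g:GPGHomedir, stderr discarded) instead of repeating that logic at every call site. A rough Python analogue of the same construction, purely illustrative since the plugin itself is Vimscript and shells out via system() and :execute:

import subprocess

def gpg_system(args, gpg_cmd='gpg', homedir=''):
    # Base command plus caller args, with the optional homedir appended,
    # mirroring the shape of the command line s:GPGSystem assembles.
    cmd = [gpg_cmd] + list(args)
    if homedir:
        cmd += ['--homedir', homedir]
    result = subprocess.run(cmd, capture_output=True, text=True)
    return result.stdout            # stderr is dropped, like s:stderrredirnull

# e.g. the capability probe GPGInit() performs to list supported algorithms:
print(gpg_system(['--version']))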