images.py (forked from mideind/GreynirServer)
#!/usr/bin/env python
"""

    Greynir: Natural language processing for Icelandic

    Image retrieval module

    Copyright (c) 2016 Vilhjalmur Thorsteinsson
    All rights reserved

    See the accompanying README.md file for further licensing and
    copyright information.

    This module contains a function that retrieves the URL of an image
    corresponding to a (person) name. It uses a Google API on top of the
    Google Custom Search feature. Retrieved image information is cached
    in the database.

"""

from typing import List, Dict, Optional

import sys
import json
import logging
import urllib.request
import urllib.parse
from urllib.error import HTTPError
from io import BytesIO
from datetime import datetime, timedelta
from collections import namedtuple
from contextlib import closing

import requests

from db import SessionContext
from db.models import Link, BlacklistedLink
from settings import Settings
from util import google_api_key


# HTTP request timeout
QUERY_TIMEOUT = 4.0


def _server_query(url: str, q: Dict[str, str]) -> Optional[str]:
    """ Query a server via HTTP GET with a URL-encoded query string obtained from q """
    doc = None
    if len(q):
        url += "?" + urllib.parse.urlencode(q)
    try:
        with closing(urllib.request.urlopen(url, timeout=QUERY_TIMEOUT)) as response:
            if response:
                # Decode the HTML Content-type header to obtain the
                # document type and the charset (content encoding), if specified
                encoding = "ISO-8859-1"
                ctype = response.getheader("Content-type", "")
                if ";" in ctype:
                    s = ctype.split(";")
                    ctype = s[0]
                    enc = s[1].strip()
                    s = enc.split("=")
                    if s[0] == "charset" and len(s) == 2:
                        encoding = s[1]
                if ctype == "application/json":
                    doc = response.read()  # doc is a bytes object
                    if doc:
                        # Decode the bytes to a str using the detected charset
                        doc = doc.decode(encoding)
    except HTTPError as ex:
        logging.warning("server_query exception: {0}".format(ex))
    return doc


# Google Custom Search Engine identifier
_CX = "001858240983628375092:9aogptqla5e"

# The content type we're using in the links table
_CTYPE = "image-search-"

# Time (in days) before cached items expire
_CACHE_EXPIRATION_DAYS = 30

# Number of image URLs to fetch and store
_NUM_IMG_URLS = 6

# The returned image descriptor tuple
Img = namedtuple("Img", ["src", "width", "height", "link", "origin", "name"])
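# The fields of Img are: src (the image or thumbnail URL), width and height in
# pixels, link (the page on which the image appears, i.e. the contextLink),
# origin (the source site, i.e. the displayLink) and name (the lookup key).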


def get_image_url(
    name: str,
    *,
    hints: List[str] = [],
    size: str = "large",
    thumb: bool = False,
    enclosing_session: Optional[SessionContext] = None,
    cache_only: bool = False,
) -> Optional[Img]:
    """ Use Google Custom Search API to obtain an image corresponding to a (person) name """
    jdoc = None
    ctype = _CTYPE + size

    with SessionContext(commit=True, session=enclosing_session) as session:
        q = (
            session.query(Link.content, Link.timestamp)
            .filter(Link.ctype == ctype)
            .filter(Link.key == name)
            .one_or_none()
        )
        if q is not None:
            # Found in cache. If the result is old, purge it
            period = timedelta(days=_CACHE_EXPIRATION_DAYS)
            expired = datetime.utcnow() - q.timestamp > period
            if expired and not cache_only:
                _purge_single(name, ctype=ctype, enclosing_session=session)
            else:
                jdoc = q.content

        if not jdoc and cache_only:
            return None

        if not jdoc:
            # Not found in cache: prepare to ask Google
            key = google_api_key()
            if not key:
                # No API key: can't ask for an image
                logging.warning("No API key for image lookup")
                return None

            # Assemble the query parameters
            search_str = '"{0}" {1}'.format(name, " ".join(hints)).strip()
            q = dict(
                q=search_str,
                num=_NUM_IMG_URLS,
                start=1,
                imgSize=size,
                # imgType="face",  # Only images with faces
                lr="lang_is",  # Higher priority for Icelandic language pages
                gl="is",  # Higher priority for .is results
                searchType="image",
                cx=_CX,
                key=key,
            )
            if Settings.DEBUG:
                print(
                    "Sending Google image search request for '{0}'".format(search_str)
                )
            jdoc = _server_query("https://www.googleapis.com/customsearch/v1", q)
            if Settings.DEBUG:
                print("Back from Google image search for '{0}'".format(search_str))
            if jdoc:
                # Store in the cache
                lnk = Link(
                    ctype=ctype, key=name, content=jdoc, timestamp=datetime.utcnow()
                )
                session.add(lnk)

        if not jdoc:
            return None

        answer = json.loads(jdoc)

        if (
            answer
            and "items" in answer
            and answer["items"]
            and "link" in answer["items"][0]
        ):
            blacklist = _blacklisted_urls_for_key(name, enclosing_session=session)

            for item in answer["items"]:
                k = item["link"] if not thumb else item["image"]["thumbnailLink"]
                if k and item["link"] not in blacklist:
                    image = item["image"]
                    h = image["height"] if not thumb else image["thumbnailHeight"]
                    w = image["width"] if not thumb else image["thumbnailWidth"]
                    return Img(k, w, h, image["contextLink"], item["displayLink"], name)

    # No answer that makes sense
    return None
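
# Illustrative use of get_image_url() (a sketch only; the name and the hint below
# are example values, not part of the module):
#
#     img = get_image_url("Jón Sigurðsson", hints=["forseti"], thumb=True)
#     if img is not None:
#         print(img.src, img.width, img.height, img.origin)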


def blacklist_image_url(name: str, url: str) -> Optional[Img]:
    """ Blacklist image URL for a given key """
    with SessionContext(commit=True) as session:
        # Verify that URL exists in DB
        if not _get_cached_entry(name, url, enclosing_session=session):
            return None
        # Check if already blacklisted
        if url in _blacklisted_urls_for_key(name, enclosing_session=session):
            return None
        # Add to blacklist
        b = BlacklistedLink(
            key=name, url=url, link_type="image", timestamp=datetime.utcnow()
        )
        session.add(b)
        return get_image_url(name, enclosing_session=session)


def update_broken_image_url(name: str, url: str) -> Optional[Img]:
    """ Refetch image URL for name if broken """
    with SessionContext() as session:
        # Verify that URL exists in DB
        r = _get_cached_entry(name, url, enclosing_session=session)
        if r:
            # Verify that URL is indeed broken
            if not check_image_url(url):
                # Blacklist the URL, purge results from cache and refetch
                blacklist_image_url(name, url)
                _purge_single(name, ctype=r.ctype, enclosing_session=session)
                return get_image_url(name)
    return None


def check_image_url(url: str) -> bool:
    """ Check if image exists at URL by sending HEAD request """
    req = urllib.request.Request(url, method="HEAD")
    try:
        response = urllib.request.urlopen(req, timeout=2.0)
        return response.status == 200
    except Exception:
        pass
    return False


def _blacklisted_urls_for_key(
    key: str, enclosing_session: Optional[SessionContext] = None
) -> List[str]:
    """ Fetch blacklisted URLs for a given key """
    with SessionContext(commit=True, session=enclosing_session) as session:
        q = (
            session.query(BlacklistedLink.url)
            .filter(BlacklistedLink.link_type == "image")
            .filter(BlacklistedLink.key == key)
            .all()
        )
        return [r for (r,) in q]


def _get_cached_entry(
    name: str, url: str, enclosing_session: Optional[SessionContext] = None
):
    """ Fetch a cached entry by key and URL """
    with SessionContext(commit=True, session=enclosing_session) as session:
        # TODO: The content column should be converted from varchar to jsonb
        # so that it can be queried faster and more intelligently
        return (
            session.query(Link)
            .filter(Link.key == name)
            .filter(Link.content.like("%" + url + "%"))
            .one_or_none()
        )


def _purge_single(
    key: str,
    ctype: Optional[str] = None,
    enclosing_session: Optional[SessionContext] = None,
):
    """ Remove cache entry """
    with SessionContext(commit=True, session=enclosing_session) as session:
        filters = [Link.key == key]
        if ctype:
            filters.append(Link.ctype == ctype)
        session.query(Link).filter(*filters).delete()


def _purge():
    """ Remove all cache entries """
    if input("Purge all cached data? (y/n): ").lower().startswith("y"):
        with SessionContext(commit=True) as session:
            session.query(Link).delete()


STATICMAP_URL = (
    "https://maps.googleapis.com/maps/api/staticmap?"
    "zoom={0}&style=feature:poi%7Cvisibility:off"
    "&size={1}x{2}&language=is&scale=2&maptype=roadmap"
    "&key={3}&markers={4},{5}"
)


def get_staticmap_image(
    latitude: float,
    longitude: float,
    zoom: int = 6,
    width: int = 180,
    height: int = 180,
) -> Optional[BytesIO]:
    """ Request image from the Google Static Maps API, returning the image data in a BytesIO object """
    key = google_api_key()
    if not key:
        return None

    url = STATICMAP_URL.format(zoom, width, height, key, latitude, longitude)

    # TODO: Use urllib instead of requests here
    try:
        r = requests.get(url, stream=True)
    except Exception as e:
        logging.warning(str(e))
        return None

    if r.status_code == 200:
        r.raw.decode_content = True
        return BytesIO(r.raw.data)

    logging.warning(
        "Status {0} when requesting static map image".format(r.status_code)
    )
    return None
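
# Illustrative use of get_staticmap_image() (a sketch only; the coordinates are
# example values roughly corresponding to Reykjavík):
#
#     data = get_staticmap_image(64.15, -21.95, zoom=10, width=300, height=200)
#     if data is not None:
#         with open("map.png", "wb") as f:
#             f.write(data.getvalue())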


def _test():
    """ Test image lookup """
    print("Testing...")

    print("Bjarni Benediktsson")
    img = get_image_url("Bjarni Benediktsson")
    print("{0}".format(img))

    print("Vilhjálmur Þorsteinsson")
    img = get_image_url("Vilhjálmur Þorsteinsson")
    print("{0}".format(img))

    print("Blængur Klængsson Eyfjörð")
    img = get_image_url("Blængur Klængsson Eyfjörð")
    print("{0}".format(img))  # Should be None


if __name__ == "__main__":
    cmap = {"test": _test, "purge": _purge}
    cmd = sys.argv[1] if len(sys.argv) > 1 else "test"
    if cmd in cmap:
        cmap[cmd]()
    elif cmd:
        # Any other arg is a name to fetch an image for
        img = get_image_url(cmd)
        print("{0}".format(img))