First format and fixes from ruff
derneuere committed Feb 4, 2025
1 parent 0299d7d commit a0c0a6c
Showing 58 changed files with 276 additions and 423 deletions.
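
Most of the changes below follow a handful of mechanical patterns: str.format() calls are rewritten as f-strings, `import api.util as util` becomes `from api import util`, multi-line statements are re-wrapped, and a nested else/if is collapsed into an elif. As a rough illustration only, not code taken from this diff, the log_result helper below is invented to show the kind of rewrite ruff's pyupgrade-derived rules apply:

import logging

logger = logging.getLogger(__name__)


def log_result(job_id, count):
    # Before the commit, calls like this used str.format():
    #     logger.info("job {}: processed {} items".format(job_id, count))
    # After ruff's rewrite, the equivalent f-string is used instead:
    logger.info(f"job {job_id}: processed {count} items")


log_result(42, 7)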
12 changes: 5 additions & 7 deletions api/all_tasks.py
@@ -9,7 +9,7 @@
from django.utils import timezone
from django_q.tasks import AsyncTask, schedule

-import api.util as util
+from api import util
from api.models.long_running_job import LongRunningJob


@@ -62,7 +62,7 @@ def zip_photos_task(job_id, user, photos, filename):
output_file.write(mf.getvalue())

except Exception as e:
util.logger.error("Error while converting files to zip: {}".format(e))
util.logger.error(f"Error while converting files to zip: {e}")

lrj.finished_at = datetime.now().replace(tzinfo=pytz.utc)
lrj.finished = True
@@ -77,15 +77,13 @@ def delete_zip_file(filename):
file_path = os.path.join(settings.MEDIA_ROOT, "zip", filename)
try:
if not os.path.exists(file_path):
-util.logger.error(
-    "Error while deleting file not found at : {}".format(file_path)
-)
+util.logger.error(f"Error while deleting file not found at : {file_path}")
return
else:
os.remove(file_path)
util.logger.info("file deleted sucessfully at path : {}".format(file_path))
util.logger.info(f"file deleted sucessfully at path : {file_path}")
return

except Exception as e:
util.logger.error("Error while deleting file: {}".format(e))
util.logger.error(f"Error while deleting file: {e}")
return e
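
For context, zip_photos_task assembles the archive in memory and writes mf.getvalue() to a file under MEDIA_ROOT/zip, which delete_zip_file later removes. A minimal sketch of that in-memory zip pattern, with invented paths and helper names (the real task also updates a LongRunningJob record), might look like:

import io
import os
import zipfile


def build_zip(photo_paths, output_path):
    # Assemble the archive in memory first, then flush it to disk in one write,
    # mirroring the output_file.write(mf.getvalue()) call in zip_photos_task.
    mf = io.BytesIO()
    with zipfile.ZipFile(mf, "w", zipfile.ZIP_DEFLATED) as zf:
        for path in photo_paths:
            zf.write(path, arcname=os.path.basename(path))
    with open(output_path, "wb") as output_file:
        output_file.write(mf.getvalue())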
14 changes: 4 additions & 10 deletions api/api_util.py
@@ -297,7 +297,7 @@ def median_value(queryset, term):
def calc_megabytes(bytes):
if bytes == 0 or bytes is None:
return 0
-return round(((bytes / 1024) / 1024))
+return round((bytes / 1024) / 1024)


def get_server_stats():
@@ -842,18 +842,14 @@ def get_photo_month_counts(user):
def get_searchterms_wordcloud(user):
query = {}
out = {"captions": [], "people": [], "locations": []}
-query[
-    "captions"
-] = """
+query["captions"] = """
with captionList as (
select unnest(regexp_split_to_array(search_captions,' , ')) caption
from api_photo where owner_id = %(userid)s
)
select caption, count(*) from captionList group by caption order by count(*) desc limit 100;
"""
-query[
-    "people"
-] = """
+query["people"] = """
with NameList as (
select api_person.name
from api_photo join api_face on image_hash = api_face.photo_id
@@ -862,9 +858,7 @@ def get_searchterms_wordcloud(user):
)
select name, count(*) from NameList group by name order by count(*) desc limit 100;
"""
-query[
-    "locations"
-] = """
+query["locations"] = """
with arrayloctable as (
select jsonb_array_elements(jsonb_extract_path(api_photo.geolocation_json, 'features')::jsonb) arrayloc , image_hash
from api_photo where owner_id = %(userid)s
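
The word-cloud queries above are plain SQL keyed by %(userid)s. A hedged sketch of how such a dict-parameterized query is typically executed through Django's database cursor; the actual surrounding code in api_util.py may differ in detail:

from django.db import connection


def run_wordcloud_query(sql, user_id):
    # %(userid)s in the SQL is bound from the params dict by the database driver.
    with connection.cursor() as cursor:
        cursor.execute(sql, {"userid": user_id})
        return cursor.fetchall()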
35 changes: 14 additions & 21 deletions api/autoalbum.py
@@ -36,7 +36,7 @@ def regenerate_event_titles(user, job_id):
aus = AlbumAuto.objects.filter(owner=user).prefetch_related("photos")
target_count = len(aus)
for idx, au in enumerate(aus):
logger.info("job {}: {}".format(job_id, idx))
logger.info(f"job {job_id}: {idx}")
au._generate_title()
au.save()

@@ -47,7 +47,7 @@ def regenerate_event_titles(user, job_id):
lrj.finished = True
lrj.finished_at = datetime.now().replace(tzinfo=pytz.utc)
lrj.save()
logger.info("job {}: updated lrj entry to db".format(job_id))
logger.info(f"job {job_id}: updated lrj entry to db")

except Exception:
logger.exception("An error occurred")
@@ -88,24 +88,21 @@ def group(photos, dt=timedelta(hours=6)):
if len(groups) == 0:
groups.append([])
groups[-1].append(photo)
+# Photos are sorted by timestamp, so we can just check the last photo of the last group
+# to see if it is within the time delta
+elif photo.exif_timestamp - groups[-1][-1].exif_timestamp < dt:
+    groups[-1].append(photo)
+# If the photo is not within the time delta, we create a new group
else:
-    # Photos are sorted by timestamp, so we can just check the last photo of the last group
-    # to see if it is within the time delta
-    if photo.exif_timestamp - groups[-1][-1].exif_timestamp < dt:
-        groups[-1].append(photo)
-    # If the photo is not within the time delta, we create a new group
-    else:
-        groups.append([])
-        groups[-1].append(photo)
+    groups.append([])
+    groups[-1].append(photo)
return groups

# Group images that are on the same 1 day and 12 hours interval
groups = group(photos, dt=timedelta(days=1, hours=12))
target_count = len(groups)
logger.info(
"job {}: made {} groups out of {} images".format(
job_id, target_count, len(photos)
)
f"job {job_id}: made {target_count} groups out of {len(photos)} images"
)

album_locations = []
@@ -116,7 +113,7 @@ def group(photos, dt=timedelta(hours=6)):
lastKey = group[-1].exif_timestamp + timedelta(hours=11, minutes=59)
logger.info(str(key.date) + " - " + str(lastKey.date))
logger.info(
"job {}: processing auto album with date: ".format(job_id)
f"job {job_id}: processing auto album with date: "
+ key.strftime(date_format)
+ " to "
+ lastKey.strftime(date_format)
@@ -134,9 +131,7 @@ def group(photos, dt=timedelta(hours=6)):
album.timestamp = key
album.save()

-logger.info(
-    "job {}: generate auto album {}".format(job_id, album.id)
-)
+logger.info(f"job {job_id}: generate auto album {album.id}")
locs = []
for item in items:
album.photos.add(item)
@@ -155,7 +150,7 @@ def group(photos, dt=timedelta(hours=6)):
continue
if qs.count() == 1:
album = qs.first()
logger.info("job {}: update auto album {}".format(job_id, album.id))
logger.info(f"job {job_id}: update auto album {album.id}")
for item in items:
if item in album.photos.all():
continue
@@ -167,9 +162,7 @@ def group(photos, dt=timedelta(hours=6)):
if qs.count() > 1:
# To-Do: Merge both auto albums
logger.info(
"job {}: found multiple auto albums for date {}".format(
job_id, key.strftime(date_format)
)
f"job {job_id}: found multiple auto albums for date {key.strftime(date_format)}"
)
continue

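
The restructured group() helper above clusters photos into events by time proximity. A self-contained sketch of the same idea, using bare datetimes instead of Photo objects and keeping the thresholds shown in the diff (6-hour default, 1 day 12 hours at the call site):

from datetime import datetime, timedelta


def group(timestamps, dt=timedelta(hours=6)):
    groups = []
    for ts in sorted(timestamps):
        if len(groups) == 0:
            groups.append([])
            groups[-1].append(ts)
        # Timestamps are sorted, so only the last entry of the last group
        # needs checking against the time delta.
        elif ts - groups[-1][-1] < dt:
            groups[-1].append(ts)
        # Otherwise start a new group.
        else:
            groups.append([])
            groups[-1].append(ts)
    return groups


print(group([datetime(2025, 2, 1, 9), datetime(2025, 2, 1, 10), datetime(2025, 2, 3, 8)], dt=timedelta(days=1, hours=12)))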
6 changes: 3 additions & 3 deletions api/batch_jobs.py
@@ -5,7 +5,7 @@
import pytz
from django.db.models import Q

-import api.util as util
+from api import util
from api.image_similarity import build_image_similarity_index
from api.models.long_running_job import LongRunningJob
from api.models.photo import Photo
@@ -37,7 +37,7 @@ def batch_calculate_clip_embedding(user):
torch.multiprocessing.set_start_method("spawn", force=True)

BATCH_SIZE = 64
util.logger.info("Using threads: {}".format(torch.get_num_threads()))
util.logger.info(f"Using threads: {torch.get_num_threads()}")

done_count = 0
while done_count < count:
@@ -67,7 +67,7 @@ def batch_calculate_clip_embedding(user):
obj.clip_embeddings_magnitude = magnitude
obj.save()
except Exception as e:
util.logger.error("Error calculating clip embeddings: {}".format(e))
util.logger.error(f"Error calculating clip embeddings: {e}")

lrj.progress_current = done_count
lrj.progress_target = count
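
batch_calculate_clip_embedding processes photos in fixed-size chunks (BATCH_SIZE = 64) and records progress on a LongRunningJob after each chunk. A rough, assumed sketch of that batching pattern; the helper and callback names here are invented, not the project's API:

BATCH_SIZE = 64


def process_in_batches(items, compute_embedding, report_progress):
    count = len(items)
    done_count = 0
    while done_count < count:
        batch = items[done_count : done_count + BATCH_SIZE]
        for item in batch:
            try:
                item["embedding"] = compute_embedding(item["data"])
            except Exception as e:
                print(f"Error calculating embedding: {e}")
        done_count += len(batch)
        # Progress is reported once per chunk, mirroring the lrj.progress_current update.
        report_progress(done_count, count)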
6 changes: 3 additions & 3 deletions api/cluster_manager.py
@@ -99,9 +99,9 @@ def try_add_cluster(
# Set initial metadata on the split clusters based on known faces
for new_cluster in added_clusters:
new_cluster.set_metadata(encoding_by_person[new_cluster.person.id])
-mean_encoding_by_cluster[
-    new_cluster.id
-] = new_cluster.get_mean_encoding_array()
+mean_encoding_by_cluster[new_cluster.id] = (
+    new_cluster.get_mean_encoding_array()
+)

# Clear the face IDs list to prepare for processing the unknown faces
for new_cluster in added_clusters:
6 changes: 2 additions & 4 deletions api/date_time_extractor.py
@@ -117,8 +117,7 @@ class RuleTypes:


class TimeExtractionRule:
"""
The goal is to help extract local time, but for historical reason it is expected the returned
"""The goal is to help extract local time, but for historical reason it is expected the returned
datetime will have timezone to be set to pytz.utc (so local time + timezone equal to UTC)..
Some sources of data might give us very rich information, e.g. timestamp + timezone,
@@ -303,8 +302,7 @@ def apply(
raise ValueError(f"Unknown rule type {self.rule_type}")

def _get_tz(self, description, gps_lat, gps_lon, user_default_tz):
"""
None is a valid timezone returned here (meaning that we want to use server local time).
"""None is a valid timezone returned here (meaning that we want to use server local time).
This is why this function returns a tuple with the first element specifying success of
determining the timezone, and the second element - the timezone itself.
"""
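
The TimeExtractionRule docstrings touched above describe a convention worth spelling out: the extracted local wall-clock time is stored in a datetime whose tzinfo is pytz.utc, and _get_tz signals success separately because None is itself a valid timezone result. A small illustration of the storage convention, with invented values:

from datetime import datetime

import pytz

# "18:30 local time" is stored as 18:30 with tzinfo=pytz.utc,
# regardless of the photo's real timezone.
local_wall_clock = datetime(2025, 2, 4, 18, 30)
stored = local_wall_clock.replace(tzinfo=pytz.utc)
print(stored.isoformat())  # 2025-02-04T18:30:00+00:00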
