From e16743e13f589043da0a23fc823db431a281fd0c Mon Sep 17 00:00:00 2001 From: ztl-35 Date: Fri, 30 Sep 2022 11:38:07 +0800 Subject: [PATCH 001/101] update kd --- easynlp/appzoo/text_match/evaluator.py | 4 ++ .../knowledge_distillation/run_vanilla_kd.sh | 64 +++++++++---------- 2 files changed, 36 insertions(+), 32 deletions(-) diff --git a/easynlp/appzoo/text_match/evaluator.py b/easynlp/appzoo/text_match/evaluator.py index fb16eaaf..33a56b12 100644 --- a/easynlp/appzoo/text_match/evaluator.py +++ b/easynlp/appzoo/text_match/evaluator.py @@ -33,6 +33,8 @@ def __init__(self, valid_dataset, **kwargs): super(TextMatchEvaluator, self).__init__(valid_dataset, **kwargs) self.metrics = ["accuracy", "f1"] self.two_tower = kwargs.get('user_defined_parameters').get('app_parameters').get('two_tower', False) + self.kd_type = kwargs.get('user_defined_parameters').get('app_parameters').get('type', None) + self.enable_distillation = kwargs.get('user_defined_parameters').get('app_parameters').get('enable_distillation', False) def evaluate(self, model): model.eval() @@ -56,6 +58,8 @@ def evaluate(self, model): infer_start_time = time.time() with torch.no_grad(): label_ids = batch.pop("label_ids") + if self.kd_type == "vanilla_kd" and self.enable_distillation: + batch.pop("teacher_logits") outputs = model(batch) infer_end_time = time.time() total_spent_time += infer_end_time - infer_start_time diff --git a/examples/knowledge_distillation/run_vanilla_kd.sh b/examples/knowledge_distillation/run_vanilla_kd.sh index 68a8071f..844a2af2 100644 --- a/examples/knowledge_distillation/run_vanilla_kd.sh +++ b/examples/knowledge_distillation/run_vanilla_kd.sh @@ -26,40 +26,40 @@ LOGITS_PATH=results/large-sst2-teacher echo '=========[ Finetune the teacher model ]=========' easynlp \ - --app_name=text_match \ - --mode=train \ - --worker_count=${WORKER_COUNT} \ - --worker_gpu=${WORKER_GPU} \ - --tables=train.tsv,dev.tsv \ - --input_schema=label:str:1,id:str:1,id2:str:1,sent1:str:1,sent2:str:1 \ - --first_sequence=sent1 \ - --second_sequence=sent2 \ - --label_name=label \ - --label_enumerate_values=0,1 \ - --checkpoint_dir=${TEACHER_CKPT} \ - --learning_rate=3e-5 \ - --epoch_num=1 \ - --random_seed=42 \ - --save_checkpoint_steps=100 \ - --sequence_length=128 \ - --micro_batch_size=32 \ - --user_defined_parameters="pretrain_model_name_or_path=${TEACHER_MODEL}" + --app_name=text_match \ + --mode=train \ + --worker_count=${WORKER_COUNT} \ + --worker_gpu=${WORKER_GPU} \ + --tables=train.tsv,dev.tsv \ + --input_schema=label:str:1,id:str:1,id2:str:1,sent1:str:1,sent2:str:1 \ + --first_sequence=sent1 \ + --second_sequence=sent2 \ + --label_name=label \ + --label_enumerate_values=0,1 \ + --checkpoint_dir=${TEACHER_CKPT} \ + --learning_rate=3e-5 \ + --epoch_num=1 \ + --random_seed=42 \ + --save_checkpoint_steps=100 \ + --sequence_length=128 \ + --micro_batch_size=32 \ + --user_defined_parameters="pretrain_model_name_or_path=${TEACHER_MODEL}" echo '=========[ Save the teacher logits ]=========' -easynlp \ - --mode=predict \ - --worker_count=${WORKER_COUNT} \ - --worker_gpu=${WORKER_GPU} \ - --tables=train.tsv \ - --outputs=${LOGITS_PATH}/pred.tsv \ - --input_schema=label:str:1,id:str:1,id2:str:1,sent1:str:1,sent2:str:1 \ - --output_schema=logits \ - --first_sequence=sent1 \ - --second_sequence=sent2 \ - --checkpoint_path=${TEACHER_CKPT} \ - --micro_batch_size=32 \ - --sequence_length=128 \ - --app_name=text_match + easynlp \ + --mode=predict \ + --worker_count=${WORKER_COUNT} \ + --worker_gpu=${WORKER_GPU} \ + --tables=train.tsv \ + 
--outputs=${LOGITS_PATH}/pred.tsv \ + --input_schema=label:str:1,id:str:1,id2:str:1,sent1:str:1,sent2:str:1 \ + --output_schema=logits \ + --first_sequence=sent1 \ + --second_sequence=sent2 \ + --checkpoint_path=${TEACHER_CKPT} \ + --micro_batch_size=32 \ + --sequence_length=128 \ + --app_name=text_match echo '=========[ Finetune the student model w/ KD ]=========' easynlp \ From 9faf54131928c4869f5f1db11c2ffeee72bfee1e Mon Sep 17 00:00:00 2001 From: deplay <1150696014@qq.com> Date: Fri, 11 Nov 2022 10:18:24 +0800 Subject: [PATCH 002/101] add text2video retrieval --- easynlp/appzoo/__init__.py | 8 + easynlp/appzoo/api.py | 9 + .../appzoo/text2video_retrieval/__init__.py | 15 + easynlp/appzoo/text2video_retrieval/data.py | 279 ++++++++++++++++++ .../appzoo/text2video_retrieval/evaluator.py | 74 +++++ easynlp/appzoo/text2video_retrieval/model.py | 121 ++++++++ .../appzoo/text2video_retrieval/predictor.py | 143 +++++++++ easynlp/utils/arguments.py | 2 +- examples/text2video_retrieval/README.md | 25 ++ examples/text2video_retrieval/main.py | 84 ++++++ .../preprocess_video_frame.py | 105 +++++++ .../preprocess_video_frame.sh | 25 ++ .../run_clip_local_appzoo.sh | 75 +++++ .../run_clip_local_user_defined.sh | 82 +++++ 14 files changed, 1046 insertions(+), 1 deletion(-) create mode 100644 easynlp/appzoo/text2video_retrieval/__init__.py create mode 100644 easynlp/appzoo/text2video_retrieval/data.py create mode 100644 easynlp/appzoo/text2video_retrieval/evaluator.py create mode 100644 easynlp/appzoo/text2video_retrieval/model.py create mode 100644 easynlp/appzoo/text2video_retrieval/predictor.py create mode 100644 examples/text2video_retrieval/README.md create mode 100644 examples/text2video_retrieval/main.py create mode 100644 examples/text2video_retrieval/preprocess_video_frame.py create mode 100644 examples/text2video_retrieval/preprocess_video_frame.sh create mode 100644 examples/text2video_retrieval/run_clip_local_appzoo.sh create mode 100644 examples/text2video_retrieval/run_clip_local_user_defined.sh diff --git a/easynlp/appzoo/__init__.py b/easynlp/appzoo/__init__.py index faacd20d..e5df7938 100644 --- a/easynlp/appzoo/__init__.py +++ b/easynlp/appzoo/__init__.py @@ -26,6 +26,7 @@ "data_augmentation.model": ["DataAugmentation"], "geep_classification.model": ["GEEPClassification"], "multi_modal.model": ["MultiModal"], + "text2video_retrieval.model": ["Text2VideoRetrieval"], "wukong.model": ["WukongCLIP"], "text2image_generation.model": ["TextImageGeneration", "TextImageGeneration_knowl"], "image2text_generation.model": ['VQGANGPTImageTextGeneration', 'CLIPGPTImageTextGeneration'], @@ -39,6 +40,7 @@ "text_match.evaluator": ['TextMatchEvaluator'], "geep_classification.evaluator": ['GEEPClassificationEvaluator'], "multi_modal.evaluator": ['MultiModalEvaluator'], + "text2video_retrieval.evaluator": ['Text2VideoRetrievalEvaluator'], "wukong.evaluator": ['WukongEvaluator'], "text2image_generation.evaluator": ["TextImageGenerationEvaluator"], "image2text_generation.evaluator": ["ImageTextGenerationEvaluator"], @@ -53,6 +55,7 @@ "data_augmentation.predictor": ['DataAugmentationPredictor'], "geep_classification.predictor": ['GEEPClassificationPredictor'], "multi_modal.predictor": ['MultiModalPredictor'], + "text2video_retrieval.predictor": ['Text2VideoRetrievalPredictor'], "wukong.predictor": ['WukongPredictor'], "text2image_generation.predictor": ['TextImageGenerationPredictor', 'TextImageGenerationKnowlPredictor'], "image2text_generation.predictor": ['VQGANGPTImageTextGenerationPredictor', 
'CLIPGPTImageTextGenerationPredictor'], @@ -63,6 +66,7 @@ "geep_classification.data": ['GEEPClassificationDataset'], "language_modeling.data": ['LanguageModelingDataset'], "multi_modal.data": ['MultiModalDataset'], + "text2video_retrieval.data": ['Text2VideoRetrievalDataset'], "wukong.data": ['WukongDataset'], "sequence_classification.data": ['ClassificationDataset', 'DistillatoryClassificationDataset', 'FewshotSequenceClassificationDataset'], "sequence_labeling.data": ['SequenceLabelingDataset', 'SequenceLabelingAutoDataset'], @@ -85,6 +89,7 @@ from .data_augmentation.model import DataAugmentation from .geep_classification.model import GEEPClassification from .multi_modal.model import MultiModal + from .text2video_retrieval.model import Text2VideoRetrieval from .wukong.model import WukongCLIP from .text2image_generation.model import TextImageGeneration, TextImageGeneration_knowl from .image2text_generation.model import VQGANGPTImageTextGeneration, CLIPGPTImageTextGeneration @@ -98,6 +103,7 @@ from .text_match.evaluator import TextMatchEvaluator from .geep_classification.evaluator import GEEPClassificationEvaluator from .multi_modal.evaluator import MultiModalEvaluator + from .text2video_retrieval.evaluator import Text2VideoRetrievalEvaluator from .wukong.evaluator import WukongEvaluator from .text2image_generation.evaluator import TextImageGenerationEvaluator from .image2text_generation.evaluator import ImageTextGenerationEvaluator @@ -112,6 +118,7 @@ from .data_augmentation.predictor import DataAugmentationPredictor from .geep_classification.predictor import GEEPClassificationPredictor from .multi_modal.predictor import MultiModalPredictor + from .text2video_retrieval.predictor import Text2VideoRetrievalPredictor from .wukong.predictor import WukongPredictor from .text2image_generation.predictor import TextImageGenerationPredictor, TextImageGenerationKnowlPredictor from .image2text_generation.predictor import VQGANGPTImageTextGenerationPredictor, CLIPGPTImageTextGenerationPredictor @@ -125,6 +132,7 @@ from .text_match.data import TwoTowerDataset, SingleTowerDataset, DistillatorySingleTowerDataset, FewshotSingleTowerTextMatchDataset, SiameseDataset from .geep_classification.data import GEEPClassificationDataset from .multi_modal.data import MultiModalDataset + from .text2video_retrieval.data import Text2VideoRetrievalDataset from .wukong.data import WukongDataset from .text2image_generation.data import TextImageDataset, TextImageKnowlDataset from .image2text_generation.data import CLIPGPTImageTextDataset, VQGANGPTImageTextDataset diff --git a/easynlp/appzoo/api.py b/easynlp/appzoo/api.py index af965c22..65f5cb88 100644 --- a/easynlp/appzoo/api.py +++ b/easynlp/appzoo/api.py @@ -26,6 +26,7 @@ from easynlp.appzoo import TextMatch, TextMatchTwoTower, DistillatoryTextMatch, FewshotSingleTowerTextMatch, CptFewshotSingleTowerTextMatch from easynlp.appzoo import SequenceLabeling, LanguageModeling, FeatureVectorization, DataAugmentation, GEEPClassification from easynlp.appzoo import MultiModal +from easynlp.appzoo import Text2VideoRetrieval from easynlp.appzoo import WukongCLIP from easynlp.appzoo import TextImageGeneration from easynlp.appzoo import VQGANGPTImageTextGeneration, CLIPGPTImageTextGeneration @@ -40,6 +41,7 @@ from easynlp.appzoo import SequenceClassificationEvaluator, SequenceMultiLabelClassificationEvaluator from easynlp.appzoo import SequenceLabelingEvaluator, LanguageModelingEvaluator, TextMatchEvaluator, GEEPClassificationEvaluator from easynlp.appzoo import MultiModalEvaluator 
+from easynlp.appzoo import Text2VideoRetrievalEvaluator from easynlp.appzoo import WukongEvaluator from easynlp.appzoo import TextImageGenerationEvaluator from easynlp.appzoo import ImageTextGenerationEvaluator @@ -52,6 +54,7 @@ from easynlp.appzoo import TextMatchPredictor, TextMatchTwoTowerPredictor, FewshotSingleTowerTextMatchPredictor, CptFewshotSingleTowerTextMatchPredictor from easynlp.appzoo import DataAugmentationPredictor, GEEPClassificationPredictor from easynlp.appzoo import MultiModalPredictor +from easynlp.appzoo import Text2VideoRetrievalPredictor from easynlp.appzoo import WukongPredictor from easynlp.appzoo import TextImageGenerationPredictor from easynlp.appzoo import VQGANGPTImageTextGenerationPredictor, CLIPGPTImageTextGenerationPredictor @@ -65,6 +68,7 @@ from easynlp.appzoo import SequenceGenerationDataset from easynlp.appzoo import GEEPClassificationDataset from easynlp.appzoo import MultiModalDataset +from easynlp.appzoo import Text2VideoRetrievalDataset from easynlp.appzoo import WukongDataset from easynlp.appzoo import TextImageDataset from easynlp.appzoo import CLIPGPTImageTextDataset, VQGANGPTImageTextDataset @@ -93,6 +97,7 @@ 'sequence_labeling': SequenceLabelingDataset, 'language_modeling': LanguageModelingDataset, 'geep_classify': GEEPClassificationDataset, + 'clip4clip': Text2VideoRetrievalDataset, 'clip': MultiModalDataset, 'wukong': WukongDataset, 'text2image_generation': TextImageDataset, @@ -126,6 +131,7 @@ 'vectorization': FeatureVectorization, 'data_augmentation': DataAugmentation, 'geep_classify': GEEPClassification, + 'clip4clip': Text2VideoRetrieval, 'clip': MultiModal, 'wukong': WukongCLIP, 'text2image_generation': TextImageGeneration, @@ -154,6 +160,7 @@ }, 'sequence_labeling': SequenceLabeling, 'geep_classify': GEEPClassification, + 'clip4clip': Text2VideoRetrieval, 'clip': MultiModal, 'wukong': WukongCLIP, 'text2image_generation': TextImageGeneration, @@ -183,6 +190,7 @@ 'language_modeling': LanguageModelingEvaluator, 'sequence_labeling': SequenceLabelingEvaluator, 'geep_classify': GEEPClassificationEvaluator, + 'clip4clip': Text2VideoRetrievalEvaluator, 'clip': MultiModalEvaluator, 'wukong': WukongEvaluator, 'text2image_generation': TextImageGenerationEvaluator, @@ -212,6 +220,7 @@ 'vectorization': [FeatureVectorizationPredictor, FeatureVectorization], 'data_augmentation': [DataAugmentationPredictor, DataAugmentation], 'geep_classify': [GEEPClassificationPredictor, GEEPClassification], + 'clip4clip': [Text2VideoRetrievalPredictor, Text2VideoRetrieval], 'clip': [MultiModalPredictor, MultiModal], 'wukong': [WukongPredictor, WukongCLIP], 'text2image_generation': [TextImageGenerationPredictor, TextImageGeneration], diff --git a/easynlp/appzoo/text2video_retrieval/__init__.py b/easynlp/appzoo/text2video_retrieval/__init__.py new file mode 100644 index 00000000..0935d7f9 --- /dev/null +++ b/easynlp/appzoo/text2video_retrieval/__init__.py @@ -0,0 +1,15 @@ +# coding=utf-8 +# Copyright (c) 2020 Alibaba PAI team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + diff --git a/easynlp/appzoo/text2video_retrieval/data.py b/easynlp/appzoo/text2video_retrieval/data.py new file mode 100644 index 00000000..f7299313 --- /dev/null +++ b/easynlp/appzoo/text2video_retrieval/data.py @@ -0,0 +1,279 @@ +# coding=utf-8 +# Copyright (c) 2020 Alibaba PAI team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import torch +import numpy as np +from ...modelzoo import BertTokenizer +from ...utils import io +from ..dataset import BaseDataset +from ...modelzoo.models.clip.openclip_tokenizer import SimpleTokenizer +from PIL import Image +import base64 +from io import BytesIO +import json +from ...utils import losses, get_pretrain_model_path, get_args +from typing import Union,List +import os + +def _center_crop(image, size): + """ + Crops `image` to the given size using a center crop. Note that if the image is too small to be cropped to the + size is given, it will be padded (so the returned result has the size asked). + Args: + image (`PIL.Image.Image` or `np.ndarray` or `torch.Tensor`): + The image to resize. + size (`int` or `Tuple[int, int]`): + The size to which crop the image. + """ + + if not isinstance(size, tuple): + size = (size, size) + + image_width, image_height = image.size + crop_height, crop_width = size + + crop_top = int((image_height - crop_height + 1) * 0.5) + crop_left = int((image_width - crop_width + 1) * 0.5) + + return image.crop((crop_left, crop_top, crop_left + crop_width, crop_top + crop_height)) + +def _resize(image, size, resample=Image.BICUBIC): + """ + Resizes `image`. Note that this will trigger a conversion of `image` to a PIL Image. + Args: + image (`PIL.Image.Image` or `np.ndarray` or `torch.Tensor`): + The image to resize. + size (`int` or `Tuple[int, int]`): + The size to use for resizing the image. If `int` it will be resized to match the shorter side + resample (`int`, *optional*, defaults to `PIL.Image.BILINEAR`): + The filter to user for resampling. + """ + + if isinstance(size, tuple): + new_w, new_h = size + else: + width, height = image.size + short, long = (width, height) if width <= height else (height, width) + if short == size: + return image + new_short, new_long = size, int(size * long / short) + new_w, new_h = (new_short, new_long) if width <= height else (new_long, new_short) + return image.resize((new_w, new_h), resample) + +def _to_numpy_array(image, rescale=None, channel_first=True): + """ + Converts `image` to a numpy array. Optionally rescales it and puts the channel dimension as the first + dimension. + Args: + image (`PIL.Image.Image` or `np.ndarray` or `torch.Tensor`): + The image to convert to a NumPy array. + rescale (`bool`, *optional*): + Whether or not to apply the scaling factor (to make pixel values floats between 0. and 1.). Will + default to `True` if the image is a PIL Image or an array/tensor of integers, `False` otherwise. 
+ channel_first (`bool`, *optional*, defaults to `True`): + Whether or not to permute the dimensions of the image to put the channel dimension first. + """ + + if isinstance(image, Image.Image): + image = np.array(image) + + if rescale is None: + rescale = isinstance(image.flat[0], np.integer) + + if rescale: + image = image.astype(np.float32) / 255.0 + + if channel_first and image.ndim == 3: + image = image.transpose(2, 0, 1) + + return image + +def _normalize(image, mean=[0.48145466, 0.4578275, 0.40821073], std=[0.26862954, 0.26130258, 0.27577711]): + """ + Normalizes `image` with `mean` and `std`. Note that this will trigger a conversion of `image` to a NumPy array + if it's a PIL Image. + Args: + image (`PIL.Image.Image` or `np.ndarray` or `torch.Tensor`): + The image to normalize. + mean (`List[float]` or `np.ndarray` or `torch.Tensor`): + The mean (per channel) to use for normalization. + std (`List[float]` or `np.ndarray` or `torch.Tensor`): + The standard deviation (per channel) to use for normalization. + """ + + if isinstance(image, Image.Image): + image=image.convert('RGB') + image = _to_numpy_array(image) + + if isinstance(image, np.ndarray): + if not isinstance(mean, np.ndarray): + mean = np.array(mean).astype(image.dtype) + if not isinstance(std, np.ndarray): + std = np.array(std).astype(image.dtype) + elif is_torch_tensor(image): + import torch + + if not isinstance(mean, torch.Tensor): + mean = torch.tensor(mean) + if not isinstance(std, torch.Tensor): + std = torch.tensor(std) + + if image.ndim == 3 and image.shape[0] in [1, 3]: + return (image - mean[:, None, None]) / std[:, None, None] + else: + return (image - mean) / std + +def openclip_tokenize(texts: Union[str, List[str]], context_length: int = 77,_tokenizer:str='empty') -> torch.LongTensor: + """ + Returns the tokenized representation of given input string(s) + Parameters + ---------- + texts : Union[str, List[str]] + An input string or a list of input strings to tokenize + context_length : int + The context length to use; all CLIP models use 77 as the context length + Returns + ------- + A two-dimensional tensor containing the resulting tokens, shape = [number of input strings, context_length] + """ + if isinstance(texts, str): + texts = [texts] + + sot_token = _tokenizer.encoder[""] + eot_token = _tokenizer.encoder[""] + all_tokens = [[sot_token] + _tokenizer.encode(text) + [eot_token] for text in texts] + result = torch.zeros(len(all_tokens), context_length, dtype=torch.long) + + for i, tokens in enumerate(all_tokens): + if len(tokens) > context_length: # Truncate + tokens = tokens[:context_length] + result[i, :len(tokens)] = torch.tensor(tokens) + + return result + +class Text2VideoRetrievalDataset(BaseDataset): + """ + Classification Dataset + Args: + pretrained_model_name_or_path: for init tokenizer. + data_file: input data file. + max_seq_length: max sequence length of each input instance. 
+ first_sequence: input text + label_name: label column name + second_sequence: set as None + label_enumerate_values: a list of label values + multi_label: set as True if perform multi-label classification, otherwise False + """ + + def __init__(self, + pretrained_model_name_or_path, + data_file, + max_seq_length, + input_schema=None, + first_sequence=None, + label_name=None, + second_sequence=None, + label_enumerate_values=None, + user_defined_parameters=None, + *args, + **kwargs): + + pretrained_model_name_or_path = get_pretrain_model_path(pretrained_model_name_or_path) + # 先处理配置,再决定后续如何加载权重 + with open(pretrained_model_name_or_path+'/config.json','r') as config_handle: + self.raw_config=json.load(config_handle) + if ('model_type' in self.raw_config) and (self.raw_config['model_type']=='open_clip'): + self.model_type='open_clip' + + super().__init__(data_file, + input_schema=input_schema, + output_format="dict", + *args, + **kwargs) + self.text_col = first_sequence + self.image_col = second_sequence + if self.model_type=='open_clip': + self.openclip_tokenizer = SimpleTokenizer(bpe_path=pretrained_model_name_or_path+'/vocab.txt') + + self.max_text_length=max_seq_length + self.do_resize=True + self.size=224 + self.resample=Image.BICUBIC + self.do_center_crop=True + self.crop_size=224 + self.do_normalize=True + self.max_frames=12 + + def convert_single_row_to_example(self, row): + """Convert sample token to indices. + Args: + row: contains sequence and label. + text_a: the first sequence in row. + text_b: the second sequence in row if self.second_sequence is true. + label: label token if self.label_name is true. + Returns: sing example + encoding: an example contains token indices. + """ + + _text=row[self.text_col] + try: + images = [] + for frame in os.listdir(row[self.image_col]): + images.append(Image.open(os.path.join(row[self.image_col], frame))) + video_len = len(images) + if video_len < self.max_frames: + for _ in range(self.max_frames - len(images)): + images.append(Image.new('RGB', (self.size, self.size), (0, 0, 0))) + except: + print(row) + raise Exception(210) + if self.model_type=='open_clip': + bpe_result = openclip_tokenize(texts=[_text],context_length=77,_tokenizer=self.openclip_tokenizer) + tk_result={'input_ids':bpe_result} + + # transformations (resizing + center cropping + normalization) + if self.do_resize and self.size is not None and self.resample is not None: + images = [_resize(image=image, size=self.size, resample=self.resample) for image in images] + if self.do_center_crop and self.crop_size is not None: + images = [_center_crop(image, self.crop_size) for image in images] + if self.do_normalize: + images = [_normalize(image=image) for image in images] + video_mask = np.zeros((1, self.max_frames), dtype=np.long) + video_mask[0][:video_len] = 1 + return {'text':tk_result,'pixel_values':torch.tensor(images).unsqueeze(0),'video_masks':torch.tensor(video_mask)} + + def batch_fn(self, features): + # """ + # Divide examples into batches. 
+ # """ + output={'pixel_values':[],'video_masks':[],'input_ids':[],'token_type_ids':[],'attention_mask':[]} + for dic in features: + output['pixel_values'].append(dic['pixel_values']) + output['video_masks'].append(dic['video_masks']) + output['input_ids'].append(dic['text']['input_ids']) + if 'token_type_ids' in dic['text']: + output['token_type_ids'].append(dic['text']['token_type_ids']) + if 'attention_mask' in dic['text']: + output['attention_mask'].append(dic['text']['attention_mask']) + + output['pixel_values']=torch.cat(output['pixel_values'],dim=0) + output['video_masks']=torch.cat(output['video_masks'],dim=0) + output['input_ids']=torch.cat(output['input_ids'],dim=0) + if len(output['token_type_ids'])>0: + output['token_type_ids']=torch.cat(output['token_type_ids'],dim=0) + if len(output['attention_mask'])>0: + output['attention_mask']=torch.cat(output['attention_mask'],dim=0) + output['label_ids']=[] + return output diff --git a/easynlp/appzoo/text2video_retrieval/evaluator.py b/easynlp/appzoo/text2video_retrieval/evaluator.py new file mode 100644 index 00000000..e61b156d --- /dev/null +++ b/easynlp/appzoo/text2video_retrieval/evaluator.py @@ -0,0 +1,74 @@ +# coding=utf-8 +# Copyright (c) 2020 Alibaba PAI team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import time +import torch +from torch import nn +import numpy as np +from scipy.stats import pearsonr, spearmanr +from sklearn.metrics import matthews_corrcoef, roc_auc_score, classification_report +from sklearn.metrics import f1_score, precision_score, recall_score +from ...utils import losses +from ...utils.logger import logger +from ...core.evaluator import Evaluator +import tqdm + +class Text2VideoRetrievalEvaluator(Evaluator): + def __init__(self, valid_dataset, **kwargs): + super().__init__(valid_dataset, **kwargs) + self.metrics = ["accuracy", "f1"] + self.before=0.0 + + def evaluate(self, model): + model.eval() + total_spent_time = 0.0 + video_embeds_all=[] + text_embeds_all=[] + print('Start calculating features of samples ...') + for _step, batch in tqdm.tqdm(enumerate(self.valid_loader)): + infer_start_time = time.time() + with torch.no_grad(): + outputs = model(batch) + infer_end_time = time.time() + total_spent_time += infer_end_time - infer_start_time + video_embeds_all.append(outputs['video_embeds']) + text_embeds_all.append(outputs['text_embeds']) + video_embeds_tensor=torch.cat(video_embeds_all,dim=0) + text_embeds_tensor=torch.cat(text_embeds_all,dim=0) + query_len=text_embeds_tensor.size()[0] + agreement=text_embeds_tensor@video_embeds_tensor.t() + agreement_size=agreement.size() + r1_stat, r5_stat, r10_stat = 0, 0, 0 + print('Start calculating recall score ...') + for idx in tqdm.tqdm(range(0,agreement_size[0])): + tmp=agreement[idx].detach() + reordered,ridx=torch.sort(tmp,descending=True) + if idx in ridx[:1]: + r1_stat+=1 + if idx in ridx[:5]: + r5_stat+=1 + if idx in ridx[:10]: + r10_stat+=1 + r1, r5, r10 = r1_stat * 1.0 / query_len, r5_stat * 1.0 / query_len, r10_stat * 1.0 / query_len + mean_recall = (r1 + r5 + r10) / 3.0 + result = [mean_recall, r1, r5, r10] + result = [item * 100 for item in result] + print('r1_num:'+str(r1_stat),'r5_num:'+str(r5_stat),'r10_num:'+str(r10_stat),'query_num:'+str(query_len)) + print('r1(%):'+str(result[1]),'r5(%):'+str(result[2]),'r10(%):'+str(result[3]),'mean_recall(%):'+str(result[0])) + logger.info("Inference time = {:.2f}s, [{:.4f} ms / sample] ".format( + total_spent_time, total_spent_time * 1000 / query_len)) + eval_outputs = list() + eval_outputs.append(("mean_recall", mean_recall)) + return eval_outputs \ No newline at end of file diff --git a/easynlp/appzoo/text2video_retrieval/model.py b/easynlp/appzoo/text2video_retrieval/model.py new file mode 100644 index 00000000..00a5dc58 --- /dev/null +++ b/easynlp/appzoo/text2video_retrieval/model.py @@ -0,0 +1,121 @@ +# coding=utf-8 +# Copyright (c) 2020 Alibaba PAI team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+from ast import literal_eval +import numpy as np +import torch +import torch.nn as nn +from ...modelzoo.models.roberta.modeling_roberta import RobertaModel +from ...modelzoo.models.clip.modeling_clip import CLIPVisionModel,CLIPTextModel +from ...modelzoo.models.clip.configuration_clip import CLIPConfig,CLIPTextConfig,CLIPVisionConfig +from ...modelzoo.models.clip.modeling_openclip import OPEN_CLIP +from ...modelzoo.models.clip.modeling_chineseclip import CHINESE_CLIP +from ...utils import losses, get_pretrain_model_path, get_args +from ..application import Application +import math +from torch import Tensor +from typing import List, Optional +import json + +class Config_Wrapper: + def __init__(self,json_data): + self.json_data=json_data + + def to_json_string(self): + json_str=json.dumps(self.json_data,ensure_ascii=False) + return json_str + +class Text2VideoRetrieval(Application): + + @classmethod + def from_pretrained(self, pretrained_model_name_or_path, user_defined_parameters={},**kwargs): + instance=Text2VideoRetrieval(pretrained_model_name_or_path,user_defined_parameters) + return instance + + def __init__(self, pretrained_model_name_or_path=None,user_defined_parameters=None, **kwargs): + super().__init__() + if pretrained_model_name_or_path is not None: + pretrained_model_name_or_path = get_pretrain_model_path(pretrained_model_name_or_path) + # 先处理配置,再决定后续如何加载权重 + with open(pretrained_model_name_or_path+'/config.json','r') as config_handle: + self.raw_config=json.load(config_handle) + + if ('model_type' in self.raw_config) and (self.raw_config['model_type']=='open_clip'): + print(self.raw_config) + self.model_type='open_clip' + self.config=Config_Wrapper(self.raw_config)# used by trainer + self.open_clip = OPEN_CLIP(**self.config.json_data) + checkpoint = torch.load(pretrained_model_name_or_path+'/pytorch_model.bin', map_location=torch.device('cpu')) + all_model_state_dict = {k.replace('open_clip.',''): v for k, v in checkpoint.items()} + self.open_clip.load_state_dict(all_model_state_dict) + + def forward(self, inputs,feat=None): + if self.model_type=='open_clip': + _device=self.open_clip.text_projection.device + logit_scale = self.open_clip.logit_scale.exp() + + if 'pixel_values' in inputs: + inputs['pixel_values']=inputs['pixel_values'].to(_device) + inputs['video_masks']=inputs['video_masks'].to(_device) + B,T,C,H,W = inputs['pixel_values'].shape + inputs['pixel_values'] = inputs['pixel_values'].view(B*T,C,H,W) + else: + inputs['pixel_values']=None + if 'input_ids' in inputs: + inputs['input_ids']=inputs['input_ids'].to(_device) + else: + inputs['input_ids']=None + if self.model_type=='open_clip': + video_embeds=None + text_embeds=None + if inputs['pixel_values'] is not None: + image_features = self.open_clip.encode_image(inputs['pixel_values']) + image_features = image_features.view(B,T,-1) + image_features = image_features / image_features.norm(dim=-1, keepdim=True) + video_features = self._mean_pooling_for_similarity_visual(image_features, inputs['video_masks']) + video_embeds = video_features / video_features.norm(dim=-1, keepdim=True) + if inputs['input_ids'] is not None: + text_features = self.open_clip.encode_text(inputs['input_ids']) + text_embeds = text_features / text_features.norm(dim=-1, keepdim=True) + + if feat is True: + return {'video_embeds':video_embeds,'text_embeds':text_embeds} + # cosine similarity as logits + logits_per_text = torch.matmul(text_embeds, video_embeds.t()) * logit_scale + logits_per_video = logits_per_text.T + return 
{'logits_per_text':logits_per_text,'logits_per_video':logits_per_video,'video_embeds':video_embeds,'text_embeds':text_embeds} + + def _mean_pooling_for_similarity_visual(self, visual_output, video_mask,): + video_mask_un = video_mask.to(dtype=torch.float).unsqueeze(-1) + visual_output = visual_output * video_mask_un + video_mask_un_sum = torch.sum(video_mask_un, dim=1, dtype=torch.float) + video_mask_un_sum[video_mask_un_sum == 0.] = 1. + video_out = torch.sum(visual_output, dim=1) / video_mask_un_sum + return video_out + + # contrastive loss function, adapted from + # https://sachinruk.github.io/blog/pytorch/pytorch%20lightning/loss%20function/gpu/2021/03/07/CLIP.html + def contrastive_loss(self,logits: torch.Tensor) -> torch.Tensor: + return nn.functional.cross_entropy(logits, torch.arange(len(logits), device=logits.device)) + + def clip_loss(self,similarity: torch.Tensor) -> torch.Tensor: + caption_loss = self.contrastive_loss(similarity) + image_loss = self.contrastive_loss(similarity.T) + return (caption_loss + image_loss) / 2.0 + + def compute_loss(self, forward_outputs, label_ids, **kwargs): + loss = self.clip_loss(forward_outputs['logits_per_text']) + return {'loss': loss} + + diff --git a/easynlp/appzoo/text2video_retrieval/predictor.py b/easynlp/appzoo/text2video_retrieval/predictor.py new file mode 100644 index 00000000..bae8b8fb --- /dev/null +++ b/easynlp/appzoo/text2video_retrieval/predictor.py @@ -0,0 +1,143 @@ +# coding=utf-8 +# Copyright (c) 2020 Alibaba PAI team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import uuid +import os +import torch +import json +from ...utils import io +from ...core.predictor import Predictor, get_model_predictor +from ...modelzoo.models.clip.openclip_tokenizer import SimpleTokenizer +from ...modelzoo import BertTokenizer +from easynlp.utils import get_pretrain_model_path +from PIL import Image +import base64 +from io import BytesIO +import numpy as np +from .model import Text2VideoRetrieval +from .data import _center_crop, _resize, _to_numpy_array, _normalize,openclip_tokenize + +class Text2VideoRetrievalPredictor(Predictor): + def __init__(self, model_dir, model_cls=None, *args, **kwargs): + super().__init__(*args, **kwargs) + model_dir = get_pretrain_model_path(model_dir) + if "oss://" in model_dir: + local_dir = model_dir.split("/")[-1] + local_dir = os.path.join("~/.cache", local_dir) + os.makedirs(local_dir, exist_ok=True) + io.copytree(model_dir, local_dir) + model_dir = local_dir + # 先处理配置,再决定后续如何加载权重 + with open(model_dir+'/config.json','r') as config_handle: + self.raw_config=json.load(config_handle) + if ('model_type' in self.raw_config) and (self.raw_config['model_type']=='open_clip'): + self.model_type='open_clip' + + if self.model_type=='open_clip': + self.openclip_tokenizer = SimpleTokenizer(bpe_path=model_dir+'/vocab.txt') + + self.multi_modal=Text2VideoRetrieval.from_pretrained(model_dir, *args, **kwargs).cuda() + self.first_sequence = kwargs.pop("first_sequence", "first_sequence") + self.second_sequence = kwargs.pop("second_sequence", "second_sequence") + self.sequence_length = kwargs.pop("sequence_length", 128) + self.do_resize=True + self.size=224 + self.resample=Image.BICUBIC + self.do_center_crop=True + self.crop_size=224 + self.do_normalize=True + self.batch_cnt=0 + self.max_frames=12 + + def preprocess(self, in_data): + if not in_data: + raise RuntimeError("Input data should not be None.") + if not isinstance(in_data, list): + in_data = [in_data] + for_next=[] + max_seq_length = -1 + for record in in_data: + if not "sequence_length" in record: + break + max_seq_length = max(max_seq_length, record["sequence_length"]) + max_seq_length = self.sequence_length if (max_seq_length == -1) else max_seq_length + for record in in_data: + first_sequence_content = record.get(self.first_sequence, None) + second_sequence_content = record.get(self.second_sequence, None) + if self.first_sequence == 'text': + if self.model_type=='open_clip': + bpe_result = openclip_tokenize(texts=[first_sequence_content],context_length=77,_tokenizer=self.openclip_tokenizer) + record["input_ids"]=bpe_result + + elif self.first_sequence == 'image': + images = [] + for frame in os.listdir(first_sequence_content): + images.append(Image.open(os.path.join(first_sequence_content, frame))) + video_len = len(images) + if video_len < self.max_frames: + for _ in range(self.max_frames - len(images)): + images.append(Image.new('RGB', (self.size, self.size), (0, 0, 0))) + # transformations (resizing + center cropping + normalization) + if self.do_resize and self.size is not None and self.resample is not None: + images = [_resize(image=image, size=self.size, resample=self.resample) for image in images] + if self.do_center_crop and self.crop_size is not None: + images = [_center_crop(image, self.crop_size) for image in images] + if self.do_normalize: + images = [_normalize(image=image) for image in images] + images=torch.tensor(images).unsqueeze(0) + record['pixel_values']=images + video_mask = np.zeros((1, self.max_frames), dtype=np.long) + video_mask[0][:video_len] = 1 + 
record['video_masks']=torch.tensor(video_mask) + return in_data + + def predict(self, in_data): + if 'pixel_values' in in_data[0]: + output={'pixel_values':[],'video_masks':[]} + for one_data in in_data: + output['pixel_values'].append(one_data['pixel_values']) + output['video_masks'].append(one_data['video_masks']) + output['pixel_values']=torch.cat(output['pixel_values'],dim=0) + output['video_masks']=torch.cat(output['video_masks'],dim=0) + if 'input_ids' in in_data[0]: + output={'input_ids':[],'token_type_ids':[],'attention_mask':[]} + for one_data in in_data: + output['input_ids'].append(one_data['input_ids']) + if 'token_type_ids' in one_data: + output['token_type_ids'].append(one_data['token_type_ids']) + if 'attention_mask' in one_data: + output['attention_mask'].append(one_data['attention_mask']) + output['input_ids']=torch.cat(output['input_ids'],dim=0) + if len(output['token_type_ids'])>0: + output['token_type_ids']=torch.cat(output['token_type_ids'],dim=0) + if len(output['attention_mask'])>0: + output['attention_mask']=torch.cat(output['attention_mask'],dim=0) + forward_result=self.multi_modal(output,feat=True) + return forward_result + + def postprocess(self, result): + if result['video_embeds'] is not None: + video_embeds_arr=result['video_embeds'].detach().cpu().numpy() + _tmp_video=[] + for one_emb in video_embeds_arr: + _tmp_video.append({'video_feat':'\t'.join([str(x) for x in one_emb])}) + return _tmp_video + + if result['text_embeds'] is not None: + text_embeds_arr=result['text_embeds'].detach().cpu().numpy() + _tmp_text=[] + for one_emb in text_embeds_arr: + _tmp_text.append({'text_feat':'\t'.join([str(x) for x in one_emb])}) + return _tmp_text diff --git a/easynlp/utils/arguments.py b/easynlp/utils/arguments.py index ed530fed..a60b56bb 100644 --- a/easynlp/utils/arguments.py +++ b/easynlp/utils/arguments.py @@ -216,7 +216,7 @@ def _add_easynlp_args(parser: argparse.ArgumentParser): 'text2image_generation', 'image2text_generation', 'image2text_generation_vqgan', 'video2text_generation', - 'clip', 'wukong', + 'clip', 'clip4clip', 'wukong', 'machine_reading_comprehension' ], help='name of the application') diff --git a/examples/text2video_retrieval/README.md b/examples/text2video_retrieval/README.md new file mode 100644 index 00000000..becb9002 --- /dev/null +++ b/examples/text2video_retrieval/README.md @@ -0,0 +1,25 @@ +# Retrieval video with CLIP + +### 准备工作 +* 安装好EasyNLP +* 进入目录 ./examples/text2video_retrieval + +### 数据格式 +对于train与evaluate 数据格式为制表符分隔的两列 文本\t视频提取帧存放路径 +对于predict 数据格式为单列 文本 或 视频提取帧存放路径 + +### Train +执行命令 sh run_clip_local_appzoo.sh 0 train_en +其中0是所用显卡编号,含义同CUDA_VISIBLE_DEVICES=0 + +### evaluate +执行命令 sh run_clip_local_appzoo.sh 0 evaluate_en + +### predict +predict用于生成测试数据的CLIP特征 +sh run_clip_local_appzoo.sh 0 predict_en_text +默认将生成测试文本的特征,修改run_clip_local_appzoo.sh文件中的代码可生成测试video的特征 + +### 自定义开发 +* batch size 和 learning rate等参数在run_clip_local_appzoo.sh文件中修改 +* 修改模型底层逻辑如dataset,loss,evaluator等, 请参考easynlp/appzoo/text2video_retrieval 和 easynlp/modelzoo/models/clip 这两个文件夹, 修改完成后记得python setup.py install对修改版本进行安装以生效. 
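The README above documents the data layout (train and evaluate rows are two tab-separated columns, a caption and a directory of extracted video frames; predict rows are a single column holding either a caption or a frame directory) plus the train / evaluate / predict entry points of `run_clip_local_appzoo.sh`. In predict mode, `Text2VideoRetrievalPredictor.postprocess` above writes one tab-joined feature vector per sample, so the exported text and video TSVs can be matched offline. The following sketch is not part of the patch; it assumes the two prediction files contain only the `text_feat` / `video_feat` column with no extra appended columns.

```python
# Minimal sketch, assuming predict_en_text and predict_en_video have both been run
# and each output line holds one tab-joined, L2-normalized feature vector
# (the format produced by Text2VideoRetrievalPredictor.postprocess above).
import numpy as np

def load_feats(path):
    with open(path) as f:
        return np.array([[float(x) for x in line.rstrip('\n').split('\t')]
                         for line in f if line.strip()])

text_feats = load_feats('./msrvtt_data/MSRVTT_test_1to1_1000_text_feat.tsv')    # [N_text, dim]
video_feats = load_feats('./msrvtt_data/MSRVTT_test_1to1_1000_video_feat.tsv')  # [N_video, dim]

# Both embeddings are already L2-normalized by the model, so a dot product is the cosine similarity.
scores = text_feats @ video_feats.T             # [N_text, N_video]
top10 = np.argsort(-scores, axis=1)[:, :10]     # indices of the ten best-matching videos per query
print(top10[:3])
```

This is the same ranking that `Text2VideoRetrievalEvaluator` performs on the text-video agreement matrix when it computes R@1 / R@5 / R@10.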
diff --git a/examples/text2video_retrieval/main.py b/examples/text2video_retrieval/main.py new file mode 100644 index 00000000..62c0bff0 --- /dev/null +++ b/examples/text2video_retrieval/main.py @@ -0,0 +1,84 @@ +import imp +import sys +import os + +sys.path.append('./') + +print('*'*50) +print('running local main...\n') +from easynlp.core import Trainer +# from easynlp.appzoo import get_application_evaluator + +from easynlp.appzoo.text2video_retrieval.data import Text2VideoRetrievalDataset +from easynlp.appzoo.text2video_retrieval.model import Text2VideoRetrieval +from easynlp.appzoo.text2video_retrieval.evaluator import Text2VideoRetrievalEvaluator +from easynlp.appzoo.text2video_retrieval.predictor import Text2VideoRetrievalPredictor +from easynlp.utils import initialize_easynlp, get_args +from easynlp.utils.global_vars import parse_user_defined_parameters +from easynlp.core import PredictorManager +from easynlp.utils import get_pretrain_model_path + + +if __name__ == "__main__": + print('log: starts to init...\n') + # os.environ["NCCL_DEBUG_SUBSYS"] = "ALL" + # os.environ["NCCL_DEBUG"] = "INFO" + + initialize_easynlp() + args = get_args() + + print('log: starts to process user params...\n') + user_defined_parameters = parse_user_defined_parameters(args.user_defined_parameters) + if args.mode == "train" or not args.checkpoint_dir: + args.pretrained_model_name_or_path = user_defined_parameters.get('pretrain_model_name_or_path', None) + else: + args.pretrained_model_name_or_path = args.checkpoint_dir + pretrained_model_name_or_path = get_pretrain_model_path(args.pretrained_model_name_or_path) + + + if args.mode == "predict": + predictor = Text2VideoRetrievalPredictor(model_dir=args.checkpoint_dir, model_cls=Text2VideoRetrieval, + first_sequence=args.first_sequence, user_defined_parameters=user_defined_parameters) + + predictor_manager = PredictorManager( + predictor=predictor, + input_file=args.tables.split(",")[0], + input_schema=args.input_schema, + output_file=args.outputs, + output_schema=args.output_schema, + append_cols=args.append_cols, + batch_size=args.micro_batch_size + ) + predictor_manager.run() + exit() + + + print('log: starts to process dataset...\n') + + train_dataset = Text2VideoRetrievalDataset( + pretrained_model_name_or_path=pretrained_model_name_or_path, + data_file=args.tables.split(",")[0], + max_seq_length=args.sequence_length, + input_schema=args.input_schema, + first_sequence=args.first_sequence, + second_sequence=args.second_sequence, + user_defined_parameters=user_defined_parameters, + is_training=True) + + valid_dataset = Text2VideoRetrievalDataset( + pretrained_model_name_or_path=pretrained_model_name_or_path, + data_file=args.tables.split(",")[-1], + max_seq_length=args.sequence_length, + input_schema=args.input_schema, + first_sequence=args.first_sequence, + second_sequence=args.second_sequence, + user_defined_parameters=user_defined_parameters, + is_training=False) + + + model = Text2VideoRetrieval(pretrained_model_name_or_path=pretrained_model_name_or_path, user_defined_parameters=user_defined_parameters) + evaluator = Text2VideoRetrievalEvaluator(valid_dataset=valid_dataset, user_defined_parameters=user_defined_parameters) + + trainer = Trainer(model=model, train_dataset=train_dataset, user_defined_parameters=user_defined_parameters, + evaluator=evaluator) + trainer.train() diff --git a/examples/text2video_retrieval/preprocess_video_frame.py b/examples/text2video_retrieval/preprocess_video_frame.py new file mode 100644 index 00000000..4de78a68 --- 
/dev/null +++ b/examples/text2video_retrieval/preprocess_video_frame.py @@ -0,0 +1,105 @@ +import os +from xmlrpc.client import boolean +from cv2 import idct +from tqdm import tqdm +import numpy as np +import pandas as pd +import random +import json +from PIL import Image + +# ! pip install decord +from decord import VideoReader + +import argparse + +import base64 +from io import BytesIO + +# save image to file +def image_to_file(img_numpy, save_path, size=224, resample=Image.BICUBIC): + # 将info转成为uint8格式,否则报错"cannot convert this type" + img = Image.fromarray(np.uint8(img_numpy)).convert('RGB') + width, height = img.size + short, long = (width, height) if width <= height else (height, width) + if short == size: + img.save(save_path) + return save_path + new_short, new_long = size, int(size * long / short) + new_w, new_h = (new_short, new_long) if width <= height else (new_long, new_short) + img.resize((new_w, new_h), resample) + img.save(save_path) + return save_path + +# extracte several candidate frames from videos (one frame per second) +def extract_video_frames(video_path, frame_num = -1): + container = VideoReader(video_path, num_threads=16) + total_frames = len(container) + + if total_frames <= frame_num: + idx = np.arange(total_frames - 1) + else: + idx = np.linspace(0, total_frames - 1, num=frame_num, dtype=int) + + assert len(idx) == frame_num, video_path + + # shape=time*H*W*C 的numpy数组 通道顺序为RGB + image_list = container.get_batch(idx).asnumpy() + + return image_list + +# save video frames to image files +def extraction_process_save_path(csv_dir, json_dir, video_dir, frame_num, frame_dir, output): + if not os.path.exists(frame_dir): + os.mkdir(frame_dir) + + csv = pd.read_csv(csv_dir) + data = json.load(open(json_dir, 'r')) + output_file = [] + video_ids = list(csv['video_id'].values) + processed = [] + for itm in tqdm(data['sentences']): + if itm['video_id'] in video_ids: + video_id = itm['video_id'] + output_file.append(itm['caption']+'\t'+os.path.join(frame_dir, video_id)) + + if video_id in processed: + continue + else: + processed.append(video_id) + + if os.path.exists(os.path.join(frame_dir, video_id)): + continue + + video_path = os.path.join(video_dir, "{}.mp4".format(video_id)) + try: + image_list = extract_video_frames(video_path, frame_num).tolist() # + except: + print(video_id) + + for frame_idx in range(len(image_list)): + image = image_list[frame_idx] + if not os.path.exists(os.path.join(frame_dir, video_id)): + os.mkdir(os.path.join(frame_dir, video_id)) + save_path = '{}.jpeg'.format(frame_idx) + save_path = os.path.join(frame_dir, video_id, save_path) + imagepath = image_to_file(image, save_path) + + with open(output, 'w') as of: + of.write('\n'.join(output_file)) + print("Finished processing {} videos in total.".format(len(video_ids))) + +if __name__ == '__main__': + parser = argparse.ArgumentParser(description='PyTorch table IO') + parser.add_argument('--csv_dir', default='', type=str, help='the dir of csv file') + parser.add_argument('--json_dir', default='', type=str, help='the dir of json file') + parser.add_argument('--video_dir', default='', type=str, help='the dir of input videos') + parser.add_argument('--frame_num', default=-1, type=str, help='the number of frames to be extracted') + parser.add_argument('--frame_dir', default='', type=str, help='the root dir of saved frames') + parser.add_argument('--output', default='', type=str, help='the root dir of output txt') + + args = parser.parse_args() + + frame_num = int(args.frame_num) + + 
extraction_process_save_path(args.csv_dir, args.json_dir, args.video_dir, frame_num, args.frame_dir, args.output) diff --git a/examples/text2video_retrieval/preprocess_video_frame.sh b/examples/text2video_retrieval/preprocess_video_frame.sh new file mode 100644 index 00000000..c04ceb39 --- /dev/null +++ b/examples/text2video_retrieval/preprocess_video_frame.sh @@ -0,0 +1,25 @@ +# Download data +if [ ! -f ./msrvtt_data/MSRVTT_data.json ]; then + wget https://github.com/ArrowLuo/CLIP4Clip/releases/download/v0.0/msrvtt_data.zip + wget https://www.robots.ox.ac.uk/~maxbain/frozen-in-time/data/MSRVTT.zip + unzip msrvtt_data.zip + unzip MSRVTT.zip -d msrvtt_data/ + rm msrvtt_data.zip + rm MSRVTT.zip +fi + +python preprocess_video_frame.py \ + --csv_dir=./msrvtt_data/MSRVTT_train.9k.csv \ + --json_dir=./msrvtt_data/MSRVTT_data.json \ + --video_dir=./msrvtt_data/MSRVTT/videos/all \ + --frame_num=12 \ + --frame_dir=./msrvtt_data/MSRVTT_extracted_frames \ + --output=./msrvtt_data/MSRVTT_train.tsv + +python preprocess_video_frame.py \ + --csv_dir=./msrvtt_data/MSRVTT_JSFUSION_test.csv \ + --json_dir=./msrvtt_data/MSRVTT_data.json \ + --video_dir=./msrvtt_data/MSRVTT/videos/all \ + --frame_num=12 \ + --frame_dir=./msrvtt_data/MSRVTT_extracted_frames \ + --output=./msrvtt_data/MSRVTT_test_all.tsv diff --git a/examples/text2video_retrieval/run_clip_local_appzoo.sh b/examples/text2video_retrieval/run_clip_local_appzoo.sh new file mode 100644 index 00000000..8988de1c --- /dev/null +++ b/examples/text2video_retrieval/run_clip_local_appzoo.sh @@ -0,0 +1,75 @@ +export CUDA_VISIBLE_DEVICES=$1 + +mode=$2 + +if [ "$mode" = "train_en" ]; then + easynlp \ + --mode train \ + --worker_gpu=1 \ + --tables=./msrvtt_data/MSRVTT_train.tsv,./msrvtt_data/MSRVTT_test_1to1_1000.tsv \ + --input_schema=text:str:1,image:str:1 \ + --first_sequence=text \ + --second_sequence=image \ + --checkpoint_dir=./clip4clip_en_model/ \ + --learning_rate=1e-7 \ + --epoch_num=5 \ + --random_seed=42 \ + --logging_steps=10 \ + --save_checkpoint_steps 1000 \ + --sequence_length=32 \ + --micro_batch_size=32 \ + --app_name=clip4clip \ + --save_all_checkpoints \ + --user_defined_parameters='pretrain_model_name_or_path=alibaba-pai/pai-clip-commercial-base-en' + +elif [ "$mode" = "evaluate_en" ]; then + easynlp \ + --mode evaluate \ + --worker_gpu=1 \ + --tables=./msrvtt_data/MSRVTT_test_1to1_1000.tsv \ + --input_schema=text:str:1,image:str:1 \ + --first_sequence=text \ + --second_sequence=image \ + --checkpoint_dir=./clip4clip_en_model/ \ + --random_seed=42 \ + --logging_steps=100 \ + --save_checkpoint_steps=500 \ + --sequence_length=32 \ + --micro_batch_size=32 \ + --app_name=clip4clip + +elif [ "$mode" = "predict_en_text" ]; then + easynlp \ + --mode predict \ + --worker_gpu=1 \ + --tables=./msrvtt_data/MSRVTT_test_1to1_1000_part_text.tsv \ + --input_schema=text:str:1 \ + --output_schema=text_feat \ + --outputs ./msrvtt_data/MSRVTT_test_1to1_1000_text_feat.tsv \ + --first_sequence=text \ + --checkpoint_dir=./clip4clip_en_model/ \ + --random_seed=42 \ + --logging_steps=100 \ + --save_checkpoint_steps=500 \ + --sequence_length=32 \ + --micro_batch_size=32 \ + --app_name=clip4clip + +elif [ "$mode" = "predict_en_video" ]; then + easynlp \ + --mode predict \ + --worker_gpu=1 \ + --tables=./msrvtt_data/MSRVTT_test_1to1_1000_part_video.tsv \ + --input_schema=image:str:1 \ + --output_schema=video_feat \ + --outputs ./msrvtt_data/MSRVTT_test_1to1_1000_video_feat.tsv \ + --first_sequence=image \ + --checkpoint_dir=./clip4clip_en_model/ \ + --random_seed=42 
\ + --logging_steps=100 \ + --save_checkpoint_steps=500 \ + --sequence_length=32 \ + --micro_batch_size=32 \ + --app_name=clip4clip + +fi diff --git a/examples/text2video_retrieval/run_clip_local_user_defined.sh b/examples/text2video_retrieval/run_clip_local_user_defined.sh new file mode 100644 index 00000000..0237c240 --- /dev/null +++ b/examples/text2video_retrieval/run_clip_local_user_defined.sh @@ -0,0 +1,82 @@ +export CUDA_VISIBLE_DEVICES=$1 + +MASTER_ADDR=localhost +MASTER_PORT=6027 +GPUS_PER_NODE=1 +NNODES=1 +NODE_RANK=0 + +DISTRIBUTED_ARGS="--nproc_per_node $GPUS_PER_NODE --nnodes $NNODES --node_rank $NODE_RANK --master_addr $MASTER_ADDR --master_port $MASTER_PORT" +mode=$2 + +if [ "$mode" = "train_en" ]; then + python -m torch.distributed.launch $DISTRIBUTED_ARGS ./main.py \ + --mode train \ + --worker_gpu=1 \ + --tables=./msrvtt_data/MSRVTT_train.tsv,./msrvtt_data/MSRVTT_test_1to1_1000.tsv \ + --input_schema=text:str:1,image:str:1 \ + --first_sequence=text \ + --second_sequence=image \ + --checkpoint_dir=./clip4clip_en_model/ \ + --learning_rate=1e-7 \ + --epoch_num=5 \ + --random_seed=42 \ + --logging_steps=10 \ + --save_checkpoint_steps 1000 \ + --sequence_length=32 \ + --micro_batch_size=32 \ + --app_name=clip4clip \ + --save_all_checkpoints \ + --user_defined_parameters='pretrain_model_name_or_path=alibaba-pai/pai-clip-commercial-base-en' + +elif [ "$mode" = "evaluate_en" ]; then + python -m torch.distributed.launch $DISTRIBUTED_ARGS ./main.py \ + --mode evaluate \ + --worker_gpu=1 \ + --tables=./msrvtt_data/MSRVTT_test_1to1_1000.tsv \ + --input_schema=text:str:1,image:str:1 \ + --first_sequence=text \ + --second_sequence=image \ + --checkpoint_dir=./clip4clip_en_model/ \ + --random_seed=42 \ + --logging_steps=100 \ + --save_checkpoint_steps=500 \ + --sequence_length=32 \ + --micro_batch_size=32 \ + --app_name=clip4clip + +elif [ "$mode" = "predict_en_text" ]; then + python -m torch.distributed.launch $DISTRIBUTED_ARGS ./main.py \ + --mode predict \ + --worker_gpu=1 \ + --tables=./msrvtt_data/MSRVTT_test_1to1_1000_part_text.tsv \ + --input_schema=text:str:1 \ + --output_schema=text_feat \ + --outputs ./msrvtt_data/MSRVTT_test_1to1_1000_text_feat.tsv \ + --first_sequence=text \ + --checkpoint_dir=./clip4clip_en_model/ \ + --random_seed=42 \ + --logging_steps=100 \ + --save_checkpoint_steps=500 \ + --sequence_length=32 \ + --micro_batch_size=32 \ + --app_name=clip4clip + +elif [ "$mode" = "predict_en_video" ]; then + python -m torch.distributed.launch $DISTRIBUTED_ARGS ./main.py \ + --mode predict \ + --worker_gpu=1 \ + --tables=./msrvtt_data/MSRVTT_test_1to1_1000_part_video.tsv \ + --input_schema=image:str:1 \ + --output_schema=video_feat \ + --outputs ./msrvtt_data/MSRVTT_test_1to1_1000_video_feat.tsv \ + --first_sequence=image \ + --checkpoint_dir=./clip4clip_en_model/ \ + --random_seed=42 \ + --logging_steps=100 \ + --save_checkpoint_steps=500 \ + --sequence_length=32 \ + --micro_batch_size=32 \ + --app_name=clip4clip + +fi From e47d0a9c36f4a620b04ad3bd3b8e40b158745dde Mon Sep 17 00:00:00 2001 From: ztl-35 Date: Fri, 2 Dec 2022 12:25:46 +0800 Subject: [PATCH 003/101] your git commit message --- easynlp/appzoo/language_modeling/data.py | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/easynlp/appzoo/language_modeling/data.py b/easynlp/appzoo/language_modeling/data.py index 95ab6393..c819dd0b 100644 --- a/easynlp/appzoo/language_modeling/data.py +++ b/easynlp/appzoo/language_modeling/data.py @@ -379,10 +379,13 @@ def batch_fn(self, batch): 
token_ids[_index] = batch[_index][0][:t_length - gap - 1] + [batch[_index][0][-1]] mask_labels[_index] = batch[_index][1][:t_length - gap - 1] + [batch[_index][1][-1]] lengths = [len(t) for t in token_ids] + # lengths = self.max_seq_length # Max for paddings max_seq_len_ = max(lengths) + # max_seq_len_ = self.max_seq_length assert max_seq_len_ <= self.max_seq_length if self.dkplm_model_prefix: + max_seq_len_ = self.max_seq_length ent_pos = [t[2] for t in batch] relation_id = [t[3] for t in batch] replaced_entity_id = [t[4] for t in batch] @@ -409,8 +412,7 @@ def batch_fn(self, batch): insert_know_emb = insert_know_emb insert_relation_emb = insert_relation_emb insert_know_labels = torch.LongTensor(insert_know_labels) - attn_mask = torch.arange(token_ids.size(1), dtype=torch.long, - device=lengths.device) < lengths[:, None] + attn_mask = torch.arange(token_ids.size(1), dtype=torch.long, device=lengths.device) < lengths[:, None] attn_mask = attn_mask.long() input_ids, label_ids = self.mask_tokens(token_ids, mask_labels) @@ -515,8 +517,7 @@ def mask_tokens(self, inputs, mask_labels): inputs[indices_replaced] = self.mask_idx # 10% of the time, we replace masked input tokens with random word - indices_random = torch.bernoulli(torch.full( - labels.shape, 0.5)).bool() & masked_indices & ~indices_replaced + indices_random = torch.bernoulli(torch.full(labels.shape, 0.5)).bool() & masked_indices & ~indices_replaced random_words = torch.randint(self.vocab_size, labels.shape, dtype=torch.long) inputs[indices_random] = random_words[indices_random] From b903410b082b84d0da6d099d71ea26d526b2a8b4 Mon Sep 17 00:00:00 2001 From: ztl-35 Date: Fri, 2 Dec 2022 12:27:04 +0800 Subject: [PATCH 004/101] fix dkplm unit test --- easynlp/appzoo/language_modeling/data.py | 1 - 1 file changed, 1 deletion(-) diff --git a/easynlp/appzoo/language_modeling/data.py b/easynlp/appzoo/language_modeling/data.py index c819dd0b..956eb4c8 100644 --- a/easynlp/appzoo/language_modeling/data.py +++ b/easynlp/appzoo/language_modeling/data.py @@ -379,7 +379,6 @@ def batch_fn(self, batch): token_ids[_index] = batch[_index][0][:t_length - gap - 1] + [batch[_index][0][-1]] mask_labels[_index] = batch[_index][1][:t_length - gap - 1] + [batch[_index][1][-1]] lengths = [len(t) for t in token_ids] - # lengths = self.max_seq_length # Max for paddings max_seq_len_ = max(lengths) # max_seq_len_ = self.max_seq_length From 737b90d7a70f47086597610485f0f282664e0166 Mon Sep 17 00:00:00 2001 From: ztl-35 Date: Fri, 2 Dec 2022 12:29:04 +0800 Subject: [PATCH 005/101] fix dkplm unit test --- easynlp/appzoo/language_modeling/data.py | 1 + 1 file changed, 1 insertion(+) diff --git a/easynlp/appzoo/language_modeling/data.py b/easynlp/appzoo/language_modeling/data.py index 956eb4c8..f3f60f41 100644 --- a/easynlp/appzoo/language_modeling/data.py +++ b/easynlp/appzoo/language_modeling/data.py @@ -381,6 +381,7 @@ def batch_fn(self, batch): lengths = [len(t) for t in token_ids] # Max for paddings max_seq_len_ = max(lengths) + # max_seq_len_ = self.max_seq_length assert max_seq_len_ <= self.max_seq_length if self.dkplm_model_prefix: From 0d0278376cd35622e65af1d9936b361307c810d3 Mon Sep 17 00:00:00 2001 From: ztl-35 Date: Fri, 2 Dec 2022 12:34:50 +0800 Subject: [PATCH 006/101] fix dkplm unit test --- easynlp/appzoo/language_modeling/data.py | 1 - 1 file changed, 1 deletion(-) diff --git a/easynlp/appzoo/language_modeling/data.py b/easynlp/appzoo/language_modeling/data.py index f3f60f41..956eb4c8 100644 --- a/easynlp/appzoo/language_modeling/data.py +++ 
b/easynlp/appzoo/language_modeling/data.py @@ -381,7 +381,6 @@ def batch_fn(self, batch): lengths = [len(t) for t in token_ids] # Max for paddings max_seq_len_ = max(lengths) - # max_seq_len_ = self.max_seq_length assert max_seq_len_ <= self.max_seq_length if self.dkplm_model_prefix: From 9c40f6ce638c655f412e255f8a4f25cde28cddf2 Mon Sep 17 00:00:00 2001 From: chywang Date: Sun, 4 Dec 2022 14:52:15 +0800 Subject: [PATCH 007/101] Create requirements_diffusion.txt --- requirements_diffusion.txt | 1 + 1 file changed, 1 insertion(+) create mode 100644 requirements_diffusion.txt diff --git a/requirements_diffusion.txt b/requirements_diffusion.txt new file mode 100644 index 00000000..c8348eb0 --- /dev/null +++ b/requirements_diffusion.txt @@ -0,0 +1 @@ +pytorch_lightning From 91ad0254613de1813c10dff743e347770d773f14 Mon Sep 17 00:00:00 2001 From: chywang Date: Sun, 4 Dec 2022 15:01:52 +0800 Subject: [PATCH 008/101] Update main.py --- examples/latent_diffusion_fashion/main.py | 1 - 1 file changed, 1 deletion(-) diff --git a/examples/latent_diffusion_fashion/main.py b/examples/latent_diffusion_fashion/main.py index 8658a899..f124e396 100644 --- a/examples/latent_diffusion_fashion/main.py +++ b/examples/latent_diffusion_fashion/main.py @@ -7,7 +7,6 @@ print('*'*50) print('running local main...\n') from easynlp.core import Trainer -# from easynlp.appzoo import get_application_evaluator from easynlp.appzoo.multi_modal.data import MultiModalDataset from easynlp.appzoo.latent_diffusion.model import LatentDiffusion From 848e5ac056656173faaad2665752a8fb09a19e7f Mon Sep 17 00:00:00 2001 From: zjy <756628577@qq.com> Date: Wed, 7 Dec 2022 12:06:12 +0800 Subject: [PATCH 009/101] ie --- easynlp/appzoo/__init__.py | 8 + easynlp/appzoo/api.py | 19 +- easynlp/appzoo/information_extraction/data.py | 114 +++++++++ .../information_extraction/evaluator.py | 157 +++++++++++++ .../appzoo/information_extraction/model.py | 129 ++++++++++ .../information_extraction/predictor.py | 222 ++++++++++++++++++ easynlp/utils/arguments.py | 3 +- examples/information_extraction/main.py | 86 +++++++ ...run_train_eval_predict_appzoo_cli_local.sh | 54 +++++ ...n_train_eval_predict_user_defined_local.sh | 62 +++++ tests/modelzoo_alibaba.json | 84 +++++++ tests/tmp/da_test_0.out | 115 +++++++++ 12 files changed, 1047 insertions(+), 6 deletions(-) create mode 100644 easynlp/appzoo/information_extraction/data.py create mode 100644 easynlp/appzoo/information_extraction/evaluator.py create mode 100644 easynlp/appzoo/information_extraction/model.py create mode 100644 easynlp/appzoo/information_extraction/predictor.py create mode 100644 examples/information_extraction/main.py create mode 100644 examples/information_extraction/run_train_eval_predict_appzoo_cli_local.sh create mode 100644 examples/information_extraction/run_train_eval_predict_user_defined_local.sh create mode 100644 tests/modelzoo_alibaba.json create mode 100644 tests/tmp/da_test_0.out diff --git a/easynlp/appzoo/__init__.py b/easynlp/appzoo/__init__.py index db33e9d0..a823730c 100644 --- a/easynlp/appzoo/__init__.py +++ b/easynlp/appzoo/__init__.py @@ -33,6 +33,7 @@ "video2text_generation.model": ['CLIPGPTFrameTextGeneration'], "sequence_generation.model": ["SequenceGeneration"], "machine_reading_comprehension.model": ["MachineReadingComprehension"], + "information_extraction.model": ["InformationExtractionModel"], "sequence_classification.evaluator": ['SequenceClassificationEvaluator', 'SequenceMultiLabelClassificationEvaluator'], "sequence_labeling.evaluator": 
['SequenceLabelingEvaluator'], @@ -46,6 +47,7 @@ "video2text_generation.evaluator": ["FrameTextGenerationEvaluator"], "sequence_generation.evaluator": ["SequenceGenerationEvaluator"], "machine_reading_comprehension.evaluator": ["MachineReadingComprehensionEvaluator"], + "information_extraction.evaluator": ["InformationExtractionEvaluator"], "sequence_classification.predictor": ['SequenceClassificationPredictor', 'FewshotSequenceClassificationPredictor', 'CptFewshotSequenceClassificationPredictor'], "sequence_labeling.predictor": ['SequenceLabelingPredictor'], @@ -61,6 +63,7 @@ "video2text_generation.predictor": ['CLIPGPTFrameTextGenerationPredictor'], "sequence_generation.predictor": ['SequenceGenerationPredictor'], "machine_reading_comprehension.predictor": ["MachineReadingComprehensionPredictor"], + "information_extraction.predictor": ["InformationExtractionPredictor"], "geep_classification.data": ['GEEPClassificationDataset'], "language_modeling.data": ['LanguageModelingDataset'], @@ -74,6 +77,7 @@ "video2text_generation.data": ['CLIPGPTFrameTextDataset'], "sequence_generation.data": ['SequenceGenerationDataset'], "machine_reading_comprehension.data": ["MachineReadingComprehensionDataset"], + "information_extraction.data": ["InformationExtractionDataset"], "dataset": ['BaseDataset', 'GeneralDataset', 'load_dataset', 'list_datasets'], "api": ['get_application_dataset', 'get_application_model', 'get_application_model_for_evaluation', 'get_application_evaluator', 'get_application_predictor'], } @@ -94,6 +98,7 @@ from .video2text_generation.model import CLIPGPTFrameTextGeneration from .sequence_generation.model import SequenceGeneration from .machine_reading_comprehension.model import MachineReadingComprehension + from .information_extraction.model import InformationExtractionModel from .sequence_classification.evaluator import SequenceClassificationEvaluator, SequenceMultiLabelClassificationEvaluator from .sequence_labeling.evaluator import SequenceLabelingEvaluator @@ -107,6 +112,7 @@ from .video2text_generation.evaluator import FrameTextGenerationEvaluator from .sequence_generation.evaluator import SequenceGenerationEvaluator from .machine_reading_comprehension.evaluator import MachineReadingComprehensionEvaluator + from .information_extraction.evaluator import InformationExtractionEvaluator from .sequence_classification.predictor import SequenceClassificationPredictor, FewshotSequenceClassificationPredictor, CptFewshotSequenceClassificationPredictor from .sequence_labeling.predictor import SequenceLabelingPredictor @@ -122,6 +128,7 @@ from .video2text_generation.predictor import CLIPGPTFrameTextGenerationPredictor from .sequence_generation.predictor import SequenceGenerationPredictor from .machine_reading_comprehension.predictor import MachineReadingComprehensionPredictor + from .information_extraction.predictor import InformationExtractionPredictor from .sequence_classification.data import ClassificationDataset, DistillatoryClassificationDataset, FewshotSequenceClassificationDataset from .sequence_labeling.data import SequenceLabelingDataset, SequenceLabelingAutoDataset @@ -135,6 +142,7 @@ from .video2text_generation.data import CLIPGPTFrameTextDataset from .sequence_generation.data import SequenceGenerationDataset from .machine_reading_comprehension.data import MachineReadingComprehensionDataset + from .information_extraction.data import InformationExtractionDataset from .dataset import BaseDataset, GeneralDataset from .dataset import load_dataset, list_datasets diff --git 
a/easynlp/appzoo/api.py b/easynlp/appzoo/api.py index 1d6be819..bf539b06 100644 --- a/easynlp/appzoo/api.py +++ b/easynlp/appzoo/api.py @@ -32,6 +32,7 @@ from easynlp.appzoo import CLIPGPTFrameTextGeneration from easynlp.appzoo.sequence_generation.model import SequenceGeneration from easynlp.appzoo import MachineReadingComprehension +from easynlp.appzoo import InformationExtractionModel from easynlp.fewshot_learning.fewshot_evaluator import PromptEvaluator as FewshotSequenceClassificationEvaluator from easynlp.fewshot_learning.fewshot_evaluator import CPTEvaluator as CptFewshotSequenceClassificationEvaluator @@ -46,6 +47,7 @@ from easynlp.appzoo import FrameTextGenerationEvaluator from easynlp.appzoo import SequenceGenerationEvaluator from easynlp.appzoo import MachineReadingComprehensionEvaluator +from easynlp.appzoo import InformationExtractionEvaluator from easynlp.appzoo import SequenceClassificationPredictor, FewshotSequenceClassificationPredictor, CptFewshotSequenceClassificationPredictor from easynlp.appzoo import SequenceLabelingPredictor, FeatureVectorizationPredictor @@ -58,6 +60,7 @@ from easynlp.appzoo import CLIPGPTFrameTextGenerationPredictor from easynlp.appzoo import SequenceGenerationPredictor from easynlp.appzoo import MachineReadingComprehensionPredictor +from easynlp.appzoo import InformationExtractionPredictor from easynlp.appzoo import ClassificationDataset, DistillatoryClassificationDataset, FewshotSequenceClassificationDataset from easynlp.appzoo import SequenceLabelingDataset, LanguageModelingDataset @@ -70,6 +73,7 @@ from easynlp.appzoo import CLIPGPTImageTextDataset, VQGANGPTImageTextDataset from easynlp.appzoo import CLIPGPTFrameTextDataset from easynlp.appzoo import MachineReadingComprehensionDataset +from easynlp.appzoo import InformationExtractionDataset from easynlp.core import PredictorManager, Trainer, DistillatoryTrainer from easynlp.utils.logger import logger @@ -112,7 +116,8 @@ }, 'video2text_generation': CLIPGPTFrameTextDataset, 'sequence_generation': SequenceGenerationDataset, - 'machine_reading_comprehension': MachineReadingComprehensionDataset + 'machine_reading_comprehension': MachineReadingComprehensionDataset, + 'information_extraction': InformationExtractionDataset } ModelMapping = { @@ -146,7 +151,8 @@ 'vqgan_image2text_generation': VQGANGPTImageTextGeneration, 'video2text_generation': CLIPGPTFrameTextGeneration, 'sequence_generation': SequenceGeneration, - 'machine_reading_comprehension': MachineReadingComprehension + 'machine_reading_comprehension': MachineReadingComprehension, + 'information_extraction': InformationExtractionModel } Eval_Model_Mapping = { @@ -174,7 +180,8 @@ 'vqgan_image2text_generation': VQGANGPTImageTextGeneration, 'video2text_generation': CLIPGPTFrameTextGeneration, 'sequence_generation': SequenceGeneration, - 'machine_reading_comprehension': MachineReadingComprehension + 'machine_reading_comprehension': MachineReadingComprehension, + 'information_extraction': InformationExtractionModel } Evaluator_Mapping = { @@ -202,7 +209,8 @@ }, 'video2text_generation': FrameTextGenerationEvaluator, 'sequence_generation': SequenceGenerationEvaluator, - 'machine_reading_comprehension': MachineReadingComprehensionEvaluator + 'machine_reading_comprehension': MachineReadingComprehensionEvaluator, + 'information_extraction': InformationExtractionEvaluator } Predictor_Mapping = { @@ -231,7 +239,8 @@ }, 'video2text_generation': [CLIPGPTFrameTextGenerationPredictor, CLIPGPTFrameTextGeneration], 'sequence_generation': 
[SequenceGenerationPredictor, SequenceGeneration], - 'machine_reading_comprehension': [MachineReadingComprehensionPredictor, MachineReadingComprehension] + 'machine_reading_comprehension': [MachineReadingComprehensionPredictor, MachineReadingComprehension], + 'information_extraction': [InformationExtractionPredictor, InformationExtractionModel] } diff --git a/easynlp/appzoo/information_extraction/data.py b/easynlp/appzoo/information_extraction/data.py new file mode 100644 index 00000000..24b51cc3 --- /dev/null +++ b/easynlp/appzoo/information_extraction/data.py @@ -0,0 +1,114 @@ +import json +import torch +from threading import Lock +from ..dataset import BaseDataset +from ...modelzoo.models.bert import BertTokenizerFast + +class InformationExtractionDataset(BaseDataset): + + def __init__(self, + pretrained_model_name_or_path, + data_file, + input_schema, + max_seq_length, + *args, + **kwargs): + + super(InformationExtractionDataset, self).__init__(data_file, + input_schema=input_schema, + output_format="dict", + *args, + **kwargs) + + self.max_seq_length = max_seq_length + self.pretrained_model_name_or_path = pretrained_model_name_or_path + self.tokenizer = BertTokenizerFast.from_pretrained(pretrained_model_name_or_path) + + def convert_single_row_to_example(self, row): + + id = row[self.column_names[0]] + instruction = row[self.column_names[1]][2:-2] + start = row[self.column_names[2]][1: -1] + if start == "": + start = [] + else: + start = start.split(",") + start = [int(i) for i in start] + end = row[self.column_names[3]][1: -1] + if end == "": + end = [] + else: + end = end.split(",") + end = [int(i) for i in end] + target = row[self.column_names[4]] + + example = self.tokenizer( + instruction, + truncation=True, + max_length=self.max_seq_length, + padding="max_length", + return_offsets_mapping=True + ) + + example["id"] = id + example["instruction"] = instruction + example["start"] = start + example["end"] = end + example["target"] = target + + return example + + def batch_fn(self, features): + + batch = [] + for f in features: + batch.append({'input_ids': f['input_ids'], + 'token_type_ids': f['token_type_ids'], + 'attention_mask': f['attention_mask']}) + + batch = self.tokenizer.pad( + batch, + padding='max_length', # 为了index不出错直接Padding到max length,如果用longest,后面的np.unravel_index也要改 + max_length=self.max_seq_length, + return_tensors="pt" + ) + + labels = torch.zeros(len(features), 1, self.max_seq_length, self.max_seq_length) # 阅读理解任务entity种类为1 [bz, 1, max_len, max_len] + for feature_id, feature in enumerate(features): # 遍历每个样本 + starts, ends = feature['start'], feature['end'] + offset = feature['offset_mapping'] # 表示tokenizer生成的token对应原始文本中字符级别的位置区间 + position_map = {} + for i, (m, n) in enumerate(offset): + if i != 0 and m == 0 and n == 0: + continue + for k in range(m, n + 1): + position_map[k] = i # 字符级别的第k个字符属于分词i + for start, end in zip(starts, ends): + end -= 1 + # MRC 没有答案时则把label指向CLS + if start == 0: + assert end == -1 + labels[feature_id, 0, 0, 0] = 1 + else: + if start in position_map and end in position_map: + # 指定下列元素为1,说明表示第feature_id个样本的预测区间 + labels[feature_id, 0, position_map[start], position_map[end]] = 1 + + batch["label_ids"] = labels + + tempid = [] + tempinstruction = [] + tempoffset_mapping = [] + temptarget = [] + for i in range(len(features)): + tempid.append(features[i]["id"]) + tempinstruction.append(features[i]["instruction"]) + tempoffset_mapping.append(features[i]["offset_mapping"]) + temptarget.append(features[i]["target"]) + + batch["id"] = tempid + 
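+        # The fields below are carried through the batch as plain Python lists rather than
+        # tensors; the evaluator uses them to map predicted (start, end) token positions
+        # back to character-level spans inside the original instruction string.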
batch["instruction"] = tempinstruction + batch["offset_mapping"] = tempoffset_mapping + batch["target"] = temptarget + + return batch \ No newline at end of file diff --git a/easynlp/appzoo/information_extraction/evaluator.py b/easynlp/appzoo/information_extraction/evaluator.py new file mode 100644 index 00000000..eaed63fb --- /dev/null +++ b/easynlp/appzoo/information_extraction/evaluator.py @@ -0,0 +1,157 @@ +import torch +import numpy as np +from ...utils.logger import logger +from ...core.evaluator import Evaluator + +def fush_multi_answer(has_answer, new_answer): + # 对于某个id测试集,出现多个example时(例如同一个测试样本使用了多个模板而生成了多个example),此时将预测的topk结果进行合并 + # has为已经合并的结果,new为当前新产生的结果, + # has格式为 {'ans': {'prob': float(prob[index_ids[ei]]), 'pos': (s, e)}, ...} + # new {'ans': {'prob': float(prob[index_ids[ei]]), 'pos': (s, e)}, ...} + # print('has_answer=', has_answer) + for ans, value in new_answer.items(): + if ans not in has_answer.keys(): + has_answer[ans] = value + else: + has_answer[ans]['prob'] += value['prob'] + has_answer[ans]['pos'].extend(value['pos']) + return has_answer + +def get_predict_result(batchs, probs, indices, max_seq_length): + probs = probs.squeeze(1) # topk结果的概率 + indices = indices.squeeze(1) # topk结果的索引 + + predictions = {} + topk_predictions = {} + + for _id, instruction, offset_mapping, prob, index in zip(batchs["id"], batchs["instruction"], batchs["offset_mapping"], probs, indices): + + index_ids = torch.Tensor([i for i in range(len(index))]).long() + answer = [] + topk_answer_dict = dict() + # TODO 1. 调节阈值 2. 处理输出实体重叠问题 + entity_index = index[prob > 0.6] + index_ids = index_ids[prob > 0.6] + + for ei, entity in enumerate(entity_index): + + entity = entity.cpu().numpy() + start_end = np.unravel_index(entity, (max_seq_length, max_seq_length)) + + s = offset_mapping[start_end[0]][0] + e = offset_mapping[start_end[1]][1] + ans = instruction[s: e] + + if ans not in answer: + answer.append(ans) + # topk_answer.append({'answer': ans, 'prob': float(prob[index_ids[ei]]), 'pos': (s, e)}) + topk_answer_dict[ans] = {'prob': float(prob[index_ids[ei]]), 'pos': [(s, e)]} + predictions[_id] = answer + + if _id not in topk_predictions.keys(): + topk_predictions[_id] = topk_answer_dict + else: + topk_predictions[_id] = fush_multi_answer(topk_predictions[_id], topk_answer_dict) + + for id_, values in topk_predictions.items(): + + answer_list = list() + for ans, value in values.items(): + answer_list.append({'answer': ans, 'prob': value['prob'], 'pos': value['pos']}) + topk_predictions[id_] = answer_list + + return predictions, topk_predictions + +class InformationExtractionEvaluator(Evaluator): + def __init__(self, valid_dataset, **kwargs): + super().__init__(valid_dataset, **kwargs) + + self.max_seq_length = kwargs.get("sequence_length") + + def _compute(self, label, pred, hit): + if label == 0: + recall = 1 if pred == 0 else 0 + precision = 1 if pred == 0 else (hit / pred) + else: + recall = hit / label + precision = 0 if pred == 0 else (hit / pred) + f1 = 0. if recall + precision == 0 else (2 * precision * recall) / (precision + recall) + return recall, precision, f1 + + def calc_metric(self, golden, predictions): + f1 = 0. + acc = 0. 
+ for k in golden.keys(): + hit_entities = [e for e in predictions[k] if e in golden[k]] + _recall, _precision, _f1 = self._compute( + len(golden[k]), + len(predictions[k]), + len(hit_entities) + ) + f1 += _f1 + acc += _precision + return { + 'acc': acc/len(golden.keys()), + 'f1': f1/len(golden.keys()) + } + + def evaluate(self, model): + + model.eval() + dataname_map = {} + golden = {} + predictions = {} + for _step, batch in enumerate(self.valid_loader): + + try: + batch = { + key: val.cuda() if isinstance(val, torch.Tensor) else val + for key, val in batch.items() + } + except RuntimeError: + batch = {key: val for key, val in batch.items()} + + with torch.no_grad(): + outputs = model(batch) + topk_probs = outputs["topk_probs"] + topk_indices = outputs["topk_indices"] + + prediction, _ = get_predict_result(batch, topk_probs, topk_indices, self.max_seq_length) + predictions.update(prediction) #更新字典的操作 + + for i in range(len(batch["id"])): + id_ = batch["id"][i] + dataname = "-".join(id_.split("-")[:-2]) + if dataname in dataname_map: + dataname_map[dataname].append(id_) + else: + dataname_map[dataname] = [id_] + + golden[id_] = batch["target"][i].split('|') + + all_metrics = { + "macro_f1": 0., + "micro_f1": 0., + "eval_num": 0, + } + + for dataname, data_ids in dataname_map.items(): + gold = {k: v for k, v in golden.items() if k in data_ids} + pred = {k: v for k, v in predictions.items() if k in data_ids} + score = self.calc_metric(golden=gold, predictions=pred) + acc, f1 = score['acc'], score['f1'] + + all_metrics["macro_f1"] += f1 + all_metrics["micro_f1"] += f1 * len(data_ids) + all_metrics["eval_num"] += len(data_ids) + all_metrics[dataname] = round(acc, 4) + all_metrics["macro_f1"] = round(all_metrics["macro_f1"] / len(dataname_map), 4) + all_metrics["micro_f1"] = round(all_metrics["micro_f1"] / all_metrics["eval_num"], 4) + + eval_outputs = list() + + for key, value in all_metrics.items(): + eval_outputs.append((key, value)) + logger.info("{}: {}".format(key, value)) + + return eval_outputs diff --git a/easynlp/appzoo/information_extraction/model.py b/easynlp/appzoo/information_extraction/model.py new file mode 100644 index 00000000..5520aecd --- /dev/null +++ b/easynlp/appzoo/information_extraction/model.py @@ -0,0 +1,129 @@ +import torch +import torch.nn as nn +from typing import Optional +from dataclasses import dataclass +from ..application import Application +from ...modelzoo import AutoConfig, AutoModel +from transformers.file_utils import ModelOutput + +class SinusoidalPositionEmbedding(nn.Module): + """定义Sin-Cos位置Embedding + """ + + def __init__( + self, output_dim, merge_mode='add', custom_position_ids=False): + super(SinusoidalPositionEmbedding, self).__init__() + self.output_dim = output_dim + self.merge_mode = merge_mode + self.custom_position_ids = custom_position_ids + + def forward(self, inputs): + if self.custom_position_ids: + seq_len = inputs.shape[1] + inputs, position_ids = inputs + position_ids = position_ids.type(torch.float) + else: + input_shape = inputs.shape + batch_size, seq_len = input_shape[0], input_shape[1] + position_ids = torch.arange(seq_len).type(torch.float)[None] + indices = torch.arange(self.output_dim // 2).type(torch.float) + indices = torch.pow(10000.0, -2 * indices / self.output_dim) + embeddings = torch.einsum('bn,d->bnd', position_ids, indices) + embeddings = torch.stack([torch.sin(embeddings), torch.cos(embeddings)], dim=-1) + embeddings = torch.reshape(embeddings, (-1, seq_len, self.output_dim)) + if self.merge_mode == 'add': + return 
inputs + embeddings.to(inputs.device) + elif self.merge_mode == 'mul': + return inputs * (embeddings + 1.0).to(inputs.device) + elif self.merge_mode == 'zero': + return embeddings.to(inputs.device) + +def multilabel_categorical_crossentropy(y_pred, y_true): + y_pred = (1 - 2 * y_true) * y_pred # -1 -> pos classes, 1 -> neg classes + y_pred_neg = y_pred - y_true * 1e12 # mask the pred outputs of pos classes + y_pred_pos = y_pred - (1 - y_true) * 1e12 # mask the pred outputs of neg classes + zeros = torch.zeros_like(y_pred[..., :1]) + y_pred_neg = torch.cat([y_pred_neg, zeros], dim=-1) + y_pred_pos = torch.cat([y_pred_pos, zeros], dim=-1) + neg_loss = torch.logsumexp(y_pred_neg, dim=-1) + pos_loss = torch.logsumexp(y_pred_pos, dim=-1) + # print(y_pred, y_true, pos_loss) + return (neg_loss + pos_loss).mean() + +class InformationExtractionModel(Application): + + def __init__(self, pretrained_model_name_or_path=None, **kwargs): + super().__init__() + + if kwargs.get('from_config'): + # for evaluation and prediction + self.config = kwargs.get('from_config') + self.backbone = AutoModel.from_config(self.config) + else: + self.config = AutoConfig.from_pretrained(pretrained_model_name_or_path) + self.backbone = AutoModel.from_pretrained(pretrained_model_name_or_path) + + self.hidden_size = self.config.hidden_size + + self.ent_type_size = 1 + self.inner_dim = 64 + self.RoPE = True + + self.dense_1 = nn.Linear(self.hidden_size, self.inner_dim * 2) + self.dense_2 = nn.Linear(self.hidden_size, self.ent_type_size * 2) # 原版的dense2是(inner_dim * 2, ent_type_size * 2) + + def forward(self, inputs): + + input_ids = inputs["input_ids"] + attention_mask = inputs["attention_mask"] + token_type_ids = inputs["token_type_ids"] + + context_outputs = self.backbone(input_ids, attention_mask, token_type_ids) + last_hidden_state = context_outputs.last_hidden_state + outputs = self.dense_1(last_hidden_state) + qw, kw = outputs[..., ::2], outputs[..., 1::2] + batch_size = input_ids.shape[0] + + if self.RoPE: + pos = SinusoidalPositionEmbedding(self.inner_dim, 'zero')(outputs) + cos_pos = pos[..., 1::2].repeat_interleave(2, dim=-1) # e.g. 
[0.34, 0.90] -> [0.34, 0.34, 0.90, 0.90] + sin_pos = pos[..., ::2].repeat_interleave(2, dim=-1) + qw2 = torch.stack([-qw[..., 1::2], qw[..., ::2]], 3) + qw2 = torch.reshape(qw2, qw.shape) + qw = qw * cos_pos + qw2 * sin_pos + kw2 = torch.stack([-kw[..., 1::2], kw[..., ::2]], 3) + kw2 = torch.reshape(kw2, kw.shape) + kw = kw * cos_pos + kw2 * sin_pos + + logits = torch.einsum('bmd,bnd->bmn', qw, kw) / self.inner_dim ** 0.5 + bias = torch.einsum('bnh->bhn', self.dense_2(last_hidden_state)) / 2 + logits = logits[:, None] + bias[:, ::2, None] + bias[:, 1::2, :, None] # logits[:, None] 增加一个维度 + #logits.shape=[2,1,512,512] + + mask = torch.triu(attention_mask.unsqueeze(2) * attention_mask.unsqueeze(1)) + + with torch.no_grad(): + prob = torch.sigmoid(logits) * mask.unsqueeze(1) + topk = torch.topk(prob.view(batch_size, self.ent_type_size, -1), 50, dim=-1) + + return { + 'input_ids': input_ids, + 'attention_mask': attention_mask, + 'logits': logits, + 'topk_probs': topk.values, + 'topk_indices': topk.indices + } + + def compute_loss(self, forward_outputs, label_ids, **kwargs): + + input_ids = forward_outputs['input_ids'] + attention_mask = forward_outputs['attention_mask'] + logits = forward_outputs['logits'] + + mask = torch.triu(attention_mask.unsqueeze(2) * attention_mask.unsqueeze(1)) + y_pred = logits - (1-mask.unsqueeze(1))*1e12 + y_true = label_ids.view(input_ids.shape[0] * self.ent_type_size, -1) + y_pred = y_pred.view(input_ids.shape[0] * self.ent_type_size, -1) + loss = multilabel_categorical_crossentropy(y_pred, y_true) + + return {'loss': loss} \ No newline at end of file diff --git a/easynlp/appzoo/information_extraction/predictor.py b/easynlp/appzoo/information_extraction/predictor.py new file mode 100644 index 00000000..635dff50 --- /dev/null +++ b/easynlp/appzoo/information_extraction/predictor.py @@ -0,0 +1,222 @@ +import json +import torch +import numpy as np +from threading import Lock +from ...core.predictor import Predictor +from .evaluator import get_predict_result +from ...modelzoo.models.bert import BertTokenizerFast +from ...core.predictor import Predictor, get_model_predictor + +#假定一个文件中只有一种任务,NER或RE或EE +#对于NER任务,scheme为xx;xx;xx, +#输出形式为id content q_and_a 注意,q_and_a的形式为[[实体类型 答案 答案开始位置 答案结束位置 答案的可能性大小]] +#对于非NER任务,schem为竞赛名称:主办方,承办方;比赛:冠军,亚军 +#输出形式为id content 竞赛名称 答案 答案开始位置 答案结束位置 答案的可能性大小 +#以及id content q_and_a 注意,q_and_a的形式为[[竞赛名称(xx)-主办方 答案 答案开始位置 答案结束位置 答案的可能性大小]] + +class InformationExtractionPredictor(Predictor): + + def __init__(self, model_dir, model_cls, *args, **kwargs): + super(InformationExtractionPredictor, self).__init__() + + self.MUTEX = Lock() + + self.task = kwargs.pop("task") + self.max_seq_length = kwargs.pop("max_seq_length") + self.input_schema = kwargs.pop("input_schema") + self.column_names = [t.split(":")[0] for t in self.input_schema.split(",")] + + self.tokenizer = BertTokenizerFast.from_pretrained(model_dir) + self.model_predictor = get_model_predictor( + model_dir=model_dir, + model_cls=model_cls, + input_keys=[("input_ids", torch.LongTensor), + ("attention_mask", torch.LongTensor), + ("token_type_ids", torch.LongTensor) + ], + output_keys=["topk_probs", "topk_indices"] + ) + + def preprocess(self, in_data): + if not in_data: + raise RuntimeError("Input data should not be None.") + + if not isinstance(in_data, list): + in_data = [in_data] + + rst = { + "id": [], + "scheme": [], + "content": [], + "entity_type": [], + "instruction": [], + "offset_mapping": [], + "input_ids": [], + "token_type_ids": [], + "attention_mask": [] + } + + for record 
in in_data: + + id = record[self.column_names[0]] + scheme = record[self.column_names[1]] + content = record[self.column_names[2]] + + if self.task == "NER": + entity_types = scheme.split(";") + else: + entity_types = [t.split(":")[0] for t in scheme.split(";")] + + try: + self.MUTEX.acquire() + examples = [] + for i in range(len(entity_types)): + instruction = "找到文章中所有【{}】类型的实体?文章:【{}】".format(entity_types[i], content) + example = self.tokenizer( + instruction, + truncation=True, + max_length=self.max_seq_length, + padding="max_length", + return_offsets_mapping=True) + example["id"] = id + example["scheme"] = scheme + example["content"] = content + example["entity_type"] = entity_types[i] + example["instruction"] = instruction + examples.append(example) + finally: + self.MUTEX.release() + + for e_index, example in enumerate(examples): + rst["id"].append(example["id"]) + rst["scheme"].append(example["scheme"]) + rst["content"].append(example["content"]) + rst["entity_type"].append(example["entity_type"]) + rst["instruction"].append(example["instruction"]) + rst["offset_mapping"].append(example["offset_mapping"]) + rst["input_ids"].append(example["input_ids"]) + rst["token_type_ids"].append(example["token_type_ids"]) + rst["attention_mask"].append(example["attention_mask"]) + + return rst + + def predict(self, in_data): + + answers = self.model_predictor.predict(in_data) + answers = self.get_predict_result(answers, self.max_seq_length) + + if self.task == "NER": + return [in_data, answers] + else: + + rst = { + "id": [], + "content": [], + "instruction": [], + "question": [], + "offset_mapping": [], + "input_ids": [], + "token_type_ids": [], + "attention_mask": [] + } + + try: + self.MUTEX.acquire() + examples = [] + for i in range(len(answers)): + + id = in_data["id"][i] + scheme = in_data["scheme"][i] + types = {t.split(":")[0]:t.split(":")[1].split(",") for t in scheme.split(";")} + content = in_data["content"][i] + entity_type = in_data["entity_type"][i] + for j in range(len(answers[i])): + identified_entity = answers[i][j]["ans"] + + for k in range(len(types[entity_type])): + instruction = "找到文章中【{}】的【{}】?文章:【{}】".format(identified_entity, types[entity_type][k], content) + example = self.tokenizer( + instruction, + truncation=True, + max_length=self.max_seq_length, + padding="max_length", + return_offsets_mapping=True) + example["id"] = id + example["content"] = content + example["instruction"] = instruction + example["question"] = "{}({})-{}".format(entity_type, identified_entity, types[entity_type][k]) + examples.append(example) + finally: + self.MUTEX.release() + + for e_index, example in enumerate(examples): + rst["id"].append(example["id"]) + rst["content"].append(example["content"]) + rst["instruction"].append(example["instruction"]) + rst["question"].append(example["question"]) + rst["offset_mapping"].append(example["offset_mapping"]) + rst["input_ids"].append(example["input_ids"]) + rst["token_type_ids"].append(example["token_type_ids"]) + rst["attention_mask"].append(example["attention_mask"]) + + answers = self.model_predictor.predict(rst) + answers = self.get_predict_result(answers, self.max_seq_length) + + return [rst, answers] + + def get_predict_result(self, lists, max_seq_length): + + probs = lists["topk_probs"].squeeze(1) # topk结果的概率 + indices = lists["topk_indices"].squeeze(1) # topk结果的索引 + answers = [] + + for id, instruction, offset_mapping, prob, index in zip(lists["id"], lists["instruction"], lists["offset_mapping"], probs, indices): + + answer = [] + index_ids = 
torch.Tensor([i for i in range(len(index))]).long() + entity_index = index[prob > 0.6] + index_ids = index_ids[prob > 0.6] + for ei, entity in enumerate(entity_index): + + start_end = np.unravel_index(entity, (max_seq_length, max_seq_length)) + + s = offset_mapping[start_end[0]][0] + e = offset_mapping[start_end[1]][1] + ans = instruction[s: e] + + answer.append({'ans':ans, 'prob': float(prob[index_ids[ei]]), 'pos': [s, e]}) + + answers.append(answer) + + return answers + + def postprocess(self, data): + + in_data, answers = data[0], data[1] + + output_dict_list = [] + + temp_id = "" + for i in range(len(answers)): + + if in_data["id"][i] != temp_id: + output_dict = {} + output_dict["id"] = in_data["id"][i] + output_dict["content"] = in_data["content"][i] + output_dict["q_and_a"] = [] + temp_id = in_data["id"][i] + for j in range(len(answers[i])): + temp = [] + if self.task == "NER": + temp.append(in_data["entity_type"][i]) #question + else: + temp.append(in_data["question"][i]) #question + temp.append(answers[i][j]["ans"]) #answer + temp.append(answers[i][j]["prob"]) #answer_prob + temp.append(answers[i][j]["pos"][0]) #answer_start + temp.append(answers[i][j]["pos"][1]) #eanswer_endnd + output_dict["q_and_a"].append(temp) + if in_data["id"][i] != temp_id or i == len(answers)-1: + output_dict_list.append(output_dict) + + return output_dict_list diff --git a/easynlp/utils/arguments.py b/easynlp/utils/arguments.py index eb1ca934..7c62cd23 100644 --- a/easynlp/utils/arguments.py +++ b/easynlp/utils/arguments.py @@ -256,7 +256,8 @@ def _add_easynlp_args(parser: argparse.ArgumentParser): 'image2text_generation', 'image2text_generation_vqgan', 'video2text_generation', 'clip', 'wukong_clip', - 'machine_reading_comprehension','latent_diffusion' + 'machine_reading_comprehension','latent_diffusion', + 'information_extraction' ], help='name of the application') diff --git a/examples/information_extraction/main.py b/examples/information_extraction/main.py new file mode 100644 index 00000000..f94fff9d --- /dev/null +++ b/examples/information_extraction/main.py @@ -0,0 +1,86 @@ +import os +import torch +from easynlp.core import Trainer +from easynlp.core import PredictorManager +from easynlp.utils import initialize_easynlp +from easynlp.appzoo import InformationExtractionDataset +from easynlp.utils.global_vars import parse_user_defined_parameters +from easynlp.appzoo import get_application_model, get_application_evaluator, get_application_predictor, get_application_model_for_evaluation + +if __name__ == "__main__": + + args = initialize_easynlp() + user_defined_parameters = parse_user_defined_parameters(args.user_defined_parameters) + + if args.mode == "predict": + + args.task = user_defined_parameters.get('task') + + predictor = get_application_predictor( + app_name=args.app_name, + model_dir=args.checkpoint_dir, + input_schema=args.input_schema, + max_seq_length=args.sequence_length, + task=args.task, + user_defined_parameters=user_defined_parameters) + predictor_manager = PredictorManager( + predictor=predictor, + input_file=args.tables.split(",")[0], + input_schema=args.input_schema, + output_file=args.tables.split(",")[-1], + output_schema=args.output_schema, + append_cols=args.append_cols, + batch_size=args.micro_batch_size + ) + predictor_manager.run() + exit() + + elif args.mode == "train": + + train_dataset = InformationExtractionDataset( + pretrained_model_name_or_path=args.pretrained_model_name_or_path, + data_file=args.tables.split(",")[0], + input_schema=args.input_schema, + 
max_seq_length=args.sequence_length + ) + + valid_dataset = InformationExtractionDataset( + pretrained_model_name_or_path=args.pretrained_model_name_or_path, + data_file=args.tables.split(",")[-1], + input_schema=args.input_schema, + max_seq_length=args.sequence_length + ) + + model = get_application_model(app_name=args.app_name, + pretrained_model_name_or_path=args.pretrained_model_name_or_path, + user_defined_parameters=user_defined_parameters) + + trainer = Trainer(model=model, train_dataset=train_dataset, evaluator=get_application_evaluator(app_name=args.app_name, valid_dataset=valid_dataset, + user_defined_parameters=user_defined_parameters, + sequence_length=args.sequence_length)) + + trainer.train() + + elif args.mode == "evaluate": + + valid_dataset = InformationExtractionDataset( + pretrained_model_name_or_path=args.checkpoint_dir, + data_file=args.tables, + input_schema=args.input_schema, + max_seq_length=args.sequence_length + ) + + model = get_application_model_for_evaluation(app_name=args.app_name, + pretrained_model_name_or_path=args.checkpoint_dir, + user_defined_parameters=[user_defined_parameters]) + + evaluator = get_application_evaluator(app_name=args.app_name, valid_dataset=valid_dataset, user_defined_parameters=user_defined_parameters, + sequence_length=args.sequence_length) + + if args.n_gpu > 0: + model.to(torch.cuda.current_device()) + else: + model.to("cpu") + evaluator.evaluate(model=model) + + diff --git a/examples/information_extraction/run_train_eval_predict_appzoo_cli_local.sh b/examples/information_extraction/run_train_eval_predict_appzoo_cli_local.sh new file mode 100644 index 00000000..f33bfd92 --- /dev/null +++ b/examples/information_extraction/run_train_eval_predict_appzoo_cli_local.sh @@ -0,0 +1,54 @@ +export CUDA_VISIBLE_DEVICES=$1 + +mode=$2 + +if [ "$mode" = "train" ]; then + + easynlp + --mode $mode \ + --tables=train.tsv,dev.tsv \ + --input_schema=id:str:1,instruction:str:1,start:str:1,end:str:1,target:str:1 \ + --worker_gpu=4 \ + --app_name=information_extraction \ + --sequence_length=512 \ + --weight_decay=0.0 \ + --micro_batch_size=4 \ + --checkpoint_dir=./information_extraction_model/ \ + --data_threads=5 \ + --user_defined_parameters='pretrain_model_name_or_path=hfl/macbert-large-zh' \ + --save_checkpoint_steps=500 \ + --gradient_accumulation_steps=8 \ + --epoch_num=3 \ + --learning_rate=2e-05 \ + --random_seed=42 + +elif [ "$mode" = "evaluate" ]; then + + easynlp + --mode $mode \ + --tables=dev.tsv \ + --input_schema=id:str:1,instruction:str:1,start:str:1,end:str:1,target:str:1 \ + --worker_gpu=4 \ + --app_name=information_extraction \ + --sequence_length=512 \ + --weight_decay=0.0 \ + --micro_batch_size=4 \ + --checkpoint_dir=./information_extraction_model/ \ + --data_threads=5 + +elif [ "$mode" = "predict" ]; then + + easynlp + --mode=$mode \ + --tables=predict_input_EE.tsv,predict_output_EE.tsv \ + --input_schema=id:str:1,scheme:str:1,content:str:1 \ + --output_schema=id,content,q_and_a \ + --worker_gpu=4 \ + --app_name=information_extraction \ + --sequence_length=512 \ + --weight_decay=0.0 \ + --micro_batch_size=4 \ + --checkpoint_dir=./information_extraction_model/ \ + --data_threads=5 \ + --user_defined_parameters='task=EE' +fi \ No newline at end of file diff --git a/examples/information_extraction/run_train_eval_predict_user_defined_local.sh b/examples/information_extraction/run_train_eval_predict_user_defined_local.sh new file mode 100644 index 00000000..5dd2c5ff --- /dev/null +++ 
b/examples/information_extraction/run_train_eval_predict_user_defined_local.sh @@ -0,0 +1,62 @@ +export CUDA_VISIBLE_DEVICES=$1 + +MASTER_ADDR=localhost +MASTER_PORT=6018 +GPUS_PER_NODE=4 +NNODES=1 +NODE_RANK=0 + +DISTRIBUTED_ARGS="--nproc_per_node $GPUS_PER_NODE --nnodes $NNODES --node_rank $NODE_RANK --master_addr $MASTER_ADDR --master_port $MASTER_PORT" + +mode=$2 + +if [ "$mode" = "train" ]; then + + python -m torch.distributed.launch $DISTRIBUTED_ARGS main.py \ + --mode $mode \ + --tables=train.tsv,dev.tsv \ + --input_schema=id:str:1,instruction:str:1,start:str:1,end:str:1,target:str:1 \ + --worker_gpu=4 \ + --app_name=information_extraction \ + --sequence_length=512 \ + --weight_decay=0.0 \ + --micro_batch_size=4 \ + --checkpoint_dir=./information_extraction_model/ \ + --data_threads=5 \ + --user_defined_parameters='pretrain_model_name_or_path=hfl/macbert-large-zh' \ + --save_checkpoint_steps=500 \ + --gradient_accumulation_steps=8 \ + --epoch_num=3 \ + --learning_rate=2e-05 \ + --random_seed=42 + +elif [ "$mode" = "evaluate" ]; then + + python -m torch.distributed.launch $DISTRIBUTED_ARGS main.py \ + --mode $mode \ + --tables=dev.tsv \ + --input_schema=id:str:1,instruction:str:1,start:str:1,end:str:1,target:str:1 \ + --worker_gpu=4 \ + --app_name=information_extraction \ + --sequence_length=512 \ + --weight_decay=0.0 \ + --micro_batch_size=4 \ + --checkpoint_dir=./information_extraction_model/ \ + --data_threads=5 + +elif [ "$mode" = "predict" ]; then + + python -m torch.distributed.launch $DISTRIBUTED_ARGS main.py \ + --mode=$mode \ + --tables=predict_input_EE.tsv,predict_output_EE.tsv \ + --input_schema=id:str:1,scheme:str:1,content:str:1 \ + --output_schema=id,content,q_and_a \ + --worker_gpu=4 \ + --app_name=information_extraction \ + --sequence_length=512 \ + --weight_decay=0.0 \ + --micro_batch_size=4 \ + --checkpoint_dir=./information_extraction_model/ \ + --data_threads=5 \ + --user_defined_parameters='task=EE' +fi \ No newline at end of file diff --git a/tests/modelzoo_alibaba.json b/tests/modelzoo_alibaba.json new file mode 100644 index 00000000..8c823ef3 --- /dev/null +++ b/tests/modelzoo_alibaba.json @@ -0,0 +1,84 @@ +{ + "bert-small-uncased": "public/bert-small-uncased.tgz", + "bert-base-uncased": "public/bert-base-uncased.tgz", + "bert-large-uncased": "public/bert-large-uncased.tgz", + "bert-tiny-uncased": "public/bert-tiny-uncased.tgz", + "bert-base-chinese": "public/bert-base-chinese.tgz", + "kbert-base-chinese": "public/kbert-base-chinese.tgz", + "wobert-base-chinese":"public/wobert-base-chinese.tgz", + "bert13b-base-chinese": "public/bert13b-base-chinese.tgz", + "bert600m-base-chinese": "public/bert600m-base-chinese.tgz", + "roberta-base-en": "public/roberta-base.tgz", + "roberta-large-en": "public/roberta-large.tgz", + "finbert-base-zh": "public/finbert-base-zh.tgz", + "clip_vit_large_patch14": "openai/clip_vit_large_patch14.tgz", + "alibaba-pai/clip_chinese_roberta_base_vit_base": "alibaba-pai/clip_chinese_roberta_base_vit_base.tgz", + "wukong_vit_l_14_clip": "public/wukong_vit_l_14_clip.tgz", + "langboat/mengzi-bert-base": "public/mengzi-bert-base.tgz", + "langboat/mengzi-bert-base-fin": "public/mengzi-bert-base-fin.tgz", + "IDEA-CCNL/Erlangshen-MegatronBert-1.3B": "public/Erlangshen-MegatronBert-1.3B.tgz", + "hfl/macbert-base-zh": "public/hfl/macbert-base-zh.tgz", + "hfl/macbert-48-zh-stackbert": "public/hfl/macbert-48-zh-stackbert.tgz", + "hfl/macbert-large-zh": "public/hfl/macbert-large-zh.tgz", + "hfl/macbert-32-zh-stackbert": 
"public/hfl/macbert-32-zh-stackbert.tgz", + "hfl/macbert-80-zh-stackbert": "public/hfl/macbert-80-zh-stackbert.tgz", + "hfl/chinese-roberta-wwm-ext": "public/hfl/chinese-roberta-wwm-ext.tgz", + "hfl/chinese-roberta-wwm-ext-large": "public/hfl/chinese-roberta-wwm-ext-large.tgz", + "hfl/bloom-350m": "public/hfl/bloom-350m.tgz", + "hfl/randeng-summary-generation-large-zh": "public/hfl/randeng-523M-Summary-Chinese.tgz", + "hfl/randeng-summary-generation-base-zh": "public/hfl/randeng-238M-Summary-Chinese.tgz", + "hfl/brio-summary-generation-large-en": "public/hfl/brio-cnndm-uncased.tgz", + "alibaba-pai/randeng-title-generation-large-zh": "alibaba-pai/randeng-523M-Summary-Chinese-tuned.tgz", + "alibaba-pai/randeng-title-generation-base-zh": "alibaba-pai/randeng-238M-Summary-Chinese-tuned.tgz", + "mg/glm-generation-large-zh": "public/mg/glm-large-chinese.tgz", + "mg/glm-generation-large-en": "public/mg/glm-large-generation.tgz", + "alibaba-pai/randeng-advertise-generation-base-zh": "alibaba-pai/advertise-gen_base_cn", + "hfl/bart-generation-base-zh": "public/hfl/bart-base-chinese.tgz", + "hfl/bart-generation-large-zh": "public/hfl/bart-large-chinese.tgz", + "alibaba-pai/randeng-essay-generation-base-zh": "alibaba-pai/randeng-essay-generation-base-zh.tgz", + "alibaba-pai/randeng-question-generation-base-zh": "alibaba-pai/randeng-question-generation-base-zh.tgz", + "alibaba-pai/bart-essay-generation-large-zh": "alibaba-pai/bart-essay-generation-large-zh.tgz", + "alibaba-pai/bart-question-generation-large-zh": "alibaba-pai/bart-question-generation-base-zh.tgz", + "alibaba-pai/glm-essay-generation-large-zh": "alibaba-pai/mg/glm-essay-generation-large-zh.tgz", + "alibaba-pai/bart-poem-generation-large-zh": "alibaba-pai/bart-poem-generation-large-zh.tgz", + "alibaba-pai/randeng-poem-generation-base-zh": "alibaba-pai/randeng-poem-generation-base-zh.tgz", + "alibaba-pai/pai-kangaroo-base-chinese": "alibaba-pai/pai-kangaroo-base-chinese.tgz", + "alibaba-pai/pai-kangaroo-fin-base-chinese": "alibaba-pai/pai-kangaroo-fin-base-chinese.tgz", + "alibaba-pai/textcnn-en": "alibaba-pai/textcnn-en.tgz", + "alibaba-pai/mt5-title-generation-zh": "alibaba-pai/mt5-title-generation-zh.tgz", + "alibaba-pai/pegasus-summary-generation-en": "alibaba-pai/pegasus-summary-generation-en.tgz", + "alibaba-pai/gpt2-chitchat-zh": "alibaba-pai/gpt2-chitchat-zh.tgz", + "alibaba-pai/pai-dkplm-medical-base-zh": "alibaba-pai/pai-dkplm-medical-base-zh.tgz", + "alibaba-pai/pai-dkplm-medical-small-zh": "alibaba-pai/pai-dkplm-medical-small-zh.tgz", + "alibaba-pai/pai-kgbert-tax-base-zh": "alibaba-pai/pai-kgbert-tax-base-zh.tgz", + "alibaba-pai/pai-kgbert-insurance-base-zh": "alibaba-pai/pai-kgbert-insurance-base-zh.tgz", + "alibaba-pai/pai-bert-medical-base-zh": "alibaba-pai/pai-bert-medical-base-zh.tgz", + "alibaba-pai/pai-bert-base-zh": "alibaba-pai/pai-bert-base-zh.tgz", + "alibaba-pai/pai-bert-small-zh": "alibaba-pai/pai-bert-small-zh.tgz", + "alibaba-pai/pai-bert-tiny-zh": "alibaba-pai/pai-bert-tiny-zh.tgz", + "alibaba-pai/pai-geep-base-uncased": "alibaba-pai/geep-base-uncased.tgz", + "alibaba-pai/pai-painter-base-zh": "alibaba-pai/pai-painter-base-zh.tgz", + "alibaba-pai/pai-painter-large-zh": "alibaba-pai/pai-painter-large-zh.tgz", + "alibaba-pai/pai-painter-huge-zh": "alibaba-pai/pai-painter-huge-zh.tgz", + "alibaba-pai/pai-artist-knowl-base-zh": "alibaba-pai/pai-artist-knowl-base-zh.tgz", + "alibaba-pai/pai-artist-t2i-base-zh": "alibaba-pai/pai-artist-t2i-base-zh.tgz", + "alibaba-pai/pai-artist-t2i-large-zh": 
"alibaba-pai/pai-artist-t2i-large-zh.tgz", + "alibaba-pai/pai-artist-i2t-base-zh": "alibaba-pai/pai-artist-i2t-base-zh.tgz", + "alibaba-pai/pai-artist-i2t-large-zh": "alibaba-pai/pai-artist-i2t-large-zh.tgz", + "alibaba-pai/pai-vqgan-gpt-i2t-base-zh": "alibaba-pai/pai-vqgan-gpt-i2t-base-zh.tgz", + "alibaba-pai/pai-vqgan-gpt-i2t-large-zh": "alibaba-pai/pai-vqgan-gpt-i2t-large-zh.tgz", + "alibaba-pai/pai-clip-gpt-i2t-base-zh": "alibaba-pai/pai-clip-gpt-i2t-base-zh.tgz", + "alibaba-pai/pai-clip-gpt-i2t-large-zh": "alibaba-pai/pai-clip-gpt-i2t-large-zh.tgz", + "alibaba-pai/pai-clip-commercial-base-en": "alibaba-pai/pai-clip-commercial-base-en.tgz", + "alibaba-pai/pai-clip-commercial-large-en": "alibaba-pai/pai-clip-commercial-large-en.tgz", + "alibaba-pai/pai-ck_bert-base-zh": "alibaba-pai/pai-ck_bert-base-zh.tgz", + "alibaba-pai/pai-ck_bert-large-zh": "alibaba-pai/pai-ck_bert-large-zh.tgz", + "alibaba-pai/pai-ck_bert-huge-zh": "alibaba-pai/pai-ck_bert-huge-zh.tgz", + "alibaba-pai/pai-ldm-fashion-base-zh": "alibaba-pai/pai-ldm-fashion-base-zh.tgz", + "alibaba-pai/pai-ldm-food-base-zh": "alibaba-pai/pai-ldm-food-base-zh.tgz", + "m6-students/m6-text-cls-student-small": "m6-students/m6-text-cls-student-small.tgz", + "m6-students/m6-text-cls-student-tiny": "m6-students/m6-text-cls-student-tiny.tgz", + "m6-students/m6-text-cls-student-base": "m6-students/m6-text-cls-student-base.tgz", + "m6-students/m6-text-gen-student": "m6-students/m6-text-gen-student.tgz", + "globalpointforie/globalpointforie-zh": "globalpointforie/globalpointforie-zh.tgz" +} \ No newline at end of file diff --git a/tests/tmp/da_test_0.out b/tests/tmp/da_test_0.out new file mode 100644 index 00000000..1239aca6 --- /dev/null +++ b/tests/tmp/da_test_0.out @@ -0,0 +1,115 @@ +--2022-12-07 12:00:45-- http://atp-modelzoo-sh.oss-cn-shanghai.aliyuncs.com/release/easynlp_modelzoo/modelzoo_alibaba.json +Resolving atp-modelzoo-sh.oss-cn-shanghai.aliyuncs.com (atp-modelzoo-sh.oss-cn-shanghai.aliyuncs.com)... 47.101.88.27 +Connecting to atp-modelzoo-sh.oss-cn-shanghai.aliyuncs.com (atp-modelzoo-sh.oss-cn-shanghai.aliyuncs.com)|47.101.88.27|:80... connected. +HTTP request sent, awaiting response... 200 OK +Length: 6605 (6.5K) [application/json] +Saving to: ‘modelzoo_alibaba.json’ + + 0K ...... 100% 681M=0s + +2022-12-07 12:00:45 (681 MB/s) - ‘modelzoo_alibaba.json’ saved [6605/6605] + +/apsarapangu/disk2/zhoujiyong.zjy/miniconda3/envs/ie/lib/python3.8/site-packages/torch/distributed/launch.py:178: FutureWarning: The module torch.distributed.launch is deprecated +and will be removed in future. Use torchrun. +Note that --use_env is set by default in torchrun. +If your script expects `--local_rank` argument to be set, please +change it to read from `os.environ['LOCAL_RANK']` instead. See +https://pytorch.org/docs/stable/distributed.html#launch-utility for +further instructions + + warnings.warn( +[W socket.cpp:401] [c10d] The server socket cannot be initialized on [::]:52463 (errno: 97 - Address family not supported by protocol). +[W socket.cpp:558] [c10d] The client socket cannot be initialized to connect to [localhost]:52463 (errno: 97 - Address family not supported by protocol). +[W socket.cpp:558] [c10d] The client socket cannot be initialized to connect to [localhost]:52463 (errno: 97 - Address family not supported by protocol). +NOTE: if you wish to use GLM models, please refer to EasyNLP/examples/appzoo_tutorials/sequence_generation/README.md! 
+name 'deepspeed' is not defined +The following parameters are not recognized: [] +------------------------ arguments ------------------------ + app_name ........................................ data_augmentation + append_cols ..................................... None + buckets ......................................... None + checkpoint_dir .................................. _ + chief_hosts ..................................... + data_threads .................................... 10 + distributed_backend ............................. nccl + do_lower_case ................................... False + epoch_num ....................................... 3 + export_tf_checkpoint_type ....................... easytransfer + first_sequence .................................. sent + gradient_accumulation_steps ..................... 1 + input_schema .................................... index:str:1,sent:str:1,label:str:1 + is_chief ........................................ + is_master_node .................................. True + job_name ........................................ None + label_enumerate_values .......................... None + label_name ...................................... None + learning_rate ................................... 5e-05 + local_rank ...................................... 0 + logging_steps ................................... 100 + master_port ..................................... 23456 + max_grad_norm ................................... 1.0 + mg_model ........................................ False + micro_batch_size ................................ 8 + mode ............................................ predict + modelzoo_base_dir ............................... + n_cpu ........................................... 1 + n_gpu ........................................... 0 + odps_config ..................................... None + optimizer_type .................................. AdamW + output_schema ................................... augmented_data + outputs ......................................... tmp/train_aug.tsv + predict_queue_size .............................. 1024 + predict_slice_size .............................. 4096 + predict_table_read_thread_num ................... 16 + predict_thread_num .............................. 1 + ps_hosts ........................................ + random_seed ..................................... 1234 + rank ............................................ 0 + read_odps ....................................... False + restore_works_dir ............................... ./.easynlp_predict_restore_works_dir + resume_from_checkpoint .......................... None + save_all_checkpoints ............................ False + save_checkpoint_steps ........................... None + second_sequence ................................. None + sequence_length ................................. 128 + skip_first_line ................................. False + tables .......................................... tmp/train_head.tsv + task_count ...................................... 1 + task_index ...................................... 0 + use_amp ......................................... False + use_torchacc .................................... False + user_defined_parameters ......................... pretrain_model_name_or_path=bert-small-uncased type=mlm_da expansion_rate=2 mask_proportion=0.1 remove_blanks=True append_original=True + user_entry_file ................................. None + user_script ..................................... 
None + warmup_proportion ............................... 0.1 + weight_decay .................................... 0.0001 + worker_count .................................... 1 + worker_cpu ...................................... -1 + worker_gpu ...................................... -1 + worker_hosts .................................... None + world_size ...................................... 1 +-------------------- end of arguments --------------------- +> initializing torch distributed ... +[W socket.cpp:558] [c10d] The client socket cannot be initialized to connect to [localhost]:52463 (errno: 97 - Address family not supported by protocol). +[W socket.cpp:558] [c10d] The client socket cannot be initialized to connect to [localhost]:52463 (errno: 97 - Address family not supported by protocol). +Init dist done. World size: 1, rank 0, l_rank 0 +> setting random seeds to 1234 ... +`/home/zhoujiyong.zjy/.easynlp/modelzoo/public/bert-small-uncased.tgz` already exists + + Loaded weights of the model: + [bert.embeddings.word_embeddings.weight,bert.embeddings.position_embeddings.weight,bert.embeddings.token_type_embeddings.weight,bert.embeddings.LayerNorm.weight,bert.embeddings.LayerNorm.bias,bert.encoder.layer.0.attention.self.query.weight,bert.encoder.layer.0.attention.self.query.bias,bert.encoder.layer.0.attention.self.key.weight,bert.encoder.layer.0.attention.self.key.bias,bert.encoder.layer.0.attention.self.value.weight,bert.encoder.layer.0.attention.self.value.bias,bert.encoder.layer.0.attention.output.dense.weight,bert.encoder.layer.0.attention.output.dense.bias,bert.encoder.layer.0.attention.output.LayerNorm.weight,bert.encoder.layer.0.attention.output.LayerNorm.bias,bert.encoder.layer.0.intermediate.dense.weight,bert.encoder.layer.0.intermediate.dense.bias,bert.encoder.layer.0.output.dense.weight,bert.encoder.layer.0.output.dense.bias,bert.encoder.layer.0.output.LayerNorm.weight,bert.encoder.layer.0.output.LayerNorm.bias,bert.encoder.layer.1.attention.self.query.weight,bert.encoder.layer.1.attention.self.query.bias,bert.encoder.layer.1.attention.self.key.weight,bert.encoder.layer.1.attention.self.key.bias,bert.encoder.layer.1.attention.self.value.weight,bert.encoder.layer.1.attention.self.value.bias,bert.encoder.layer.1.attention.output.dense.weight,bert.encoder.layer.1.attention.output.dense.bias,bert.encoder.layer.1.attention.output.LayerNorm.weight,bert.encoder.layer.1.attention.output.LayerNorm.bias,bert.encoder.layer.1.intermediate.dense.weight,bert.encoder.layer.1.intermediate.dense.bias,bert.encoder.layer.1.output.dense.weight,bert.encoder.layer.1.output.dense.bias,bert.encoder.layer.1.output.LayerNorm.weight,bert.encoder.layer.1.output.LayerNorm.bias,bert.encoder.layer.2.attention.self.query.weight,bert.encoder.layer.2.attention.self.query.bias,bert.encoder.layer.2.attention.self.key.weight,bert.encoder.layer.2.attention.self.key.bias,bert.encoder.layer.2.attention.self.value.weight,bert.encoder.layer.2.attention.self.value.bias,bert.encoder.layer.2.attention.output.dense.weight,bert.encoder.layer.2.attention.output.dense.bias,bert.encoder.layer.2.attention.output.LayerNorm.weight,bert.encoder.layer.2.attention.output.LayerNorm.bias,bert.encoder.layer.2.intermediate.dense.weight,bert.encoder.layer.2.intermediate.dense.bias,bert.encoder.layer.2.output.dense.weight,bert.encoder.layer.2.output.dense.bias,bert.encoder.layer.2.output.LayerNorm.weight,bert.encoder.layer.2.output.LayerNorm.bias,bert.encoder.layer.3.attention.self.query.weight,bert.encoder.layer.3.attention.self.query.bias,
bert.encoder.layer.3.attention.self.key.weight,bert.encoder.layer.3.attention.self.key.bias,bert.encoder.layer.3.attention.self.value.weight,bert.encoder.layer.3.attention.self.value.bias,bert.encoder.layer.3.attention.output.dense.weight,bert.encoder.layer.3.attention.output.dense.bias,bert.encoder.layer.3.attention.output.LayerNorm.weight,bert.encoder.layer.3.attention.output.LayerNorm.bias,bert.encoder.layer.3.intermediate.dense.weight,bert.encoder.layer.3.intermediate.dense.bias,bert.encoder.layer.3.output.dense.weight,bert.encoder.layer.3.output.dense.bias,bert.encoder.layer.3.output.LayerNorm.weight,bert.encoder.layer.3.output.LayerNorm.bias,bert.pooler.dense.weight,bert.pooler.dense.bias,cls.predictions.bias,cls.predictions.transform.dense.weight,cls.predictions.transform.dense.bias,cls.predictions.transform.LayerNorm.weight,cls.predictions.transform.LayerNorm.bias,cls.predictions.decoder.weight,cls.predictions.decoder.bias,cls.seq_relationship.weight,cls.seq_relationship.bias]. + + + Unloaded weights of the model: + [cls.seq_relationship.weight,cls.seq_relationship.bias]. + This IS expected if you initialize A model from B. + This IS NOT expected if you initialize A model from A. + + 0it [00:00, ?it/s] 1it [00:00, 4.60it/s] 2it [00:00, 4.54it/s] 3it [00:00, 4.69it/s] 4it [00:00, 4.78it/s] 5it [00:01, 4.80it/s] 6it [00:01, 4.84it/s] 7it [00:01, 4.86it/s] 8it [00:01, 4.86it/s] 8it [00:01, 4.80it/s] +[2022-12-07 12:00:52,114 INFO] Duration time: 2.3603076934814453 s +NOTE: if you wish to use GLM models, please refer to EasyNLP/examples/appzoo_tutorials/sequence_generation/README.md! +The following parameters are not recognized: [] +Trying downloading name_mapping.json +Success +`/home/zhoujiyong.zjy/.easynlp/modelzoo/public/bert-small-uncased.tgz` already exists +python -m torch.distributed.launch --nproc_per_node 1 --nnodes 1 --node_rank 0 --master_addr localhost --master_port 52463 /home/zhoujiyong.zjy/.local/lib/python3.8/site-packages/pai_easynlp-0.1.0-py3.8.egg/easynlp/appzoo/api.py --mode predict --tables tmp/train_head.tsv --input_schema index:str:1,sent:str:1,label:str:1 --first_sequence sent --checkpoint_dir _ --export_tf_checkpoint_type easytransfer --learning_rate 5e-05 --epoch_num 3 --random_seed 1234 --predict_queue_size 1024 --predict_slice_size 4096 --predict_thread_num 1 --outputs tmp/train_aug.tsv --output_schema augmented_data --restore_works_dir ./.easynlp_predict_restore_works_dir --sequence_length 128 --micro_batch_size 8 --app_name data_augmentation --user_defined_parameters pretrain_model_name_or_path=bert-small-uncased type=mlm_da expansion_rate=2 mask_proportion=0.1 remove_blanks=True append_original=True From 3c533c184b4b14e9a083e8938b159163b2fd453c Mon Sep 17 00:00:00 2001 From: zjy <756628577@qq.com> Date: Wed, 7 Dec 2022 14:23:19 +0800 Subject: [PATCH 010/101] ie --- tests/modelzoo_alibaba.json | 84 -------------------------- tests/tmp/da_test_0.out | 115 ------------------------------------ 2 files changed, 199 deletions(-) delete mode 100644 tests/modelzoo_alibaba.json delete mode 100644 tests/tmp/da_test_0.out diff --git a/tests/modelzoo_alibaba.json b/tests/modelzoo_alibaba.json deleted file mode 100644 index 8c823ef3..00000000 --- a/tests/modelzoo_alibaba.json +++ /dev/null @@ -1,84 +0,0 @@ -{ - "bert-small-uncased": "public/bert-small-uncased.tgz", - "bert-base-uncased": "public/bert-base-uncased.tgz", - "bert-large-uncased": "public/bert-large-uncased.tgz", - "bert-tiny-uncased": "public/bert-tiny-uncased.tgz", - "bert-base-chinese": 
"public/bert-base-chinese.tgz", - "kbert-base-chinese": "public/kbert-base-chinese.tgz", - "wobert-base-chinese":"public/wobert-base-chinese.tgz", - "bert13b-base-chinese": "public/bert13b-base-chinese.tgz", - "bert600m-base-chinese": "public/bert600m-base-chinese.tgz", - "roberta-base-en": "public/roberta-base.tgz", - "roberta-large-en": "public/roberta-large.tgz", - "finbert-base-zh": "public/finbert-base-zh.tgz", - "clip_vit_large_patch14": "openai/clip_vit_large_patch14.tgz", - "alibaba-pai/clip_chinese_roberta_base_vit_base": "alibaba-pai/clip_chinese_roberta_base_vit_base.tgz", - "wukong_vit_l_14_clip": "public/wukong_vit_l_14_clip.tgz", - "langboat/mengzi-bert-base": "public/mengzi-bert-base.tgz", - "langboat/mengzi-bert-base-fin": "public/mengzi-bert-base-fin.tgz", - "IDEA-CCNL/Erlangshen-MegatronBert-1.3B": "public/Erlangshen-MegatronBert-1.3B.tgz", - "hfl/macbert-base-zh": "public/hfl/macbert-base-zh.tgz", - "hfl/macbert-48-zh-stackbert": "public/hfl/macbert-48-zh-stackbert.tgz", - "hfl/macbert-large-zh": "public/hfl/macbert-large-zh.tgz", - "hfl/macbert-32-zh-stackbert": "public/hfl/macbert-32-zh-stackbert.tgz", - "hfl/macbert-80-zh-stackbert": "public/hfl/macbert-80-zh-stackbert.tgz", - "hfl/chinese-roberta-wwm-ext": "public/hfl/chinese-roberta-wwm-ext.tgz", - "hfl/chinese-roberta-wwm-ext-large": "public/hfl/chinese-roberta-wwm-ext-large.tgz", - "hfl/bloom-350m": "public/hfl/bloom-350m.tgz", - "hfl/randeng-summary-generation-large-zh": "public/hfl/randeng-523M-Summary-Chinese.tgz", - "hfl/randeng-summary-generation-base-zh": "public/hfl/randeng-238M-Summary-Chinese.tgz", - "hfl/brio-summary-generation-large-en": "public/hfl/brio-cnndm-uncased.tgz", - "alibaba-pai/randeng-title-generation-large-zh": "alibaba-pai/randeng-523M-Summary-Chinese-tuned.tgz", - "alibaba-pai/randeng-title-generation-base-zh": "alibaba-pai/randeng-238M-Summary-Chinese-tuned.tgz", - "mg/glm-generation-large-zh": "public/mg/glm-large-chinese.tgz", - "mg/glm-generation-large-en": "public/mg/glm-large-generation.tgz", - "alibaba-pai/randeng-advertise-generation-base-zh": "alibaba-pai/advertise-gen_base_cn", - "hfl/bart-generation-base-zh": "public/hfl/bart-base-chinese.tgz", - "hfl/bart-generation-large-zh": "public/hfl/bart-large-chinese.tgz", - "alibaba-pai/randeng-essay-generation-base-zh": "alibaba-pai/randeng-essay-generation-base-zh.tgz", - "alibaba-pai/randeng-question-generation-base-zh": "alibaba-pai/randeng-question-generation-base-zh.tgz", - "alibaba-pai/bart-essay-generation-large-zh": "alibaba-pai/bart-essay-generation-large-zh.tgz", - "alibaba-pai/bart-question-generation-large-zh": "alibaba-pai/bart-question-generation-base-zh.tgz", - "alibaba-pai/glm-essay-generation-large-zh": "alibaba-pai/mg/glm-essay-generation-large-zh.tgz", - "alibaba-pai/bart-poem-generation-large-zh": "alibaba-pai/bart-poem-generation-large-zh.tgz", - "alibaba-pai/randeng-poem-generation-base-zh": "alibaba-pai/randeng-poem-generation-base-zh.tgz", - "alibaba-pai/pai-kangaroo-base-chinese": "alibaba-pai/pai-kangaroo-base-chinese.tgz", - "alibaba-pai/pai-kangaroo-fin-base-chinese": "alibaba-pai/pai-kangaroo-fin-base-chinese.tgz", - "alibaba-pai/textcnn-en": "alibaba-pai/textcnn-en.tgz", - "alibaba-pai/mt5-title-generation-zh": "alibaba-pai/mt5-title-generation-zh.tgz", - "alibaba-pai/pegasus-summary-generation-en": "alibaba-pai/pegasus-summary-generation-en.tgz", - "alibaba-pai/gpt2-chitchat-zh": "alibaba-pai/gpt2-chitchat-zh.tgz", - "alibaba-pai/pai-dkplm-medical-base-zh": "alibaba-pai/pai-dkplm-medical-base-zh.tgz", - 
"alibaba-pai/pai-dkplm-medical-small-zh": "alibaba-pai/pai-dkplm-medical-small-zh.tgz", - "alibaba-pai/pai-kgbert-tax-base-zh": "alibaba-pai/pai-kgbert-tax-base-zh.tgz", - "alibaba-pai/pai-kgbert-insurance-base-zh": "alibaba-pai/pai-kgbert-insurance-base-zh.tgz", - "alibaba-pai/pai-bert-medical-base-zh": "alibaba-pai/pai-bert-medical-base-zh.tgz", - "alibaba-pai/pai-bert-base-zh": "alibaba-pai/pai-bert-base-zh.tgz", - "alibaba-pai/pai-bert-small-zh": "alibaba-pai/pai-bert-small-zh.tgz", - "alibaba-pai/pai-bert-tiny-zh": "alibaba-pai/pai-bert-tiny-zh.tgz", - "alibaba-pai/pai-geep-base-uncased": "alibaba-pai/geep-base-uncased.tgz", - "alibaba-pai/pai-painter-base-zh": "alibaba-pai/pai-painter-base-zh.tgz", - "alibaba-pai/pai-painter-large-zh": "alibaba-pai/pai-painter-large-zh.tgz", - "alibaba-pai/pai-painter-huge-zh": "alibaba-pai/pai-painter-huge-zh.tgz", - "alibaba-pai/pai-artist-knowl-base-zh": "alibaba-pai/pai-artist-knowl-base-zh.tgz", - "alibaba-pai/pai-artist-t2i-base-zh": "alibaba-pai/pai-artist-t2i-base-zh.tgz", - "alibaba-pai/pai-artist-t2i-large-zh": "alibaba-pai/pai-artist-t2i-large-zh.tgz", - "alibaba-pai/pai-artist-i2t-base-zh": "alibaba-pai/pai-artist-i2t-base-zh.tgz", - "alibaba-pai/pai-artist-i2t-large-zh": "alibaba-pai/pai-artist-i2t-large-zh.tgz", - "alibaba-pai/pai-vqgan-gpt-i2t-base-zh": "alibaba-pai/pai-vqgan-gpt-i2t-base-zh.tgz", - "alibaba-pai/pai-vqgan-gpt-i2t-large-zh": "alibaba-pai/pai-vqgan-gpt-i2t-large-zh.tgz", - "alibaba-pai/pai-clip-gpt-i2t-base-zh": "alibaba-pai/pai-clip-gpt-i2t-base-zh.tgz", - "alibaba-pai/pai-clip-gpt-i2t-large-zh": "alibaba-pai/pai-clip-gpt-i2t-large-zh.tgz", - "alibaba-pai/pai-clip-commercial-base-en": "alibaba-pai/pai-clip-commercial-base-en.tgz", - "alibaba-pai/pai-clip-commercial-large-en": "alibaba-pai/pai-clip-commercial-large-en.tgz", - "alibaba-pai/pai-ck_bert-base-zh": "alibaba-pai/pai-ck_bert-base-zh.tgz", - "alibaba-pai/pai-ck_bert-large-zh": "alibaba-pai/pai-ck_bert-large-zh.tgz", - "alibaba-pai/pai-ck_bert-huge-zh": "alibaba-pai/pai-ck_bert-huge-zh.tgz", - "alibaba-pai/pai-ldm-fashion-base-zh": "alibaba-pai/pai-ldm-fashion-base-zh.tgz", - "alibaba-pai/pai-ldm-food-base-zh": "alibaba-pai/pai-ldm-food-base-zh.tgz", - "m6-students/m6-text-cls-student-small": "m6-students/m6-text-cls-student-small.tgz", - "m6-students/m6-text-cls-student-tiny": "m6-students/m6-text-cls-student-tiny.tgz", - "m6-students/m6-text-cls-student-base": "m6-students/m6-text-cls-student-base.tgz", - "m6-students/m6-text-gen-student": "m6-students/m6-text-gen-student.tgz", - "globalpointforie/globalpointforie-zh": "globalpointforie/globalpointforie-zh.tgz" -} \ No newline at end of file diff --git a/tests/tmp/da_test_0.out b/tests/tmp/da_test_0.out deleted file mode 100644 index 1239aca6..00000000 --- a/tests/tmp/da_test_0.out +++ /dev/null @@ -1,115 +0,0 @@ ---2022-12-07 12:00:45-- http://atp-modelzoo-sh.oss-cn-shanghai.aliyuncs.com/release/easynlp_modelzoo/modelzoo_alibaba.json -Resolving atp-modelzoo-sh.oss-cn-shanghai.aliyuncs.com (atp-modelzoo-sh.oss-cn-shanghai.aliyuncs.com)... 47.101.88.27 -Connecting to atp-modelzoo-sh.oss-cn-shanghai.aliyuncs.com (atp-modelzoo-sh.oss-cn-shanghai.aliyuncs.com)|47.101.88.27|:80... connected. -HTTP request sent, awaiting response... 200 OK -Length: 6605 (6.5K) [application/json] -Saving to: ‘modelzoo_alibaba.json’ - - 0K ...... 
100% 681M=0s - -2022-12-07 12:00:45 (681 MB/s) - ‘modelzoo_alibaba.json’ saved [6605/6605] - -/apsarapangu/disk2/zhoujiyong.zjy/miniconda3/envs/ie/lib/python3.8/site-packages/torch/distributed/launch.py:178: FutureWarning: The module torch.distributed.launch is deprecated -and will be removed in future. Use torchrun. -Note that --use_env is set by default in torchrun. -If your script expects `--local_rank` argument to be set, please -change it to read from `os.environ['LOCAL_RANK']` instead. See -https://pytorch.org/docs/stable/distributed.html#launch-utility for -further instructions - - warnings.warn( -[W socket.cpp:401] [c10d] The server socket cannot be initialized on [::]:52463 (errno: 97 - Address family not supported by protocol). -[W socket.cpp:558] [c10d] The client socket cannot be initialized to connect to [localhost]:52463 (errno: 97 - Address family not supported by protocol). -[W socket.cpp:558] [c10d] The client socket cannot be initialized to connect to [localhost]:52463 (errno: 97 - Address family not supported by protocol). -NOTE: if you wish to use GLM models, please refer to EasyNLP/examples/appzoo_tutorials/sequence_generation/README.md! -name 'deepspeed' is not defined -The following parameters are not recognized: [] ------------------------- arguments ------------------------ - app_name ........................................ data_augmentation - append_cols ..................................... None - buckets ......................................... None - checkpoint_dir .................................. _ - chief_hosts ..................................... - data_threads .................................... 10 - distributed_backend ............................. nccl - do_lower_case ................................... False - epoch_num ....................................... 3 - export_tf_checkpoint_type ....................... easytransfer - first_sequence .................................. sent - gradient_accumulation_steps ..................... 1 - input_schema .................................... index:str:1,sent:str:1,label:str:1 - is_chief ........................................ - is_master_node .................................. True - job_name ........................................ None - label_enumerate_values .......................... None - label_name ...................................... None - learning_rate ................................... 5e-05 - local_rank ...................................... 0 - logging_steps ................................... 100 - master_port ..................................... 23456 - max_grad_norm ................................... 1.0 - mg_model ........................................ False - micro_batch_size ................................ 8 - mode ............................................ predict - modelzoo_base_dir ............................... - n_cpu ........................................... 1 - n_gpu ........................................... 0 - odps_config ..................................... None - optimizer_type .................................. AdamW - output_schema ................................... augmented_data - outputs ......................................... tmp/train_aug.tsv - predict_queue_size .............................. 1024 - predict_slice_size .............................. 4096 - predict_table_read_thread_num ................... 16 - predict_thread_num .............................. 1 - ps_hosts ........................................ 
- random_seed ..................................... 1234 - rank ............................................ 0 - read_odps ....................................... False - restore_works_dir ............................... ./.easynlp_predict_restore_works_dir - resume_from_checkpoint .......................... None - save_all_checkpoints ............................ False - save_checkpoint_steps ........................... None - second_sequence ................................. None - sequence_length ................................. 128 - skip_first_line ................................. False - tables .......................................... tmp/train_head.tsv - task_count ...................................... 1 - task_index ...................................... 0 - use_amp ......................................... False - use_torchacc .................................... False - user_defined_parameters ......................... pretrain_model_name_or_path=bert-small-uncased type=mlm_da expansion_rate=2 mask_proportion=0.1 remove_blanks=True append_original=True - user_entry_file ................................. None - user_script ..................................... None - warmup_proportion ............................... 0.1 - weight_decay .................................... 0.0001 - worker_count .................................... 1 - worker_cpu ...................................... -1 - worker_gpu ...................................... -1 - worker_hosts .................................... None - world_size ...................................... 1 --------------------- end of arguments --------------------- -> initializing torch distributed ... -[W socket.cpp:558] [c10d] The client socket cannot be initialized to connect to [localhost]:52463 (errno: 97 - Address family not supported by protocol). -[W socket.cpp:558] [c10d] The client socket cannot be initialized to connect to [localhost]:52463 (errno: 97 - Address family not supported by protocol). -Init dist done. World size: 1, rank 0, l_rank 0 -> setting random seeds to 1234 ... 
-`/home/zhoujiyong.zjy/.easynlp/modelzoo/public/bert-small-uncased.tgz` already exists - - Loaded weights of the model: - [bert.embeddings.word_embeddings.weight,bert.embeddings.position_embeddings.weight,bert.embeddings.token_type_embeddings.weight,bert.embeddings.LayerNorm.weight,bert.embeddings.LayerNorm.bias,bert.encoder.layer.0.attention.self.query.weight,bert.encoder.layer.0.attention.self.query.bias,bert.encoder.layer.0.attention.self.key.weight,bert.encoder.layer.0.attention.self.key.bias,bert.encoder.layer.0.attention.self.value.weight,bert.encoder.layer.0.attention.self.value.bias,bert.encoder.layer.0.attention.output.dense.weight,bert.encoder.layer.0.attention.output.dense.bias,bert.encoder.layer.0.attention.output.LayerNorm.weight,bert.encoder.layer.0.attention.output.LayerNorm.bias,bert.encoder.layer.0.intermediate.dense.weight,bert.encoder.layer.0.intermediate.dense.bias,bert.encoder.layer.0.output.dense.weight,bert.encoder.layer.0.output.dense.bias,bert.encoder.layer.0.output.LayerNorm.weight,bert.encoder.layer.0.output.LayerNorm.bias,bert.encoder.layer.1.attention.self.query.weight,bert.encoder.layer.1.attention.self.query.bias,bert.encoder.layer.1.attention.self.key.weight,bert.encoder.layer.1.attention.self.key.bias,bert.encoder.layer.1.attention.self.value.weight,bert.encoder.layer.1.attention.self.value.bias,bert.encoder.layer.1.attention.output.dense.weight,bert.encoder.layer.1.attention.output.dense.bias,bert.encoder.layer.1.attention.output.LayerNorm.weight,bert.encoder.layer.1.attention.output.LayerNorm.bias,bert.encoder.layer.1.intermediate.dense.weight,bert.encoder.layer.1.intermediate.dense.bias,bert.encoder.layer.1.output.dense.weight,bert.encoder.layer.1.output.dense.bias,bert.encoder.layer.1.output.LayerNorm.weight,bert.encoder.layer.1.output.LayerNorm.bias,bert.encoder.layer.2.attention.self.query.weight,bert.encoder.layer.2.attention.self.query.bias,bert.encoder.layer.2.attention.self.key.weight,bert.encoder.layer.2.attention.self.key.bias,bert.encoder.layer.2.attention.self.value.weight,bert.encoder.layer.2.attention.self.value.bias,bert.encoder.layer.2.attention.output.dense.weight,bert.encoder.layer.2.attention.output.dense.bias,bert.encoder.layer.2.attention.output.LayerNorm.weight,bert.encoder.layer.2.attention.output.LayerNorm.bias,bert.encoder.layer.2.intermediate.dense.weight,bert.encoder.layer.2.intermediate.dense.bias,bert.encoder.layer.2.output.dense.weight,bert.encoder.layer.2.output.dense.bias,bert.encoder.layer.2.output.LayerNorm.weight,bert.encoder.layer.2.output.LayerNorm.bias,bert.encoder.layer.3.attention.self.query.weight,bert.encoder.layer.3.attention.self.query.bias,bert.encoder.layer.3.attention.self.key.weight,bert.encoder.layer.3.attention.self.key.bias,bert.encoder.layer.3.attention.self.value.weight,bert.encoder.layer.3.attention.self.value.bias,bert.encoder.layer.3.attention.output.dense.weight,bert.encoder.layer.3.attention.output.dense.bias,bert.encoder.layer.3.attention.output.LayerNorm.weight,bert.encoder.layer.3.attention.output.LayerNorm.bias,bert.encoder.layer.3.intermediate.dense.weight,bert.encoder.layer.3.intermediate.dense.bias,bert.encoder.layer.3.output.dense.weight,bert.encoder.layer.3.output.dense.bias,bert.encoder.layer.3.output.LayerNorm.weight,bert.encoder.layer.3.output.LayerNorm.bias,bert.pooler.dense.weight,bert.pooler.dense.bias,cls.predictions.bias,cls.predictions.transform.dense.weight,cls.predictions.transform.dense.bias,cls.predictions.transform.LayerNorm.weight,cls.predictions.transform.LayerNorm.bias,cls.
predictions.decoder.weight,cls.predictions.decoder.bias,cls.seq_relationship.weight,cls.seq_relationship.bias]. - - - Unloaded weights of the model: - [cls.seq_relationship.weight,cls.seq_relationship.bias]. - This IS expected if you initialize A model from B. - This IS NOT expected if you initialize A model from A. - - 0it [00:00, ?it/s] 1it [00:00, 4.60it/s] 2it [00:00, 4.54it/s] 3it [00:00, 4.69it/s] 4it [00:00, 4.78it/s] 5it [00:01, 4.80it/s] 6it [00:01, 4.84it/s] 7it [00:01, 4.86it/s] 8it [00:01, 4.86it/s] 8it [00:01, 4.80it/s] -[2022-12-07 12:00:52,114 INFO] Duration time: 2.3603076934814453 s -NOTE: if you wish to use GLM models, please refer to EasyNLP/examples/appzoo_tutorials/sequence_generation/README.md! -The following parameters are not recognized: [] -Trying downloading name_mapping.json -Success -`/home/zhoujiyong.zjy/.easynlp/modelzoo/public/bert-small-uncased.tgz` already exists -python -m torch.distributed.launch --nproc_per_node 1 --nnodes 1 --node_rank 0 --master_addr localhost --master_port 52463 /home/zhoujiyong.zjy/.local/lib/python3.8/site-packages/pai_easynlp-0.1.0-py3.8.egg/easynlp/appzoo/api.py --mode predict --tables tmp/train_head.tsv --input_schema index:str:1,sent:str:1,label:str:1 --first_sequence sent --checkpoint_dir _ --export_tf_checkpoint_type easytransfer --learning_rate 5e-05 --epoch_num 3 --random_seed 1234 --predict_queue_size 1024 --predict_slice_size 4096 --predict_thread_num 1 --outputs tmp/train_aug.tsv --output_schema augmented_data --restore_works_dir ./.easynlp_predict_restore_works_dir --sequence_length 128 --micro_batch_size 8 --app_name data_augmentation --user_defined_parameters pretrain_model_name_or_path=bert-small-uncased type=mlm_da expansion_rate=2 mask_proportion=0.1 remove_blanks=True append_original=True From 179deba948181cba0a90b2a383d84e305c477b20 Mon Sep 17 00:00:00 2001 From: deplay <1150696014@qq.com> Date: Thu, 8 Dec 2022 11:10:26 +0800 Subject: [PATCH 011/101] add sr to ldm --- easynlp/appzoo/__init__.py | 8 +- easynlp/appzoo/api.py | 3 + easynlp/appzoo/latent_diffusion/model.py | 21 +- .../models/latent_diffusion/RRDBNet_arch.py | 95 +++++++ .../modelzoo/models/latent_diffusion/ddpm.py | 4 +- .../modelzoo/models/latent_diffusion/plms.py | 236 ++++++++++++++++++ easynlp/pipelines/__init__.py | 34 ++- .../main.py | 0 examples/latent_diffusion/pipeline_example.py | 56 +++++ ...run_latent_diffusion_local_user_defined.sh | 2 +- .../pipeline_example.py | 33 --- 11 files changed, 442 insertions(+), 50 deletions(-) create mode 100644 easynlp/modelzoo/models/latent_diffusion/RRDBNet_arch.py create mode 100644 easynlp/modelzoo/models/latent_diffusion/plms.py rename examples/{latent_diffusion_fashion => latent_diffusion}/main.py (100%) create mode 100644 examples/latent_diffusion/pipeline_example.py rename examples/{latent_diffusion_fashion => latent_diffusion}/run_latent_diffusion_local_user_defined.sh (93%) delete mode 100644 examples/latent_diffusion_fashion/pipeline_example.py diff --git a/easynlp/appzoo/__init__.py b/easynlp/appzoo/__init__.py index db33e9d0..327bae5a 100644 --- a/easynlp/appzoo/__init__.py +++ b/easynlp/appzoo/__init__.py @@ -26,7 +26,7 @@ "data_augmentation.model": ["DataAugmentation"], "geep_classification.model": ["GEEPClassification"], "clip.model": ["CLIPApp"], -# "latent_diffusion.model": ["LatentDiffusion"], + "latent_diffusion.model": ["LatentDiffusion"], "wukong_clip.model": ["WukongCLIP"], "text2image_generation.model": ["TextImageGeneration", "TextImageGeneration_knowl"], 
"image2text_generation.model": ['VQGANGPTImageTextGeneration', 'CLIPGPTImageTextGeneration'], @@ -54,7 +54,7 @@ "data_augmentation.predictor": ['DataAugmentationPredictor'], "geep_classification.predictor": ['GEEPClassificationPredictor'], "clip.predictor": ['CLIPPredictor'], -# "latent_diffusion.predictor": ['LatentDiffusionPredictor'], + "latent_diffusion.predictor": ['LatentDiffusionPredictor'], "wukong_clip.predictor": ['WukongCLIPPredictor'], "text2image_generation.predictor": ['TextImageGenerationPredictor', 'TextImageGenerationKnowlPredictor'], "image2text_generation.predictor": ['VQGANGPTImageTextGenerationPredictor', 'CLIPGPTImageTextGenerationPredictor'], @@ -87,7 +87,7 @@ from .data_augmentation.model import DataAugmentation from .geep_classification.model import GEEPClassification from .clip.model import CLIPApp -# from .latent_diffusion.model import LatentDiffusion + from .latent_diffusion.model import LatentDiffusion from .wukong_clip.model import WukongCLIP from .text2image_generation.model import TextImageGeneration, TextImageGeneration_knowl from .image2text_generation.model import VQGANGPTImageTextGeneration, CLIPGPTImageTextGeneration @@ -115,7 +115,7 @@ from .data_augmentation.predictor import DataAugmentationPredictor from .geep_classification.predictor import GEEPClassificationPredictor from .clip.predictor import CLIPPredictor -# from .latent_diffusion.predictor import LatentDiffusionPredictor + from .latent_diffusion.predictor import LatentDiffusionPredictor from .wukong_clip.predictor import WukongCLIPPredictor from .text2image_generation.predictor import TextImageGenerationPredictor, TextImageGenerationKnowlPredictor from .image2text_generation.predictor import VQGANGPTImageTextGenerationPredictor, CLIPGPTImageTextGenerationPredictor diff --git a/easynlp/appzoo/api.py b/easynlp/appzoo/api.py index 1d6be819..374e85bc 100644 --- a/easynlp/appzoo/api.py +++ b/easynlp/appzoo/api.py @@ -28,6 +28,7 @@ from easynlp.appzoo import CLIPApp from easynlp.appzoo import WukongCLIP from easynlp.appzoo import TextImageGeneration +from easynlp.appzoo import LatentDiffusion from easynlp.appzoo import VQGANGPTImageTextGeneration, CLIPGPTImageTextGeneration from easynlp.appzoo import CLIPGPTFrameTextGeneration from easynlp.appzoo.sequence_generation.model import SequenceGeneration @@ -54,6 +55,7 @@ from easynlp.appzoo import CLIPPredictor from easynlp.appzoo import WukongCLIPPredictor from easynlp.appzoo import TextImageGenerationPredictor +from easynlp.appzoo import LatentDiffusionPredictor from easynlp.appzoo import VQGANGPTImageTextGenerationPredictor, CLIPGPTImageTextGenerationPredictor from easynlp.appzoo import CLIPGPTFrameTextGenerationPredictor from easynlp.appzoo import SequenceGenerationPredictor @@ -224,6 +226,7 @@ 'clip': [CLIPPredictor, CLIPApp], 'wukong_clip': [WukongCLIPPredictor, WukongCLIP], 'text2image_generation': [TextImageGenerationPredictor, TextImageGeneration], + 'latent_diffusion': [LatentDiffusionPredictor, LatentDiffusion], 'image2text_generation': { 'enable_vit': [CLIPGPTImageTextGenerationPredictor, CLIPGPTImageTextGeneration], 'enable_vqgan': [VQGANGPTImageTextGenerationPredictor, VQGANGPTImageTextGeneration], diff --git a/easynlp/appzoo/latent_diffusion/model.py b/easynlp/appzoo/latent_diffusion/model.py index 42704dba..328e4364 100644 --- a/easynlp/appzoo/latent_diffusion/model.py +++ b/easynlp/appzoo/latent_diffusion/model.py @@ -25,10 +25,12 @@ from einops import rearrange from PIL import Image -from ...modelzoo.models.latent_diffusion.ddpm import 
ModelZoo_LatentDiffusion +from ...modelzoo.models.latent_diffusion.ddpm import LatentDiffusionModel from ...modelzoo.models.latent_diffusion.autoencoder import AutoencoderKL from ...modelzoo.models.latent_diffusion.wukong import FrozenWukongCLIPTextEmbedder -from ...modelzoo.models.latent_diffusion.ddim import DDIMSampler +# from ...modelzoo.models.latent_diffusion.ddim import DDIMSampler +from ...modelzoo.models.latent_diffusion.plms import PLMSSampler +from ...modelzoo.models.latent_diffusion.RRDBNet_arch import ESRGAN from ...utils import losses, get_pretrain_model_path, get_args from ..application import Application @@ -62,28 +64,29 @@ def __init__(self, pretrained_model_name_or_path=None,args=None,user_defined_par all_params["cond_stage_config"]["params"]["version"]=os.path.join(pretrained_model_name_or_path,'wukong_vit_l_14_clip') all_params["cond_stage_model"]=FrozenWukongCLIPTextEmbedder(**all_params["cond_stage_config"]["params"]) - self.model=ModelZoo_LatentDiffusion(**all_params) + self.model=LatentDiffusionModel(**all_params) m, u = self.model.load_state_dict(sd, strict=False) - if len(m) > 0 and verbose: + if len(m) > 0: print("missing keys:") print(m) - if len(u) > 0 and verbose: + if len(u) > 0: print("unexpected keys:") print(u) self.model.eval() _device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu") self.model = self.model.to(_device) - self.sampler = DDIMSampler(self.model) - + self.sr_model = ESRGAN(os.path.join(pretrained_model_name_or_path,'RRDB_ESRGAN_x4.pth'), _device) + self.sampler = PLMSSampler(self.model) self.scale=float(user_defined_parameters.pop('scale',5.0)) self.n_samples=int(user_defined_parameters.pop('n_samples',4)) self.n_iter=int(user_defined_parameters.pop('n_iter',1)) self.H=int(user_defined_parameters.pop('H',256)) self.W=int(user_defined_parameters.pop('W',256)) - self.ddim_steps=int(user_defined_parameters.pop('ddim_steps',50)) + self.ddim_steps=int(user_defined_parameters.pop('ddim_steps',20)) self.ddim_eta=float(user_defined_parameters.pop('ddim_eta',0.0)) self.image_prefix=user_defined_parameters.pop('image_prefix','./') + self.do_sr=user_defined_parameters.pop('do_sr',False) if 'write_image' in user_defined_parameters: self.write_image=True else: @@ -110,6 +113,8 @@ def forward(self, inputs): eta=self.ddim_eta) x_samples_ddim = self.model.decode_first_stage(samples_ddim) x_samples_ddim = torch.clamp((x_samples_ddim+1.0)/2.0, min=0.0, max=1.0) + if self.do_sr is True: + x_samples_ddim = self.sr_model.super_resolution(x_samples_ddim) all_samples.append({'image_tensor':x_samples_ddim,'text':one_input["text"]}) return all_samples diff --git a/easynlp/modelzoo/models/latent_diffusion/RRDBNet_arch.py b/easynlp/modelzoo/models/latent_diffusion/RRDBNet_arch.py new file mode 100644 index 00000000..209833ad --- /dev/null +++ b/easynlp/modelzoo/models/latent_diffusion/RRDBNet_arch.py @@ -0,0 +1,95 @@ +import functools +import torch +import torch.nn as nn +import torch.nn.functional as F + + +def make_layer(block, n_layers): + layers = [] + for _ in range(n_layers): + layers.append(block()) + return nn.Sequential(*layers) + + +class ResidualDenseBlock_5C(nn.Module): + def __init__(self, nf=64, gc=32, bias=True): + super(ResidualDenseBlock_5C, self).__init__() + # gc: growth channel, i.e. 
intermediate channels + self.conv1 = nn.Conv2d(nf, gc, 3, 1, 1, bias=bias) + self.conv2 = nn.Conv2d(nf + gc, gc, 3, 1, 1, bias=bias) + self.conv3 = nn.Conv2d(nf + 2 * gc, gc, 3, 1, 1, bias=bias) + self.conv4 = nn.Conv2d(nf + 3 * gc, gc, 3, 1, 1, bias=bias) + self.conv5 = nn.Conv2d(nf + 4 * gc, nf, 3, 1, 1, bias=bias) + self.lrelu = nn.LeakyReLU(negative_slope=0.2, inplace=True) + + # initialization + # mutil.initialize_weights([self.conv1, self.conv2, self.conv3, self.conv4, self.conv5], 0.1) + + def forward(self, x): + x1 = self.lrelu(self.conv1(x)) + x2 = self.lrelu(self.conv2(torch.cat((x, x1), 1))) + x3 = self.lrelu(self.conv3(torch.cat((x, x1, x2), 1))) + x4 = self.lrelu(self.conv4(torch.cat((x, x1, x2, x3), 1))) + x5 = self.conv5(torch.cat((x, x1, x2, x3, x4), 1)) + return x5 * 0.2 + x + + +class RRDB(nn.Module): + '''Residual in Residual Dense Block''' + + def __init__(self, nf, gc=32): + super(RRDB, self).__init__() + self.RDB1 = ResidualDenseBlock_5C(nf, gc) + self.RDB2 = ResidualDenseBlock_5C(nf, gc) + self.RDB3 = ResidualDenseBlock_5C(nf, gc) + + def forward(self, x): + out = self.RDB1(x) + out = self.RDB2(out) + out = self.RDB3(out) + return out * 0.2 + x + + +class RRDBNet(nn.Module): + def __init__(self, in_nc, out_nc, nf, nb, gc=32): + super(RRDBNet, self).__init__() + RRDB_block_f = functools.partial(RRDB, nf=nf, gc=gc) + + self.conv_first = nn.Conv2d(in_nc, nf, 3, 1, 1, bias=True) + self.RRDB_trunk = make_layer(RRDB_block_f, nb) + self.trunk_conv = nn.Conv2d(nf, nf, 3, 1, 1, bias=True) + #### upsampling + self.upconv1 = nn.Conv2d(nf, nf, 3, 1, 1, bias=True) + self.upconv2 = nn.Conv2d(nf, nf, 3, 1, 1, bias=True) + self.HRconv = nn.Conv2d(nf, nf, 3, 1, 1, bias=True) + self.conv_last = nn.Conv2d(nf, out_nc, 3, 1, 1, bias=True) + + self.lrelu = nn.LeakyReLU(negative_slope=0.2, inplace=True) + + def forward(self, x): + fea = self.conv_first(x) + trunk = self.trunk_conv(self.RRDB_trunk(fea)) + fea = fea + trunk + + fea = self.lrelu(self.upconv1(F.interpolate(fea, scale_factor=2, mode='nearest'))) + fea = self.lrelu(self.upconv2(F.interpolate(fea, scale_factor=2, mode='nearest'))) + out = self.conv_last(self.lrelu(self.HRconv(fea))) + + return out + +class ESRGAN: + def __init__(self, model_path, device): + self.device = device + self.model = RRDBNet(3, 3, 64, 23, gc=32) + self.model.load_state_dict(torch.load(model_path), strict=True) + self.model.eval() + self.model = self.model.to(device) + + def super_resolution(self, img_LR): + img_LR = img_LR[:,[2,1,0],:,:] + img_LR = img_LR.to(self.device) + with torch.no_grad(): + output = self.model(img_LR) + output = output.data.float().cpu().clamp_(0, 1) + output = output[:,[2,1,0],:,:] + return output \ No newline at end of file diff --git a/easynlp/modelzoo/models/latent_diffusion/ddpm.py b/easynlp/modelzoo/models/latent_diffusion/ddpm.py index 9dc64b22..8ef1b56f 100644 --- a/easynlp/modelzoo/models/latent_diffusion/ddpm.py +++ b/easynlp/modelzoo/models/latent_diffusion/ddpm.py @@ -422,7 +422,7 @@ def configure_optimizers(self): return opt -class ModelZoo_LatentDiffusion(DDPM): +class LatentDiffusionModel(DDPM): """main class""" def __init__(self, first_stage_config, @@ -1320,7 +1320,7 @@ def forward(self, x, t, c_concat: list = None, c_crossattn: list = None): return out -class Layout2ImgDiffusion(ModelZoo_LatentDiffusion): +class Layout2ImgDiffusion(LatentDiffusionModel): # TODO: move all layout-specific hacks to this class def __init__(self, cond_stage_key, *args, **kwargs): assert cond_stage_key == 'coordinates_bbox', 
'Layout2ImgDiffusion only for cond_stage_key="coordinates_bbox"' diff --git a/easynlp/modelzoo/models/latent_diffusion/plms.py b/easynlp/modelzoo/models/latent_diffusion/plms.py new file mode 100644 index 00000000..4826346d --- /dev/null +++ b/easynlp/modelzoo/models/latent_diffusion/plms.py @@ -0,0 +1,236 @@ +"""SAMPLING ONLY.""" + +import torch +import numpy as np +from tqdm import tqdm +from functools import partial + +from .util import make_ddim_sampling_parameters, make_ddim_timesteps, noise_like + + +class PLMSSampler(object): + def __init__(self, model, schedule="linear", **kwargs): + super().__init__() + self.model = model + self.ddpm_num_timesteps = model.num_timesteps + self.schedule = schedule + + def register_buffer(self, name, attr): + if type(attr) == torch.Tensor: + if attr.device != torch.device("cuda"): + attr = attr.to(torch.device("cuda")) + setattr(self, name, attr) + + def make_schedule(self, ddim_num_steps, ddim_discretize="uniform", ddim_eta=0., verbose=True): + if ddim_eta != 0: + raise ValueError('ddim_eta must be 0 for PLMS') + self.ddim_timesteps = make_ddim_timesteps(ddim_discr_method=ddim_discretize, num_ddim_timesteps=ddim_num_steps, + num_ddpm_timesteps=self.ddpm_num_timesteps,verbose=verbose) + alphas_cumprod = self.model.alphas_cumprod + assert alphas_cumprod.shape[0] == self.ddpm_num_timesteps, 'alphas have to be defined for each timestep' + to_torch = lambda x: x.clone().detach().to(torch.float32).to(self.model.device) + + self.register_buffer('betas', to_torch(self.model.betas)) + self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod)) + self.register_buffer('alphas_cumprod_prev', to_torch(self.model.alphas_cumprod_prev)) + + # calculations for diffusion q(x_t | x_{t-1}) and others + self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod.cpu()))) + self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod.cpu()))) + self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod.cpu()))) + self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu()))) + self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu() - 1))) + + # ddim sampling parameters + ddim_sigmas, ddim_alphas, ddim_alphas_prev = make_ddim_sampling_parameters(alphacums=alphas_cumprod.cpu(), + ddim_timesteps=self.ddim_timesteps, + eta=ddim_eta,verbose=verbose) + self.register_buffer('ddim_sigmas', ddim_sigmas) + self.register_buffer('ddim_alphas', ddim_alphas) + self.register_buffer('ddim_alphas_prev', ddim_alphas_prev) + self.register_buffer('ddim_sqrt_one_minus_alphas', np.sqrt(1. - ddim_alphas)) + sigmas_for_original_sampling_steps = ddim_eta * torch.sqrt( + (1 - self.alphas_cumprod_prev) / (1 - self.alphas_cumprod) * ( + 1 - self.alphas_cumprod / self.alphas_cumprod_prev)) + self.register_buffer('ddim_sigmas_for_original_num_steps', sigmas_for_original_sampling_steps) + + @torch.no_grad() + def sample(self, + S, + batch_size, + shape, + conditioning=None, + callback=None, + normals_sequence=None, + img_callback=None, + quantize_x0=False, + eta=0., + mask=None, + x0=None, + temperature=1., + noise_dropout=0., + score_corrector=None, + corrector_kwargs=None, + verbose=True, + x_T=None, + log_every_t=100, + unconditional_guidance_scale=1., + unconditional_conditioning=None, + # this has to come in the same format as the conditioning, # e.g. as encoded tokens, ... 
+ **kwargs + ): + if conditioning is not None: + if isinstance(conditioning, dict): + cbs = conditioning[list(conditioning.keys())[0]].shape[0] + if cbs != batch_size: + print(f"Warning: Got {cbs} conditionings but batch-size is {batch_size}") + else: + if conditioning.shape[0] != batch_size: + print(f"Warning: Got {conditioning.shape[0]} conditionings but batch-size is {batch_size}") + + self.make_schedule(ddim_num_steps=S, ddim_eta=eta, verbose=verbose) + # sampling + C, H, W = shape + size = (batch_size, C, H, W) + print(f'Data shape for PLMS sampling is {size}') + + samples, intermediates = self.plms_sampling(conditioning, size, + callback=callback, + img_callback=img_callback, + quantize_denoised=quantize_x0, + mask=mask, x0=x0, + ddim_use_original_steps=False, + noise_dropout=noise_dropout, + temperature=temperature, + score_corrector=score_corrector, + corrector_kwargs=corrector_kwargs, + x_T=x_T, + log_every_t=log_every_t, + unconditional_guidance_scale=unconditional_guidance_scale, + unconditional_conditioning=unconditional_conditioning, + ) + return samples, intermediates + + @torch.no_grad() + def plms_sampling(self, cond, shape, + x_T=None, ddim_use_original_steps=False, + callback=None, timesteps=None, quantize_denoised=False, + mask=None, x0=None, img_callback=None, log_every_t=100, + temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None, + unconditional_guidance_scale=1., unconditional_conditioning=None,): + device = self.model.betas.device + b = shape[0] + if x_T is None: + img = torch.randn(shape, device=device) + else: + img = x_T + + if timesteps is None: + timesteps = self.ddpm_num_timesteps if ddim_use_original_steps else self.ddim_timesteps + elif timesteps is not None and not ddim_use_original_steps: + subset_end = int(min(timesteps / self.ddim_timesteps.shape[0], 1) * self.ddim_timesteps.shape[0]) - 1 + timesteps = self.ddim_timesteps[:subset_end] + + intermediates = {'x_inter': [img], 'pred_x0': [img]} + time_range = list(reversed(range(0,timesteps))) if ddim_use_original_steps else np.flip(timesteps) + total_steps = timesteps if ddim_use_original_steps else timesteps.shape[0] + print(f"Running PLMS Sampling with {total_steps} timesteps") + + iterator = tqdm(time_range, desc='PLMS Sampler', total=total_steps) + old_eps = [] + + for i, step in enumerate(iterator): + index = total_steps - i - 1 + ts = torch.full((b,), step, device=device, dtype=torch.long) + ts_next = torch.full((b,), time_range[min(i + 1, len(time_range) - 1)], device=device, dtype=torch.long) + + if mask is not None: + assert x0 is not None + img_orig = self.model.q_sample(x0, ts) # TODO: deterministic forward pass? + img = img_orig * mask + (1. 
- mask) * img + + outs = self.p_sample_plms(img, cond, ts, index=index, use_original_steps=ddim_use_original_steps, + quantize_denoised=quantize_denoised, temperature=temperature, + noise_dropout=noise_dropout, score_corrector=score_corrector, + corrector_kwargs=corrector_kwargs, + unconditional_guidance_scale=unconditional_guidance_scale, + unconditional_conditioning=unconditional_conditioning, + old_eps=old_eps, t_next=ts_next) + img, pred_x0, e_t = outs + old_eps.append(e_t) + if len(old_eps) >= 4: + old_eps.pop(0) + if callback: callback(i) + if img_callback: img_callback(pred_x0, i) + + if index % log_every_t == 0 or index == total_steps - 1: + intermediates['x_inter'].append(img) + intermediates['pred_x0'].append(pred_x0) + + return img, intermediates + + @torch.no_grad() + def p_sample_plms(self, x, c, t, index, repeat_noise=False, use_original_steps=False, quantize_denoised=False, + temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None, + unconditional_guidance_scale=1., unconditional_conditioning=None, old_eps=None, t_next=None): + b, *_, device = *x.shape, x.device + + def get_model_output(x, t): + if unconditional_conditioning is None or unconditional_guidance_scale == 1.: + e_t = self.model.apply_model(x, t, c) + else: + x_in = torch.cat([x] * 2) + t_in = torch.cat([t] * 2) + c_in = torch.cat([unconditional_conditioning, c]) + e_t_uncond, e_t = self.model.apply_model(x_in, t_in, c_in).chunk(2) + e_t = e_t_uncond + unconditional_guidance_scale * (e_t - e_t_uncond) + + if score_corrector is not None: + assert self.model.parameterization == "eps" + e_t = score_corrector.modify_score(self.model, e_t, x, t, c, **corrector_kwargs) + + return e_t + + alphas = self.model.alphas_cumprod if use_original_steps else self.ddim_alphas + alphas_prev = self.model.alphas_cumprod_prev if use_original_steps else self.ddim_alphas_prev + sqrt_one_minus_alphas = self.model.sqrt_one_minus_alphas_cumprod if use_original_steps else self.ddim_sqrt_one_minus_alphas + sigmas = self.model.ddim_sigmas_for_original_num_steps if use_original_steps else self.ddim_sigmas + + def get_x_prev_and_pred_x0(e_t, index): + # select parameters corresponding to the currently considered timestep + a_t = torch.full((b, 1, 1, 1), alphas[index], device=device) + a_prev = torch.full((b, 1, 1, 1), alphas_prev[index], device=device) + sigma_t = torch.full((b, 1, 1, 1), sigmas[index], device=device) + sqrt_one_minus_at = torch.full((b, 1, 1, 1), sqrt_one_minus_alphas[index],device=device) + + # current prediction for x_0 + pred_x0 = (x - sqrt_one_minus_at * e_t) / a_t.sqrt() + if quantize_denoised: + pred_x0, _, *_ = self.model.first_stage_model.quantize(pred_x0) + # direction pointing to x_t + dir_xt = (1. 
- a_prev - sigma_t**2).sqrt() * e_t + noise = sigma_t * noise_like(x.shape, device, repeat_noise) * temperature + if noise_dropout > 0.: + noise = torch.nn.functional.dropout(noise, p=noise_dropout) + x_prev = a_prev.sqrt() * pred_x0 + dir_xt + noise + return x_prev, pred_x0 + + e_t = get_model_output(x, t) + if len(old_eps) == 0: + # Pseudo Improved Euler (2nd order) + x_prev, pred_x0 = get_x_prev_and_pred_x0(e_t, index) + e_t_next = get_model_output(x_prev, t_next) + e_t_prime = (e_t + e_t_next) / 2 + elif len(old_eps) == 1: + # 2nd order Pseudo Linear Multistep (Adams-Bashforth) + e_t_prime = (3 * e_t - old_eps[-1]) / 2 + elif len(old_eps) == 2: + # 3nd order Pseudo Linear Multistep (Adams-Bashforth) + e_t_prime = (23 * e_t - 16 * old_eps[-1] + 5 * old_eps[-2]) / 12 + elif len(old_eps) >= 3: + # 4nd order Pseudo Linear Multistep (Adams-Bashforth) + e_t_prime = (55 * e_t - 59 * old_eps[-1] + 37 * old_eps[-2] - 9 * old_eps[-3]) / 24 + + x_prev, pred_x0 = get_x_prev_and_pred_x0(e_t_prime, index) + + return x_prev, pred_x0, e_t diff --git a/easynlp/pipelines/__init__.py b/easynlp/pipelines/__init__.py index 5e48e372..a7946c87 100644 --- a/easynlp/pipelines/__init__.py +++ b/easynlp/pipelines/__init__.py @@ -57,10 +57,40 @@ 'model_cls': TextImageGeneration, 'default': 'artist-base-zh', }, - 'latent_diffusion': { + 'chinese-ldm-general': { 'impl': LatentDiffusionPipeline, 'model_cls': LatentDiffusion, - 'default': 'latent_diffusion_fashion_cn', + 'default': 'chinese-ldm-general', + }, + 'chinese-ldm-fashion': { + 'impl': LatentDiffusionPipeline, + 'model_cls': LatentDiffusion, + 'default': 'chinese-ldm-fashion', + }, + 'chinese-ldm-art': { + 'impl': LatentDiffusionPipeline, + 'model_cls': LatentDiffusion, + 'default': 'chinese-ldm-art', + }, + 'chinese-ldm-poem': { + 'impl': LatentDiffusionPipeline, + 'model_cls': LatentDiffusion, + 'default': 'chinese-ldm-poem', + }, + 'chinese-ldm-anime': { + 'impl': LatentDiffusionPipeline, + 'model_cls': LatentDiffusion, + 'default': 'chinese-ldm-anime', + }, + 'chinese-ldm-pet': { + 'impl': LatentDiffusionPipeline, + 'model_cls': LatentDiffusion, + 'default': 'chinese-ldm-pet', + }, + 'chinese-ldm-food': { + 'impl': LatentDiffusionPipeline, + 'model_cls': LatentDiffusion, + 'default': 'chinese-ldm-food', }, 'image2text_generation': { 'impl': ImageTextGenerationPipeline, diff --git a/examples/latent_diffusion_fashion/main.py b/examples/latent_diffusion/main.py similarity index 100% rename from examples/latent_diffusion_fashion/main.py rename to examples/latent_diffusion/main.py diff --git a/examples/latent_diffusion/pipeline_example.py b/examples/latent_diffusion/pipeline_example.py new file mode 100644 index 00000000..2bd868e7 --- /dev/null +++ b/examples/latent_diffusion/pipeline_example.py @@ -0,0 +1,56 @@ +from easynlp.pipelines import pipeline +from PIL import Image +import base64 +from io import BytesIO +import numpy as np +from einops import rearrange +import os + +# convert image to base64 +def image_to_base64(img): + img_buffer = BytesIO() + img.save(img_buffer,format='png') + byte_data = img_buffer.getvalue() + base64_str = str(base64.b64encode(byte_data), 'utf-8') + return base64_str + +# # general +# generator = pipeline('chinese-ldm-general',pipeline_params={"n_samples":1,"do_sr":True}) +# data = ['绿色外套搭配蓝色牛仔裤'] + +# # fashion +# generator = pipeline('chinese-ldm-fashion',pipeline_params={"n_samples":1,"do_sr":True}) +# data = ['黄色连帽卫衣'] + +# # art +# generator = pipeline('chinese-ldm-art',pipeline_params={"n_samples":1,"do_sr":True}) +# data 
= ['有群牛羊在吃草'] + +# # poem +# generator = pipeline('chinese-ldm-poem',pipeline_params={"n_samples":1,"do_sr":True}) +# data = ['一行白鹭上青天'] + +# # anime +# generator = pipeline('chinese-ldm-anime',pipeline_params={"n_samples":1,"do_sr":True}) +# data = ['粉色头发,穿裙子的少女'] + +# # pet +# generator = pipeline('chinese-ldm-pet',pipeline_params={"n_samples":1,"do_sr":True}) +# data = ['一只黄色的猫'] + +# food +generator = pipeline('chinese-ldm-food',pipeline_params={"n_samples":1,"do_sr":True}) +data = ['青椒炒牛肉'] + +# 生成结果 +result=generator(data) +for one_prompt in result: + # print(one_prompt) + for idx,one_image_tensor_raw in enumerate(one_prompt['image_tensor']): + one_image_tensor = 255. * rearrange(one_image_tensor_raw.cpu().numpy(), 'c h w -> h w c') + pil_image=Image.fromarray(one_image_tensor.astype(np.uint8)) + # 保存图片 + pil_image.save(os.path.join('./',one_prompt['text']+f"_{idx:04}.png")) + # 生成base64 + # b64_image=image_to_base64(pil_image) + # print(b64_image) \ No newline at end of file diff --git a/examples/latent_diffusion_fashion/run_latent_diffusion_local_user_defined.sh b/examples/latent_diffusion/run_latent_diffusion_local_user_defined.sh similarity index 93% rename from examples/latent_diffusion_fashion/run_latent_diffusion_local_user_defined.sh rename to examples/latent_diffusion/run_latent_diffusion_local_user_defined.sh index 1c3f3360..6555db9f 100644 --- a/examples/latent_diffusion_fashion/run_latent_diffusion_local_user_defined.sh +++ b/examples/latent_diffusion/run_latent_diffusion_local_user_defined.sh @@ -22,7 +22,7 @@ if [ "$mode" = "predict" ]; then --output_schema=text \ --outputs ./output_placeholder.tsv \ --first_sequence=text \ - --checkpoint_dir=alibaba-pai/latent_diffusion_fashion_cn_860M \ + --checkpoint_dir=alibaba-pai/pai-diffusion-food-large-zh \ --random_seed=42 \ --logging_steps=100 \ --save_checkpoint_steps=500 \ diff --git a/examples/latent_diffusion_fashion/pipeline_example.py b/examples/latent_diffusion_fashion/pipeline_example.py deleted file mode 100644 index 95868540..00000000 --- a/examples/latent_diffusion_fashion/pipeline_example.py +++ /dev/null @@ -1,33 +0,0 @@ -from easynlp.pipelines import pipeline -from PIL import Image -import base64 -from io import BytesIO -import numpy as np -from einops import rearrange -import os - -# convert image to base64 -def image_to_base64(img): - img_buffer = BytesIO() - img.save(img_buffer,format='png') - byte_data = img_buffer.getvalue() - base64_str = str(base64.b64encode(byte_data), 'utf-8') - return base64_str - -# init pipeline. -generator = pipeline('latent_diffusion',pipeline_params={"n_samples":2}) - -# input data -data = ['远处的雪山,表面覆盖着厚厚的积雪','湖边有一片森林'] -result=generator(data) - -for one_prompt in result: - # print(one_prompt) - for idx,one_image_tensor_raw in enumerate(one_prompt['image_tensor']): - one_image_tensor = 255. 
* rearrange(one_image_tensor_raw.cpu().numpy(), 'c h w -> h w c') - pil_image=Image.fromarray(one_image_tensor.astype(np.uint8)) - # 保存图片 - # pil_image.save(os.path.join('./',one_prompt['text']+f"_{idx:04}.png")) - # 生成base64 - # b64_image=image_to_base64(pil_image) - # print(b64_image) \ No newline at end of file From fea73f97c2b3712065739cf8cb0a53a4f6a54822 Mon Sep 17 00:00:00 2001 From: deplay <1150696014@qq.com> Date: Thu, 8 Dec 2022 11:43:09 +0800 Subject: [PATCH 012/101] change python 3.6 to python 3.6.15 --- .github/workflows/unit_test.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/unit_test.yml b/.github/workflows/unit_test.yml index d5953d9b..eca0f4de 100644 --- a/.github/workflows/unit_test.yml +++ b/.github/workflows/unit_test.yml @@ -28,7 +28,7 @@ jobs: - name: Set up Python 3.6 uses: actions/setup-python@v2 with: - python-version: 3.6 + python-version: 3.6.15 # Runs a single command using the runners shell - name: Run a one-line script From d59b29f1ddbd8742291e1160805403e224c46c82 Mon Sep 17 00:00:00 2001 From: deplay <1150696014@qq.com> Date: Thu, 8 Dec 2022 13:11:50 +0800 Subject: [PATCH 013/101] change 3.6.15 to '3.6' --- .github/workflows/unit_test.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/unit_test.yml b/.github/workflows/unit_test.yml index eca0f4de..cc3ae7be 100644 --- a/.github/workflows/unit_test.yml +++ b/.github/workflows/unit_test.yml @@ -28,7 +28,7 @@ jobs: - name: Set up Python 3.6 uses: actions/setup-python@v2 with: - python-version: 3.6.15 + python-version: '3.6' # Runs a single command using the runners shell - name: Run a one-line script From 3e6ae9f82b704e5bdf4e3f43000e885cceda1557 Mon Sep 17 00:00:00 2001 From: deplay <1150696014@qq.com> Date: Thu, 8 Dec 2022 13:15:08 +0800 Subject: [PATCH 014/101] change '3.6' to '3.6.15' --- .github/workflows/unit_test.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/unit_test.yml b/.github/workflows/unit_test.yml index cc3ae7be..11f74069 100644 --- a/.github/workflows/unit_test.yml +++ b/.github/workflows/unit_test.yml @@ -28,7 +28,7 @@ jobs: - name: Set up Python 3.6 uses: actions/setup-python@v2 with: - python-version: '3.6' + python-version: '3.6.15' # Runs a single command using the runners shell - name: Run a one-line script From d61063dcbc5c3e67840ac06917d23fd9d0e8df25 Mon Sep 17 00:00:00 2001 From: deplay <1150696014@qq.com> Date: Thu, 8 Dec 2022 14:12:45 +0800 Subject: [PATCH 015/101] use ubuntu20.04 --- .github/workflows/unit_test.yml | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/.github/workflows/unit_test.yml b/.github/workflows/unit_test.yml index 11f74069..d54117c2 100644 --- a/.github/workflows/unit_test.yml +++ b/.github/workflows/unit_test.yml @@ -18,7 +18,7 @@ jobs: # This workflow contains a single job called "build" build: # The type of runner that the job will run on - runs-on: ubuntu-latest + runs-on: ubuntu-20.04 # Steps represent a sequence of tasks that will be executed as part of the job steps: @@ -28,7 +28,8 @@ jobs: - name: Set up Python 3.6 uses: actions/setup-python@v2 with: - python-version: '3.6.15' + python-version: '3.6' # 指定python版本 + # Runs a single command using the runners shell - name: Run a one-line script From d1a51dbe2642626b39b621daf6dd2bf659d8833f Mon Sep 17 00:00:00 2001 From: deplay <1150696014@qq.com> Date: Thu, 8 Dec 2022 14:34:47 +0800 Subject: [PATCH 016/101] add try-except for pytorch_lightning --- 
easynlp/appzoo/latent_diffusion/model.py | 180 ++++++++++++----------- 1 file changed, 92 insertions(+), 88 deletions(-) diff --git a/easynlp/appzoo/latent_diffusion/model.py b/easynlp/appzoo/latent_diffusion/model.py index 328e4364..688859b7 100644 --- a/easynlp/appzoo/latent_diffusion/model.py +++ b/easynlp/appzoo/latent_diffusion/model.py @@ -25,101 +25,105 @@ from einops import rearrange from PIL import Image -from ...modelzoo.models.latent_diffusion.ddpm import LatentDiffusionModel -from ...modelzoo.models.latent_diffusion.autoencoder import AutoencoderKL -from ...modelzoo.models.latent_diffusion.wukong import FrozenWukongCLIPTextEmbedder -# from ...modelzoo.models.latent_diffusion.ddim import DDIMSampler -from ...modelzoo.models.latent_diffusion.plms import PLMSSampler -from ...modelzoo.models.latent_diffusion.RRDBNet_arch import ESRGAN +try: -from ...utils import losses, get_pretrain_model_path, get_args -from ..application import Application + from ...modelzoo.models.latent_diffusion.ddpm import LatentDiffusionModel + from ...modelzoo.models.latent_diffusion.autoencoder import AutoencoderKL + from ...modelzoo.models.latent_diffusion.wukong import FrozenWukongCLIPTextEmbedder + # from ...modelzoo.models.latent_diffusion.ddim import DDIMSampler + from ...modelzoo.models.latent_diffusion.plms import PLMSSampler + from ...modelzoo.models.latent_diffusion.RRDBNet_arch import ESRGAN -class LatentDiffusion(Application): + from ...utils import losses, get_pretrain_model_path, get_args + from ..application import Application - @classmethod - def from_pretrained(self, pretrained_model_name_or_path,args, user_defined_parameters={}): - instance=LatentDiffusion(pretrained_model_name_or_path,args,user_defined_parameters) - return instance + class LatentDiffusion(Application): - def __init__(self, pretrained_model_name_or_path=None,args=None,user_defined_parameters=None): - super().__init__() + @classmethod + def from_pretrained(self, pretrained_model_name_or_path,args, user_defined_parameters={}): + instance=LatentDiffusion(pretrained_model_name_or_path,args,user_defined_parameters) + return instance - if pretrained_model_name_or_path is not None: - pretrained_model_name_or_path = get_pretrain_model_path(pretrained_model_name_or_path) - # 先处理配置,再决定后续如何加载权重 - with open(pretrained_model_name_or_path+'/config.json','r') as config_handle: - self.config=json.load(config_handle) - # print(self.config) - checkpoint = torch.load(os.path.join(pretrained_model_name_or_path,'pytorch_model.bin'), map_location=torch.device('cpu')) - - sd = checkpoint["state_dict"] - all_params=self.config["model"]["params"] + def __init__(self, pretrained_model_name_or_path=None,args=None,user_defined_parameters=None): + super().__init__() - # 权重放在此处统一管理 - # 一阶段权重 - all_params["first_stage_config"]["params"]["ckpt_path"]=os.path.join(pretrained_model_name_or_path,'first_stage_kl_f8.ckpt') - all_params["first_stage_model"]=AutoencoderKL(**all_params["first_stage_config"]["params"]) - # 条件阶段权重 - all_params["cond_stage_config"]["params"]["version"]=os.path.join(pretrained_model_name_or_path,'wukong_vit_l_14_clip') - all_params["cond_stage_model"]=FrozenWukongCLIPTextEmbedder(**all_params["cond_stage_config"]["params"]) - - self.model=LatentDiffusionModel(**all_params) - - m, u = self.model.load_state_dict(sd, strict=False) - if len(m) > 0: - print("missing keys:") - print(m) - if len(u) > 0: - print("unexpected keys:") - print(u) - self.model.eval() - _device = torch.device("cuda") if torch.cuda.is_available() else 
torch.device("cpu") - self.model = self.model.to(_device) - self.sr_model = ESRGAN(os.path.join(pretrained_model_name_or_path,'RRDB_ESRGAN_x4.pth'), _device) - self.sampler = PLMSSampler(self.model) - self.scale=float(user_defined_parameters.pop('scale',5.0)) - self.n_samples=int(user_defined_parameters.pop('n_samples',4)) - self.n_iter=int(user_defined_parameters.pop('n_iter',1)) - self.H=int(user_defined_parameters.pop('H',256)) - self.W=int(user_defined_parameters.pop('W',256)) - self.ddim_steps=int(user_defined_parameters.pop('ddim_steps',20)) - self.ddim_eta=float(user_defined_parameters.pop('ddim_eta',0.0)) - self.image_prefix=user_defined_parameters.pop('image_prefix','./') - self.do_sr=user_defined_parameters.pop('do_sr',False) - if 'write_image' in user_defined_parameters: - self.write_image=True - else: - self.write_image=False + if pretrained_model_name_or_path is not None: + pretrained_model_name_or_path = get_pretrain_model_path(pretrained_model_name_or_path) + # 先处理配置,再决定后续如何加载权重 + with open(pretrained_model_name_or_path+'/config.json','r') as config_handle: + self.config=json.load(config_handle) + # print(self.config) + checkpoint = torch.load(os.path.join(pretrained_model_name_or_path,'pytorch_model.bin'), map_location=torch.device('cpu')) - def forward(self, inputs): - all_samples=list() - for one_input in inputs: - with torch.no_grad(): - with self.model.ema_scope(): - uc = None - if self.scale != 1.0: - uc = self.model.get_learned_conditioning(self.n_samples * [""]) - for n in trange(self.n_iter, desc="Sampling"): - c = self.model.get_learned_conditioning(self.n_samples * [one_input["text"]]) - shape = [4, self.H//8, self.W//8] - samples_ddim, _ = self.sampler.sample(S=self.ddim_steps, - conditioning=c, - batch_size=self.n_samples, - shape=shape, - verbose=False, - unconditional_guidance_scale=self.scale, - unconditional_conditioning=uc, - eta=self.ddim_eta) - x_samples_ddim = self.model.decode_first_stage(samples_ddim) - x_samples_ddim = torch.clamp((x_samples_ddim+1.0)/2.0, min=0.0, max=1.0) - if self.do_sr is True: - x_samples_ddim = self.sr_model.super_resolution(x_samples_ddim) - all_samples.append({'image_tensor':x_samples_ddim,'text':one_input["text"]}) - return all_samples + sd = checkpoint["state_dict"] + all_params=self.config["model"]["params"] - def compute_loss(self, forward_outputs, label_ids, **kwargs): - pass - + # 权重放在此处统一管理 + # 一阶段权重 + all_params["first_stage_config"]["params"]["ckpt_path"]=os.path.join(pretrained_model_name_or_path,'first_stage_kl_f8.ckpt') + all_params["first_stage_model"]=AutoencoderKL(**all_params["first_stage_config"]["params"]) + # 条件阶段权重 + all_params["cond_stage_config"]["params"]["version"]=os.path.join(pretrained_model_name_or_path,'wukong_vit_l_14_clip') + all_params["cond_stage_model"]=FrozenWukongCLIPTextEmbedder(**all_params["cond_stage_config"]["params"]) + + self.model=LatentDiffusionModel(**all_params) + + m, u = self.model.load_state_dict(sd, strict=False) + if len(m) > 0: + print("missing keys:") + print(m) + if len(u) > 0: + print("unexpected keys:") + print(u) + self.model.eval() + _device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu") + self.model = self.model.to(_device) + self.sr_model = ESRGAN(os.path.join(pretrained_model_name_or_path,'RRDB_ESRGAN_x4.pth'), _device) + self.sampler = PLMSSampler(self.model) + self.scale=float(user_defined_parameters.pop('scale',5.0)) + self.n_samples=int(user_defined_parameters.pop('n_samples',4)) + 
self.n_iter=int(user_defined_parameters.pop('n_iter',1)) + self.H=int(user_defined_parameters.pop('H',256)) + self.W=int(user_defined_parameters.pop('W',256)) + self.ddim_steps=int(user_defined_parameters.pop('ddim_steps',20)) + self.ddim_eta=float(user_defined_parameters.pop('ddim_eta',0.0)) + self.image_prefix=user_defined_parameters.pop('image_prefix','./') + self.do_sr=user_defined_parameters.pop('do_sr',False) + if 'write_image' in user_defined_parameters: + self.write_image=True + else: + self.write_image=False + + def forward(self, inputs): + all_samples=list() + for one_input in inputs: + with torch.no_grad(): + with self.model.ema_scope(): + uc = None + if self.scale != 1.0: + uc = self.model.get_learned_conditioning(self.n_samples * [""]) + for n in trange(self.n_iter, desc="Sampling"): + c = self.model.get_learned_conditioning(self.n_samples * [one_input["text"]]) + shape = [4, self.H//8, self.W//8] + samples_ddim, _ = self.sampler.sample(S=self.ddim_steps, + conditioning=c, + batch_size=self.n_samples, + shape=shape, + verbose=False, + unconditional_guidance_scale=self.scale, + unconditional_conditioning=uc, + eta=self.ddim_eta) + x_samples_ddim = self.model.decode_first_stage(samples_ddim) + x_samples_ddim = torch.clamp((x_samples_ddim+1.0)/2.0, min=0.0, max=1.0) + if self.do_sr is True: + x_samples_ddim = self.sr_model.super_resolution(x_samples_ddim) + all_samples.append({'image_tensor':x_samples_ddim,'text':one_input["text"]}) + return all_samples + + def compute_loss(self, forward_outputs, label_ids, **kwargs): + pass +except Exception as ex: + + print("出现如下异常%s"%ex) From 3d2514d529a5e870e18d8a8ec8cefc7bf460ff67 Mon Sep 17 00:00:00 2001 From: deplay <1150696014@qq.com> Date: Thu, 8 Dec 2022 14:41:16 +0800 Subject: [PATCH 017/101] add dummy class --- easynlp/appzoo/latent_diffusion/model.py | 15 +++++++++++++-- 1 file changed, 13 insertions(+), 2 deletions(-) diff --git a/easynlp/appzoo/latent_diffusion/model.py b/easynlp/appzoo/latent_diffusion/model.py index 688859b7..4f2323c5 100644 --- a/easynlp/appzoo/latent_diffusion/model.py +++ b/easynlp/appzoo/latent_diffusion/model.py @@ -123,7 +123,18 @@ def forward(self, inputs): def compute_loss(self, forward_outputs, label_ids, **kwargs): pass except Exception as ex: - - print("出现如下异常%s"%ex) + + class LatentDiffusion(Application): + + @classmethod + def from_pretrained(self, pretrained_model_name_or_path,args, user_defined_parameters={}): + instance=LatentDiffusion(pretrained_model_name_or_path,args,user_defined_parameters) + return instance + + def __init__(self, pretrained_model_name_or_path=None,args=None,user_defined_parameters=None): + super().__init__() + self.hello='world' + + print("出现如下异常 %s"%ex) From 3cda6b5b018a79e2d810071193bd47a0200f9a84 Mon Sep 17 00:00:00 2001 From: deplay <1150696014@qq.com> Date: Thu, 8 Dec 2022 14:47:34 +0800 Subject: [PATCH 018/101] fix dependency --- easynlp/appzoo/latent_diffusion/model.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/easynlp/appzoo/latent_diffusion/model.py b/easynlp/appzoo/latent_diffusion/model.py index 4f2323c5..31514b3f 100644 --- a/easynlp/appzoo/latent_diffusion/model.py +++ b/easynlp/appzoo/latent_diffusion/model.py @@ -24,6 +24,8 @@ from tqdm import tqdm, trange from einops import rearrange from PIL import Image +from ...utils import losses, get_pretrain_model_path, get_args +from ..application import Application try: @@ -34,9 +36,6 @@ from ...modelzoo.models.latent_diffusion.plms import PLMSSampler from 
...modelzoo.models.latent_diffusion.RRDBNet_arch import ESRGAN - from ...utils import losses, get_pretrain_model_path, get_args - from ..application import Application - class LatentDiffusion(Application): @classmethod From dbcabb37c77ec8a775085fd3868b5f336789fd29 Mon Sep 17 00:00:00 2001 From: deplay <1150696014@qq.com> Date: Thu, 8 Dec 2022 16:39:24 +0800 Subject: [PATCH 019/101] add __init__.py for information_extraction --- easynlp/appzoo/information_extraction/__init__.py | 15 +++++++++++++++ 1 file changed, 15 insertions(+) create mode 100644 easynlp/appzoo/information_extraction/__init__.py diff --git a/easynlp/appzoo/information_extraction/__init__.py b/easynlp/appzoo/information_extraction/__init__.py new file mode 100644 index 00000000..0935d7f9 --- /dev/null +++ b/easynlp/appzoo/information_extraction/__init__.py @@ -0,0 +1,15 @@ +# coding=utf-8 +# Copyright (c) 2020 Alibaba PAI team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + From d8e5836c21ee48e7d829eb35a62f70dd6d12f04e Mon Sep 17 00:00:00 2001 From: deplay <1150696014@qq.com> Date: Thu, 8 Dec 2022 16:45:38 +0800 Subject: [PATCH 020/101] not to load ie since it relies on 'transformers' --- easynlp/appzoo/__init__.py | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/easynlp/appzoo/__init__.py b/easynlp/appzoo/__init__.py index 9a027139..25b5ef2b 100644 --- a/easynlp/appzoo/__init__.py +++ b/easynlp/appzoo/__init__.py @@ -33,7 +33,7 @@ "video2text_generation.model": ['CLIPGPTFrameTextGeneration'], "sequence_generation.model": ["SequenceGeneration"], "machine_reading_comprehension.model": ["MachineReadingComprehension"], - "information_extraction.model": ["InformationExtractionModel"], + # "information_extraction.model": ["InformationExtractionModel"], "sequence_classification.evaluator": ['SequenceClassificationEvaluator', 'SequenceMultiLabelClassificationEvaluator'], "sequence_labeling.evaluator": ['SequenceLabelingEvaluator'], @@ -47,7 +47,7 @@ "video2text_generation.evaluator": ["FrameTextGenerationEvaluator"], "sequence_generation.evaluator": ["SequenceGenerationEvaluator"], "machine_reading_comprehension.evaluator": ["MachineReadingComprehensionEvaluator"], - "information_extraction.evaluator": ["InformationExtractionEvaluator"], + # "information_extraction.evaluator": ["InformationExtractionEvaluator"], "sequence_classification.predictor": ['SequenceClassificationPredictor', 'FewshotSequenceClassificationPredictor', 'CptFewshotSequenceClassificationPredictor'], "sequence_labeling.predictor": ['SequenceLabelingPredictor'], @@ -63,7 +63,7 @@ "video2text_generation.predictor": ['CLIPGPTFrameTextGenerationPredictor'], "sequence_generation.predictor": ['SequenceGenerationPredictor'], "machine_reading_comprehension.predictor": ["MachineReadingComprehensionPredictor"], - "information_extraction.predictor": ["InformationExtractionPredictor"], + # "information_extraction.predictor": ["InformationExtractionPredictor"], "geep_classification.data": ['GEEPClassificationDataset'], 
"language_modeling.data": ['LanguageModelingDataset'], @@ -77,7 +77,7 @@ "video2text_generation.data": ['CLIPGPTFrameTextDataset'], "sequence_generation.data": ['SequenceGenerationDataset'], "machine_reading_comprehension.data": ["MachineReadingComprehensionDataset"], - "information_extraction.data": ["InformationExtractionDataset"], + # "information_extraction.data": ["InformationExtractionDataset"], "dataset": ['BaseDataset', 'GeneralDataset', 'load_dataset', 'list_datasets'], "api": ['get_application_dataset', 'get_application_model', 'get_application_model_for_evaluation', 'get_application_evaluator', 'get_application_predictor'], } @@ -98,7 +98,7 @@ from .video2text_generation.model import CLIPGPTFrameTextGeneration from .sequence_generation.model import SequenceGeneration from .machine_reading_comprehension.model import MachineReadingComprehension - from .information_extraction.model import InformationExtractionModel + # from .information_extraction.model import InformationExtractionModel from .sequence_classification.evaluator import SequenceClassificationEvaluator, SequenceMultiLabelClassificationEvaluator from .sequence_labeling.evaluator import SequenceLabelingEvaluator @@ -112,7 +112,7 @@ from .video2text_generation.evaluator import FrameTextGenerationEvaluator from .sequence_generation.evaluator import SequenceGenerationEvaluator from .machine_reading_comprehension.evaluator import MachineReadingComprehensionEvaluator - from .information_extraction.evaluator import InformationExtractionEvaluator + # from .information_extraction.evaluator import InformationExtractionEvaluator from .sequence_classification.predictor import SequenceClassificationPredictor, FewshotSequenceClassificationPredictor, CptFewshotSequenceClassificationPredictor from .sequence_labeling.predictor import SequenceLabelingPredictor @@ -128,7 +128,7 @@ from .video2text_generation.predictor import CLIPGPTFrameTextGenerationPredictor from .sequence_generation.predictor import SequenceGenerationPredictor from .machine_reading_comprehension.predictor import MachineReadingComprehensionPredictor - from .information_extraction.predictor import InformationExtractionPredictor + # from .information_extraction.predictor import InformationExtractionPredictor from .sequence_classification.data import ClassificationDataset, DistillatoryClassificationDataset, FewshotSequenceClassificationDataset from .sequence_labeling.data import SequenceLabelingDataset, SequenceLabelingAutoDataset @@ -142,7 +142,7 @@ from .video2text_generation.data import CLIPGPTFrameTextDataset from .sequence_generation.data import SequenceGenerationDataset from .machine_reading_comprehension.data import MachineReadingComprehensionDataset - from .information_extraction.data import InformationExtractionDataset + # from .information_extraction.data import InformationExtractionDataset from .dataset import BaseDataset, GeneralDataset from .dataset import load_dataset, list_datasets From d4b244a0428e4ef688d38129b3febd1c85af63e5 Mon Sep 17 00:00:00 2001 From: deplay <1150696014@qq.com> Date: Thu, 8 Dec 2022 16:59:11 +0800 Subject: [PATCH 021/101] comment informationextraction --- easynlp/appzoo/api.py | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/easynlp/appzoo/api.py b/easynlp/appzoo/api.py index 019a2a53..8676040e 100644 --- a/easynlp/appzoo/api.py +++ b/easynlp/appzoo/api.py @@ -33,7 +33,7 @@ from easynlp.appzoo import CLIPGPTFrameTextGeneration from easynlp.appzoo.sequence_generation.model import SequenceGeneration 
from easynlp.appzoo import MachineReadingComprehension -from easynlp.appzoo import InformationExtractionModel +# from easynlp.appzoo import InformationExtractionModel from easynlp.fewshot_learning.fewshot_evaluator import PromptEvaluator as FewshotSequenceClassificationEvaluator from easynlp.fewshot_learning.fewshot_evaluator import CPTEvaluator as CptFewshotSequenceClassificationEvaluator @@ -48,7 +48,7 @@ from easynlp.appzoo import FrameTextGenerationEvaluator from easynlp.appzoo import SequenceGenerationEvaluator from easynlp.appzoo import MachineReadingComprehensionEvaluator -from easynlp.appzoo import InformationExtractionEvaluator +# from easynlp.appzoo import InformationExtractionEvaluator from easynlp.appzoo import SequenceClassificationPredictor, FewshotSequenceClassificationPredictor, CptFewshotSequenceClassificationPredictor from easynlp.appzoo import SequenceLabelingPredictor, FeatureVectorizationPredictor @@ -62,7 +62,7 @@ from easynlp.appzoo import CLIPGPTFrameTextGenerationPredictor from easynlp.appzoo import SequenceGenerationPredictor from easynlp.appzoo import MachineReadingComprehensionPredictor -from easynlp.appzoo import InformationExtractionPredictor +# from easynlp.appzoo import InformationExtractionPredictor from easynlp.appzoo import ClassificationDataset, DistillatoryClassificationDataset, FewshotSequenceClassificationDataset from easynlp.appzoo import SequenceLabelingDataset, LanguageModelingDataset @@ -75,7 +75,7 @@ from easynlp.appzoo import CLIPGPTImageTextDataset, VQGANGPTImageTextDataset from easynlp.appzoo import CLIPGPTFrameTextDataset from easynlp.appzoo import MachineReadingComprehensionDataset -from easynlp.appzoo import InformationExtractionDataset +# from easynlp.appzoo import InformationExtractionDataset from easynlp.core import PredictorManager, Trainer, DistillatoryTrainer from easynlp.utils.logger import logger @@ -119,7 +119,7 @@ 'video2text_generation': CLIPGPTFrameTextDataset, 'sequence_generation': SequenceGenerationDataset, 'machine_reading_comprehension': MachineReadingComprehensionDataset, - 'information_extraction': InformationExtractionDataset + # 'information_extraction': InformationExtractionDataset } ModelMapping = { @@ -154,7 +154,7 @@ 'video2text_generation': CLIPGPTFrameTextGeneration, 'sequence_generation': SequenceGeneration, 'machine_reading_comprehension': MachineReadingComprehension, - 'information_extraction': InformationExtractionModel + # 'information_extraction': InformationExtractionModel } Eval_Model_Mapping = { @@ -183,7 +183,7 @@ 'video2text_generation': CLIPGPTFrameTextGeneration, 'sequence_generation': SequenceGeneration, 'machine_reading_comprehension': MachineReadingComprehension, - 'information_extraction': InformationExtractionModel + # 'information_extraction': InformationExtractionModel } Evaluator_Mapping = { @@ -212,7 +212,7 @@ 'video2text_generation': FrameTextGenerationEvaluator, 'sequence_generation': SequenceGenerationEvaluator, 'machine_reading_comprehension': MachineReadingComprehensionEvaluator, - 'information_extraction': InformationExtractionEvaluator + # 'information_extraction': InformationExtractionEvaluator } Predictor_Mapping = { @@ -243,7 +243,7 @@ 'video2text_generation': [CLIPGPTFrameTextGenerationPredictor, CLIPGPTFrameTextGeneration], 'sequence_generation': [SequenceGenerationPredictor, SequenceGeneration], 'machine_reading_comprehension': [MachineReadingComprehensionPredictor, MachineReadingComprehension], - 'information_extraction': [InformationExtractionPredictor, 
InformationExtractionModel] + # 'information_extraction': [InformationExtractionPredictor, InformationExtractionModel] } From c3393007e1f11ea92f27b313c41781e2e90c002b Mon Sep 17 00:00:00 2001 From: deplay <1150696014@qq.com> Date: Fri, 9 Dec 2022 18:45:44 +0800 Subject: [PATCH 022/101] FashionKLIP codes --- examples/fashionklip/README.md | 155 +++++ examples/fashionklip/clip/__init__.py | 0 examples/fashionklip/clip/clip.py | 202 ++++++ examples/fashionklip/clip/model.py | 433 ++++++++++++ .../clip/model_configs/ViT-B-32.json | 12 + .../clip/model_configs/ViT-L-14.json | 12 + examples/fashionklip/clip/tokenizer.py | 140 ++++ examples/fashionklip/eval/data.py | 97 +++ examples/fashionklip/eval/extract_features.py | 196 ++++++ examples/fashionklip/eval/predict_evaluate.py | 244 +++++++ examples/fashionklip/run_local.sh | 123 ++++ examples/fashionklip/training/data.py | 476 +++++++++++++ examples/fashionklip/training/logger.py | 90 +++ .../fashionklip/training/main_all_concept.py | 332 +++++++++ .../training/model_configs/ViT-B-32.json | 12 + .../training/model_configs/ViT-L-14.json | 12 + examples/fashionklip/training/params.py | 282 ++++++++ examples/fashionklip/training/scheduler.py | 20 + .../fashionklip/training/train_all_concept.py | 629 ++++++++++++++++++ examples/fashionklip/training/train_epoch.py | 149 +++++ 20 files changed, 3616 insertions(+) create mode 100644 examples/fashionklip/README.md create mode 100644 examples/fashionklip/clip/__init__.py create mode 100644 examples/fashionklip/clip/clip.py create mode 100644 examples/fashionklip/clip/model.py create mode 100644 examples/fashionklip/clip/model_configs/ViT-B-32.json create mode 100644 examples/fashionklip/clip/model_configs/ViT-L-14.json create mode 100644 examples/fashionklip/clip/tokenizer.py create mode 100644 examples/fashionklip/eval/data.py create mode 100644 examples/fashionklip/eval/extract_features.py create mode 100644 examples/fashionklip/eval/predict_evaluate.py create mode 100755 examples/fashionklip/run_local.sh create mode 100644 examples/fashionklip/training/data.py create mode 100644 examples/fashionklip/training/logger.py create mode 100644 examples/fashionklip/training/main_all_concept.py create mode 100644 examples/fashionklip/training/model_configs/ViT-B-32.json create mode 100644 examples/fashionklip/training/model_configs/ViT-L-14.json create mode 100644 examples/fashionklip/training/params.py create mode 100644 examples/fashionklip/training/scheduler.py create mode 100644 examples/fashionklip/training/train_all_concept.py create mode 100644 examples/fashionklip/training/train_epoch.py diff --git a/examples/fashionklip/README.md b/examples/fashionklip/README.md new file mode 100644 index 00000000..923ef8ae --- /dev/null +++ b/examples/fashionklip/README.md @@ -0,0 +1,155 @@ +# FashionKLIP + +## Preparations: Data and Models + +* Download the preprocessed training and validation [FashionGen](https://arxiv.org/abs/1806.08317) data: + +``` +if [ ! -f ./tmp/fashion-gen.tgz ]; then + wget -P ./tmp https://atp-modelzoo-sh.oss-cn-shanghai.aliyuncs.com/release/tutorials/fashionklip/fashion-gen.tgz + tar zxvf ./tmp/fashion-gen.tgz -C ./tmp +fi +``` + +* Preprare our FashionMMKG sample data: + +``` +if [ ! -f ./tmp/fashion_kb.tgz ]; then + wget -P ./tmp https://atp-modelzoo-sh.oss-cn-shanghai.aliyuncs.com/release/tutorials/fashionklip/fashion_kb.tgz + tar zxvf ./tmp/fashion_kb.tgz -C ./tmp +fi +``` + +* For confidentiality reasons, we only provide sample data of FashionMMKG. 
If you'd like to construct a larger MMKG or utilize the data of your own for better knowledge injected training, we suggest that the knowledge data organized in the following schema. We take the processed concept files in ./tmp/fashion_kb as examples. + + * concepts_images_sample.224.npz: image file in npz format, including image ids with its numpy array after transformation. + * concepts_fathers.jsonl: "phrase" for specific concept, "phrase_father" for concept hypernym. + ``` + {"query_id": 15, "phrase": "woman sunglasses", "phrase_father": "sunglasses"} + ``` + * concepts_queries.jsonl: "phrase" for specific concept, "query_text" for concept prompt. + ``` + {"query_id": 15, "query_text": "a photo of woman sunglasses", "phrase": "woman sunglasses"} + ``` + +* Download CLIP pretrained checkpoint (ViT-B/32 as image encoder): + +``` +if [ ! -f ./tmp/pretrained_models/ViT-B-32.pt ]; then + wget -P ./tmp2/pretrained_models https://atp-modelzoo-sh.oss-cn-shanghai.aliyuncs.com/release/tutorials/fashionklip/openai/ViT-B-32.pt +fi +``` + + +## Start training + +* Training + +``` +DATAPATH=./tmp +PRETRAINED_MODEL=./tmp/pretrained_models + +MASTER_ADDR=tcp://127.0.0.1:12345 + +python3 -u training/main_all_concept.py \ + --save-most-recent \ + --save-frequency 1 \ + --report-to tensorboard \ + --train-data="${DATAPATH}/fashiongen/train/fashion-gen_train_queries_phrases.jsonl" \ + --train-img="${DATAPATH}/fashiongen/train/train.224.npz" \ + --txt-id-filename="${DATAPATH}/fashiongen/train/fashion-gen_concepts_queries.jsonl" \ + --kb-txt-id-filename="${DATAPATH}/fashion_kb/icbu_concepts_queries.jsonl" \ + --val-data="${DATAPATH}/fashiongen/val/fashion-gen_val_queries.jsonl" \ + --val-img="${DATAPATH}/fashiongen/val/val.224.npz" \ + --img-data-sets="${DATAPATH}/fashion_kb/icbu_train_images_5w_00.224.npz" \ + --concept-data="${DATAPATH}/fashiongen/train/fashion-gen_concepts_queries.jsonl" \ + --kb-concept-data="${DATAPATH}/fashion_kb/icbu_concepts_queries.jsonl" \ + --resume="${PRETRAINED_MODEL}/pai-clip-commercial-base-en/pai-clip-commercial-base-en.pt" \ + --is-concept \ + --is-data-concept \ + --is-update \ + --dist-url=$MASTER_ADDR \ + --dataset-type jsonl \ + --warmup 500 \ + --batch-size=32 \ + --eval-batch-size=256 \ + --lr=1e-5 \ + --wd=0.001 \ + --epochs=20 \ + --workers=0 \ + --model ViT-B/32 + +``` + +If you want to only use the FashionKLIP training strategy without loading our provided checkpoint, please remove the argument "resume" and set "openai-pretrained". + + +* Extracting features + + The resume path can be replaced with your trained model. + + ``` + SAVEFOLDER=./tmp/predictions_finetune + + if [ ! 
-d $SAVEFOLDER ]; then + mkdir $SAVEFOLDER + fi + + python3 -u eval/extract_features.py \ + --extract-image-feats \ + --extract-text-feats \ + --image-data="${DATAPATH}/fashiongen/val/val.224.npz" \ + --text-data="${DATAPATH}/fashiongen/val/fashion-gen_val_queries.jsonl" \ + --img-batch-size=32 \ + --text-batch-size=32 \ + --resume="${PRETRAINED_MODEL}/pai-clip-commercial-base-en/pai-clip-commercial-base-en.pt" \ + --image-feat-output-path="${SAVEFOLDER}/fashion-gen_test_imgs.img_feat.jsonl" \ + --text-feat-output-path="${SAVEFOLDER}/fashion-gen_test_texts.txt_feat.jsonl" \ + --model ViT-B-32 + + ``` + + The output feature files are saved in user-defined $SAVEFOLDER in the following format: + ("image_id" for images predictions and "query_id" for the texts) + + ``` + {"image_id": "94", "feature": [0.05916735157370567,....]} + {"query_id": 10, "feature": [0.05950947478413582,....]} + + ``` + +* Predict and Evaluate + + Note that the prediction and envaluation procedure should be executed after extracting features. + ``` + python3 -u eval/predict_evaluate.py \ + --image-feats="${SAVEFOLDER}/fashion-gen_test_imgs.img_feat.jsonl" \ + --text-feats="${SAVEFOLDER}/fashion-gen_test_texts.txt_feat.jsonl" \ + --top-k=10 \ + --eval-batch-size=32768 \ + --output-images="${SAVEFOLDER}/test_imgs_predictions.jsonl" \ + --output-texts="${SAVEFOLDER}/test_txts_predictions.jsonl" \ + --text-standard-path="${DATAPATH}/fashiongen/val/fashion-gen_val_queries.jsonl" \ + --image-standard-path="${DATAPATH}/fashiongen/val/fashion-gen_val_images.jsonl" \ + --text-out-path="${SAVEFOLDER}//fashion_text_output.json" \ + --image-out-path="${SAVEFOLDER}/./fashion_image_output.json" + + ``` + + The commands will output evaluation files, saved in user-defined "text-out-path" and "image-out-path" in the following format: + + ``` + {"success": true, "score": 12.189873028560887, "scoreJson": {"score": 12.189873028560887, "mean_recall": 12.189873028560887, "r1": 4.60233037169121, "r5": 13.078365665447167, "r10": 18.88892304854429}} + ``` + + +## Reference +* Fashion-Gen: The Generative Fashion Dataset and Challenge. [[paper](https://arxiv.org/abs/1806.08317)][[website](https://fashion-gen.com/)] +* Learning Transferable Visual Models From Natural Language Supervision. [[paper](https://arxiv.org/abs/1806.08317)][[website](https://fashion-gen.com/)] + + +## Acknowledgements + +The implementation of FashionKLIP relies on resources from OpenAI's [CLIP](https://github.com/openai/CLIP), and the implementation version [OpenCLIP](https://github.com/mlfoundations/open_clip), portions of models/ modelling and tokenizer code are adaptations of official repositories. We thank the original authors for their open-sourcing. 
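The training and evaluation commands above all consume `*.224.npz` archives that map image ids to preprocessed image arrays. If you need to build such an archive for your own images, the following is a minimal sketch (file layout assumed; the transform mirrors the evaluation-time preprocessing in `clip/clip.py`):

```
import numpy as np
from PIL import Image
from torchvision.transforms import Compose, Resize, CenterCrop, ToTensor, Normalize

preprocess = Compose([
    Resize(224, interpolation=Image.BICUBIC),
    CenterCrop(224),
    lambda img: img.convert("RGB"),
    ToTensor(),
    Normalize((0.48145466, 0.4578275, 0.40821073),
              (0.26862954, 0.26130258, 0.27577711)),
])

def build_npz(id_to_path, output_path="val.224.npz"):
    # id_to_path: dict mapping an image id (str) to a local image file path.
    arrays = {}
    for image_id, path in id_to_path.items():
        with Image.open(path) as img:
            arrays[str(image_id)] = preprocess(img).numpy()
    np.savez(output_path, **arrays)

# Example (hypothetical paths): build_npz({"94": "images/94.jpg"})
```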
+ + diff --git a/examples/fashionklip/clip/__init__.py b/examples/fashionklip/clip/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/examples/fashionklip/clip/clip.py b/examples/fashionklip/clip/clip.py new file mode 100644 index 00000000..02e28a76 --- /dev/null +++ b/examples/fashionklip/clip/clip.py @@ -0,0 +1,202 @@ +# Code ported from https://github.com/openai/CLIP + +import hashlib +import os +import urllib +import warnings +from typing import Union, List + +import torch +from PIL import Image +from torchvision.transforms import Compose, Resize, CenterCrop, ToTensor, Normalize, RandomResizedCrop +from tqdm import tqdm + +from clip.model import build_model +from clip.tokenizer import SimpleTokenizer as _Tokenizer +_tokenizer = _Tokenizer() + +__all__ = ["available_models", "load", "tokenize"] + +_MODELS = { + "ViT-B/32": "https://openaipublic.azureedge.net/clip/models/40d365715913c9da98579312b702a82c18be219cc2a73407c4526f58eba950af/ViT-B-32.pt", + "ViT-L/14": "https://openaipublic.azureedge.net/clip/models/b8cca3fd41ae0c99ba7e8951adf17d267cdb84cd88be6f7c2e0eca1737a03836/ViT-L-14.pt", +} + + +def _download(url: str, root: str = os.path.expanduser("~/.cache/clip")): + os.makedirs(root, exist_ok=True) + filename = os.path.basename(url) + + expected_sha256 = url.split("/")[-2] + download_target = os.path.join(root, filename) + + if os.path.exists(download_target) and not os.path.isfile(download_target): + raise RuntimeError(f"{download_target} exists and is not a regular file") + + if os.path.isfile(download_target): + if hashlib.sha256(open(download_target, "rb").read()).hexdigest() == expected_sha256: + return download_target + else: + warnings.warn(f"{download_target} exists, but the SHA256 checksum does not match; re-downloading the file") + + with urllib.request.urlopen(url) as source, open(download_target, "wb") as output: + with tqdm(total=int(source.info().get("Content-Length")), ncols=80, unit='iB', unit_scale=True) as loop: + while True: + buffer = source.read(8192) + if not buffer: + break + + output.write(buffer) + loop.update(len(buffer)) + + if hashlib.sha256(open(download_target, "rb").read()).hexdigest() != expected_sha256: + raise RuntimeError(f"Model has been downloaded but the SHA256 checksum does not not match") + + return download_target + +def _convert_to_rgb(image): + return image.convert('RGB') + +def _transform(n_px: int, is_train: bool): + normalize = Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711)) + if is_train: + return Compose([ + RandomResizedCrop(n_px, scale=(0.9, 1.0), interpolation=Image.BICUBIC), + _convert_to_rgb, + ToTensor(), + normalize, + ]) + else: + return Compose([ + Resize(n_px, interpolation=Image.BICUBIC), + CenterCrop(n_px), + _convert_to_rgb, + ToTensor(), + normalize, + ]) + + + +def available_models() -> List[str]: + """Returns the names of available CLIP models""" + return list(_MODELS.keys()) + + +def load(name: str, device: Union[str, torch.device] = "cuda" if torch.cuda.is_available() else "cpu", jit=True, is_train=False, pretrained=True): + """Load a CLIP model + Parameters + ---------- + name : str + A model name listed by `clip.available_models()`, or the path to a model checkpoint containing the state_dict + device : Union[str, torch.device] + The device to put the loaded model + jit : bool + Whether to load the optimized JIT model (default) or more hackable non-JIT model. 
+ Returns + ------- + model : torch.nn.Module + The CLIP model + preprocess : Callable[[PIL.Image], torch.Tensor] + A torchvision transform that converts a PIL image into a tensor that the returned model can take as its input + """ + + model_path = "./tmp/pretrained_models/{}.pt".format(name.replace("/", "-")) + + try: + # loading JIT archive + model = torch.jit.load(model_path, map_location=device if jit else "cpu").eval() + state_dict = None + except RuntimeError: + # loading saved state dict + if jit: + warnings.warn(f"File {model_path} is not a JIT archive. Loading as a state dict instead") + jit = False + state_dict = torch.load(model_path, map_location="cpu") + + if not jit: + try: + model = build_model(state_dict or model.state_dict()).to(device) + except KeyError: + sd = {k[7:]: v for k,v in state_dict["state_dict"].items()} + model = build_model(sd).to(device) + + if str(device) == "cpu": + model.float() + return model, \ + _transform(model.visual.input_resolution, is_train=True), \ + _transform(model.visual.input_resolution, is_train=False) + + # patch the device names + device_holder = torch.jit.trace(lambda: torch.ones([]).to(torch.device(device)), example_inputs=[]) + device_node = [n for n in device_holder.graph.findAllNodes("prim::Constant") if "Device" in repr(n)][-1] + + def patch_device(module): + graphs = [module.graph] if hasattr(module, "graph") else [] + if hasattr(module, "forward1"): + graphs.append(module.forward1.graph) + + for graph in graphs: + for node in graph.findAllNodes("prim::Constant"): + if "value" in node.attributeNames() and str(node["value"]).startswith("cuda"): + node.copyAttributes(device_node) + + model.apply(patch_device) + patch_device(model.encode_image) + patch_device(model.encode_text) + + # patch dtype to float32 on CPU + if str(device) == "cpu": + float_holder = torch.jit.trace(lambda: torch.ones([]).float(), example_inputs=[]) + float_input = list(float_holder.graph.findNode("aten::to").inputs())[1] + float_node = float_input.node() + + def patch_float(module): + graphs = [module.graph] if hasattr(module, "graph") else [] + if hasattr(module, "forward1"): + graphs.append(module.forward1.graph) + + for graph in graphs: + for node in graph.findAllNodes("aten::to"): + inputs = list(node.inputs()) + for i in [1, 2]: # dtype can be the second or third argument to aten::to() + if inputs[i].node()["value"] == 5: + inputs[i].node().copyAttributes(float_node) + + model.apply(patch_float) + patch_float(model.encode_image) + patch_float(model.encode_text) + + model.float() + + return model, \ + _transform(model.input_resolution.item(), is_train=True), \ + _transform(model.input_resolution.item(), is_train=False) + + +def tokenize(texts: Union[str, List[str]], context_length: int = 77) -> torch.LongTensor: + """ + Returns the tokenized representation of given input string(s) + Parameters + ---------- + texts : Union[str, List[str]] + An input string or a list of input strings to tokenize + context_length : int + The context length to use; all CLIP models use 77 as the context length + Returns + ------- + A two-dimensional tensor containing the resulting tokens, shape = [number of input strings, context_length] + """ + if isinstance(texts, str): + texts = [texts] + + sot_token = _tokenizer.encoder[""] + eot_token = _tokenizer.encoder[""] + all_tokens = [[sot_token] + _tokenizer.encode(text) + [eot_token] for text in texts] + result = torch.zeros(len(all_tokens), context_length, dtype=torch.long) + + for i, tokens in enumerate(all_tokens): + if 
len(tokens) > context_length: # Truncate + tokens = tokens[:context_length] + result[i, :len(tokens)] = torch.tensor(tokens) + + return result diff --git a/examples/fashionklip/clip/model.py b/examples/fashionklip/clip/model.py new file mode 100644 index 00000000..ae8f81cb --- /dev/null +++ b/examples/fashionklip/clip/model.py @@ -0,0 +1,433 @@ +from collections import OrderedDict +from typing import Tuple, Union + +import os +import json +import numpy as np +import torch +import torch.nn.functional as F +from torch import nn + + +class Bottleneck(nn.Module): + expansion = 4 + + def __init__(self, inplanes, planes, stride=1): + super().__init__() + + # all conv layers have stride 1. an avgpool is performed after the second convolution when stride > 1 + self.conv1 = nn.Conv2d(inplanes, planes, 1, bias=False) + self.bn1 = nn.BatchNorm2d(planes) + + self.conv2 = nn.Conv2d(planes, planes, 3, padding=1, bias=False) + self.bn2 = nn.BatchNorm2d(planes) + + self.avgpool = nn.AvgPool2d(stride) if stride > 1 else nn.Identity() + + self.conv3 = nn.Conv2d(planes, planes * self.expansion, 1, bias=False) + self.bn3 = nn.BatchNorm2d(planes * self.expansion) + + self.relu = nn.ReLU(inplace=True) + self.downsample = None + self.stride = stride + + if stride > 1 or inplanes != planes * Bottleneck.expansion: + # downsampling layer is prepended with an avgpool, and the subsequent convolution has stride 1 + self.downsample = nn.Sequential(OrderedDict([ + ("-1", nn.AvgPool2d(stride)), + ("0", nn.Conv2d(inplanes, planes * self.expansion, 1, stride=1, bias=False)), + ("1", nn.BatchNorm2d(planes * self.expansion)) + ])) + + def forward(self, x: torch.Tensor): + identity = x + + out = self.relu(self.bn1(self.conv1(x))) + out = self.relu(self.bn2(self.conv2(out))) + out = self.avgpool(out) + out = self.bn3(self.conv3(out)) + + if self.downsample is not None: + identity = self.downsample(x) + + out += identity + out = self.relu(out) + return out + + +class AttentionPool2d(nn.Module): + def __init__(self, spacial_dim: int, embed_dim: int, num_heads: int, output_dim: int = None): + super().__init__() + self.positional_embedding = nn.Parameter(torch.randn(spacial_dim ** 2 + 1, embed_dim) / embed_dim ** 0.5) + self.k_proj = nn.Linear(embed_dim, embed_dim) + self.q_proj = nn.Linear(embed_dim, embed_dim) + self.v_proj = nn.Linear(embed_dim, embed_dim) + self.c_proj = nn.Linear(embed_dim, output_dim or embed_dim) + self.num_heads = num_heads + + def forward(self, x): + x = x.reshape(x.shape[0], x.shape[1], x.shape[2] * x.shape[3]).permute(2, 0, 1) # NCHW -> (HW)NC + x = torch.cat([x.mean(dim=0, keepdim=True), x], dim=0) # (HW+1)NC + x = x + self.positional_embedding[:, None, :].to(x.dtype) # (HW+1)NC + x, _ = F.multi_head_attention_forward( + query=x, key=x, value=x, + embed_dim_to_check=x.shape[-1], + num_heads=self.num_heads, + q_proj_weight=self.q_proj.weight, + k_proj_weight=self.k_proj.weight, + v_proj_weight=self.v_proj.weight, + in_proj_weight=None, + in_proj_bias=torch.cat([self.q_proj.bias, self.k_proj.bias, self.v_proj.bias]), + bias_k=None, + bias_v=None, + add_zero_attn=False, + dropout_p=0, + out_proj_weight=self.c_proj.weight, + out_proj_bias=self.c_proj.bias, + use_separate_proj_weight=True, + training=self.training, + need_weights=False + ) + + return x[0] + + +class ModifiedResNet(nn.Module): + """ + A ResNet class that is similar to torchvision's but contains the following changes: + - There are now 3 "stem" convolutions as opposed to 1, with an average pool instead of a max pool. 
+ - Performs anti-aliasing strided convolutions, where an avgpool is prepended to convolutions with stride > 1 + - The final pooling layer is a QKV attention instead of an average pool + """ + + def __init__(self, layers, output_dim, heads, input_resolution=224, width=64): + super().__init__() + self.output_dim = output_dim + self.input_resolution = input_resolution + + # the 3-layer stem + self.conv1 = nn.Conv2d(3, width // 2, kernel_size=3, stride=2, padding=1, bias=False) + self.bn1 = nn.BatchNorm2d(width // 2) + self.conv2 = nn.Conv2d(width // 2, width // 2, kernel_size=3, padding=1, bias=False) + self.bn2 = nn.BatchNorm2d(width // 2) + self.conv3 = nn.Conv2d(width // 2, width, kernel_size=3, padding=1, bias=False) + self.bn3 = nn.BatchNorm2d(width) + self.avgpool = nn.AvgPool2d(2) + self.relu = nn.ReLU(inplace=True) + + # residual layers + self._inplanes = width # this is a *mutable* variable used during construction + self.layer1 = self._make_layer(width, layers[0]) + self.layer2 = self._make_layer(width * 2, layers[1], stride=2) + self.layer3 = self._make_layer(width * 4, layers[2], stride=2) + self.layer4 = self._make_layer(width * 8, layers[3], stride=2) + + embed_dim = width * 32 # the ResNet feature dimension + self.attnpool = AttentionPool2d(input_resolution // 32, embed_dim, heads, output_dim) + + def _make_layer(self, planes, blocks, stride=1): + layers = [Bottleneck(self._inplanes, planes, stride)] + + self._inplanes = planes * Bottleneck.expansion + for _ in range(1, blocks): + layers.append(Bottleneck(self._inplanes, planes)) + + return nn.Sequential(*layers) + + def forward(self, x): + def stem(x): + for conv, bn in [(self.conv1, self.bn1), (self.conv2, self.bn2), (self.conv3, self.bn3)]: + x = self.relu(bn(conv(x))) + x = self.avgpool(x) + return x + + x = x.type(self.conv1.weight.dtype) + x = stem(x) + x = self.layer1(x) + x = self.layer2(x) + x = self.layer3(x) + x = self.layer4(x) + x = self.attnpool(x) + + return x + + +class LayerNorm(nn.LayerNorm): + """Subclass torch's LayerNorm to handle fp16.""" + + def forward(self, x: torch.Tensor): + orig_type = x.dtype + ret = super().forward(x.type(torch.float32)) + return ret.type(orig_type) + + +class QuickGELU(nn.Module): + def forward(self, x: torch.Tensor): + return x * torch.sigmoid(1.702 * x) + + +class ResidualAttentionBlock(nn.Module): + def __init__(self, d_model: int, n_head: int, attn_mask: torch.Tensor = None): + super().__init__() + + self.attn = nn.MultiheadAttention(d_model, n_head) + self.ln_1 = LayerNorm(d_model) + self.mlp = nn.Sequential(OrderedDict([ + ("c_fc", nn.Linear(d_model, d_model * 4)), + ("gelu", QuickGELU()), + ("c_proj", nn.Linear(d_model * 4, d_model)) + ])) + self.ln_2 = LayerNorm(d_model) + self.attn_mask = attn_mask + + def attention(self, x: torch.Tensor): + self.attn_mask = self.attn_mask.to(dtype=x.dtype, device=x.device) if self.attn_mask is not None else None + return self.attn(x, x, x, need_weights=False, attn_mask=self.attn_mask)[0] + + def forward(self, x: torch.Tensor): + x = x + self.attention(self.ln_1(x)) + x = x + self.mlp(self.ln_2(x)) + return x + + +class Transformer(nn.Module): + def __init__(self, width: int, layers: int, heads: int, attn_mask: torch.Tensor = None): + super().__init__() + self.width = width + self.layers = layers + self.resblocks = nn.Sequential(*[ResidualAttentionBlock(width, heads, attn_mask) for _ in range(layers)]) + + def forward(self, x: torch.Tensor): + return self.resblocks(x) + + +class VisualTransformer(nn.Module): + def __init__(self, 
input_resolution: int, patch_size: int, width: int, layers: int, heads: int, output_dim: int): + super().__init__() + self.input_resolution = input_resolution + self.output_dim = output_dim + self.conv1 = nn.Conv2d(in_channels=3, out_channels=width, kernel_size=patch_size, stride=patch_size, bias=False) + + scale = width ** -0.5 + self.class_embedding = nn.Parameter(scale * torch.randn(width)) + self.positional_embedding = nn.Parameter(scale * torch.randn((input_resolution // patch_size) ** 2 + 1, width)) + self.ln_pre = LayerNorm(width) + + self.transformer = Transformer(width, layers, heads) + + self.ln_post = LayerNorm(width) + self.proj = nn.Parameter(scale * torch.randn(width, output_dim)) + + def forward(self, x: torch.Tensor): + x = self.conv1(x) # shape = [*, width, grid, grid] + x = x.reshape(x.shape[0], x.shape[1], -1) # shape = [*, width, grid ** 2] + x = x.permute(0, 2, 1) # shape = [*, grid ** 2, width] + x = torch.cat([self.class_embedding.to(x.dtype) + torch.zeros(x.shape[0], 1, x.shape[-1], dtype=x.dtype, device=x.device), x], dim=1) # shape = [*, grid ** 2 + 1, width] + x = x + self.positional_embedding.to(x.dtype) + x = self.ln_pre(x) + + x = x.permute(1, 0, 2) # NLD -> LND + x = self.transformer(x) + x = x.permute(1, 0, 2) # LND -> NLD + + x = self.ln_post(x[:, 0, :]) + + if self.proj is not None: + x = x @ self.proj + + return x + + +class CLIP(nn.Module): + def __init__(self, + embed_dim: int, + # vision + image_resolution: int, + vision_layers: Union[Tuple[int, int, int, int], int], + vision_width: int, + vision_patch_size: int, + # text + context_length: int, + vocab_size: int, + transformer_width: int, + transformer_heads: int, + transformer_layers: int + ): + super().__init__() + + self.context_length = context_length + + if isinstance(vision_layers, (tuple, list)): + vision_heads = vision_width * 32 // 64 + self.visual = ModifiedResNet( + layers=vision_layers, + output_dim=embed_dim, + heads=vision_heads, + input_resolution=image_resolution, + width=vision_width + ) + else: + vision_heads = vision_width // 64 + self.visual = VisualTransformer( + input_resolution=image_resolution, + patch_size=vision_patch_size, + width=vision_width, + layers=vision_layers, + heads=vision_heads, + output_dim=embed_dim + ) + + self.transformer = Transformer( + width=transformer_width, + layers=transformer_layers, + heads=transformer_heads, + attn_mask=self.build_attention_mask() + ) + + self.vocab_size = vocab_size + self.token_embedding = nn.Embedding(vocab_size, transformer_width) + self.positional_embedding = nn.Parameter(torch.empty(self.context_length, transformer_width)) + self.ln_final = LayerNorm(transformer_width) + + self.text_projection = nn.Parameter(torch.empty(transformer_width, embed_dim)) + self.logit_scale = nn.Parameter(torch.ones([]) * np.log(1 / 0.07)) + + self.initialize_parameters() + + def initialize_parameters(self): + nn.init.normal_(self.token_embedding.weight, std=0.02) + nn.init.normal_(self.positional_embedding, std=0.01) + self.logit_scale = nn.Parameter(torch.ones([]) * np.log(1 / 0.07)) + + if isinstance(self.visual, ModifiedResNet): + if self.visual.attnpool is not None: + std = self.visual.attnpool.c_proj.in_features ** -0.5 + nn.init.normal_(self.visual.attnpool.q_proj.weight, std=std) + nn.init.normal_(self.visual.attnpool.k_proj.weight, std=std) + nn.init.normal_(self.visual.attnpool.v_proj.weight, std=std) + nn.init.normal_(self.visual.attnpool.c_proj.weight, std=std) + + for resnet_block in [self.visual.layer1, self.visual.layer2, 
self.visual.layer3, self.visual.layer4]: + for name, param in resnet_block.named_parameters(): + if name.endswith("bn3.weight"): + nn.init.zeros_(param) + + proj_std = (self.transformer.width ** -0.5) * ((2 * self.transformer.layers) ** -0.5) + attn_std = self.transformer.width ** -0.5 + fc_std = (2 * self.transformer.width) ** -0.5 + for block in self.transformer.resblocks: + nn.init.normal_(block.attn.in_proj_weight, std=attn_std) + nn.init.normal_(block.attn.out_proj.weight, std=proj_std) + nn.init.normal_(block.mlp.c_fc.weight, std=fc_std) + nn.init.normal_(block.mlp.c_proj.weight, std=proj_std) + + if self.text_projection is not None: + nn.init.normal_(self.text_projection, std=self.transformer.width ** -0.5) + + def build_attention_mask(self): + # lazily create causal attention mask, with full attention between the vision tokens + # pytorch uses additive attention mask; fill with -inf + mask = torch.empty(self.context_length, self.context_length) + mask.fill_(float("-inf")) + mask.triu_(1) # zero out the lower diagonal + return mask + + @property + def dtype(self): + return self.visual.conv1.weight.dtype + + def encode_image(self, image): + return self.visual(image.type(self.dtype)) + + def encode_text(self, text): + x = self.token_embedding(text).type(self.dtype) # [batch_size, n_ctx, d_model] + + x = x + self.positional_embedding.type(self.dtype) + x = x.permute(1, 0, 2) # NLD -> LND + x = self.transformer(x) + x = x.permute(1, 0, 2) # LND -> NLD + x = self.ln_final(x).type(self.dtype) + + # x.shape = [batch_size, n_ctx, transformer.width] + # take features from the eot embedding (eot_token is the highest number in each sequence) + x = x[torch.arange(x.shape[0]), text.argmax(dim=-1)] @ self.text_projection + + return x + + def forward(self, image, text): + if image is None: + return self.encode_text(text) + elif text is None: + return self.encode_image(image) + image_features = self.encode_image(image) + text_features = self.encode_text(text) + + image_features = image_features / image_features.norm(dim=-1, keepdim=True) + text_features = text_features / text_features.norm(dim=-1, keepdim=True) + + return image_features, text_features, self.logit_scale.exp() + + +def convert_weights(model: nn.Module): + """Convert applicable model parameters to fp16""" + + def _convert_weights_to_fp16(l): + if isinstance(l, (nn.Conv1d, nn.Conv2d, nn.Linear)): + l.weight.data = l.weight.data.half() + if l.bias is not None: + l.bias.data = l.bias.data.half() + + if isinstance(l, nn.MultiheadAttention): + for attr in [*[f"{s}_proj_weight" for s in ["in", "q", "k", "v"]], "in_proj_bias", "bias_k", "bias_v"]: + tensor = getattr(l, attr) + if tensor is not None: + tensor.data = tensor.data.half() + + for name in ["text_projection", "proj"]: + if hasattr(l, name): + attr = getattr(l, name) + if attr is not None: + attr.data = attr.data.half() + + model.apply(_convert_weights_to_fp16) + + +def build_model(state_dict: dict): + vit = "visual.proj" in state_dict + + if vit: + vision_width = state_dict["visual.conv1.weight"].shape[0] + vision_layers = len([k for k in state_dict.keys() if k.startswith("visual.") and k.endswith(".attn.in_proj_weight")]) + vision_patch_size = state_dict["visual.conv1.weight"].shape[-1] + grid_size = round((state_dict["visual.positional_embedding"].shape[0] - 1) ** 0.5) + image_resolution = vision_patch_size * grid_size + else: + counts: list = [len(set(k.split(".")[2] for k in state_dict if k.startswith(f"visual.layer{b}"))) for b in [1, 2, 3, 4]] + vision_layers = tuple(counts) 
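Aside for orientation: the `forward` method above returns L2-normalized image and text features together with `logit_scale.exp()`. A hedged sketch of how those three outputs are typically combined into the symmetric contrastive (InfoNCE) objective used to train CLIP-style models follows; this is the standard recipe, not code lifted from this repository:

```
import torch
import torch.nn.functional as F

def clip_contrastive_loss(image_features, text_features, logit_scale):
    # Both feature tensors are assumed L2-normalized, shape [batch, dim].
    logits_per_image = logit_scale * image_features @ text_features.t()
    logits_per_text = logits_per_image.t()
    # Matching image/text pairs sit on the diagonal of the similarity matrix.
    labels = torch.arange(image_features.size(0), device=image_features.device)
    return (F.cross_entropy(logits_per_image, labels) +
            F.cross_entropy(logits_per_text, labels)) / 2
```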
+ vision_width = state_dict["visual.layer1.0.conv1.weight"].shape[0] + output_width = round((state_dict["visual.attnpool.positional_embedding"].shape[0] - 1) ** 0.5) + vision_patch_size = None + assert output_width ** 2 + 1 == state_dict["visual.attnpool.positional_embedding"].shape[0] + image_resolution = output_width * 32 + + embed_dim = state_dict["text_projection"].shape[1] + context_length = state_dict["positional_embedding"].shape[0] + vocab_size = state_dict["token_embedding.weight"].shape[0] + transformer_width = state_dict["ln_final.weight"].shape[0] + transformer_heads = transformer_width // 64 + transformer_layers = len(set(k.split(".")[2] for k in state_dict if k.startswith(f"transformer.resblocks"))) + + model = CLIP( + embed_dim, + image_resolution, vision_layers, vision_width, vision_patch_size, + context_length, vocab_size, transformer_width, transformer_heads, transformer_layers + ) + + for key in ["input_resolution", "context_length", "vocab_size"]: + if key in state_dict: + del state_dict[key] + + convert_weights(model) + model.load_state_dict(state_dict) + + return model.eval() \ No newline at end of file diff --git a/examples/fashionklip/clip/model_configs/ViT-B-32.json b/examples/fashionklip/clip/model_configs/ViT-B-32.json new file mode 100644 index 00000000..57b94e50 --- /dev/null +++ b/examples/fashionklip/clip/model_configs/ViT-B-32.json @@ -0,0 +1,12 @@ +{ + "embed_dim": 512, + "image_resolution": 224, + "vision_layers": 12, + "vision_width": 768, + "vision_patch_size": 32, + "context_length": 77, + "vocab_size": 49408, + "transformer_width": 512, + "transformer_heads": 8, + "transformer_layers": 12 +} \ No newline at end of file diff --git a/examples/fashionklip/clip/model_configs/ViT-L-14.json b/examples/fashionklip/clip/model_configs/ViT-L-14.json new file mode 100644 index 00000000..9dee5c46 --- /dev/null +++ b/examples/fashionklip/clip/model_configs/ViT-L-14.json @@ -0,0 +1,12 @@ +{ + "embed_dim": 768, + "image_resolution": 224, + "vision_layers": 24, + "vision_width": 1024, + "vision_patch_size": 14, + "context_length": 77, + "vocab_size": 49408, + "transformer_width": 768, + "transformer_heads": 12, + "transformer_layers": 12 +} diff --git a/examples/fashionklip/clip/tokenizer.py b/examples/fashionklip/clip/tokenizer.py new file mode 100644 index 00000000..7a2e07f8 --- /dev/null +++ b/examples/fashionklip/clip/tokenizer.py @@ -0,0 +1,140 @@ +import gzip +import html +import os +from functools import lru_cache + +import ftfy +import regex as re + + +@lru_cache() +def default_bpe(): + return os.path.join(os.path.dirname(os.path.abspath(__file__)), "bpe_simple_vocab_16e6.txt.gz") + + +@lru_cache() +def bytes_to_unicode(): + """ + Returns list of utf-8 byte and a corresponding list of unicode strings. + The reversible bpe codes work on unicode strings. + This means you need a large # of unicode characters in your vocab if you want to avoid UNKs. + When you're at something like a 10B token dataset you end up needing around 5K for decent coverage. + This is a signficant percentage of your normal, say, 32K bpe vocab. + To avoid that, we want lookup tables between utf-8 bytes and unicode strings. + And avoids mapping to whitespace/control characters the bpe code barfs on. 
+ """ + bs = list(range(ord("!"), ord("~")+1))+list(range(ord("¡"), ord("¬")+1))+list(range(ord("®"), ord("ÿ")+1)) + cs = bs[:] + n = 0 + for b in range(2**8): + if b not in bs: + bs.append(b) + cs.append(2**8+n) + n += 1 + cs = [chr(n) for n in cs] + return dict(zip(bs, cs)) + + +def get_pairs(word): + """Return set of symbol pairs in a word. + Word is represented as tuple of symbols (symbols being variable-length strings). + """ + pairs = set() + prev_char = word[0] + for char in word[1:]: + pairs.add((prev_char, char)) + prev_char = char + return pairs + + +def basic_clean(text): + text = ftfy.fix_text(text) + text = html.unescape(html.unescape(text)) + return text.strip() + + +def whitespace_clean(text): + text = re.sub(r'\s+', ' ', text) + text = text.strip() + return text + + +class SimpleTokenizer(object): + def __init__(self, bpe_path: str = default_bpe(), special_tokens=None): + self.byte_encoder = bytes_to_unicode() + self.byte_decoder = {v: k for k, v in self.byte_encoder.items()} + merges = gzip.open(bpe_path).read().decode("utf-8").split('\n') + merges = merges[1:49152-256-2+1] + merges = [tuple(merge.split()) for merge in merges] + vocab = list(bytes_to_unicode().values()) + vocab = vocab + [v+'' for v in vocab] + for merge in merges: + vocab.append(''.join(merge)) + if not special_tokens: + special_tokens = ['', ''] + else: + special_tokens = ['', ''] + special_tokens + vocab.extend(special_tokens) + self.encoder = dict(zip(vocab, range(len(vocab)))) + self.decoder = {v: k for k, v in self.encoder.items()} + self.bpe_ranks = dict(zip(merges, range(len(merges)))) + self.cache = {t:t for t in special_tokens} + special = "|".join(special_tokens) + self.pat = re.compile(special + r"""|'s|'t|'re|'ve|'m|'ll|'d|[\p{L}]+|[\p{N}]|[^\s\p{L}\p{N}]+""", re.IGNORECASE) + + self.vocab_size = len(self.encoder) + self.all_special_ids = [self.encoder[t] for t in special_tokens] + + def bpe(self, token): + if token in self.cache: + return self.cache[token] + word = tuple(token[:-1]) + ( token[-1] + '',) + pairs = get_pairs(word) + + if not pairs: + return token+'' + + while True: + bigram = min(pairs, key = lambda pair: self.bpe_ranks.get(pair, float('inf'))) + if bigram not in self.bpe_ranks: + break + first, second = bigram + new_word = [] + i = 0 + while i < len(word): + try: + j = word.index(first, i) + new_word.extend(word[i:j]) + i = j + except: + new_word.extend(word[i:]) + break + + if word[i] == first and i < len(word)-1 and word[i+1] == second: + new_word.append(first+second) + i += 2 + else: + new_word.append(word[i]) + i += 1 + new_word = tuple(new_word) + word = new_word + if len(word) == 1: + break + else: + pairs = get_pairs(word) + word = ' '.join(word) + self.cache[token] = word + return word + + def encode(self, text): + bpe_tokens = [] + text = whitespace_clean(basic_clean(text)).lower() + for token in re.findall(self.pat, text): + token = ''.join(self.byte_encoder[b] for b in token.encode('utf-8')) + bpe_tokens.extend(self.encoder[bpe_token] for bpe_token in self.bpe(token).split(' ')) + return bpe_tokens + + def decode(self, tokens): + text = ''.join([self.decoder[token] for token in tokens]) + text = bytearray([self.byte_decoder[c] for c in text]).decode('utf-8', errors="replace").replace('', ' ') + return text \ No newline at end of file diff --git a/examples/fashionklip/eval/data.py b/examples/fashionklip/eval/data.py new file mode 100644 index 00000000..dc76a6d1 --- /dev/null +++ b/examples/fashionklip/eval/data.py @@ -0,0 +1,97 @@ +import os +import logging +import 
json + +import numpy as np + +import torch +from torch.utils.data import Dataset, DataLoader, SubsetRandomSampler +from torch.utils.data.distributed import DistributedSampler +from torch.utils.data.sampler import SequentialSampler + +from training.data import DataInfo + +from clip.clip import tokenize + +class EvalTxtDataset(Dataset): + def __init__(self, jsonl_filename): + assert os.path.exists(jsonl_filename), "The annotation datafile {} not exists!".format(jsonl_filename) + + logging.debug(f'Loading jsonl data from {jsonl_filename}.') + self.queries = [] + with open(jsonl_filename, "r") as fin: + for line in fin: + obj = json.loads(line.strip()) + query_id = obj['query_id'] + query = obj['query_text'] + self.queries.append((query_id, query)) + logging.debug(f'Finished loading jsonl data from {jsonl_filename}.') + + def __len__(self): + return len(self.queries) + + def __getitem__(self, idx): + query_id, query = self.queries[idx] + text = tokenize([str(query)])[0] + return query_id, text + +class EvalImgDataset(Dataset): + def __init__(self, img_filename): + assert os.path.exists(img_filename), "The image npz datafile {} not exists!".format(img_filename) + + logging.debug(f'Loading image npzfile from {img_filename}.') + self.imgs = np.load(img_filename, "r") + self.img_ids = list(self.imgs.keys()) + logging.debug(f'Finished loading image npzfile from {img_filename}.') + + def _read_img_tensor_from_npzfile(self, img_id): + img_array = self.imgs[str(img_id)] + return torch.from_numpy(img_array) + + def __len__(self): + return len(self.img_ids) + + def __getitem__(self, idx): + img_id = self.img_ids[idx] + image = self._read_img_tensor_from_npzfile(img_id) + return img_id, image + +def get_eval_txt_dataset(args): + input_filename = args.text_data + dataset = EvalTxtDataset( + input_filename) + num_samples = len(dataset) + sampler = SequentialSampler(dataset) + + dataloader = DataLoader( + dataset, + batch_size=args.text_batch_size, + num_workers=0, + pin_memory=True, + sampler=sampler, + drop_last=False, + ) + dataloader.num_samples = num_samples + dataloader.num_batches = len(dataloader) + + return DataInfo(dataloader, sampler) + +def get_eval_img_dataset(args): + img_filename = args.image_data + dataset = EvalImgDataset( + img_filename) + num_samples = len(dataset) + sampler = SequentialSampler(dataset) + + dataloader = DataLoader( + dataset, + batch_size=args.img_batch_size, + num_workers=0, + pin_memory=True, + sampler=sampler, + drop_last=False, + ) + dataloader.num_samples = num_samples + dataloader.num_batches = len(dataloader) + + return DataInfo(dataloader, sampler) \ No newline at end of file diff --git a/examples/fashionklip/eval/extract_features.py b/examples/fashionklip/eval/extract_features.py new file mode 100644 index 00000000..9eb5faf1 --- /dev/null +++ b/examples/fashionklip/eval/extract_features.py @@ -0,0 +1,196 @@ +# -*- coding: utf-8 -*- +''' +This script extracts image and text features for evaluation. 
(with single-GPU) +''' + +import os +import argparse +import logging +from pathlib import Path +import json + +import torch +from tqdm import tqdm + +from clip.model import convert_weights, CLIP +from clip.model import build_model +from eval.data import get_eval_img_dataset, get_eval_txt_dataset + +def convert_models_to_fp32(model): + for p in model.parameters(): + p.data = p.data.float() + if p.grad: + p.grad.data = p.grad.data.float() + +def parse_args(): + parser = argparse.ArgumentParser() + parser.add_argument( + '--extract-image-feats', + action="store_true", + default=False, + help="Whether to extract image features." + ) + parser.add_argument( + '--extract-text-feats', + action="store_true", + default=False, + help="Whether to extract text features." + ) + parser.add_argument( + '--image-data', + type=str, + default="../Multimodal_Retrieval/test_imgs.224.npz", + help="If --extract-image-feats is True, specify the path of processed image npzfile." + ) + parser.add_argument( + '--text-data', + type=str, + default="../Multimodal_Retrieval/test_queries.jsonl", + help="If --extract-text-feats is True, specify the path of test query jsonl file." + ) + parser.add_argument( + '--image-feat-output-path', + type=str, + default=None, + help="If --extract-image-feats is True, specify the path of output image features." + ) + parser.add_argument( + '--text-feat-output-path', + type=str, + default=None, + help="If --extract-image-feats is True, specify the path of output text features." + ) + parser.add_argument( + "--img-batch-size", type=int, default=64, help="Image batch size." + ) + parser.add_argument( + "--text-batch-size", type=int, default=64, help="Text batch size." + ) + parser.add_argument( + "--resume", + default=None, + type=str, + help="path to latest checkpoint (default: none)", + ) + parser.add_argument( + "--precision", + choices=["amp", "fp16", "fp32"], + default="amp", + help="Floating point precition." + ) + parser.add_argument( + "--model", + choices=["ViT-B-32", "ViT-L-14"], + default="ViT-L-14", + help="Name of the vision backbone to use.", + ) + parser.add_argument( + "--debug", + default=False, + action="store_true", + help="If true, more information is logged." + ) + args = parser.parse_args() + + return args + + +if __name__ == "__main__": + args = parse_args() + + assert args.extract_image_feats or args.extract_text_feats, "--extract-image-feats and --extract-text-feats cannot both be False!" + + # Log params. + print("Params:") + for name in sorted(vars(args)): + val = getattr(args, name) + print(f" {name}: {val}") + + args.gpu = 0 + torch.cuda.set_device(args.gpu) + + # Initialize the model. + model_config_file = Path(__file__).parent / f"../clip/model_configs/{args.model.replace('/', '-')}.json" + print('Loading model from', model_config_file) + assert os.path.exists(model_config_file) + with open(model_config_file, 'r') as f: + model_info = json.load(f) + model = CLIP(**model_info) + convert_weights(model) + + # See https://discuss.pytorch.org/t/valueerror-attemting-to-unscale-fp16-gradients/81372 + if args.precision == "amp" or args.precision == "fp32": + convert_models_to_fp32(model) + model.cuda(args.gpu) + if args.precision == "fp16": + convert_weights(model) + + # Get data. + if args.extract_image_feats: + print("Preparing image inference dataset.") + img_data = get_eval_img_dataset(args) + if args.extract_text_feats: + print("Preparing text inference dataset.") + text_data = get_eval_txt_dataset(args) + + # Resume from a checkpoint. 
+ print("Begin to load model checkpoint from {}.".format(args.resume)) + assert os.path.exists(args.resume), "The checkpoint file {} not exists!".format(args.resume) + # Map model to be loaded to specified single gpu. + loc = "cuda:{}".format(args.gpu) + + # finetuned models load + checkpoint = torch.load(args.resume, map_location='cpu') + start_epoch = checkpoint["epoch"] + sd = checkpoint["state_dict"] + + if next(iter(sd.items()))[0].startswith('module'): + sd = {k[len('module.'):]: v for k, v in sd.items()} + model.load_state_dict(sd) + + print( + f"=> loaded checkpoint '{args.resume}' (epoch {checkpoint['epoch']})" + ) + + # Make inference for images + if args.extract_image_feats: + print('Make inference for images...') + if args.image_feat_output_path is None: + args.image_feat_output_path = "{}.img_feat.jsonl".format(args.image_data[:-4]) + write_cnt = 0 + with open(args.image_feat_output_path, "w") as fout: + model.eval() + dataloader = img_data.dataloader + with torch.no_grad(): + for batch in tqdm(dataloader): + image_ids, images = batch + images = images.cuda(args.gpu, non_blocking=True) + image_features = model(images, None) + image_features /= image_features.norm(dim=-1, keepdim=True) + for image_id, image_feature in zip(image_ids, image_features.tolist()): + fout.write("{}\n".format(json.dumps({"image_id": image_id, "feature": image_feature}))) + write_cnt += 1 + print('{} image features are stored in {}'.format(write_cnt, args.image_feat_output_path)) + + + # Make inference for texts + if args.extract_text_feats: + print('Make inference for texts...') + if args.text_feat_output_path is None: + args.text_feat_output_path = "{}.txt_feat.jsonl".format(args.text_data[:-6]) + write_cnt = 0 + with open(args.text_feat_output_path, "w") as fout: + model.eval() + dataloader = text_data.dataloader + with torch.no_grad(): + for batch in tqdm(dataloader): + query_ids, texts = batch + texts = texts.cuda(args.gpu, non_blocking=True) + text_features = model(None, texts) + text_features /= text_features.norm(dim=-1, keepdim=True) + for query_id, text_feature in zip(query_ids.tolist(), text_features.tolist()): + fout.write("{}\n".format(json.dumps({"query_id": query_id, "feature": text_feature}))) + write_cnt += 1 + print('{} query text features are stored in {}'.format(write_cnt, args.text_feat_output_path)) + + print("Done!") \ No newline at end of file diff --git a/examples/fashionklip/eval/predict_evaluate.py b/examples/fashionklip/eval/predict_evaluate.py new file mode 100644 index 00000000..890379bb --- /dev/null +++ b/examples/fashionklip/eval/predict_evaluate.py @@ -0,0 +1,244 @@ +# -*- coding: utf-8 -*- +''' +This scripts performs kNN search on inferenced image and text features (on single-GPU) and outputs prediction file for evaluation. +''' + +import argparse +import numpy +from tqdm import tqdm +import json + +import numpy as np +import torch +import pandas as pd + +def parse_args(): + parser = argparse.ArgumentParser() + parser.add_argument( + '--image-feats', + type=str, + required=True, + help="Specify the path of image features." + ) + parser.add_argument( + '--text-feats', + type=str, + required=True, + help="Specify the path of text features." + ) + parser.add_argument( + '--top-k', + type=int, + default=10, + help="Specify the k value of top-k predictions." 
+ ) + parser.add_argument( + '--eval-batch-size', + type=int, + default=8192, + help="Specify the image-side batch size when computing the inner products, default to 8192" + ) + parser.add_argument( + '--output-texts', + type=str, + required=True, + help="Specify the texts output jsonl prediction filepath." + ) + parser.add_argument( + '--output-images', + type=str, + required=True, + help="Specify the images output jsonl prediction filepath." + ) + parser.add_argument( + '--image-standard-path', + type=str, + required=True, + help="Specify the path of image ground-truth file." + ) + parser.add_argument( + '--text-standard-path', + type=str, + required=True, + help="Specify the path of text ground-truth file." + ) + parser.add_argument( + '--image-out-path', + type=str, + required=True, + help="Specify the image output json filepath." + ) + parser.add_argument( + '--text-out-path', + type=str, + required=True, + help="Specify the text output json filepath." + ) + return parser.parse_args() + + +def dump_2_json(info, path): + with open(path, 'w') as output_json_file: + json.dump(info, output_json_file) + + +def report_error_msg(detail, showMsg, out_p): + error_dict=dict() + error_dict['errorDetail']=detail + error_dict['errorMsg']=showMsg + error_dict['score']=0 + error_dict['scoreJson']={} + error_dict['success']=False + dump_2_json(error_dict,out_p) + + +def report_score(r1, r5, r10, out_p): + result = dict() + result['success']=True + mean_recall = (r1 + r5 + r10) / 3.0 + result['score'] = mean_recall * 100 + result['scoreJson'] = {'score': mean_recall * 100, 'mean_recall': mean_recall * 100, 'r1': r1 * 100, 'r5': r5 * 100, 'r10': r10 * 100} + dump_2_json(result,out_p) + + +def read_reference(path, key="query_id"): + fin = open(path) + reference = dict() + for line in fin: + line = line.strip() + obj = json.loads(line) + if 'item_ids' in obj: + reference[obj[key]] = obj['item_ids'] + else: + reference[obj[key]] = [obj['text']] + return reference + +def compute_score(query_key, golden_file, predictions, output_path): + # read ground-truth + reference = read_reference(golden_file, query_key) + + # compute score for each query + r1_stat, r5_stat, r10_stat = 0, 0, 0 + for qid in reference.keys(): + ground_truth_ids = set(reference[qid]) + top10_pred_ids = predictions[qid] + if any([idx in top10_pred_ids[:1] for idx in ground_truth_ids]): + r1_stat += 1 + if any([idx in top10_pred_ids[:5] for idx in ground_truth_ids]): + r5_stat += 1 + if any([idx in top10_pred_ids[:10] for idx in ground_truth_ids]): + r10_stat += 1 + # the higher score, the better + r1, r5, r10 = r1_stat * 1.0 / len(reference), r5_stat * 1.0 / len(reference), r10_stat * 1.0 / len(reference) + mean_recall = (r1 + r5 + r10) / 3.0 + result = [mean_recall, r1, r5, r10] + result = [item * 100 for item in result] + + report_score(r1, r5, r10, output_path) + + return result + +def evaluation(query_key, standard_path, submit_path, out_path): + try: + evaluation_result = compute_score(query_key, standard_path, submit_path, out_path) + print("The evaluation is saved in {}".format(out_path)) + except Exception as e: + report_error_msg(e.args[0], e.args[0], out_path) + print("The evaluation failed: {}".format(e.args[0])) + + +if __name__ == "__main__": + args = parse_args() + + # Log params. 
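+    # Input/output format: each feature file holds one JSON object per line,
+    # e.g. {"image_id": ..., "feature": [...]} or {"query_id": ..., "feature": [...]}.
+    # Predictions are written in the same one-object-per-line style as
+    # {"query_id"/"image_id": ..., "item_ids": [...]} and then scored against
+    # the ground-truth files as Recall@1/5/10.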
+ print("Params:") + for name in sorted(vars(args)): + val = getattr(args, name) + print(f" {name}: {val}") + + print("Begin to load image features...") + image_ids = [] + image_feats = [] + + with open(args.image_feats, "r") as fin: + for line in tqdm(fin): + obj = json.loads(line.strip()) + image_ids.append(obj['image_id']) + image_feats.append(obj['feature']) + image_feats_array = np.array(image_feats, dtype=np.float32) + print("Finished loading image features.") + + print("Begin to load text features...") + text_ids = [] + text_feats = [] + with open(args.text_feats, "r") as fin: + for line in tqdm(fin): + obj = json.loads(line.strip()) + if "query_id" not in obj: + text_ids.append(obj['image_id']) + else: + text_ids.append(obj['query_id']) + text_feats.append(obj['feature']) + text_feats_array = np.array(text_feats, dtype=np.float32) + print("Finished loading text features.") + + text_predictions = {} + print("Begin to compute top-{} predictions for queries...".format(args.top_k)) + with open(args.output_texts, "w") as fout: + with open(args.text_feats, "r") as fin: + for line in tqdm(fin): + obj = json.loads(line.strip()) + + query_id = obj['query_id'] + text_feat = obj['feature'] + + score_tuples = [] + text_feat_tensor = torch.tensor([text_feat], dtype=torch.float).cuda() # [1, feature_dim] + idx = 0 + while idx < len(image_ids): + img_feats_tensor = torch.from_numpy(image_feats_array[idx : min(idx + args.eval_batch_size, len(image_ids))]).cuda() # [batch_size, feature_dim] + batch_scores = text_feat_tensor @ img_feats_tensor.t() # [1, batch_size] + for image_id, score in zip(image_ids[idx : min(idx + args.eval_batch_size, len(image_ids))], batch_scores.squeeze(0).tolist()): + score_tuples.append((image_id, score)) + idx += args.eval_batch_size + top_k_predictions = sorted(score_tuples, key=lambda x:x[1], reverse=True)[:args.top_k] + fout.write("{}\n".format(json.dumps({"query_id": query_id, "item_ids": [entry[0] for entry in top_k_predictions]}))) + + text_predictions[query_id] = [entry[0] for entry in top_k_predictions] + + print("Top-{} predictions are saved in {}".format(args.top_k, args.output_texts)) + print("Done!") + + image_predictions = {} + print("Begin to compute top-{} predictions for images...".format(args.top_k)) + with open(args.output_images, "w") as fout: + with open(args.image_feats, "r") as fin: + for line in tqdm(fin): + obj = json.loads(line.strip()) + + image_id = obj['image_id'] + image_feat = obj['feature'] + + score_tuples = [] + image_feat_tensor = torch.tensor([image_feat], dtype=torch.float).cuda() # [1, feature_dim] + idx = 0 + while idx < len(text_ids): + text_feats_tensor = torch.from_numpy(text_feats_array[idx : min(idx + args.eval_batch_size, len(text_ids))]).cuda() # [batch_size, feature_dim] + batch_scores = image_feat_tensor @ text_feats_tensor.t() # [1, batch_size] + for text_id, score in zip(text_ids[idx : min(idx + args.eval_batch_size, len(text_ids))], batch_scores.squeeze(0).tolist()): + score_tuples.append((text_id, score)) + idx += args.eval_batch_size + top_k_predictions = sorted(score_tuples, key=lambda x:x[1], reverse=True)[:args.top_k] + fout.write("{}\n".format(json.dumps({"image_id": image_id, "item_ids": [entry[0] for entry in top_k_predictions]}))) + + image_predictions[image_id] = [entry[0] for entry in top_k_predictions] + + print("Top-{} predictions are saved in {}".format(args.top_k, args.output_images)) + print("Done!") + + print("Begin to evaluate images and texts...") + + evaluation("query_id", args.text_standard_path, 
text_predictions, args.text_out_path) + evaluation("image_id", args.image_standard_path, image_predictions, args.image_out_path) + + print("Done evaluation!") \ No newline at end of file diff --git a/examples/fashionklip/run_local.sh b/examples/fashionklip/run_local.sh new file mode 100755 index 00000000..5bd49ed2 --- /dev/null +++ b/examples/fashionklip/run_local.sh @@ -0,0 +1,123 @@ +export PYTHONPATH="$PYTHONPATH:$PWD/src" +export CUDA_VISIBLE_DEVICES=$1 + +MASTER_ADDR=tcp://127.0.0.1:11907 + +mode=$2 + +if [ ! -f ./tmp/fashion-gen.tgz ]; then + wget -P ./tmp https://atp-modelzoo-sh.oss-cn-shanghai.aliyuncs.com/release/tutorials/fashionklip/fashion-gen.tgz + tar zxvf ./tmp/fashion-gen.tgz -C ./tmp +fi + +if [ ! -f ./tmp/fashion_kb.tgz ]; then + wget -P ./tmp https://atp-modelzoo-sh.oss-cn-shanghai.aliyuncs.com/release/tutorials/fashionklip/fashion_kb.tgz + tar zxvf ./tmp/fashion_kb.tgz -C ./tmp +fi + +if [ ! -f ./tmp/pretrained_models/ViT-B-32.pt ]; then + wget -P ./tmp2/pretrained_models https://atp-modelzoo-sh.oss-cn-shanghai.aliyuncs.com/release/tutorials/fashionklip/openai/ViT-B-32.pt +fi + +if [ ! -f ./tmp/pretrained_models/pai-clip-commercial-base-en.tgz ]; then + wget -P ./tmp/pretrained_models https://atp-modelzoo-sh.oss-cn-shanghai.aliyuncs.com/release/tutorials/fashionklip/pai-clip-commercial-base-en.tgz + tar zxvf ./tmp/pretrained_models/pai-clip-commercial-base-en.tgz -C ./tmp/pretrained_models +fi + +DATAPATH=./tmp +PRETRAINED_MODEL=./tmp/pretrained_models +SAVEFOLDER=./tmp/predictions_finetune + +if [ "$mode" = "finetune" ]; then + + python3 -u training/main_all_concept.py \ + --save-most-recent \ + --save-frequency 1 \ + --report-to tensorboard \ + --train-data="${DATAPATH}/fashiongen/train/fashion-gen_train_queries_phrases.jsonl" \ + --train-img="${DATAPATH}/fashiongen/train/train.224.npz" \ + --txt-id-filename="${DATAPATH}/fashiongen/train/fashion-gen_concepts_queries.jsonl" \ + --kb-txt-id-filename="${DATAPATH}/fashion_kb/icbu_concepts_queries.jsonl" \ + --val-data="${DATAPATH}/fashiongen/val/fashion-gen_val_queries.jsonl" \ + --val-img="${DATAPATH}/fashiongen/val/val.224.npz" \ + --img-data-sets="${DATAPATH}/fashion_kb/icbu_train_images_5w_00.224.npz" \ + --concept-data="${DATAPATH}/fashiongen/train/fashion-gen_concepts_queries.jsonl" \ + --kb-concept-data="${DATAPATH}/fashion_kb/icbu_concepts_queries.jsonl" \ + --resume="${PRETRAINED_MODEL}/pai-clip-commercial-base-en/pai-clip-commercial-base-en.pt" \ + --is-concept \ + --is-data-concept \ + --is-update \ + --dist-url=$MASTER_ADDR \ + --dataset-type jsonl \ + --warmup 500 \ + --batch-size=32 \ + --eval-batch-size=256 \ + --lr=1e-5 \ + --wd=0.001 \ + --epochs=25 \ + --workers=0 \ + --model ViT-B/32 + +elif [ "$mode" = "finetune_only" ]; then + + python3 -u training/main_all_concept.py \ + --save-most-recent \ + --save-frequency 1 \ + --report-to tensorboard \ + --train-data="${DATAPATH}/fashiongen/train/fashion-gen_train_queries_phrases.jsonl" \ + --train-img="${DATAPATH}/fashiongen/train/train.224.npz" \ + --txt-id-filename="${DATAPATH}/fashiongen/train/fashion-gen_concepts_queries.jsonl" \ + --kb-txt-id-filename="${DATAPATH}/fashion_kb/icbu_concepts_queries.jsonl" \ + --val-data="${DATAPATH}/fashiongen/val/fashion-gen_val_queries.jsonl" \ + --val-img="${DATAPATH}/fashiongen/val/val.224.npz" \ + --img-data-sets="${DATAPATH}/fashion_kb/icbu_train_images_5w_00.224.npz" \ + --concept-data="${DATAPATH}/fashiongen/train/fashion-gen_concepts_queries.jsonl" \ + 
--kb-concept-data="${DATAPATH}/fashion_kb/icbu_concepts_queries.jsonl" \ + --is-concept \ + --is-data-concept \ + --is-update \ + --dist-url=$MASTER_ADDR \ + --dataset-type jsonl \ + --warmup 500 \ + --batch-size=32 \ + --eval-batch-size=256 \ + --lr=1e-5 \ + --wd=0.001 \ + --epochs=30 \ + --workers=0 \ + --model ViT-B/32 \ + --openai-pretrained + +elif [ "$mode" = "evaluate" ]; then + + if [ ! -d $SAVEFOLDER ]; then + mkdir $SAVEFOLDER + fi + + python3 -u eval/extract_features.py \ + --extract-image-feats \ + --extract-text-feats \ + --image-data="${DATAPATH}/fashiongen/val/val.224.npz" \ + --text-data="${DATAPATH}/fashiongen/val/fashion-gen_val_queries.jsonl" \ + --img-batch-size=32 \ + --text-batch-size=32 \ + --resume="${PRETRAINED_MODEL}/pai-clip-commercial-base-en/pai-clip-commercial-base-en.pt" \ + --image-feat-output-path="${SAVEFOLDER}/fashion-gen_test_imgs.img_feat.jsonl" \ + --text-feat-output-path="${SAVEFOLDER}/fashion-gen_test_texts.txt_feat.jsonl" \ + --model ViT-B-32 + + python3 -u eval/predict_evaluate.py \ + --image-feats="${SAVEFOLDER}/fashion-gen_test_imgs.img_feat.jsonl" \ + --text-feats="${SAVEFOLDER}/fashion-gen_test_texts.txt_feat.jsonl" \ + --top-k=10 \ + --eval-batch-size=32768 \ + --output-images="${SAVEFOLDER}/test_imgs_predictions.jsonl" \ + --output-texts="${SAVEFOLDER}/test_txts_predictions.jsonl" \ + --text-standard-path="${DATAPATH}/fashiongen/val/fashion-gen_val_queries.jsonl" \ + --image-standard-path="${DATAPATH}/fashiongen/val/fashion-gen_val_images.jsonl" \ + --text-out-path="${SAVEFOLDER}//fashion_text_output.json" \ + --image-out-path="${SAVEFOLDER}/./fashion_image_output.json" + +fi + +# --resume="${PRETRAINED_MODEL}/pai-clip-commercial-base-en/pai-clip-commercial-base-en.pt" \ \ No newline at end of file diff --git a/examples/fashionklip/training/data.py b/examples/fashionklip/training/data.py new file mode 100644 index 00000000..e01377e2 --- /dev/null +++ b/examples/fashionklip/training/data.py @@ -0,0 +1,476 @@ +import os +import sys +import math +import logging +import functools +import braceexpand +import random +import pdb +import json + +import pandas as pd +import numpy as np +import pyarrow as pa +from PIL import Image + +from typing import Union +from dataclasses import dataclass + +import torch +from torch.utils.data import Dataset, DataLoader, SubsetRandomSampler +from torch.utils.data.sampler import SequentialSampler +from torch.utils.data.distributed import DistributedSampler +import torchvision.datasets as datasets + +from clip.clip import tokenize + +import io +from PIL import Image +import random + +from tqdm import tqdm + +Image.MAX_IMAGE_PIXELS = 933120000 + +def parse_obj_dict(file_path, keywords): + ids_objs = {} + with open(file_path, "r") as fin: + for line in tqdm(fin): + obj = json.loads(line.strip()) + item_id = obj[keywords[0]] + item_objs = obj[keywords[1]] + + ids_objs[item_id] = item_objs + + return ids_objs + +class JsonlDataset(Dataset): + def __init__(self, jsonl_filename, img_filename): + assert os.path.exists(jsonl_filename), "The annotation datafile {} not exists!".format(jsonl_filename) + assert os.path.exists(img_filename), "The image npz datafile {} not exists!".format(img_filename) + + logging.debug(f'Loading jsonl data from {jsonl_filename}.') + self.samples = [] + with open(jsonl_filename, "r") as fin: + for line in fin: + if len(line) >0: + try: + obj = json.loads(line.strip()) + query_id = obj['query_id'] + query = obj['query_text'] + for target in obj['item_ids']: + self.samples.append((query_id, query, 
target)) + except: + print('-------------------') + print(line) + print('-------------------') + + logging.debug(f'Finished loading jsonl data from {jsonl_filename}.') + print((f'Finished loading jsonl data from {jsonl_filename}.')) + + logging.debug(f'Loading image npzfile from {img_filename}.') + print(f'Start loading image npzfile from {img_filename}.') + + self.imgs = np.load(img_filename, "r") + + logging.debug(f'Finished loading image npzfile from {img_filename}.') + print(f'Finished loading image npzfile from {img_filename}.') + + def _read_img_tensor_from_npzfile(self, img_id): + img_array = self.imgs[str(img_id)] + return torch.from_numpy(img_array) + + def __len__(self): + return len(self.samples) + + def __getitem__(self, idx): + query_id, query, img_id = self.samples[idx] + image = self._read_img_tensor_from_npzfile(img_id) + text = tokenize([str(query)])[0] + return image, text + +class Jsonl_Concept_Dataset_Update(Dataset): + def __init__(self, jsonl_filename, img_filename, txt_id_filename): + assert os.path.exists(jsonl_filename), "The annotation datafile {} not exists!".format(jsonl_filename) + assert os.path.exists(img_filename), "The image npz datafile {} not exists!".format(img_filename) + + logging.debug(f'Loading jsonl data from {jsonl_filename}.') + self.samples = [] + + self.text2id = parse_obj_dict(txt_id_filename, ["phrase", "query_id"]) + self.pred_keys = self.text2id.keys() + + with open(jsonl_filename, "r") as fin: + for line in fin: + if len(line) > 0: + try: + obj = json.loads(line.strip()) + query_id = obj['query_id'] + query = obj['query_text'] + phrases = obj['match_phrases'] + phrases = [phrase_item for phrase_item in phrases if len(phrase_item) >= 2 and phrase_item in self.pred_keys] + # phrases = [phrase_item for phrase_item in phrases if len(phrase_item) >= 3] + for target in obj['item_ids']: + self.samples.append((query_id, query, target, phrases)) + except: + print('-------------------') + print(line) + print('-------------------') + + logging.debug(f'Finished loading jsonl data from {jsonl_filename}.') + print((f'Finished loading jsonl data from {jsonl_filename}.')) + + logging.debug(f'Loading image npzfile from {img_filename}.') + print(f'Start loading image npzfile from {img_filename}.') + + self.imgs = np.load(img_filename, "r") + self.prompt = "a photo of {}" + + logging.debug(f'Finished loading image npzfile from {img_filename}.') + print(f'Finished loading image npzfile from {img_filename}.') + + self.con_len = 5 + + def _read_img_tensor_from_npzfile(self, img_id): + img_array = self.imgs[str(img_id)] + return torch.from_numpy(img_array) + + def __len__(self): + return len(self.samples) + + def __getitem__(self, idx): + query_id, query, img_id, query_concept = self.samples[idx] + image = self._read_img_tensor_from_npzfile(img_id) + text = tokenize([str(query)])[0] + + concept_len = len(query_concept) + if concept_len < self.con_len: + for obj_idx in range(concept_len, self.con_len): + query_concept.append("") + elif concept_len >= self.con_len: + query_concept = query_concept[:self.con_len] + + concepts = tokenize([self.prompt.format(query_item) for query_item in query_concept]) + concept_ids = [self.text2id[item] if item != "" else -1 for item in query_concept] + + return image, text, concepts, concept_ids + +class Jsonl_All_Concept_Dataset_Update_Hierarchy(Dataset): + def __init__(self, jsonl_filename, img_filename, txt_id_filename, kb_txt_id_filename): + assert os.path.exists(jsonl_filename), "The annotation datafile {} not 
exists!".format(jsonl_filename) + assert os.path.exists(img_filename), "The image npz datafile {} not exists!".format(img_filename) + + logging.debug(f'Loading jsonl data from {jsonl_filename}.') + self.samples = [] + + # self.predictions = parse_obj_dict(txt_img_prediction, ["query_id", "item_ids"]) + self.text2id = parse_obj_dict(txt_id_filename, ["phrase", "query_id"]) + self.text2id_kb = parse_obj_dict(kb_txt_id_filename, ["phrase", "query_id"]) + + self.concept_fathers = parse_obj_dict("./tmp/fashion_kb/icbu_concepts_fathers.jsonl", ["phrase", "phrase_father"]) + self.kb_concept_fathers = parse_obj_dict("./tmp/fashiongen/train/fashion-gen_concepts_fathers.jsonl", ["phrase", "phrase_father"]) + + self.pred_keys = self.text2id.keys() + self.pred_kb_keys = self.text2id_kb.keys() + + with open(jsonl_filename, "r") as fin: + for line in fin: + if len(line) > 0: + try: + obj = json.loads(line.strip()) + query_id = obj['query_id'] + query = obj['query_text'] + phrases = obj['phrases'] + match_phrases = obj['match_phrases'] + + phrases = [phrase_item for phrase_item in phrases if 2 <= len(phrase_item.split()) <= 6 and phrase_item in self.pred_keys and phrase_item not in match_phrases] + match_phrases = [phrase_item for phrase_item in match_phrases if len(phrase_item) >= 2 and phrase_item in self.pred_kb_keys] + for target in obj['item_ids']: + self.samples.append((query_id, query, target, phrases, match_phrases)) + except: + print('-------------------') + print(line) + print('-------------------') + + logging.debug(f'Finished loading jsonl data from {jsonl_filename}.') + print((f'Finished loading jsonl data from {jsonl_filename}.')) + + logging.debug(f'Loading image npzfile from {img_filename}.') + print(f'Start loading image npzfile from {img_filename}.') + + self.imgs = np.load(img_filename, "r") + self.prompt = "a photo of {}" + + logging.debug(f'Finished loading image npzfile from {img_filename}.') + print(f'Finished loading image npzfile from {img_filename}.') + + self.con_len = 5 + + def _read_img_tensor_from_npzfile(self, img_id): + img_array = self.imgs[str(img_id)] + return torch.from_numpy(img_array) + + def __len__(self): + return len(self.samples) + + def _process_concepts(self, concepts, concept_father_dict, text2id_dict): + concept_len = len(concepts) + concept_fathers = list(set([concept_father_dict[concept_item] for concept_item in concepts if concept_item in concept_father_dict and concept_father_dict[concept_item] != "" ])) + concept_grand_fathers = list(set([concept_father_dict[concept_item] for concept_item in concept_fathers if concept_item in concept_father_dict and concept_father_dict[concept_item] != ""])) + + father_len = len(concept_fathers) + grand_father_len = len(concept_grand_fathers) + + if concept_len < self.con_len: + if (concept_len + father_len) < self.con_len: + concepts.extend(concept_fathers) + concepts = list(set(concepts)) + concept_len = len(concepts) + + if (concept_len + grand_father_len) < self.con_len: + concepts.extend(concept_fathers) + concepts = list(set(concepts)) + concept_len = len(concepts) + + if concept_len < self.con_len: + for obj_idx in range(concept_len, self.con_len): + concepts.append("") + elif concept_len >= self.con_len: + concepts = concepts[:self.con_len] + + tokenized_concepts = tokenize([self.prompt.format(concept_item) for concept_item in concepts]) + concept_ids = [text2id_dict[item] if item != "" and item in text2id_dict else -1 for item in concepts] + + return tokenized_concepts, concept_ids + + def __getitem__(self, 
idx): + query_id, query, img_id, query_concept, query_kb_concept = self.samples[idx] + image = self._read_img_tensor_from_npzfile(img_id) + text = tokenize([str(query)])[0] + + data_concepts, data_concept_ids = self._process_concepts(query_concept, self.concept_fathers, self.text2id) + kb_concepts, kb_concept_ids = self._process_concepts(query_kb_concept, self.kb_concept_fathers, self.text2id_kb) + + return image, text, data_concepts, kb_concepts, data_concept_ids, kb_concept_ids + + +class EvalImgDataset(Dataset): + def __init__(self, img_filename): + assert os.path.exists(img_filename), "The image npz datafile {} not exists!".format(img_filename) + + logging.debug(f'Loading image npzfile from {img_filename}.') + self.imgs = np.load(img_filename, "r") + self.img_ids = list(self.imgs.keys()) + logging.debug(f'Finished loading image npzfile from {img_filename}.') + + def _read_img_tensor_from_npzfile(self, img_id): + img_array = self.imgs[str(img_id)] + return torch.from_numpy(img_array) + + def __len__(self): + return len(self.img_ids) + + def __getitem__(self, idx): + img_id = self.img_ids[idx] + image = self._read_img_tensor_from_npzfile(img_id) + return img_id, image + + +class EvalImgDatasetList(Dataset): + def __init__(self, img_file_sets): + self.img_ids = [] + self.imgs = {} + for img_filename in img_file_sets: + assert os.path.exists(img_filename), "The image npz datafile {} not exists!".format(img_filename) + logging.debug(f'Loading image npzfile from {img_filename}.') + + self.imgs.update(np.load(img_filename, "r")) + self.img_ids.extend(list(self.imgs.keys())) + + logging.debug(f'Finished loading image npzfile from {img_filename}.') + + def _read_img_tensor_from_npzfile(self, img_id): + img_array = self.imgs[str(img_id)] + return torch.from_numpy(img_array) + + def __len__(self): + return len(self.img_ids) + + def __getitem__(self, idx): + img_id = self.img_ids[idx] + image = self._read_img_tensor_from_npzfile(img_id) + return img_id, image + + +class EvalTxtDataset(Dataset): + def __init__(self, jsonl_filename): + assert os.path.exists(jsonl_filename), "The annotation datafile {} not exists!".format(jsonl_filename) + + logging.debug(f'Loading jsonl data from {jsonl_filename}.') + self.queries = [] + with open(jsonl_filename, "r") as fin: + for line in fin: + obj = json.loads(line.strip()) + query_id = obj['query_id'] + query = obj['query_text'] + self.queries.append((query_id, query)) + logging.debug(f'Finished loading jsonl data from {jsonl_filename}.') + + def __len__(self): + return len(self.queries) + + def __getitem__(self, idx): + query_id, query = self.queries[idx] + text = tokenize([str(query)])[0] + return query_id, text + + +@dataclass +class DataInfo: + dataloader: DataLoader + sampler: DistributedSampler + +def preprocess_txt(text): + return tokenize([str(text)])[0] + +def get_dataset_size(shards): + shards_list = list(braceexpand.braceexpand(shards)) + dir_path = os.path.dirname(shards) + sizes_filename = os.path.join(dir_path, 'sizes.json') + sizes = json.load(open(sizes_filename, 'r')) + total_size = sum( + [int(sizes[os.path.basename(shard)]) for shard in shards_list]) + num_shards = len(shards_list) + return total_size, num_shards + +def get_jsonl_dataset(args, preprocess_fn, is_train): + input_filename = args.train_data if is_train else args.val_data + img_filename = args.train_img if is_train else args.val_img + + if is_train and args.is_concept and args.is_update: + if args.is_data_concept: + dataset = Jsonl_All_Concept_Dataset_Update_Hierarchy( + 
input_filename, + img_filename, + args.txt_id_filename, + args.kb_txt_id_filename) + else: + dataset = Jsonl_Concept_Dataset_Update( + input_filename, + img_filename, + args.txt_img_prediction, + args.txt_id_filename) + else: + dataset = JsonlDataset( + input_filename, + img_filename) + num_samples = len(dataset) + sampler = DistributedSampler(dataset) if args.distributed and is_train else None + shuffle = is_train and sampler is None + + dataloader = DataLoader( + dataset, + batch_size=args.batch_size, + shuffle=shuffle, + num_workers=0, + pin_memory=True, + sampler=sampler, + drop_last=is_train, + ) + dataloader.num_samples = num_samples + dataloader.num_batches = len(dataloader) + + return DataInfo(dataloader, sampler) + +def get_dataset_fn(data_path, dataset_type): + if dataset_type == "jsonl": + return get_jsonl_dataset + elif dataset_type == "auto": + ext = data_path.split('.')[-1] + if ext in ['jsonl']: + return get_json_dataset + else: + raise ValueError( + f"Tried to figure out dataset type, but failed for extention {ext}.") + else: + raise ValueError(f"Unsupported dataset type: {dataset_type}") + + +def get_data(args, preprocess_fns): + preprocess_train, preprocess_val = preprocess_fns + data = {} + + if args.train_data: + data["train"] = get_dataset_fn(args.train_data, args.dataset_type)( + args, preprocess_train, is_train=True) + if args.val_data: + data["val"] = get_dataset_fn(args.val_data, args.dataset_type)( + args, preprocess_val, is_train=False) + + return data + + +def get_eval_img_dataset(args): + img_filename = args.img_data + dataset = EvalImgDataset( + img_filename) + num_samples = len(dataset) + sampler = DistributedSampler(dataset) if args.distributed else SequentialSampler(dataset) + + dataloader = DataLoader( + dataset, + batch_size=args.eval_batch_size, + shuffle=False, + num_workers=0, + pin_memory=True, + sampler=sampler, + drop_last=False, + ) + dataloader.num_samples = num_samples + dataloader.num_batches = len(dataloader) + + return DataInfo(dataloader, sampler) + +def get_eval_img_dataset_list(args): + img_file_sets = args.img_data_sets + dataset = EvalImgDatasetList( + img_file_sets) + num_samples = len(dataset) + sampler = DistributedSampler(dataset) if args.distributed else SequentialSampler(dataset) + + dataloader = DataLoader( + dataset, + batch_size=args.eval_batch_size, + shuffle=False, + num_workers=0, + pin_memory=True, + sampler=sampler, + drop_last=False, + ) + dataloader.num_samples = num_samples + dataloader.num_batches = len(dataloader) + + return DataInfo(dataloader, sampler) + + +def get_eval_txt_dataset(args, is_kb=False, max_txt_length=77): + input_filename = args.concept_data if not is_kb else args.kb_concept_data + dataset = EvalTxtDataset( + input_filename, + max_txt_length=max_txt_length) + num_samples = len(dataset) + sampler = DistributedSampler(dataset) if args.distributed else SequentialSampler(dataset) + + dataloader = DataLoader( + dataset, + batch_size=args.eval_batch_size, + shuffle=False, + num_workers=0, + pin_memory=True, + sampler=sampler, + drop_last=False, + ) + dataloader.num_samples = num_samples + dataloader.num_batches = len(dataloader) + + return DataInfo(dataloader, sampler) diff --git a/examples/fashionklip/training/logger.py b/examples/fashionklip/training/logger.py new file mode 100644 index 00000000..3dbf150a --- /dev/null +++ b/examples/fashionklip/training/logger.py @@ -0,0 +1,90 @@ +import argparse +import logging +from logging import Filter +from logging.handlers import QueueHandler, QueueListener + +import 
torch +import torch.distributed as dist +import torch.multiprocessing as mp +from torch.multiprocessing import Queue + + +def setup_primary_logging(log_file, level): + log_queue = Queue(-1) + + file_handler = logging.FileHandler(filename=log_file) + stream_handler = logging.StreamHandler() + + formatter = logging.Formatter( + '%(asctime)s | %(levelname)s | %(message)s', + datefmt='%Y-%m-%d,%H:%M:%S') + + file_handler.setFormatter(formatter) + stream_handler.setFormatter(formatter) + + file_handler.setLevel(level) + stream_handler.setLevel(level) + + listener = QueueListener(log_queue, file_handler, stream_handler) + + listener.start() + + return log_queue + + +class WorkerLogFilter(Filter): + def __init__(self, rank=-1): + super().__init__() + self._rank = rank + + def filter(self, record): + if self._rank != -1: + record.msg = f"Rank {self._rank} | {record.msg}" + return True + + +def setup_worker_logging(rank, log_queue, level): + queue_handler = QueueHandler(log_queue) + + worker_filter = WorkerLogFilter(rank) + queue_handler.addFilter(worker_filter) + + queue_handler.setLevel(level) + + root_logger = logging.getLogger() + root_logger.addHandler(queue_handler) + + root_logger.setLevel(level) + + +def fake_worker(rank: int, world_size: int, log_queue: Queue): + setup_worker_logging(rank, log_queue, logging.DEBUG) + logging.info("Test worker log") + logging.error("Test worker error log") + torch.cuda.set_device(rank) + dist.init_process_group( + backend='nccl', + init_method='tcp://127.0.0.1:6100', + world_size=world_size, + rank=rank, + ) + +if __name__ == "__main__": + # Set multiprocessing type to spawn + torch.multiprocessing.set_start_method("spawn") + + parser = argparse.ArgumentParser() + parser.add_argument("-g", "--gpu-list", type=int, help="List of GPU IDs", nargs="+", required=True) + + args = parser.parse_args() + + world_size = len(args.gpu_list) + + # Initialize the primary logging handlers. Use the returned `log_queue` + # to which the worker processes would use to push their messages + log_queue = setup_primary_logging("/usr/lusers/gamaga/out.log", logging.DEBUG) + + if world_size == 1: + worker(0, world_size, log_queue) + else: + mp.spawn(fake_worker, args=(world_size, log_queue), nprocs=world_size) \ No newline at end of file diff --git a/examples/fashionklip/training/main_all_concept.py b/examples/fashionklip/training/main_all_concept.py new file mode 100644 index 00000000..d12f705e --- /dev/null +++ b/examples/fashionklip/training/main_all_concept.py @@ -0,0 +1,332 @@ +import sys +import os +import time +import logging +from time import gmtime, strftime +from pathlib import Path +import json + +import wandb +import torch +from torch import optim +import torch.distributed as dist +import torch.multiprocessing as mp +import torch.backends.cudnn as cudnn +from torch.utils.tensorboard import SummaryWriter +from torch.cuda.amp import GradScaler + +from clip.clip import _transform, load +from clip.model import convert_weights, CLIP +from training.train_all_concept import train, evaluate +from training.train_epoch import train_only +from training.data import get_data, get_eval_img_dataset_list, get_eval_txt_dataset +from training.params import parse_args +from training.logger import setup_primary_logging, setup_worker_logging +from training.scheduler import cosine_lr + +# Used by https://github.com/openai/CLIP/issues/83 but not below. +# Keeping it incase needed. 
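+# convert_models_to_fp32 casts parameters (and any existing gradients) back to
+# fp32; clip.model.convert_weights does the reverse and casts the applicable
+# weights to fp16 for half-precision training.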
+def convert_models_to_fp32(model): + for p in model.parameters(): + p.data = p.data.float() + if p.grad: + p.grad.data = p.grad.data.float() + +def is_master(args): + return (not args.distributed) or args.gpu == 0 or args.dp + +def main_worker(gpu, ngpus_per_node, log_queue, args): + args.gpu = gpu + args.rank = gpu + setup_worker_logging(args.rank, log_queue, args.log_level) + + # Log and save params. + if is_master(args): + logging.info("Params:") + params_file = os.path.join(args.logs, args.name, "params.txt") + with open(params_file, "w") as f: + for name in sorted(vars(args)): + val = getattr(args, name) + logging.info(f" {name}: {val}") + f.write(f"{name}: {val}\n") + + if args.distributed: + dist.init_process_group( + backend=args.dist_backend, + init_method=args.dist_url, + world_size=args.world_size, + rank=args.rank, + ) + + if args.dp: + args.batch_size *= args.world_size + + if args.gpu is not None: + logging.info(f"Use GPU: {args.gpu} for training") + torch.cuda.set_device(args.gpu) + + # Do not use skip_reset unless you want to use on of the CLIP model + + model_config_file = Path(__file__).parent / f"model_configs/{args.model.replace('/', '-')}.json" + print('Loading model from', model_config_file) + assert os.path.exists(model_config_file) + with open(model_config_file, 'r') as f: + model_info = json.load(f) + + embed_dim = model_info["embed_dim"] + + if args.openai_pretrained: + print("Start loading pretrained CLIP...") + model, preprocess_train, preprocess_val = load( + args.model, + jit=False, + is_train=True) + else: + model = CLIP(**model_info) + convert_weights(model) + preprocess_train = _transform(model.visual.input_resolution, is_train=True) + preprocess_val = _transform(model.visual.input_resolution, is_train=False) + + # See https://discuss.pytorch.org/t/valueerror-attemting-to-unscale-fp16-gradients/81372 + if args.precision == "amp" or args.precision == "fp32" or args.gpu is None: + convert_models_to_fp32(model) + + if not torch.cuda.is_available(): + model.float() + logging.warning("using CPU, this will be slow") + else: + model.cuda(args.gpu) + if args.precision == "fp16": + convert_weights(model) + + if args.distributed and args.use_bn_sync: + model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model) + if args.distributed: + model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu]) + if args.dp: + model = torch.nn.DataParallel(model, device_ids=args.multigpu) + + if args.precision == "fp16": + convert_weights(model) + + print("Start loading data...") + data = get_data(args, (preprocess_train, preprocess_val)) + img_data_sets = get_eval_img_dataset_list(args) + concept_data = get_eval_txt_dataset(args) + kb_concept_data = get_eval_txt_dataset(args, is_kb=True) + print("Finished loading data...") + + exclude = lambda n : "bn" in n or "ln" in n or "bias" in n or 'logit_scale' in n + include = lambda n : not exclude(n) + + named_parameters = list(model.named_parameters()) + gain_or_bias_params = [p for n, p in named_parameters if exclude(n) and p.requires_grad] + rest_params = [p for n, p in named_parameters if include(n) and p.requires_grad] + + if args.train_data is None: + optimizer = None + scheduler = None + else: + optimizer = optim.AdamW( + [ + {"params": gain_or_bias_params, "weight_decay": 0.}, + {"params": rest_params, "weight_decay": args.wd}, + ], + lr=args.lr, + betas=(args.beta1, args.beta2), + eps=args.eps, + ) + total_steps = data["train"].dataloader.num_batches * args.epochs + scheduler = cosine_lr(optimizer, args.lr, 
args.warmup, total_steps) + + scaler = GradScaler() if args.precision == "amp" else None + + # optionally resume from a checkpoint + start_epoch = 0 + if args.resume is not None: + if os.path.isfile(args.resume): + if args.gpu is None: + checkpoint = torch.load(args.resume) + else: + # Map model to be loaded to specified single gpu. + loc = "cuda:{}".format(args.gpu) + checkpoint = torch.load(args.resume, map_location=loc) + start_epoch = checkpoint["epoch"] + sd = checkpoint["state_dict"] + if not args.distributed and next(iter(sd.items()))[0].startswith('module'): + sd = {k[len('module.'):]: v for k, v in sd.items()} + model.load_state_dict(sd) + # if optimizer is not None: + # optimizer.load_state_dict(checkpoint["optimizer"]) + logging.info( + f"=> loaded checkpoint '{args.resume}' (epoch {checkpoint['epoch']})" + ) + else: + logging.info("=> no checkpoint found at '{}'".format(args.resume)) + + cudnn.benchmark = True + cudnn.deterministic = False + + # determine if this worker should save logs and checkpoints. + # only do so if it is the 0th worker. + args.save_logs = (args.logs is not None and args.logs != '' and args.logs.lower() != 'none') and ( + (not args.distributed) or args.gpu == 0 + ) + writer = None + if args.save_logs and args.tensorboard: + writer = SummaryWriter(args.tensorboard_path) + + if args.wandb and is_master(args): + logging.debug('Starting wandb.') + args.train_sz = data["train"].dataloader.num_samples + if args.val_data is not None: + args.val_sz = data["val"].dataloader.num_samples + # you will have to configure this for your project! + wandb.init( + project="open-clip", + notes=args.wandb_notes, + tags=[], + config=vars(args), + ) + if args.debug: + wandb.watch(model, log='all') + wandb.save(params_file) + logging.debug('Finished loading wandb.') + + if args.train_data is None: + evaluate(model, data, start_epoch, args, writer, 0) + return + + for epoch in range(start_epoch, args.epochs): + print((f'Start epoch {epoch}')) + if args.gpu == 0: + logging.info(f'Start epoch {epoch}') + + if args.is_finetune_only and epoch < 10: + print((f'Start training epoch {epoch} w/o knowledge...')) + train_only(model, data, epoch, optimizer, scaler, scheduler, args, writer) + else: + print((f'Start training epoch {epoch} with knowledge...')) + train(model, data, img_data_sets, concept_data, kb_concept_data, embed_dim, epoch, optimizer, scaler, scheduler, args, writer) + steps = data["train"].dataloader.num_batches * (epoch + 1) + + if args.val_data is not None: + print((f'Start evaluation epoch {epoch}...')) + evaluate(model, data, epoch + 1, args, writer, steps) + + # Saving checkpoints. 
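+        # Each checkpoint stores the epoch counter, the experiment name and the
+        # model/optimizer state dicts; the --resume branch above restores the
+        # epoch and model weights from exactly this layout.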
+ if args.save_logs and (args.gpu == 0 or (not args.distributed)): + if (epoch + 1) == args.epochs or ( + args.save_frequency > 0 and ((epoch + 1) % args.save_frequency) == 0 + ): + torch.save( + { + "epoch": epoch + 1, + "name": args.name, + "state_dict": model.state_dict(), + "optimizer": optimizer.state_dict(), + }, + os.path.join(args.checkpoint_path, f"epoch_{epoch + 1}.pt"), + ) + if args.save_most_recent: + torch.save( + { + "epoch": epoch + 1, + "name": args.name, + "state_dict": model.state_dict(), + "optimizer": optimizer.state_dict(), + }, + os.path.join(args.checkpoint_path, f"epoch_latest.pt"), + ) + + if args.wandb and (args.gpu == 0 or (not args.distributed)): + wandb.finish() + + +def main(): + args = parse_args() + + # get the name of the experiments + if args.name is None: + args.name = strftime( + f"lr={args.lr}_" + f"wd={args.wd}_" + f"agg={args.aggregate}_" + f"model={args.model}_" + f"batchsize={args.batch_size}_workers={args.workers}_date=%Y-%m-%d-%H-%M-%S", + gmtime(), + ) + + if args.copy_codebase: + import sys, subprocess + from shutil import copytree, ignore_patterns + new_code_path = os.path.join(args.logs, args.name, "code") + if os.path.exists(new_code_path): + print( + f"Error. Experiment already exists at {new_code_path}. Use --name to specify a new experiment." + ) + return -1 + print(f"Copying codebase to {new_code_path}") + current_code_path = os.path.realpath(__file__) + for _ in range(3): + current_code_path = os.path.dirname(current_code_path) + copytree(current_code_path, new_code_path, ignore=ignore_patterns('log', 'logs', 'wandb')) + print("Done copying code.") + os.environ["PYTHONPATH"] = f"{os.environ['PYTHONPATH']}:{os.path.join(new_code_path, 'src')}" + main_file = os.path.join(new_code_path, "src", "training", "main.py") + argv = sys.argv + argv.remove('--copy-codebase') + argv.extend(['--name', args.name]) + command = [sys.executable] + argv + print("Executing command:", " ".join(command)) + subprocess.check_call(command) + return 1 + + args.log_path = os.path.join(args.logs, args.name, "out.log") + if os.path.exists(args.log_path): + print( + "Error. Experiment already exists. Use --name {} to specify a new experiment." + ) + return -1 + + assert args.precision in ['amp', 'fp16', 'fp32'] + #assert args.model in ['RN50', 'RN101', 'RN50x4', 'ViT-B/32'] or os.path.exists(args.model) + + args.ngpus_per_node = torch.cuda.device_count() + + args.wandb = 'wandb' in args.report_to or 'all' in args.report_to + args.tensorboard = 'tensorboard' in args.report_to or 'all' in args.report_to + + args.tensorboard_path = os.path.join(args.logs, args.name, "tensorboard") if args.tensorboard else '' + args.checkpoint_path = os.path.join(args.logs, args.name, "checkpoints") + for dirname in [args.tensorboard_path, args.checkpoint_path]: + if dirname: + os.makedirs(dirname, exist_ok=True) + + + # Set multiprocessing type to spawn. + # This is important for logging to work with multiprocessing. + torch.multiprocessing.set_start_method("spawn") + + # Set logger + args.log_level = logging.DEBUG if args.debug else logging.INFO + log_queue = setup_primary_logging(args.log_path, args.log_level) + + # Distributed training = training on more than one GPU. + # Also easily possible to extend to multiple nodes & multiple GPUs. 
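+    # Launch modes: with no --gpu given and CUDA available (and --dp off), one
+    # worker process is spawned per visible GPU and trained with
+    # DistributedDataParallel; --dp keeps a single process and wraps the model
+    # in DataParallel over --multigpu; otherwise everything runs in-process on
+    # the selected device.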
+ args.distributed = (args.gpu is None) and torch.cuda.is_available() and (not args.dp) + if args.distributed: + ngpus_per_node = torch.cuda.device_count() + args.world_size = ngpus_per_node + mp.spawn(main_worker, nprocs=ngpus_per_node, args=(ngpus_per_node, log_queue, args)) + else: + if args.dp: + args.gpu = args.multigpu[0] + args.world_size = len(args.multigpu) + else: + args.world_size = 1 + main_worker(args.gpu, None, log_queue, args) + + +if __name__ == "__main__": + main() diff --git a/examples/fashionklip/training/model_configs/ViT-B-32.json b/examples/fashionklip/training/model_configs/ViT-B-32.json new file mode 100644 index 00000000..57b94e50 --- /dev/null +++ b/examples/fashionklip/training/model_configs/ViT-B-32.json @@ -0,0 +1,12 @@ +{ + "embed_dim": 512, + "image_resolution": 224, + "vision_layers": 12, + "vision_width": 768, + "vision_patch_size": 32, + "context_length": 77, + "vocab_size": 49408, + "transformer_width": 512, + "transformer_heads": 8, + "transformer_layers": 12 +} \ No newline at end of file diff --git a/examples/fashionklip/training/model_configs/ViT-L-14.json b/examples/fashionklip/training/model_configs/ViT-L-14.json new file mode 100644 index 00000000..9dee5c46 --- /dev/null +++ b/examples/fashionklip/training/model_configs/ViT-L-14.json @@ -0,0 +1,12 @@ +{ + "embed_dim": 768, + "image_resolution": 224, + "vision_layers": 24, + "vision_width": 1024, + "vision_patch_size": 14, + "context_length": 77, + "vocab_size": 49408, + "transformer_width": 768, + "transformer_heads": 12, + "transformer_layers": 12 +} diff --git a/examples/fashionklip/training/params.py b/examples/fashionklip/training/params.py new file mode 100644 index 00000000..628d5544 --- /dev/null +++ b/examples/fashionklip/training/params.py @@ -0,0 +1,282 @@ +import argparse + + +def get_default_params(model_name): + # Params from paper (https://arxiv.org/pdf/2103.00020.pdf) + if model_name in ["ViT-B/32"]: + return {"lr": 5.0e-4, "beta1": 0.9, "beta2": 0.98, "eps": 1.0e-6} + elif model_name in ["ViT-L/14"]: + return {"lr": 4.0e-4, "beta1": 0.9, "beta2": 0.98, "eps": 1.0e-6} + else: + return {} + + +def parse_args(): + parser = argparse.ArgumentParser() + parser.add_argument( + "--train-data", + type=str, + default=None, + help="Path to csv filewith training data", + ) + parser.add_argument( + "--val-data", + type=str, + default=None, + help="Path to csv file with validation data", + ) + parser.add_argument( + "--train-img", + type=str, + default=None, + help="Path to csv filewith training images npz file", + ) + parser.add_argument( + "--val-img", + type=str, + default=None, + help="Path to csv file with validation images npz file", + ) + parser.add_argument( + "--dataset-type", + choices=["jsonl", "json", "csv", "auto"], + default="auto", + help="Which type of dataset to process." + ) + parser.add_argument( + "--csv-separator", + type=str, + default=",", + help="For csv-like datasets, which separator to use." + ) + parser.add_argument( + "--csv-img-key", + type=str, + default="filepath", + help="For csv-like datasets, the name of the key for the image paths." + ) + parser.add_argument( + "--csv-caption-key", + type=str, + default="title", + help="For csv-like datasets, the name of the key for the captions." + ) + parser.add_argument( + "--txt-img-prediction", + type=str, + default=None, + help="For jsonl-like file of fashion base, with concepts with their predicted images." 
+ ) + parser.add_argument( + "--txt-id-filename", + type=str, + default=None, + help="For jsonl-like file fashion concepts, with concepts with their ids." + ) + parser.add_argument( + "--kb-txt-id-filename", + type=str, + default=None, + help="For jsonl-like file icbu concepts, with concepts with their ids." + ) + parser.add_argument( + "--img-data", + type=str, + default=None, + help="Path to concept images npz file", + ) + parser.add_argument( + "--img-data-sets", + action='append', + default=None, + help="Path to a set of concept images npz files", + ) + parser.add_argument( + "--concept-data", + type=str, + default=None, + help="Path to jsonl file with concept queries", + ) + parser.add_argument( + "--kb-concept-data", + type=str, + default=None, + help="Path to jsonl file with knowledge base concept queries", + ) + parser.add_argument( + "--top-k", + type=int, + default=10, + help="k predictions for each concept", + ) + parser.add_argument( + "--logs", + type=str, + default="./logs/", + help="Where to store tensorboard logs. Use None to avoid storing logs.", + ) + parser.add_argument( + "--name", + type=str, + default=None, + help="Optional identifier for the experiment when storing logs. Otherwise use current time.", + ) + parser.add_argument( + "--workers", type=int, default=1, help="Number of workers per GPU." + ) + parser.add_argument( + "--batch-size", type=int, default=64, help="Batch size per GPU." + ) + parser.add_argument( + "--eval-batch-size", type=int, default=256, help="Batch size per GPU." + ) + parser.add_argument( + "--epochs", type=int, default=32, help="Number of epochs to train for." + ) + parser.add_argument("--lr", type=float, default=None, help="Learning rate.") + parser.add_argument("--beta1", type=float, default=None, help="Adam beta 1.") + parser.add_argument("--beta2", type=float, default=None, help="Adam beta 2.") + parser.add_argument("--eps", type=float, default=None, help="Adam epsilon.") + parser.add_argument("--wd", type=float, default=0.2, help="Weight decay.") + parser.add_argument( + "--warmup", type=int, default=10000, help="Number of steps to warmup for." + ) + parser.add_argument("--use-bn-sync", + default=False, + action="store_true", + help="Whether to use batch norm sync.") + parser.add_argument( + "--gpu", + type=int, + default=None, + help="Specify a single GPU to run the code on for debugging." + "Leave at None to use all available GPUs.", + ) + parser.add_argument( + "--skip-scheduler", + action="store_true", + default=False, + help="Use this flag to skip the learning rate decay.", + ) + parser.add_argument( + "--save-frequency", type=int, default=1, help="How often to save checkpoints." + ) + parser.add_argument( + "--save-most-recent", + action="store_true", + default=False, + help="Always save the most recent model trained to epoch_latest.pt.", + ) + parser.add_argument( + "--resume", + default=None, + type=str, + help="path to latest checkpoint (default: none)", + ) + parser.add_argument( + "--precision", + choices=["amp", "fp16", "fp32"], + default="amp", + help="Floating point precition." 
+ ) + parser.add_argument( + "--model", + choices=["ViT-B/32", "ViT-L/14"], + default="RN50", + help="Name of the vision backbone to use.", + ) + parser.add_argument( + "--openai-pretrained", + default=False, + action='store_true', + help="Use the openai pretrained models.", + ) + parser.add_argument( + "--is-concept", + default=False, + action='store_true', + help="Augmented with Fashion KB.", + ) + parser.add_argument( + "--is-data-concept", + default=False, + action='store_true', + help="Augmented with dataset specific Fashion KB.", + ) + parser.add_argument( + "--is-update", + default=False, + action='store_true', + help="Update concept-image prediction by epoch.", + ) + parser.add_argument( + "--is-finetune-only", + default=False, + action='store_true', + help="", + ) + # arguments for distributed training + parser.add_argument( + "--dist-url", + default="tcp://127.0.0.1:6100", + type=str, + help="url used to set up distributed training", + ) + parser.add_argument( + "--dist-backend", default="nccl", type=str, help="distributed backend" + ) + parser.add_argument( + "--skip-aggregate", + default=False, + action="store_true", + help="whether to aggregate features across gpus before computing the loss" + ) + parser.add_argument( + "--report-to", + default='', + type=str, + help="Options are ['wandb', 'tensorboard', 'wandb,tensorboard']" + ) + parser.add_argument( + "--wandb-notes", + default='', + type=str, + help="Notes if logging with wandb" + ) + parser.add_argument( + "--C", type=float, default=3.16, help="inverse regularizer for logistic reg." + ) + parser.add_argument( + "--debug", + default=False, + action="store_true", + help="If true, more information is logged." + ) + parser.add_argument( + "--copy-codebase", + default=False, + action="store_true", + help="If true, we copy the entire base on the log diretory, and execute from there." + ) + parser.add_argument( + "--dp", + default=False, + action="store_true", + help="Use DP instead of DDP." + ) + parser.add_argument( + "--multigpu", + default=None, + type=lambda x: [int(a) for a in x.split(",")], + help="In DP, which GPUs to use for multigpu training", + ) + args = parser.parse_args() + args.aggregate = not args.skip_aggregate + + # If some params are not passed, we use the default values based on model name. 
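+    # Only attributes that are still None are filled in, so values passed on
+    # the command line always win; e.g. "--model ViT-B/32" without an explicit
+    # --lr falls back to the 5.0e-4 default from get_default_params.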
+ default_params = get_default_params(args.model) + for name, val in default_params.items(): + if getattr(args, name) is None: + setattr(args, name, val) + + return args diff --git a/examples/fashionklip/training/scheduler.py b/examples/fashionklip/training/scheduler.py new file mode 100644 index 00000000..200cec80 --- /dev/null +++ b/examples/fashionklip/training/scheduler.py @@ -0,0 +1,20 @@ +import numpy as np + +def assign_learning_rate(optimizer, new_lr): + for param_group in optimizer.param_groups: + param_group["lr"] = new_lr + +def _warmup_lr(base_lr, warmup_length, step): + return base_lr * (step + 1) / warmup_length + +def cosine_lr(optimizer, base_lr, warmup_length, steps): + def _lr_adjuster(step): + if step < warmup_length: + lr = _warmup_lr(base_lr, warmup_length, step) + else: + e = step - warmup_length + es = steps - warmup_length + lr = 0.5 * (1 + np.cos(np.pi * e / es)) * base_lr + assign_learning_rate(optimizer, lr) + return lr + return _lr_adjuster \ No newline at end of file diff --git a/examples/fashionklip/training/train_all_concept.py b/examples/fashionklip/training/train_all_concept.py new file mode 100644 index 00000000..e8d40cab --- /dev/null +++ b/examples/fashionklip/training/train_all_concept.py @@ -0,0 +1,629 @@ +import os +import time +import json +import numpy as np + +import torch +import torch.nn as nn + +from torch.cuda.amp import autocast +import torch.distributed as dist + +import sys +import pdb +import wandb + +import logging + +from tqdm import tqdm + +import faiss + +from sklearn.metrics.pairwise import cosine_similarity +from typing import List + +def mmr(query_embedding: np.ndarray, + candidate_embeddings: np.ndarray, + idxes: List[str], + top_n: int = 10, + diversity: float = 0.8) -> List[str]: + + query_candidate_similarity = cosine_similarity(candidate_embeddings, query_embedding) + candidate_similarity = cosine_similarity(candidate_embeddings) + + keywords_idx = [np.argmax(query_candidate_similarity)] + candidates_idx = [i for i in range(len(idxes)) if i != keywords_idx[0]] + + for _ in range(top_n - 1): + candidate_similarities = query_candidate_similarity[candidates_idx, :] + target_similarities = np.max(candidate_similarity[candidates_idx][:, keywords_idx], axis=1) + + # Calculate MMR + mmr = (1 - diversity) * candidate_similarities - diversity * target_similarities.reshape(-1, 1) + mmr_idx = candidates_idx[np.argmax(mmr)] + + # Update keywords & candidates + keywords_idx.append(mmr_idx) + candidates_idx.remove(mmr_idx) + + return [idxes[idx] for idx in keywords_idx] + +def load_image_data(args, model, image_data_sets): + dataloader = image_data_sets.dataloader + image_feat_dict = {} + m = model.module if args.distributed or args.dp else model + m.eval() + cnt = 0 + with torch.no_grad(): + for batch in tqdm(dataloader): + image_ids, images = batch + images = images.cuda(args.gpu, non_blocking=True) + image_features = m(images, None) + image_features /= image_features.norm(dim=-1, keepdim=True) + for image_id, image_feature in zip(image_ids, image_features.tolist()): + image_feat_dict[image_id] = image_feature + cnt += 1 + + print("Finished forwarding image features with {} items.".format(str(cnt))) + + return image_feat_dict + + +def load_concept_data(args, model, concept_data): + dataloader = concept_data.dataloader + concept_feat_dict = {} + m = model.module if args.distributed or args.dp else model + m.eval() + cnt = 0 + with torch.no_grad(): + for batch in tqdm(dataloader): + query_ids, texts = batch + texts = texts.cuda(args.gpu, 
non_blocking=True) + text_features = m(None, texts) + text_features /= text_features.norm(dim=-1, keepdim=True) + for query_id, text_feature in zip(query_ids.tolist(), text_features.tolist()): + concept_feat_dict[query_id] = text_feature + cnt += 1 + print('Finished forwarding concept features with {} items'.format(str(cnt))) + + return concept_feat_dict + + +def faiss_prediction(dim, concept_features, image_features): + concept_prediction_dict = {} + + concept_ids = [] + concept_feats = [] + for concept_id, concept_feat in tqdm(concept_features.items()): + concept_ids.append(concept_id) + concept_feats.append(concept_feat) + + concept_feats = np.array(concept_feats[:], dtype=np.float32) + + image_ids = [] + image_feats = [] + for image_id, image_feat in tqdm(image_features.items()): + image_ids.append(int(image_id)) + image_feats.append(image_feat) + + image_feats = np.array(image_feats[:], dtype=np.float32) + image_ids = np.array(image_ids[:]) + + nlist, k = 1, 20 + quantizer = faiss.IndexFlatIP(dim) + index = faiss.IndexIVFFlat(quantizer, dim, nlist, faiss.METRIC_INNER_PRODUCT) + faiss.normalize_L2(image_feats) + index.train(image_feats) + index.add_with_ids(image_feats, image_ids) + faiss.normalize_L2(concept_feats) + distances, indexes = index.search(concept_feats, k) + + for concept_id, prediction in zip(concept_ids, indexes): + prediction = [str(item) for item in prediction] + pred_feats = [] + for pred_id in prediction: + pred_feats.append(image_features[pred_id]) + pred_feats = np.array(pred_feats[:]) + concept_id_feat = np.array([concept_features[concept_id]]) + diverse_preds = mmr(concept_id_feat, pred_feats, prediction) + diverse_preds = [int(pred_item) for pred_item in diverse_preds] + concept_prediction_dict[concept_id] = diverse_preds + + return concept_prediction_dict + + +def is_master(args): + return (not args.distributed) or args.gpu == 0 + +def get_loss(model, images, texts, concepts, concepts_images_features, loss_img, loss_txt, args): + image_features, text_features, logit_scale = model(images, texts) + + concepts_features = [] + bs, con_len, context_length = concepts.shape + bs_len, feat_len = image_features.shape + for concept_idx in range(con_len): + text_concepts = [concept[concept_idx].unsqueeze(0) for concept in concepts] + text_concepts = torch.cat(text_concepts) + + concept_features = model.module(None, text_concepts) + concept_features = concept_features / concept_features.norm(p=2, dim=-1, keepdim=True) + concepts_features.append(concept_features) + + logit_scale = logit_scale.mean() + if args.distributed and args.aggregate: + world_size = dist.get_world_size() + rank = dist.get_rank() + + # We gather tensors from all gpus to get more negatives to contrast with. 
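+        # all_gather returns tensors without gradient history, so each rank
+        # re-inserts its own image/text features in place of the gathered copy
+        # below; gradients then flow for the local samples while the features
+        # from the other ranks only act as additional negatives.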
+ gathered_image_features = [ + torch.zeros_like(image_features) for _ in range(world_size) + ] + gathered_text_features = [ + torch.zeros_like(text_features) for _ in range(world_size) + ] + dist.all_gather(gathered_image_features, image_features) + dist.all_gather(gathered_text_features, text_features) + + all_image_features = torch.cat( + [image_features] + + gathered_image_features[:rank] + + gathered_image_features[rank + 1 :] + ) + all_text_features = torch.cat( + [text_features] + + gathered_text_features[:rank] + + gathered_text_features[rank + 1 :] + ) + + gathered_all_concepts_features = [] + for concept_features in concepts_features: + gathered_concept_features = [torch.zeros_like(concept_features) for _ in range(world_size)] + dist.all_gather(gathered_concept_features, concept_features) + + all_concept_features = torch.cat([concept_features] + gathered_concept_features[:rank] + gathered_concept_features[rank + 1 :]) + gathered_all_concepts_features.append(all_concept_features) + + gathered_all_concepts_images_features = [ + torch.zeros_like(concepts_images_features) for _ in range(world_size) + ] + dist.all_gather(gathered_all_concepts_images_features, concepts_images_features) + all_concepts_images_features = torch.cat( + [concepts_images_features] + + gathered_all_concepts_images_features[:rank] + + gathered_all_concepts_images_features[rank + 1 :] + ) + + + # print(all_concepts_images_features.shape) + # expected torch.Size([64, 3, 10, 512]) + + # this is needed to send gradients back everywhere. + logits_per_image = logit_scale * all_image_features @ all_text_features.t() + logits_per_text = logits_per_image.t() + + total_bs, img_feat_len = all_image_features.shape + + all_image_features_resize = all_image_features.unsqueeze(1) + all_image_features_resize = all_image_features_resize.permute(0, 2, 1) + + # print(all_image_features_resize.shape) + # expected torch.Size([64, 512, 1]) + + all_image_con_concepts_images_index = [] + all_image_con_concepts_weights = [] + for con_idx in range(con_len): + con_concepts_images_features = all_concepts_images_features[:, con_idx, :, :].squeeze(dim=1) + + image_con_concepts_images = torch.bmm(con_concepts_images_features, all_image_features_resize).permute(0, 2, 1) + + image_con_concepts_images_index_values, image_con_concepts_images_index = image_con_concepts_images.topk(k=5, dim=2) + + image_con_concepts_images_index = image_con_concepts_images_index.squeeze(1) + image_con_concepts_images_index_values = image_con_concepts_images_index_values.squeeze(1) + + max_concepts_images_features = [] + for topk_idx in range(5): + image_con_concepts_images_index_values[:, topk_idx] = image_con_concepts_images_index_values[:, topk_idx] / image_con_concepts_images_index_values[:, topk_idx].norm(p=2, dim=-1, keepdim=True) + max_concepts_images_features_idx = torch.cat([con_concepts_images_features[bs_idx, max_idx, :].unsqueeze(0) for bs_idx, max_idx in enumerate(image_con_concepts_images_index[:, topk_idx].tolist())]) + max_concepts_images_features_idx = max_concepts_images_features_idx.unsqueeze(dim=1) + + max_concepts_images_features.append(max_concepts_images_features_idx) + + max_concepts_images_features = torch.cat(max_concepts_images_features, dim=1) + + all_image_con_concepts_images_index.append(max_concepts_images_features) + all_image_con_concepts_weights.append(image_con_concepts_images_index_values) + + logits_concept_per_image = [] + logits_per_concept = [] + for topk_idx in range(5): + logits_concept_per_image_idx = [logit_scale * 
all_image_con_concepts_images_index[concept_idx][:, topk_idx, :] @ gathered_all_concepts_features[concept_idx].t() for concept_idx in range(con_len)] + logits_per_concept_idx = [logits_concept_image.t() for logits_concept_image in logits_concept_per_image_idx] + + logits_concept_per_image.append(logits_concept_per_image_idx) + logits_per_concept.append(logits_per_concept_idx) + + else: + logits_per_image = logit_scale * image_features @ text_features.t() + logits_per_text = logit_scale * text_features @ image_features.t() + + ground_truth = torch.arange(len(logits_per_image)).long() + if args.gpu is not None: + ground_truth = ground_truth.cuda(args.gpu, non_blocking=True) + + total_loss = ( + loss_img(logits_per_image, ground_truth) + + loss_txt(logits_per_text, ground_truth) + ) / 2 + + concept_loss = 0 + for concept_idx in range(con_len): + concept_idx_loss = 0 + concept_weight_sum = sum(sum(all_image_con_concepts_weights[concept_idx])) + for topk_idx in range(5): + concept_idx_weight = sum(all_image_con_concepts_weights[concept_idx][:, topk_idx]) / concept_weight_sum + concept_idx_weight = concept_idx_weight.detach() + + logits_concept_per_image_idx = logits_concept_per_image[concept_idx][topk_idx].cuda(args.gpu) + logits_per_concept_idx = logits_per_concept[concept_idx][topk_idx].cuda(args.gpu) + + idx_weight = all_image_con_concepts_weights[concept_idx][:, topk_idx] + + loss_con_img = nn.CrossEntropyLoss(weight=idx_weight.detach()) + loss_con_txt = nn.CrossEntropyLoss(weight=idx_weight.detach()) + + if args.gpu is not None: + loss_con_img = loss_con_img.cuda(args.gpu) + loss_con_txt = loss_con_txt.cuda(args.gpu) + + concept_idx_loss += concept_idx_weight * (loss_con_img(logits_concept_per_image_idx, ground_truth) + loss_con_txt(logits_per_concept_idx, ground_truth)) / 2 + + concept_loss += concept_idx_loss + + concept_loss /= con_len + + return total_loss, concept_loss + + +def predict_concept_images(args, ids, predictions, concept_image_feats, feat_dim): + bs = len(ids[0]) + con_len = len(ids) + + preds_resize = [] + for idx in range(bs): + batch_preds = [] + for j in range(con_len): + if ids[j][idx] != -1: + concepts_preds = predictions[int(ids[j][idx].item())] + else: + concepts_preds = [""] * 10 + batch_preds.append(concepts_preds) + preds_resize.append(batch_preds) + + all_concept_images = [] + for pred in preds_resize: + concept_images = [] + for pred_item in pred: + pred_features = [torch.from_numpy(np.array(concept_image_feats[str(pred_id)], dtype=np.float32)).cuda(args.gpu).unsqueeze(0) + if pred_id != "" else torch.Tensor(np.array([0] * feat_dim, dtype=np.float32)).unsqueeze(0).cuda(args.gpu, non_blocking=True) + for pred_id in pred_item] + pred_features = torch.cat(pred_features) + concept_images.append(pred_features.unsqueeze(0).cuda(args.gpu, non_blocking=True)) + concept_images = torch.cat(concept_images).cuda(args.gpu, non_blocking=True) + + all_concept_images.append(concept_images.unsqueeze(0)) + + all_concept_images = torch.cat(all_concept_images).cuda(args.gpu, non_blocking=True) + # print(all_concept_images.shape) + + return all_concept_images + + +def gather_text_feat_dict(text_feature_dict, rank, world_size, args): + text_feature_dict_keys = torch.Tensor(list(text_feature_dict.keys())).cuda(args.gpu) + text_feature_dict_values = torch.Tensor(list(text_feature_dict.values())).cuda(args.gpu) + + gathered_text_feature_dict_keys = [ + torch.zeros_like(text_feature_dict_keys) for _ in range(world_size) + ] + gathered_text_feature_dict_values = [ + 
torch.zeros_like(text_feature_dict_values) for _ in range(world_size) + ] + dist.all_gather(gathered_text_feature_dict_keys, text_feature_dict_keys) + dist.all_gather(gathered_text_feature_dict_values, text_feature_dict_values) + + all_text_feature_dict_keys = torch.cat( + [text_feature_dict_keys] + + gathered_text_feature_dict_keys[:rank] + + gathered_text_feature_dict_keys[rank + 1 :] + ) + all_text_feature_dict_values = torch.cat( + [text_feature_dict_values] + + gathered_text_feature_dict_values[:rank] + + gathered_text_feature_dict_values[rank + 1 :] + ) + + all_text_feature_dict_keys = all_text_feature_dict_keys.cpu().numpy().tolist() + all_text_feature_dict_values = all_text_feature_dict_values.cpu().numpy().tolist() + + all_text_feature_dict = {} + for key_idx, feat_key_id in tqdm(enumerate(all_text_feature_dict_keys)): + feat_value = all_text_feature_dict_values[key_idx] + all_text_feature_dict[feat_key_id] = feat_value + + return all_text_feature_dict + + +def gather_image_feat_dict(image_feature_dict, rank, world_size, args): + image_feature_dict_keys = [int(pred_id) for pred_id in list(image_feature_dict.keys())] + + image_feature_dict_keys = torch.Tensor(image_feature_dict_keys).cuda(args.gpu) + + image_feature_dict_values = torch.Tensor(list(image_feature_dict.values())).cuda(args.gpu) + + gathered_image_feature_dict_keys = [ + torch.zeros_like(image_feature_dict_keys) for _ in range(world_size) + ] + gathered_image_feature_dict_values = [ + torch.zeros_like(image_feature_dict_values) for _ in range(world_size) + ] + dist.all_gather(gathered_image_feature_dict_keys, image_feature_dict_keys) + dist.all_gather(gathered_image_feature_dict_values, image_feature_dict_values) + + all_image_feature_dict_keys = torch.cat( + [image_feature_dict_keys] + + gathered_image_feature_dict_keys[:rank] + + gathered_image_feature_dict_keys[rank + 1 :] + ) + all_image_feature_dict_values = torch.cat( + [image_feature_dict_values] + + gathered_image_feature_dict_values[:rank] + + gathered_image_feature_dict_values[rank + 1 :] + ) + + all_image_feature_dict_keys = all_image_feature_dict_keys.cpu().numpy().tolist() + all_image_feature_dict_values = all_image_feature_dict_values.cpu().numpy().tolist() + + all_image_feature_dict = {} + for key_idx, feat_key_id in tqdm(enumerate(all_image_feature_dict_keys)): + feat_value = all_image_feature_dict_values[key_idx] + all_image_feature_dict[str(int(feat_key_id))] = feat_value + + return all_image_feature_dict + +def gather_prediction_dict(concept_image_predictions, rank, world_size, args): + concept_image_predictions_keys = torch.Tensor(list(concept_image_predictions.keys())).cuda(args.gpu) + concept_image_predictions_values = torch.Tensor(list(concept_image_predictions.values())).cuda(args.gpu) + + gathered_concept_image_predictions_keys = [ + torch.zeros_like(concept_image_predictions_keys) for _ in range(world_size) + ] + gathered_concept_image_predictions_values = [ + torch.zeros_like(concept_image_predictions_values) for _ in range(world_size) + ] + dist.all_gather(gathered_concept_image_predictions_keys, concept_image_predictions_keys) + dist.all_gather(gathered_concept_image_predictions_values, concept_image_predictions_values) + + all_concept_image_predictions_keys = torch.cat( + [concept_image_predictions_keys] + + gathered_concept_image_predictions_keys[:rank] + + gathered_concept_image_predictions_keys[rank + 1 :] + ) + all_concept_image_predictions_values = torch.cat( + [concept_image_predictions_values] + + 
gathered_concept_image_predictions_values[:rank] + + gathered_concept_image_predictions_values[rank + 1 :] + ) + + all_concept_image_predictions_keys = all_concept_image_predictions_keys.cpu().numpy().tolist() + all_concept_image_predictions_values = all_concept_image_predictions_values.cpu().numpy().tolist() + + all_concept_image_predictions = {} + for key_idx, concept_key_id in tqdm(enumerate(all_concept_image_predictions_keys)): + concept_image_preds = all_concept_image_predictions_values[key_idx] + concept_image_preds = [str(int(concept_image_pred_item)) for concept_image_pred_item in concept_image_preds] + all_concept_image_predictions[concept_key_id] = concept_image_preds + + print("All-gather concept-image prediction keys:", len(all_concept_image_predictions.keys())) + + return all_concept_image_predictions + +def train(model, data, image_data_sets, concept_data, kb_concept_data, feat_dim, epoch, optimizer, scaler, scheduler, args, tb_writer=None): + print("Training starts...") + # os.environ["WDS_EPOCH"] = str(epoch) + + concept_image_feats = load_image_data(args, model, image_data_sets) + print("Concept images length:", len(concept_image_feats.values())) + + concept_feats = load_concept_data(args, model, concept_data) + kb_concept_feats = load_concept_data(args, model, kb_concept_data) + print("Concepts length:", len(concept_feats.values())) + + concept_image_predictions = faiss_prediction(feat_dim, concept_feats, concept_image_feats) + kb_concept_image_predictions = faiss_prediction(feat_dim, kb_concept_feats, concept_image_feats) + + if args.distributed and args.aggregate: + world_size = dist.get_world_size() + rank = dist.get_rank() + + concept_feats = gather_text_feat_dict(concept_feats, rank, world_size, args) + kb_concept_feats = gather_text_feat_dict(concept_feats, rank, world_size, args) + + print("All-gather concepts length:", len(concept_feats.keys())) + print("All-gather KB concepts length:", len(kb_concept_feats.keys())) + + concept_image_feats = gather_image_feat_dict(concept_image_feats, rank, world_size, args) + print("All-gather concept images length:", len(concept_image_feats.keys())) + + concept_image_predictions = gather_prediction_dict(concept_image_predictions, rank, world_size, args) + kb_concept_image_predictions = gather_prediction_dict(kb_concept_image_predictions, rank, world_size, args) + + print("All-gather concept-image prediction keys:", len(concept_image_predictions.keys())) + print("All-gather KB concept-image prediction keys:", len(kb_concept_image_predictions.keys())) + + model.train() + + dataloader, sampler = data['train'].dataloader, data['train'].sampler + + loss_img = nn.CrossEntropyLoss() + loss_txt = nn.CrossEntropyLoss() + if args.gpu is not None: + loss_img = loss_img.cuda(args.gpu) + loss_txt = loss_txt.cuda(args.gpu) + + if args.distributed and sampler is not None: + sampler.set_epoch(epoch) + + num_batches_per_epoch = dataloader.num_batches + print(f"batches: {num_batches_per_epoch}, dataloader: {len(dataloader)}") + + end = time.time() + for i, batch in enumerate(dataloader): + step = num_batches_per_epoch * epoch + i + scheduler(step) + + optimizer.zero_grad() + + images, texts, concepts, kb_concepts, ids, kb_ids = batch + + all_data_concept_images = predict_concept_images(args, ids, concept_image_predictions, concept_image_feats, feat_dim) + all_kb_concept_images = predict_concept_images(args, kb_ids, kb_concept_image_predictions, concept_image_feats, feat_dim) + + all_concept_images = torch.cat([all_data_concept_images, 
all_kb_concept_images], dim=1) + + all_concepts = torch.cat([concepts, kb_concepts], dim=1) + print("All concepts length:", all_concepts.shape) + + if args.gpu is not None: + images = images.cuda(args.gpu, non_blocking=True) + texts = texts.cuda(args.gpu, non_blocking=True) + all_concepts = concepts.cuda(args.gpu, non_blocking=True) + + data_time = time.time() - end + + m = model.module if args.distributed or args.dp else model + + # with automatic mixed precision. + if args.precision == "amp": + with autocast(): + contrast_loss, concept_loss = get_loss(model, images, texts, all_concepts, all_concept_images, loss_img, loss_txt, args) + + concept_weight = (concept_loss / contrast_loss).detach() + concept_weight = 1 / (5 * concept_weight) if concept_weight != 0.0 else 0.0 + + contrast_weight = 1.0 - concept_weight + + total_loss = contrast_weight * contrast_loss + concept_weight * concept_loss + + scaler.scale(total_loss).backward() + scaler.step(optimizer) + scaler.update() + + else: + contrast_loss, concept_loss = get_loss(model, images, texts, concepts, all_concept_images, loss_img, loss_txt, args) + + concept_weight = (concept_loss / contrast_loss).detach() + concept_weight = 1 / (5 * concept_weight) if concept_weight != 0.0 else 0.0 + + contrast_weight = 1.0 - concept_weight + + total_loss = contrast_weight * contrast_loss + concept_weight * concept_loss + + total_loss.backward() + optimizer.step() + + # Note: we clamp to 4.6052 = ln(100), as in the original paper. + m.logit_scale.data = torch.clamp(m.logit_scale.data, 0, 4.6052) + + batch_time = time.time() - end + end = time.time() + + if is_master(args) == 0: + num_samples = i * len(images) * args.world_size + samples_per_epoch = dataloader.num_samples + percent_complete = 100.0 * i / num_batches_per_epoch + + logging.info( + f"Train Epoch: {epoch} [{num_samples}/{samples_per_epoch} ({percent_complete:.0f}%)]\t" + f"Loss: {total_loss.item():.6f}\t Contrastive_Loss: {contrast_loss.item():.6f}\t Concept_Loss: {concept_loss.item():.6f}\tData (t) {data_time:.3f}\tBatch (t) {batch_time:.3f}" + f"\tLR: {optimizer.param_groups[0]['lr']:5f}\tlogit_scale {m.logit_scale.data:.3f}" + ) + # save train loss / etc. 
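# ---- Editor's note (illustrative sketch, not part of the patch) ----------------------
# The weighting applied earlier in this loop rescales the auxiliary concept loss by a
# detached ratio, so that whatever its raw magnitude, its weighted contribution stays at
# roughly one fifth of the contrastive loss. A small self-contained check of that
# arithmetic (the numbers are made up; detach() matters in real training because the
# ratio itself must not receive gradients):
import torch

def weight_losses(contrast_loss, concept_loss):
    ratio = (concept_loss / contrast_loss).detach()
    concept_w = 1 / (5 * ratio) if ratio != 0.0 else 0.0
    contrast_w = 1.0 - concept_w
    return contrast_w * contrast_loss + concept_w * concept_loss

total = weight_losses(torch.tensor(2.0), torch.tensor(4.0))
# concept_w = 1 / (5 * (4.0 / 2.0)) = 0.1, so total = 0.9 * 2.0 + 0.1 * 4.0 = 2.2
# ---- end of editor's note -------------------------------------------------------------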
+ + timestep = epoch * num_batches_per_epoch + i + log_data = { + "loss": total_loss.item(), + "contrastive_loss": contrast_loss.item(), + "concept_loss": concept_loss.item(), + "data_time": data_time, + "batch_time": batch_time, + "scale": m.logit_scale.data.item(), + "lr": optimizer.param_groups[0]["lr"] + } + + for name, val in log_data.items(): + name = "train/" + name + if tb_writer is not None: + tb_writer.add_scalar(name, val, timestep) + if args.wandb: + wandb.log({name: val, 'step': timestep}) + + +def evaluate(model, data, epoch, args, tb_writer=None, steps=None): + if not is_master(args): + return + + logging.info(f"Begin to eval epoch: {epoch}...") + print((f"Begin to eval epoch: {epoch}...")) + + model.eval() + + dataloader = data['val'].dataloader + + loss_img = nn.CrossEntropyLoss() + loss_txt = nn.CrossEntropyLoss() + if args.gpu is not None: + loss_img = loss_img.cuda(args.gpu) + loss_txt = loss_txt.cuda(args.gpu) + + cumulative_loss = 0.0 + num_elements = 0.0 + all_image_features, all_text_features = [], [] + with torch.no_grad(): + for batch in tqdm(dataloader): + images, texts = batch + if args.gpu is not None: + images = images.cuda(args.gpu, non_blocking=True) + texts = texts.cuda(args.gpu, non_blocking=True) + + image_features, text_features, logit_scale = model(images, texts) + all_image_features.append(image_features) + all_text_features.append(text_features) + logit_scale = logit_scale.mean() + logits_per_image = logit_scale * image_features @ text_features.t() + logits_per_text = logits_per_image.t() + + ground_truth = torch.arange(len(images)).long() + if args.gpu is not None: + ground_truth = ground_truth.cuda(args.gpu, non_blocking=True) + total_loss = ( + loss_img(logits_per_image, ground_truth) + + loss_txt(logits_per_text, ground_truth) + ) / 2 + + batch_size = len(images) + cumulative_loss += total_loss * batch_size + num_elements += batch_size + + metrics = {} + loss = cumulative_loss / num_elements + metrics.update( + **{"val_loss": loss.item(), "epoch": epoch, "num_elements": num_elements} + ) + + logging.info( + f"Eval Epoch: {epoch} " + + "\t".join([f"{k}: {v:.4f}" for k, v in metrics.items()]) + ) + + return metrics \ No newline at end of file diff --git a/examples/fashionklip/training/train_epoch.py b/examples/fashionklip/training/train_epoch.py new file mode 100644 index 00000000..b167e84d --- /dev/null +++ b/examples/fashionklip/training/train_epoch.py @@ -0,0 +1,149 @@ +import os +import time +import json +import numpy as np + +import torch +import torch.nn as nn + +from torch.cuda.amp import autocast +import torch.distributed as dist + +import sys +import pdb +import wandb + +import logging + +def is_master(args): + return (not args.distributed) or args.gpu == 0 + +def get_loss(model, images, texts, loss_img, loss_txt, args): + image_features, text_features, logit_scale = model(images, texts) + logit_scale = logit_scale.mean() + if args.distributed and args.aggregate: + world_size = dist.get_world_size() + rank = dist.get_rank() + + # We gather tensors from all gpus to get more negatives to contrast with. 
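# ---- Editor's note (illustrative sketch, not part of the patch) ----------------------
# Both the loss assembled in this get_loss() and the evaluation loop shown earlier reduce
# to the standard symmetric CLIP objective: scaled similarity logits with arange targets,
# averaged over the image->text and text->image directions. A compact standalone version,
# with random features standing in for real model outputs:
import torch
import torch.nn.functional as F

def clip_symmetric_loss(image_features, text_features, logit_scale=100.0):
    image_features = F.normalize(image_features, dim=-1)
    text_features = F.normalize(text_features, dim=-1)
    logits_per_image = logit_scale * image_features @ text_features.t()
    targets = torch.arange(len(logits_per_image), device=logits_per_image.device)
    return (F.cross_entropy(logits_per_image, targets) +
            F.cross_entropy(logits_per_image.t(), targets)) / 2

loss = clip_symmetric_loss(torch.randn(8, 512), torch.randn(8, 512))
# ---- end of editor's note -------------------------------------------------------------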
+ gathered_image_features = [ + torch.zeros_like(image_features) for _ in range(world_size) + ] + gathered_text_features = [ + torch.zeros_like(text_features) for _ in range(world_size) + ] + dist.all_gather(gathered_image_features, image_features) + dist.all_gather(gathered_text_features, text_features) + + all_image_features = torch.cat( + [image_features] + + gathered_image_features[:rank] + + gathered_image_features[rank + 1 :] + ) + all_text_features = torch.cat( + [text_features] + + gathered_text_features[:rank] + + gathered_text_features[rank + 1 :] + ) + + # this is needed to send gradients back everywhere. + logits_per_image = logit_scale * all_image_features @ all_text_features.t() + logits_per_text = logits_per_image.t() + + else: + logits_per_image = logit_scale * image_features @ text_features.t() + logits_per_text = logit_scale * text_features @ image_features.t() + + ground_truth = torch.arange(len(logits_per_image)).long() + if args.gpu is not None: + ground_truth = ground_truth.cuda(args.gpu, non_blocking=True) + + total_loss = ( + loss_img(logits_per_image, ground_truth) + + loss_txt(logits_per_text, ground_truth) + ) / 2 + + return total_loss + + +def train_only(model, data, epoch, optimizer, scaler, scheduler, args, tb_writer=None): + print("Training starts...") + # os.environ["WDS_EPOCH"] = str(epoch) + + model.train() + + dataloader, sampler = data['train'].dataloader, data['train'].sampler + + loss_img = nn.CrossEntropyLoss() + loss_txt = nn.CrossEntropyLoss() + if args.gpu is not None: + loss_img = loss_img.cuda(args.gpu) + loss_txt = loss_txt.cuda(args.gpu) + + if args.distributed and sampler is not None: + sampler.set_epoch(epoch) + + num_batches_per_epoch = dataloader.num_batches + + end = time.time() + for i, batch in enumerate(dataloader): + step = num_batches_per_epoch * epoch + i + scheduler(step) + + optimizer.zero_grad() + + images, texts, concepts, kb_concepts, ids, kb_ids = batch + if args.gpu is not None: + images = images.cuda(args.gpu, non_blocking=True) + texts = texts.cuda(args.gpu, non_blocking=True) + + data_time = time.time() - end + + m = model.module if args.distributed or args.dp else model + + # with automatic mixed precision. + if args.precision == "amp": + with autocast(): + total_loss = get_loss(model, images, texts, loss_img, loss_txt, args) + scaler.scale(total_loss).backward() + scaler.step(optimizer) + scaler.update() + + else: + total_loss = get_loss(model, images, texts, loss_img, loss_txt, args) + total_loss.backward() + optimizer.step() + + # Note: we clamp to 4.6052 = ln(100), as in the original paper. + m.logit_scale.data = torch.clamp(m.logit_scale.data, 0, 4.6052) + + batch_time = time.time() - end + end = time.time() + + # if is_master(args) and (i % 100) == 0: + if is_master(args) == 0: + num_samples = i * len(images) * args.world_size + samples_per_epoch = dataloader.num_samples + percent_complete = 100.0 * i / num_batches_per_epoch + logging.info( + f"Train Epoch: {epoch} [{num_samples}/{samples_per_epoch} ({percent_complete:.0f}%)]\t" + f"Loss: {total_loss.item():.6f}\tData (t) {data_time:.3f}\tBatch (t) {batch_time:.3f}" + f"\tLR: {optimizer.param_groups[0]['lr']:5f}\tlogit_scale {m.logit_scale.data:.3f}" + ) + # save train loss / etc. 
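# ---- Editor's note (illustrative sketch, not part of the patch) ----------------------
# The mixed-precision branch above follows the usual torch.cuda.amp recipe (autocast
# forward, scaled backward, scaler-driven optimizer step), after which logit_scale is
# clamped to ln(100) ~= 4.6052 as in the original CLIP paper. Below is a minimal
# self-contained version of one such step, with a toy linear model in place of CLIP
# (it assumes a CUDA device, as the surrounding script does). One observation on the
# guard above: is_master() returns a boolean, so `if is_master(args) == 0:` logs only on
# non-master ranks; the commented-out line suggests `if is_master(args) and (i % 100) == 0:`
# was the intended condition.
import torch
from torch.cuda.amp import autocast, GradScaler

model = torch.nn.Linear(16, 4).cuda()
optimizer = torch.optim.AdamW(model.parameters(), lr=1e-4)
scaler = GradScaler()
x, y = torch.randn(8, 16).cuda(), torch.randint(0, 4, (8,)).cuda()

optimizer.zero_grad()
with autocast():
    loss = torch.nn.functional.cross_entropy(model(x), y)
scaler.scale(loss).backward()
scaler.step(optimizer)
scaler.update()

logit_scale = torch.nn.Parameter(torch.ones([]) * 2.659)  # CLIP init, ln(1 / 0.07)
logit_scale.data.clamp_(0, 4.6052)                         # keep temperature <= ln(100)
# ---- end of editor's note -------------------------------------------------------------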
+ + timestep = epoch * num_batches_per_epoch + i + log_data = { + "loss": total_loss.item(), + "data_time": data_time, + "batch_time": batch_time, + "scale": m.logit_scale.data.item(), + "lr": optimizer.param_groups[0]["lr"] + } + + for name, val in log_data.items(): + name = "train/" + name + if tb_writer is not None: + tb_writer.add_scalar(name, val, timestep) + if args.wandb: + wandb.log({name: val, 'step': timestep}) From a7826350233e09a185d9a96efa5edf19d1e6f0af Mon Sep 17 00:00:00 2001 From: deplay <1150696014@qq.com> Date: Fri, 9 Dec 2022 18:46:48 +0800 Subject: [PATCH 023/101] FashionKLIP codes --- examples/fashionklip/clip/clip.py | 1 - .../fashionklip/clip/model_configs/ViT-L-14.json | 12 ------------ 2 files changed, 13 deletions(-) delete mode 100644 examples/fashionklip/clip/model_configs/ViT-L-14.json diff --git a/examples/fashionklip/clip/clip.py b/examples/fashionklip/clip/clip.py index 02e28a76..348fd976 100644 --- a/examples/fashionklip/clip/clip.py +++ b/examples/fashionklip/clip/clip.py @@ -19,7 +19,6 @@ _MODELS = { "ViT-B/32": "https://openaipublic.azureedge.net/clip/models/40d365715913c9da98579312b702a82c18be219cc2a73407c4526f58eba950af/ViT-B-32.pt", - "ViT-L/14": "https://openaipublic.azureedge.net/clip/models/b8cca3fd41ae0c99ba7e8951adf17d267cdb84cd88be6f7c2e0eca1737a03836/ViT-L-14.pt", } diff --git a/examples/fashionklip/clip/model_configs/ViT-L-14.json b/examples/fashionklip/clip/model_configs/ViT-L-14.json deleted file mode 100644 index 9dee5c46..00000000 --- a/examples/fashionklip/clip/model_configs/ViT-L-14.json +++ /dev/null @@ -1,12 +0,0 @@ -{ - "embed_dim": 768, - "image_resolution": 224, - "vision_layers": 24, - "vision_width": 1024, - "vision_patch_size": 14, - "context_length": 77, - "vocab_size": 49408, - "transformer_width": 768, - "transformer_heads": 12, - "transformer_layers": 12 -} From baf687ea5956daf6c4c9e1489d21225e0601ffcc Mon Sep 17 00:00:00 2001 From: chywang Date: Sun, 11 Dec 2022 14:46:51 +0800 Subject: [PATCH 024/101] Update model.py --- easynlp/appzoo/information_extraction/model.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/easynlp/appzoo/information_extraction/model.py b/easynlp/appzoo/information_extraction/model.py index 5520aecd..cd8777f9 100644 --- a/easynlp/appzoo/information_extraction/model.py +++ b/easynlp/appzoo/information_extraction/model.py @@ -4,7 +4,7 @@ from dataclasses import dataclass from ..application import Application from ...modelzoo import AutoConfig, AutoModel -from transformers.file_utils import ModelOutput + class SinusoidalPositionEmbedding(nn.Module): """定义Sin-Cos位置Embedding @@ -126,4 +126,4 @@ def compute_loss(self, forward_outputs, label_ids, **kwargs): y_pred = y_pred.view(input_ids.shape[0] * self.ent_type_size, -1) loss = multilabel_categorical_crossentropy(y_pred, y_true) - return {'loss': loss} \ No newline at end of file + return {'loss': loss} From 36fca42784f62cdc2d94c0402d765df51c3849fb Mon Sep 17 00:00:00 2001 From: zjy <756628577@qq.com> Date: Mon, 12 Dec 2022 16:58:12 +0800 Subject: [PATCH 025/101] add ie data --- easynlp/appzoo/__init__.py | 16 ++++---- easynlp/appzoo/api.py | 18 ++++---- ...run_train_eval_predict_appzoo_cli_local.sh | 41 +++++++++++++++---- ...n_train_eval_predict_user_defined_local.sh | 36 +++++++++++++--- 4 files changed, 79 insertions(+), 32 deletions(-) diff --git a/easynlp/appzoo/__init__.py b/easynlp/appzoo/__init__.py index 25b5ef2b..9a027139 100644 --- a/easynlp/appzoo/__init__.py +++ b/easynlp/appzoo/__init__.py @@ -33,7 +33,7 @@ 
"video2text_generation.model": ['CLIPGPTFrameTextGeneration'], "sequence_generation.model": ["SequenceGeneration"], "machine_reading_comprehension.model": ["MachineReadingComprehension"], - # "information_extraction.model": ["InformationExtractionModel"], + "information_extraction.model": ["InformationExtractionModel"], "sequence_classification.evaluator": ['SequenceClassificationEvaluator', 'SequenceMultiLabelClassificationEvaluator'], "sequence_labeling.evaluator": ['SequenceLabelingEvaluator'], @@ -47,7 +47,7 @@ "video2text_generation.evaluator": ["FrameTextGenerationEvaluator"], "sequence_generation.evaluator": ["SequenceGenerationEvaluator"], "machine_reading_comprehension.evaluator": ["MachineReadingComprehensionEvaluator"], - # "information_extraction.evaluator": ["InformationExtractionEvaluator"], + "information_extraction.evaluator": ["InformationExtractionEvaluator"], "sequence_classification.predictor": ['SequenceClassificationPredictor', 'FewshotSequenceClassificationPredictor', 'CptFewshotSequenceClassificationPredictor'], "sequence_labeling.predictor": ['SequenceLabelingPredictor'], @@ -63,7 +63,7 @@ "video2text_generation.predictor": ['CLIPGPTFrameTextGenerationPredictor'], "sequence_generation.predictor": ['SequenceGenerationPredictor'], "machine_reading_comprehension.predictor": ["MachineReadingComprehensionPredictor"], - # "information_extraction.predictor": ["InformationExtractionPredictor"], + "information_extraction.predictor": ["InformationExtractionPredictor"], "geep_classification.data": ['GEEPClassificationDataset'], "language_modeling.data": ['LanguageModelingDataset'], @@ -77,7 +77,7 @@ "video2text_generation.data": ['CLIPGPTFrameTextDataset'], "sequence_generation.data": ['SequenceGenerationDataset'], "machine_reading_comprehension.data": ["MachineReadingComprehensionDataset"], - # "information_extraction.data": ["InformationExtractionDataset"], + "information_extraction.data": ["InformationExtractionDataset"], "dataset": ['BaseDataset', 'GeneralDataset', 'load_dataset', 'list_datasets'], "api": ['get_application_dataset', 'get_application_model', 'get_application_model_for_evaluation', 'get_application_evaluator', 'get_application_predictor'], } @@ -98,7 +98,7 @@ from .video2text_generation.model import CLIPGPTFrameTextGeneration from .sequence_generation.model import SequenceGeneration from .machine_reading_comprehension.model import MachineReadingComprehension - # from .information_extraction.model import InformationExtractionModel + from .information_extraction.model import InformationExtractionModel from .sequence_classification.evaluator import SequenceClassificationEvaluator, SequenceMultiLabelClassificationEvaluator from .sequence_labeling.evaluator import SequenceLabelingEvaluator @@ -112,7 +112,7 @@ from .video2text_generation.evaluator import FrameTextGenerationEvaluator from .sequence_generation.evaluator import SequenceGenerationEvaluator from .machine_reading_comprehension.evaluator import MachineReadingComprehensionEvaluator - # from .information_extraction.evaluator import InformationExtractionEvaluator + from .information_extraction.evaluator import InformationExtractionEvaluator from .sequence_classification.predictor import SequenceClassificationPredictor, FewshotSequenceClassificationPredictor, CptFewshotSequenceClassificationPredictor from .sequence_labeling.predictor import SequenceLabelingPredictor @@ -128,7 +128,7 @@ from .video2text_generation.predictor import CLIPGPTFrameTextGenerationPredictor from .sequence_generation.predictor import 
SequenceGenerationPredictor from .machine_reading_comprehension.predictor import MachineReadingComprehensionPredictor - # from .information_extraction.predictor import InformationExtractionPredictor + from .information_extraction.predictor import InformationExtractionPredictor from .sequence_classification.data import ClassificationDataset, DistillatoryClassificationDataset, FewshotSequenceClassificationDataset from .sequence_labeling.data import SequenceLabelingDataset, SequenceLabelingAutoDataset @@ -142,7 +142,7 @@ from .video2text_generation.data import CLIPGPTFrameTextDataset from .sequence_generation.data import SequenceGenerationDataset from .machine_reading_comprehension.data import MachineReadingComprehensionDataset - # from .information_extraction.data import InformationExtractionDataset + from .information_extraction.data import InformationExtractionDataset from .dataset import BaseDataset, GeneralDataset from .dataset import load_dataset, list_datasets diff --git a/easynlp/appzoo/api.py b/easynlp/appzoo/api.py index 8676040e..019a2a53 100644 --- a/easynlp/appzoo/api.py +++ b/easynlp/appzoo/api.py @@ -33,7 +33,7 @@ from easynlp.appzoo import CLIPGPTFrameTextGeneration from easynlp.appzoo.sequence_generation.model import SequenceGeneration from easynlp.appzoo import MachineReadingComprehension -# from easynlp.appzoo import InformationExtractionModel +from easynlp.appzoo import InformationExtractionModel from easynlp.fewshot_learning.fewshot_evaluator import PromptEvaluator as FewshotSequenceClassificationEvaluator from easynlp.fewshot_learning.fewshot_evaluator import CPTEvaluator as CptFewshotSequenceClassificationEvaluator @@ -48,7 +48,7 @@ from easynlp.appzoo import FrameTextGenerationEvaluator from easynlp.appzoo import SequenceGenerationEvaluator from easynlp.appzoo import MachineReadingComprehensionEvaluator -# from easynlp.appzoo import InformationExtractionEvaluator +from easynlp.appzoo import InformationExtractionEvaluator from easynlp.appzoo import SequenceClassificationPredictor, FewshotSequenceClassificationPredictor, CptFewshotSequenceClassificationPredictor from easynlp.appzoo import SequenceLabelingPredictor, FeatureVectorizationPredictor @@ -62,7 +62,7 @@ from easynlp.appzoo import CLIPGPTFrameTextGenerationPredictor from easynlp.appzoo import SequenceGenerationPredictor from easynlp.appzoo import MachineReadingComprehensionPredictor -# from easynlp.appzoo import InformationExtractionPredictor +from easynlp.appzoo import InformationExtractionPredictor from easynlp.appzoo import ClassificationDataset, DistillatoryClassificationDataset, FewshotSequenceClassificationDataset from easynlp.appzoo import SequenceLabelingDataset, LanguageModelingDataset @@ -75,7 +75,7 @@ from easynlp.appzoo import CLIPGPTImageTextDataset, VQGANGPTImageTextDataset from easynlp.appzoo import CLIPGPTFrameTextDataset from easynlp.appzoo import MachineReadingComprehensionDataset -# from easynlp.appzoo import InformationExtractionDataset +from easynlp.appzoo import InformationExtractionDataset from easynlp.core import PredictorManager, Trainer, DistillatoryTrainer from easynlp.utils.logger import logger @@ -119,7 +119,7 @@ 'video2text_generation': CLIPGPTFrameTextDataset, 'sequence_generation': SequenceGenerationDataset, 'machine_reading_comprehension': MachineReadingComprehensionDataset, - # 'information_extraction': InformationExtractionDataset + 'information_extraction': InformationExtractionDataset } ModelMapping = { @@ -154,7 +154,7 @@ 'video2text_generation': CLIPGPTFrameTextGeneration, 
'sequence_generation': SequenceGeneration, 'machine_reading_comprehension': MachineReadingComprehension, - # 'information_extraction': InformationExtractionModel + 'information_extraction': InformationExtractionModel } Eval_Model_Mapping = { @@ -183,7 +183,7 @@ 'video2text_generation': CLIPGPTFrameTextGeneration, 'sequence_generation': SequenceGeneration, 'machine_reading_comprehension': MachineReadingComprehension, - # 'information_extraction': InformationExtractionModel + 'information_extraction': InformationExtractionModel } Evaluator_Mapping = { @@ -212,7 +212,7 @@ 'video2text_generation': FrameTextGenerationEvaluator, 'sequence_generation': SequenceGenerationEvaluator, 'machine_reading_comprehension': MachineReadingComprehensionEvaluator, - # 'information_extraction': InformationExtractionEvaluator + 'information_extraction': InformationExtractionEvaluator } Predictor_Mapping = { @@ -243,7 +243,7 @@ 'video2text_generation': [CLIPGPTFrameTextGenerationPredictor, CLIPGPTFrameTextGeneration], 'sequence_generation': [SequenceGenerationPredictor, SequenceGeneration], 'machine_reading_comprehension': [MachineReadingComprehensionPredictor, MachineReadingComprehension], - # 'information_extraction': [InformationExtractionPredictor, InformationExtractionModel] + 'information_extraction': [InformationExtractionPredictor, InformationExtractionModel] } diff --git a/examples/information_extraction/run_train_eval_predict_appzoo_cli_local.sh b/examples/information_extraction/run_train_eval_predict_appzoo_cli_local.sh index f33bfd92..e2930167 100644 --- a/examples/information_extraction/run_train_eval_predict_appzoo_cli_local.sh +++ b/examples/information_extraction/run_train_eval_predict_appzoo_cli_local.sh @@ -1,14 +1,34 @@ export CUDA_VISIBLE_DEVICES=$1 +if [ ! -f ./train.tsv ]; then + wget http://atp-modelzoo-sh.oss-cn-shanghai.aliyuncs.com/release/tutorials/ie/train.tsv +fi + +if [ ! -f ./train_part.tsv ]; then + wget http://atp-modelzoo-sh.oss-cn-shanghai.aliyuncs.com/release/tutorials/ie/train_part.tsv +fi + +if [ ! -f ./dev.tsv ]; then + wget http://atp-modelzoo-sh.oss-cn-shanghai.aliyuncs.com/release/tutorials/ie/dev.tsv +fi + +if [ ! -f ./predict_input_EE.tsv ]; then + wget http://atp-modelzoo-sh.oss-cn-shanghai.aliyuncs.com/release/tutorials/ie/predict_input_EE.tsv +fi + +if [ ! 
-f ./predict_input_NER.tsv ]; then + wget http://atp-modelzoo-sh.oss-cn-shanghai.aliyuncs.com/release/tutorials/ie/predict_input_NER.tsv +fi + mode=$2 if [ "$mode" = "train" ]; then - easynlp + easynlp \ --mode $mode \ - --tables=train.tsv,dev.tsv \ + --tables=train_part.tsv,dev.tsv \ --input_schema=id:str:1,instruction:str:1,start:str:1,end:str:1,target:str:1 \ - --worker_gpu=4 \ + --worker_gpu=1 \ --app_name=information_extraction \ --sequence_length=512 \ --weight_decay=0.0 \ @@ -16,7 +36,7 @@ if [ "$mode" = "train" ]; then --checkpoint_dir=./information_extraction_model/ \ --data_threads=5 \ --user_defined_parameters='pretrain_model_name_or_path=hfl/macbert-large-zh' \ - --save_checkpoint_steps=500 \ + --save_checkpoint_steps=50 \ --gradient_accumulation_steps=8 \ --epoch_num=3 \ --learning_rate=2e-05 \ @@ -24,11 +44,11 @@ if [ "$mode" = "train" ]; then elif [ "$mode" = "evaluate" ]; then - easynlp + easynlp \ --mode $mode \ --tables=dev.tsv \ --input_schema=id:str:1,instruction:str:1,start:str:1,end:str:1,target:str:1 \ - --worker_gpu=4 \ + --worker_gpu=1 \ --app_name=information_extraction \ --sequence_length=512 \ --weight_decay=0.0 \ @@ -38,12 +58,12 @@ elif [ "$mode" = "evaluate" ]; then elif [ "$mode" = "predict" ]; then - easynlp + easynlp \ --mode=$mode \ --tables=predict_input_EE.tsv,predict_output_EE.tsv \ --input_schema=id:str:1,scheme:str:1,content:str:1 \ --output_schema=id,content,q_and_a \ - --worker_gpu=4 \ + --worker_gpu=1 \ --app_name=information_extraction \ --sequence_length=512 \ --weight_decay=0.0 \ @@ -51,4 +71,7 @@ elif [ "$mode" = "predict" ]; then --checkpoint_dir=./information_extraction_model/ \ --data_threads=5 \ --user_defined_parameters='task=EE' -fi \ No newline at end of file +fi + +#mode=train处,目前使用的是部分训练数据,如果需要使用全部训练数据,请将train_part.tsv修改为train.tsv,save_checkpoint_steps=50修改为save_checkpoint_steps=500 +#mode=predict出,目前验证的是EE任务的一个例子,如果需要验证NER任务,请将predict_input_EE.tsv修改为predict_input_NER.tsv,将predict_output_EE.tsv修改为predict_output_NER.tsv,将task=EE修改为task=NER \ No newline at end of file diff --git a/examples/information_extraction/run_train_eval_predict_user_defined_local.sh b/examples/information_extraction/run_train_eval_predict_user_defined_local.sh index 5dd2c5ff..198edebf 100644 --- a/examples/information_extraction/run_train_eval_predict_user_defined_local.sh +++ b/examples/information_extraction/run_train_eval_predict_user_defined_local.sh @@ -1,7 +1,27 @@ export CUDA_VISIBLE_DEVICES=$1 +if [ ! -f ./train.tsv ]; then + wget http://atp-modelzoo-sh.oss-cn-shanghai.aliyuncs.com/release/tutorials/ie/train.tsv +fi + +if [ ! -f ./train_part.tsv ]; then + wget http://atp-modelzoo-sh.oss-cn-shanghai.aliyuncs.com/release/tutorials/ie/train_part.tsv +fi + +if [ ! -f ./dev.tsv ]; then + wget http://atp-modelzoo-sh.oss-cn-shanghai.aliyuncs.com/release/tutorials/ie/dev.tsv +fi + +if [ ! -f ./predict_input_EE.tsv ]; then + wget http://atp-modelzoo-sh.oss-cn-shanghai.aliyuncs.com/release/tutorials/ie/predict_input_EE.tsv +fi + +if [ ! 
-f ./predict_input_NER.tsv ]; then + wget http://atp-modelzoo-sh.oss-cn-shanghai.aliyuncs.com/release/tutorials/ie/predict_input_NER.tsv +fi + MASTER_ADDR=localhost -MASTER_PORT=6018 +MASTER_PORT=6022 GPUS_PER_NODE=4 NNODES=1 NODE_RANK=0 @@ -14,7 +34,7 @@ if [ "$mode" = "train" ]; then python -m torch.distributed.launch $DISTRIBUTED_ARGS main.py \ --mode $mode \ - --tables=train.tsv,dev.tsv \ + --tables=train_part.tsv,dev.tsv \ --input_schema=id:str:1,instruction:str:1,start:str:1,end:str:1,target:str:1 \ --worker_gpu=4 \ --app_name=information_extraction \ @@ -24,7 +44,7 @@ if [ "$mode" = "train" ]; then --checkpoint_dir=./information_extraction_model/ \ --data_threads=5 \ --user_defined_parameters='pretrain_model_name_or_path=hfl/macbert-large-zh' \ - --save_checkpoint_steps=500 \ + --save_checkpoint_steps=50 \ --gradient_accumulation_steps=8 \ --epoch_num=3 \ --learning_rate=2e-05 \ @@ -48,7 +68,7 @@ elif [ "$mode" = "predict" ]; then python -m torch.distributed.launch $DISTRIBUTED_ARGS main.py \ --mode=$mode \ - --tables=predict_input_EE.tsv,predict_output_EE.tsv \ + --tables=predict_input_NER.tsv,predict_output_NER.tsv \ --input_schema=id:str:1,scheme:str:1,content:str:1 \ --output_schema=id,content,q_and_a \ --worker_gpu=4 \ @@ -58,5 +78,9 @@ elif [ "$mode" = "predict" ]; then --micro_batch_size=4 \ --checkpoint_dir=./information_extraction_model/ \ --data_threads=5 \ - --user_defined_parameters='task=EE' -fi \ No newline at end of file + --user_defined_parameters='task=NER' + +fi + +#mode=train处,目前使用的是部分训练数据,如果需要使用全部训练数据,请将train_part.tsv修改为train.tsv,save_checkpoint_steps=50修改为save_checkpoint_steps=500 +#mode=predict出,目前验证的是EE任务的一个例子,如果需要验证NER任务,请将predict_input_EE.tsv修改为predict_input_NER.tsv,将predict_output_EE.tsv修改为predict_output_NER.tsv,将task=EE修改为task=NER \ No newline at end of file From 0fbcd598eca562c5fde4d8d5ced1021cd067255b Mon Sep 17 00:00:00 2001 From: chywang Date: Mon, 12 Dec 2022 16:58:49 +0800 Subject: [PATCH 026/101] Update requirements_diffusion.txt --- requirements_diffusion.txt | 2 ++ 1 file changed, 2 insertions(+) diff --git a/requirements_diffusion.txt b/requirements_diffusion.txt index c8348eb0..446d7469 100644 --- a/requirements_diffusion.txt +++ b/requirements_diffusion.txt @@ -1 +1,3 @@ pytorch_lightning +omegaconf +tokenizers==0.9.4 From fecf989d8e79a917d4e84cd9b7b9f1fa58d2293e Mon Sep 17 00:00:00 2001 From: chywang Date: Mon, 12 Dec 2022 17:44:33 +0800 Subject: [PATCH 027/101] Update pipeline_example.py --- examples/latent_diffusion/pipeline_example.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/examples/latent_diffusion/pipeline_example.py b/examples/latent_diffusion/pipeline_example.py index 2bd868e7..68a0e1db 100644 --- a/examples/latent_diffusion/pipeline_example.py +++ b/examples/latent_diffusion/pipeline_example.py @@ -28,7 +28,7 @@ def image_to_base64(img): # # poem # generator = pipeline('chinese-ldm-poem',pipeline_params={"n_samples":1,"do_sr":True}) -# data = ['一行白鹭上青天'] +# data = ['停车坐爱枫林晚,霜叶红于二月花'] # # anime # generator = pipeline('chinese-ldm-anime',pipeline_params={"n_samples":1,"do_sr":True}) @@ -40,7 +40,7 @@ def image_to_base64(img): # food generator = pipeline('chinese-ldm-food',pipeline_params={"n_samples":1,"do_sr":True}) -data = ['青椒炒牛肉'] +data = ['小炒黄牛肉'] # 生成结果 result=generator(data) @@ -53,4 +53,4 @@ def image_to_base64(img): pil_image.save(os.path.join('./',one_prompt['text']+f"_{idx:04}.png")) # 生成base64 # b64_image=image_to_base64(pil_image) - # print(b64_image) \ No newline at end of file + # 
print(b64_image) From fff7fcdb116cc2d56a45c1465a785e138d28aee3 Mon Sep 17 00:00:00 2001 From: chywang Date: Mon, 12 Dec 2022 18:39:05 +0800 Subject: [PATCH 028/101] Update pipeline_example.py --- examples/latent_diffusion/pipeline_example.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/latent_diffusion/pipeline_example.py b/examples/latent_diffusion/pipeline_example.py index 68a0e1db..4c84594b 100644 --- a/examples/latent_diffusion/pipeline_example.py +++ b/examples/latent_diffusion/pipeline_example.py @@ -28,7 +28,7 @@ def image_to_base64(img): # # poem # generator = pipeline('chinese-ldm-poem',pipeline_params={"n_samples":1,"do_sr":True}) -# data = ['停车坐爱枫林晚,霜叶红于二月花'] +# data = ['远上寒山石径斜,白云生处有人家'] # # anime # generator = pipeline('chinese-ldm-anime',pipeline_params={"n_samples":1,"do_sr":True}) From bce0437c22437f72b7c962802c6dbc2346e839d9 Mon Sep 17 00:00:00 2001 From: chywang Date: Mon, 12 Dec 2022 19:50:10 +0800 Subject: [PATCH 029/101] Update run_train_eval_predict_appzoo_cli_local.sh --- .../run_train_eval_predict_appzoo_cli_local.sh | 16 ++++++++++++++-- 1 file changed, 14 insertions(+), 2 deletions(-) diff --git a/examples/information_extraction/run_train_eval_predict_appzoo_cli_local.sh b/examples/information_extraction/run_train_eval_predict_appzoo_cli_local.sh index e2930167..a20b9401 100644 --- a/examples/information_extraction/run_train_eval_predict_appzoo_cli_local.sh +++ b/examples/information_extraction/run_train_eval_predict_appzoo_cli_local.sh @@ -66,12 +66,24 @@ elif [ "$mode" = "predict" ]; then --worker_gpu=1 \ --app_name=information_extraction \ --sequence_length=512 \ - --weight_decay=0.0 \ --micro_batch_size=4 \ --checkpoint_dir=./information_extraction_model/ \ --data_threads=5 \ --user_defined_parameters='task=EE' + + easynlp \ + --mode=$mode \ + --tables=predict_input_NER.tsv,predict_output_NER.tsv \ + --input_schema=id:str:1,scheme:str:1,content:str:1 \ + --output_schema=id,content,q_and_a \ + --worker_gpu=1 \ + --app_name=information_extraction \ + --sequence_length=512 \ + --micro_batch_size=4 \ + --checkpoint_dir=./information_extraction_model/ \ + --data_threads=5 \ + --user_defined_parameters='task=NER' fi #mode=train处,目前使用的是部分训练数据,如果需要使用全部训练数据,请将train_part.tsv修改为train.tsv,save_checkpoint_steps=50修改为save_checkpoint_steps=500 -#mode=predict出,目前验证的是EE任务的一个例子,如果需要验证NER任务,请将predict_input_EE.tsv修改为predict_input_NER.tsv,将predict_output_EE.tsv修改为predict_output_NER.tsv,将task=EE修改为task=NER \ No newline at end of file +#mode=predict处,目前验证的是EE任务的一个例子,如果需要验证NER任务,请将predict_input_EE.tsv修改为predict_input_NER.tsv,将predict_output_EE.tsv修改为predict_output_NER.tsv,将task=EE修改为task=NER From 46f180a41612c662456f480d2d67a9deebace68c Mon Sep 17 00:00:00 2001 From: chywang Date: Mon, 12 Dec 2022 19:50:58 +0800 Subject: [PATCH 030/101] Update run_train_eval_predict_appzoo_cli_local.sh --- .../run_train_eval_predict_appzoo_cli_local.sh | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/examples/information_extraction/run_train_eval_predict_appzoo_cli_local.sh b/examples/information_extraction/run_train_eval_predict_appzoo_cli_local.sh index a20b9401..a31d5318 100644 --- a/examples/information_extraction/run_train_eval_predict_appzoo_cli_local.sh +++ b/examples/information_extraction/run_train_eval_predict_appzoo_cli_local.sh @@ -60,7 +60,8 @@ elif [ "$mode" = "predict" ]; then easynlp \ --mode=$mode \ - --tables=predict_input_EE.tsv,predict_output_EE.tsv \ + --tables=predict_input_EE.tsv \ + --outputs=predict_output_EE.tsv \ 
--input_schema=id:str:1,scheme:str:1,content:str:1 \ --output_schema=id,content,q_and_a \ --worker_gpu=1 \ @@ -73,7 +74,8 @@ elif [ "$mode" = "predict" ]; then easynlp \ --mode=$mode \ - --tables=predict_input_NER.tsv,predict_output_NER.tsv \ + --tables=predict_input_NER.tsv \ + --outputs=predict_output_NER.tsv \ --input_schema=id:str:1,scheme:str:1,content:str:1 \ --output_schema=id,content,q_and_a \ --worker_gpu=1 \ From 32db1ae5da4e24d0a440e4996ec98ba12cdea4b7 Mon Sep 17 00:00:00 2001 From: chywang Date: Mon, 12 Dec 2022 19:51:14 +0800 Subject: [PATCH 031/101] Update run_train_eval_predict_appzoo_cli_local.sh --- .../run_train_eval_predict_appzoo_cli_local.sh | 1 - 1 file changed, 1 deletion(-) diff --git a/examples/information_extraction/run_train_eval_predict_appzoo_cli_local.sh b/examples/information_extraction/run_train_eval_predict_appzoo_cli_local.sh index a31d5318..c89a77f5 100644 --- a/examples/information_extraction/run_train_eval_predict_appzoo_cli_local.sh +++ b/examples/information_extraction/run_train_eval_predict_appzoo_cli_local.sh @@ -88,4 +88,3 @@ elif [ "$mode" = "predict" ]; then fi #mode=train处,目前使用的是部分训练数据,如果需要使用全部训练数据,请将train_part.tsv修改为train.tsv,save_checkpoint_steps=50修改为save_checkpoint_steps=500 -#mode=predict处,目前验证的是EE任务的一个例子,如果需要验证NER任务,请将predict_input_EE.tsv修改为predict_input_NER.tsv,将predict_output_EE.tsv修改为predict_output_NER.tsv,将task=EE修改为task=NER From 694ded1a2d40d563f9587e7afc0809a93d5cc094 Mon Sep 17 00:00:00 2001 From: chywang Date: Tue, 13 Dec 2022 10:57:05 +0800 Subject: [PATCH 032/101] Update model.py --- easynlp/appzoo/latent_diffusion/model.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/easynlp/appzoo/latent_diffusion/model.py b/easynlp/appzoo/latent_diffusion/model.py index 31514b3f..5cfbba5f 100644 --- a/easynlp/appzoo/latent_diffusion/model.py +++ b/easynlp/appzoo/latent_diffusion/model.py @@ -32,7 +32,6 @@ from ...modelzoo.models.latent_diffusion.ddpm import LatentDiffusionModel from ...modelzoo.models.latent_diffusion.autoencoder import AutoencoderKL from ...modelzoo.models.latent_diffusion.wukong import FrozenWukongCLIPTextEmbedder - # from ...modelzoo.models.latent_diffusion.ddim import DDIMSampler from ...modelzoo.models.latent_diffusion.plms import PLMSSampler from ...modelzoo.models.latent_diffusion.RRDBNet_arch import ESRGAN @@ -132,7 +131,6 @@ def from_pretrained(self, pretrained_model_name_or_path,args, user_defined_param def __init__(self, pretrained_model_name_or_path=None,args=None,user_defined_parameters=None): super().__init__() - self.hello='world' print("出现如下异常 %s"%ex) From f0ea271966e54bae241994056c3ee108b2c66bce Mon Sep 17 00:00:00 2001 From: chywang Date: Tue, 13 Dec 2022 10:58:31 +0800 Subject: [PATCH 033/101] Update model.py --- easynlp/appzoo/latent_diffusion/model.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/easynlp/appzoo/latent_diffusion/model.py b/easynlp/appzoo/latent_diffusion/model.py index 5cfbba5f..d7ed6b91 100644 --- a/easynlp/appzoo/latent_diffusion/model.py +++ b/easynlp/appzoo/latent_diffusion/model.py @@ -12,6 +12,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
+ from ast import literal_eval import numpy as np import torch @@ -22,7 +23,6 @@ from typing import List, Optional import json from tqdm import tqdm, trange -from einops import rearrange from PIL import Image from ...utils import losses, get_pretrain_model_path, get_args from ..application import Application From cd754586a5be25508ffcc819ce761253145a7d25 Mon Sep 17 00:00:00 2001 From: zjy <756628577@qq.com> Date: Tue, 13 Dec 2022 14:50:51 +0800 Subject: [PATCH 034/101] modify ie --- .../information_extraction/evaluator.py | 26 +++++++++--------- .../information_extraction/predictor.py | 6 ++--- easynlp/cli.py | 8 ++++++ examples/information_extraction/main.py | 26 +++++++++--------- ...run_train_eval_predict_appzoo_cli_local.sh | 27 ++++++++++++++----- ...n_train_eval_predict_user_defined_local.sh | 21 ++++++++++++--- 6 files changed, 76 insertions(+), 38 deletions(-) diff --git a/easynlp/appzoo/information_extraction/evaluator.py b/easynlp/appzoo/information_extraction/evaluator.py index eaed63fb..d26cb8b8 100644 --- a/easynlp/appzoo/information_extraction/evaluator.py +++ b/easynlp/appzoo/information_extraction/evaluator.py @@ -4,18 +4,18 @@ from ...core.evaluator import Evaluator def fush_multi_answer(has_answer, new_answer): - # 对于某个id测试集,出现多个example时(例如同一个测试样本使用了多个模板而生成了多个example),此时将预测的topk结果进行合并 - # has为已经合并的结果,new为当前新产生的结果, - # has格式为 {'ans': {'prob': float(prob[index_ids[ei]]), 'pos': (s, e)}, ...} - # new {'ans': {'prob': float(prob[index_ids[ei]]), 'pos': (s, e)}, ...} - # print('has_answer=', has_answer) - for ans, value in new_answer.items(): - if ans not in has_answer.keys(): - has_answer[ans] = value - else: - has_answer[ans]['prob'] += value['prob'] - has_answer[ans]['pos'].extend(value['pos']) - return has_answer + # 对于某个id测试集,出现多个example时(例如同一个测试样本使用了多个模板而生成了多个example),此时将预测的topk结果进行合并 + # has为已经合并的结果,new为当前新产生的结果, + # has格式为 {'ans': {'prob': float(prob[index_ids[ei]]), 'pos': (s, e)}, ...} + # new {'ans': {'prob': float(prob[index_ids[ei]]), 'pos': (s, e)}, ...} + # print('has_answer=', has_answer) + for ans, value in new_answer.items(): + if ans not in has_answer.keys(): + has_answer[ans] = value + else: + has_answer[ans]['prob'] += value['prob'] + has_answer[ans]['pos'].extend(value['pos']) + return has_answer def get_predict_result(batchs, probs, indices, max_seq_length): probs = probs.squeeze(1) # topk结果的概率 @@ -66,7 +66,7 @@ class InformationExtractionEvaluator(Evaluator): def __init__(self, valid_dataset, **kwargs): super().__init__(valid_dataset, **kwargs) - self.max_seq_length = kwargs.get("sequence_length") + self.max_seq_length = kwargs["few_shot_anchor_args"].sequence_length def _compute(self, label, pred, hit): if label == 0: diff --git a/easynlp/appzoo/information_extraction/predictor.py b/easynlp/appzoo/information_extraction/predictor.py index 635dff50..5307bc23 100644 --- a/easynlp/appzoo/information_extraction/predictor.py +++ b/easynlp/appzoo/information_extraction/predictor.py @@ -21,9 +21,9 @@ def __init__(self, model_dir, model_cls, *args, **kwargs): self.MUTEX = Lock() - self.task = kwargs.pop("task") - self.max_seq_length = kwargs.pop("max_seq_length") - self.input_schema = kwargs.pop("input_schema") + self.task = kwargs["user_defined_parameters"]["task"] + self.max_seq_length = kwargs.get("sequence_length") + self.input_schema = kwargs.get("input_schema") self.column_names = [t.split(":")[0] for t in self.input_schema.split(",")] self.tokenizer = BertTokenizerFast.from_pretrained(model_dir) diff --git a/easynlp/cli.py b/easynlp/cli.py index 
196eafcc..99501f68 100644 --- a/easynlp/cli.py +++ b/easynlp/cli.py @@ -155,6 +155,14 @@ def main(): cmd.append(str(args.micro_batch_size)) cmd.append('--app_name') cmd.append(args.app_name) + cmd.append('--worker_gpu') + cmd.append(str(args.worker_gpu)) + cmd.append('--weight_decay') + cmd.append(str(args.weight_decay)) + cmd.append('--data_threads') + cmd.append(str(args.data_threads)) + cmd.append('--gradient_accumulation_steps') + cmd.append(str(args.gradient_accumulation_steps)) if args.buckets is not None: cmd.append('--buckets') cmd.append(args.buckets) diff --git a/examples/information_extraction/main.py b/examples/information_extraction/main.py index f94fff9d..26dd4004 100644 --- a/examples/information_extraction/main.py +++ b/examples/information_extraction/main.py @@ -2,32 +2,30 @@ import torch from easynlp.core import Trainer from easynlp.core import PredictorManager -from easynlp.utils import initialize_easynlp +from easynlp.utils import initialize_easynlp, get_args from easynlp.appzoo import InformationExtractionDataset from easynlp.utils.global_vars import parse_user_defined_parameters from easynlp.appzoo import get_application_model, get_application_evaluator, get_application_predictor, get_application_model_for_evaluation if __name__ == "__main__": - args = initialize_easynlp() user_defined_parameters = parse_user_defined_parameters(args.user_defined_parameters) if args.mode == "predict": - args.task = user_defined_parameters.get('task') - predictor = get_application_predictor( app_name=args.app_name, model_dir=args.checkpoint_dir, input_schema=args.input_schema, - max_seq_length=args.sequence_length, - task=args.task, + sequence_length=args.sequence_length, + output_file=args.outputs, user_defined_parameters=user_defined_parameters) predictor_manager = PredictorManager( predictor=predictor, input_file=args.tables.split(",")[0], + skip_first_line=args.skip_first_line, input_schema=args.input_schema, - output_file=args.tables.split(",")[-1], + output_file=args.outputs, output_schema=args.output_schema, append_cols=args.append_cols, batch_size=args.micro_batch_size @@ -54,10 +52,12 @@ model = get_application_model(app_name=args.app_name, pretrained_model_name_or_path=args.pretrained_model_name_or_path, user_defined_parameters=user_defined_parameters) - - trainer = Trainer(model=model, train_dataset=train_dataset, evaluator=get_application_evaluator(app_name=args.app_name, valid_dataset=valid_dataset, - user_defined_parameters=user_defined_parameters, - sequence_length=args.sequence_length)) + + trainer = Trainer(model=model, train_dataset=train_dataset, user_defined_parameters=user_defined_parameters, + evaluator=get_application_evaluator(app_name=args.app_name, valid_dataset=valid_dataset, + user_defined_parameters=user_defined_parameters, + few_shot_anchor_args=args, + eval_batch_size=args.micro_batch_size)) trainer.train() @@ -72,10 +72,10 @@ model = get_application_model_for_evaluation(app_name=args.app_name, pretrained_model_name_or_path=args.checkpoint_dir, - user_defined_parameters=[user_defined_parameters]) + user_defined_parameters=user_defined_parameters) evaluator = get_application_evaluator(app_name=args.app_name, valid_dataset=valid_dataset, user_defined_parameters=user_defined_parameters, - sequence_length=args.sequence_length) + few_shot_anchor_args=args, eval_batch_size=args.micro_batch_size) if args.n_gpu > 0: model.to(torch.cuda.current_device()) diff --git a/examples/information_extraction/run_train_eval_predict_appzoo_cli_local.sh 
b/examples/information_extraction/run_train_eval_predict_appzoo_cli_local.sh index e2930167..7e2dfa25 100644 --- a/examples/information_extraction/run_train_eval_predict_appzoo_cli_local.sh +++ b/examples/information_extraction/run_train_eval_predict_appzoo_cli_local.sh @@ -28,7 +28,7 @@ if [ "$mode" = "train" ]; then --mode $mode \ --tables=train_part.tsv,dev.tsv \ --input_schema=id:str:1,instruction:str:1,start:str:1,end:str:1,target:str:1 \ - --worker_gpu=1 \ + --worker_gpu=4 \ --app_name=information_extraction \ --sequence_length=512 \ --weight_decay=0.0 \ @@ -48,7 +48,7 @@ elif [ "$mode" = "evaluate" ]; then --mode $mode \ --tables=dev.tsv \ --input_schema=id:str:1,instruction:str:1,start:str:1,end:str:1,target:str:1 \ - --worker_gpu=1 \ + --worker_gpu=4 \ --app_name=information_extraction \ --sequence_length=512 \ --weight_decay=0.0 \ @@ -60,10 +60,11 @@ elif [ "$mode" = "predict" ]; then easynlp \ --mode=$mode \ - --tables=predict_input_EE.tsv,predict_output_EE.tsv \ + --tables=predict_input_EE.tsv \ --input_schema=id:str:1,scheme:str:1,content:str:1 \ + --outputs=predict_output_EE.tsv \ --output_schema=id,content,q_and_a \ - --worker_gpu=1 \ + --worker_gpu=4 \ --app_name=information_extraction \ --sequence_length=512 \ --weight_decay=0.0 \ @@ -71,7 +72,21 @@ elif [ "$mode" = "predict" ]; then --checkpoint_dir=./information_extraction_model/ \ --data_threads=5 \ --user_defined_parameters='task=EE' + + easynlp \ + --mode=$mode \ + --tables=predict_input_NER.tsv \ + --input_schema=id:str:1,scheme:str:1,content:str:1 \ + --outputs=predict_output_NER.tsv \ + --output_schema=id,content,q_and_a \ + --worker_gpu=4 \ + --app_name=information_extraction \ + --sequence_length=512 \ + --weight_decay=0.0 \ + --micro_batch_size=4 \ + --checkpoint_dir=./information_extraction_model/ \ + --data_threads=5 \ + --user_defined_parameters='task=NER' fi -#mode=train处,目前使用的是部分训练数据,如果需要使用全部训练数据,请将train_part.tsv修改为train.tsv,save_checkpoint_steps=50修改为save_checkpoint_steps=500 -#mode=predict出,目前验证的是EE任务的一个例子,如果需要验证NER任务,请将predict_input_EE.tsv修改为predict_input_NER.tsv,将predict_output_EE.tsv修改为predict_output_NER.tsv,将task=EE修改为task=NER \ No newline at end of file +#mode=train处,目前使用的是部分训练数据,如果需要使用全部训练数据,请将train_part.tsv修改为train.tsv,save_checkpoint_steps=50修改为save_checkpoint_steps=500 \ No newline at end of file diff --git a/examples/information_extraction/run_train_eval_predict_user_defined_local.sh b/examples/information_extraction/run_train_eval_predict_user_defined_local.sh index 198edebf..7fcf6bd7 100644 --- a/examples/information_extraction/run_train_eval_predict_user_defined_local.sh +++ b/examples/information_extraction/run_train_eval_predict_user_defined_local.sh @@ -68,7 +68,8 @@ elif [ "$mode" = "predict" ]; then python -m torch.distributed.launch $DISTRIBUTED_ARGS main.py \ --mode=$mode \ - --tables=predict_input_NER.tsv,predict_output_NER.tsv \ + --tables=predict_input_NER.tsv \ + --outputs=predict_output_NER.tsv \ --input_schema=id:str:1,scheme:str:1,content:str:1 \ --output_schema=id,content,q_and_a \ --worker_gpu=4 \ @@ -80,7 +81,21 @@ elif [ "$mode" = "predict" ]; then --data_threads=5 \ --user_defined_parameters='task=NER' + python -m torch.distributed.launch $DISTRIBUTED_ARGS main.py \ + --mode=$mode \ + --tables=predict_input_EE.tsv \ + --outputs=predict_output_EE.tsv \ + --input_schema=id:str:1,scheme:str:1,content:str:1 \ + --output_schema=id,content,q_and_a \ + --worker_gpu=4 \ + --app_name=information_extraction \ + --sequence_length=512 \ + --weight_decay=0.0 \ + --micro_batch_size=4 \ + 
--checkpoint_dir=./information_extraction_model/ \ + --data_threads=5 \ + --user_defined_parameters='task=EE' + fi -#mode=train处,目前使用的是部分训练数据,如果需要使用全部训练数据,请将train_part.tsv修改为train.tsv,save_checkpoint_steps=50修改为save_checkpoint_steps=500 -#mode=predict出,目前验证的是EE任务的一个例子,如果需要验证NER任务,请将predict_input_EE.tsv修改为predict_input_NER.tsv,将predict_output_EE.tsv修改为predict_output_NER.tsv,将task=EE修改为task=NER \ No newline at end of file +#mode=train处,目前使用的是部分训练数据,如果需要使用全部训练数据,请将train_part.tsv修改为train.tsv,save_checkpoint_steps=50修改为save_checkpoint_steps=500 \ No newline at end of file From 0fc1c7e7ab0defae47435d7a6bf05001d2a2cfaa Mon Sep 17 00:00:00 2001 From: zjy <756628577@qq.com> Date: Tue, 13 Dec 2022 15:34:22 +0800 Subject: [PATCH 035/101] =?UTF-8?q?easynlp.appzoo.api.py=E4=B8=AD=E7=9A=84?= =?UTF-8?q?get=5Fapplication=5Fevaluator()=E5=87=BD=E6=95=B0=E7=BC=BA?= =?UTF-8?q?=E4=B9=8Fsequence=5Flength=E7=9A=84=E5=BC=95=E5=85=A5=E3=80=82?= =?UTF-8?q?=E4=B8=BA=E4=BA=86=E4=B8=80=E8=87=B4=E6=80=A7=EF=BC=8Cinformati?= =?UTF-8?q?on=5Fextraction=E7=9A=84evaluator=E9=80=9A=E8=BF=87few=5Fshot?= =?UTF-8?q?=5Fanchor=5Fargs=E5=BC=95=E5=85=A5sequence=5Flength=EF=BC=9Beas?= =?UTF-8?q?ynlp.cli.py=E4=B8=AD=E6=B2=A1=E6=9C=89=E4=BD=BF=E7=94=A8?= =?UTF-8?q?=E7=94=A8=E6=88=B7=E8=87=AA=E5=AE=9A=E4=B9=89=E7=9A=84weight=5F?= =?UTF-8?q?decay=E3=80=81data=5Fthreads=E3=80=81gradient=5Faccumulation=5F?= =?UTF-8?q?steps=E7=AD=89?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- easynlp/appzoo/information_extraction/evaluator.py | 1 + 1 file changed, 1 insertion(+) diff --git a/easynlp/appzoo/information_extraction/evaluator.py b/easynlp/appzoo/information_extraction/evaluator.py index d26cb8b8..c77d405f 100644 --- a/easynlp/appzoo/information_extraction/evaluator.py +++ b/easynlp/appzoo/information_extraction/evaluator.py @@ -67,6 +67,7 @@ def __init__(self, valid_dataset, **kwargs): super().__init__(valid_dataset, **kwargs) self.max_seq_length = kwargs["few_shot_anchor_args"].sequence_length + #easynlp.appzoo.api.py中的get_application_evaluator()函数缺乏sequence_length的引入。为了一致性,information_extraction的evaluator通过few_shot_anchor_args引入sequence_length def _compute(self, label, pred, hit): if label == 0: From 65eb56e47c4ed56891ff9de0779110182e13d333 Mon Sep 17 00:00:00 2001 From: chywang Date: Tue, 13 Dec 2022 15:40:49 +0800 Subject: [PATCH 036/101] Create requirements_video.txt --- requirements_video.txt | 1 + 1 file changed, 1 insertion(+) create mode 100644 requirements_video.txt diff --git a/requirements_video.txt b/requirements_video.txt new file mode 100644 index 00000000..308106f0 --- /dev/null +++ b/requirements_video.txt @@ -0,0 +1 @@ +decord From aed50df3c6ecee917b6a7ec7c9bb989afd476b56 Mon Sep 17 00:00:00 2001 From: chywang Date: Tue, 13 Dec 2022 17:18:44 +0800 Subject: [PATCH 037/101] Update run_train_eval_predict_appzoo_cli_local.sh --- ...run_train_eval_predict_appzoo_cli_local.sh | 31 +++++++++---------- 1 file changed, 14 insertions(+), 17 deletions(-) diff --git a/examples/information_extraction/run_train_eval_predict_appzoo_cli_local.sh b/examples/information_extraction/run_train_eval_predict_appzoo_cli_local.sh index a8008bdb..7e14d532 100644 --- a/examples/information_extraction/run_train_eval_predict_appzoo_cli_local.sh +++ b/examples/information_extraction/run_train_eval_predict_appzoo_cli_local.sh @@ -1,8 +1,5 @@ export CUDA_VISIBLE_DEVICES=$1 -if [ ! -f ./train.tsv ]; then - wget http://atp-modelzoo-sh.oss-cn-shanghai.aliyuncs.com/release/tutorials/ie/train.tsv -fi if [ ! 
-f ./train_part.tsv ]; then wget http://atp-modelzoo-sh.oss-cn-shanghai.aliyuncs.com/release/tutorials/ie/train_part.tsv @@ -28,17 +25,17 @@ if [ "$mode" = "train" ]; then --mode $mode \ --tables=train_part.tsv,dev.tsv \ --input_schema=id:str:1,instruction:str:1,start:str:1,end:str:1,target:str:1 \ - --worker_gpu=4 \ + --worker_gpu=1 \ --app_name=information_extraction \ - --sequence_length=512 \ + --sequence_length=128 \ --weight_decay=0.0 \ - --micro_batch_size=4 \ + --micro_batch_size=8 \ --checkpoint_dir=./information_extraction_model/ \ --data_threads=5 \ --user_defined_parameters='pretrain_model_name_or_path=hfl/macbert-large-zh' \ --save_checkpoint_steps=50 \ - --gradient_accumulation_steps=8 \ - --epoch_num=3 \ + --gradient_accumulation_steps=2 \ + --epoch_num=1 \ --learning_rate=2e-05 \ --random_seed=42 @@ -48,11 +45,11 @@ elif [ "$mode" = "evaluate" ]; then --mode $mode \ --tables=dev.tsv \ --input_schema=id:str:1,instruction:str:1,start:str:1,end:str:1,target:str:1 \ - --worker_gpu=4 \ + --worker_gpu=1 \ --app_name=information_extraction \ - --sequence_length=512 \ + --sequence_length=128 \ --weight_decay=0.0 \ - --micro_batch_size=4 \ + --micro_batch_size=8 \ --checkpoint_dir=./information_extraction_model/ \ --data_threads=5 @@ -65,10 +62,10 @@ elif [ "$mode" = "predict" ]; then --input_schema=id:str:1,scheme:str:1,content:str:1 \ --outputs=predict_output_EE.tsv \ --output_schema=id,content,q_and_a \ - --worker_gpu=4 \ + --worker_gpu=1 \ --app_name=information_extraction \ - --sequence_length=512 \ - --micro_batch_size=4 \ + --sequence_length=128 \ + --micro_batch_size=8 \ --checkpoint_dir=./information_extraction_model/ \ --data_threads=5 \ --user_defined_parameters='task=EE' @@ -81,11 +78,11 @@ elif [ "$mode" = "predict" ]; then --output_schema=id,content,q_and_a \ --worker_gpu=1 \ --app_name=information_extraction \ - --sequence_length=512 \ - --micro_batch_size=4 \ + --sequence_length=128 \ + --micro_batch_size=8 \ --checkpoint_dir=./information_extraction_model/ \ --data_threads=5 \ --user_defined_parameters='task=NER' fi -#mode=train处,目前使用的是部分训练数据,如果需要使用全部训练数据,请将train_part.tsv修改为train.tsv,save_checkpoint_steps=50修改为save_checkpoint_steps=500 \ No newline at end of file +#mode=train处,目前使用的是部分训练数据,如果需要使用全部训练数据,请将train_part.tsv修改为train.tsv,save_checkpoint_steps=50修改为save_checkpoint_steps=500 From 92e5994e2acca61a08bda6d5057e1133b7c6c2c9 Mon Sep 17 00:00:00 2001 From: chywang Date: Tue, 13 Dec 2022 17:19:34 +0800 Subject: [PATCH 038/101] Update run_train_eval_predict_appzoo_cli_local.sh --- .../run_train_eval_predict_appzoo_cli_local.sh | 1 - 1 file changed, 1 deletion(-) diff --git a/examples/information_extraction/run_train_eval_predict_appzoo_cli_local.sh b/examples/information_extraction/run_train_eval_predict_appzoo_cli_local.sh index 7e14d532..de96120f 100644 --- a/examples/information_extraction/run_train_eval_predict_appzoo_cli_local.sh +++ b/examples/information_extraction/run_train_eval_predict_appzoo_cli_local.sh @@ -48,7 +48,6 @@ elif [ "$mode" = "evaluate" ]; then --worker_gpu=1 \ --app_name=information_extraction \ --sequence_length=128 \ - --weight_decay=0.0 \ --micro_batch_size=8 \ --checkpoint_dir=./information_extraction_model/ \ --data_threads=5 From cfed742a7c0b8778582abe4aadf5a335ca0de6b1 Mon Sep 17 00:00:00 2001 From: chywang Date: Wed, 14 Dec 2022 10:45:28 +0800 Subject: [PATCH 039/101] Update README.md --- README.md | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index b9c9c00e..cc13d93f 100644 --- 
a/README.md +++ b/README.md @@ -35,7 +35,10 @@ EasyNLP is an easy-to-use NLP development and application toolkit in PyTorch, fi We have a series of technical articles on the functionalities of EasyNLP. - +- [PAI-Diffusion模型来了!阿里云机器学习团队带您徜徉中文艺术海洋](https://zhuanlan.zhihu.com/p/590020134) +- [模型精度再被提升,统一跨任务小样本学习算法 UPT 给出解法!](https://zhuanlan.zhihu.com/p/590611518) +- [Span抽取和元学习能碰撞出怎样的新火花,小样本实体识别来告诉你!](https://zhuanlan.zhihu.com/p/590297824) +- [算法 KECP 被顶会 EMNLP 收录,极少训练数据就能实现机器阅读理解](https://zhuanlan.zhihu.com/p/590024650) - [当大火的文图生成模型遇见知识图谱,AI画像趋近于真实世界](https://zhuanlan.zhihu.com/p/581870071) - [EasyNLP发布融合语言学和事实知识的中文预训练模型CKBERT](https://zhuanlan.zhihu.com/p/574853281) - [EasyNLP带你实现中英文机器阅读理解](https://zhuanlan.zhihu.com/p/568890245) From 71dc76e7d51d69af7d56c9b09ceefb5cbfeba3d9 Mon Sep 17 00:00:00 2001 From: chywang Date: Wed, 14 Dec 2022 10:46:25 +0800 Subject: [PATCH 040/101] Update README.cn.md --- README.cn.md | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/README.cn.md b/README.cn.md index 44ec0c1d..45a51ae2 100644 --- a/README.cn.md +++ b/README.cn.md @@ -22,7 +22,12 @@ 随着 BERT、Megatron、GPT-3 等预训练模型在NLP领域取得瞩目的成果,越来越多团队投身到超大规模训练中,这使得训练模型的规模从亿级别发展到了千亿甚至万亿的规模。然而,这类超大规模的模型运用于实际场景中仍然有一些挑战。首先,模型参数量过大使得训练和推理速度过慢且部署成本极高;其次在很多实际场景中数据量不足的问题仍然制约着大模型在小样本场景中的应用,提高预训练模型在小样本场景的泛化性依然存在挑战。为了应对以上问题,PAI 团队推出了 EasyNLP 中文 NLP 算法框架,助力大模型快速且高效的落地。 - +- [PAI-Diffusion模型来了!阿里云机器学习团队带您徜徉中文艺术海洋](https://zhuanlan.zhihu.com/p/590020134) +- [模型精度再被提升,统一跨任务小样本学习算法 UPT 给出解法!](https://zhuanlan.zhihu.com/p/590611518) +- [Span抽取和元学习能碰撞出怎样的新火花,小样本实体识别来告诉你!](https://zhuanlan.zhihu.com/p/590297824) +- [算法 KECP 被顶会 EMNLP 收录,极少训练数据就能实现机器阅读理解](https://zhuanlan.zhihu.com/p/590024650) +- [当大火的文图生成模型遇见知识图谱,AI画像趋近于真实世界](https://zhuanlan.zhihu.com/p/581870071) +- [EasyNLP发布融合语言学和事实知识的中文预训练模型CKBERT](https://zhuanlan.zhihu.com/p/574853281) - [EasyNLP带你实现中英文机器阅读理解](https://zhuanlan.zhihu.com/p/568890245) - [跨模态学习能力再升级,EasyNLP电商文图检索效果刷新SOTA](https://zhuanlan.zhihu.com/p/568512230) - [EasyNLP玩转文本摘要(新闻标题)生成](https://zhuanlan.zhihu.com/p/566607127) From 88fc56d0ed62261b9d4969e8633df24363c1db97 Mon Sep 17 00:00:00 2001 From: chywang2013 Date: Wed, 14 Dec 2022 06:22:09 +0000 Subject: [PATCH 041/101] update parameters --- easynlp/cli.py | 60 ++++++++++++++++++++---------------- easynlp/core/trainer.py | 9 +++--- easynlp/utils/arguments.py | 40 ++---------------------- easynlp/utils/initializer.py | 8 ++--- easynlp/utils/statistics.py | 2 +- tests/run_all_tests.sh | 20 ++++++------ tests/run_vectorization.sh | 3 +- tests/test_classification.py | 2 +- 8 files changed, 57 insertions(+), 87 deletions(-) diff --git a/easynlp/cli.py b/easynlp/cli.py index 99501f68..fe5740a5 100644 --- a/easynlp/cli.py +++ b/easynlp/cli.py @@ -100,6 +100,17 @@ def main(): cmd.append(args.mode) cmd.append('--tables') cmd.append(args.tables) + cmd.append('--checkpoint_dir') + cmd.append(args.checkpoint_dir) + cmd.append('--sequence_length') + cmd.append(str(args.sequence_length)) + cmd.append('--micro_batch_size') + cmd.append(str(args.micro_batch_size)) + cmd.append('--app_name') + cmd.append(args.app_name) + cmd.append('--worker_gpu') + cmd.append(str(args.worker_gpu)) + if args.skip_first_line: cmd.append('--skip_first_line') if args.input_schema is not None: @@ -111,6 +122,9 @@ def main(): if args.second_sequence is not None: cmd.append('--second_sequence') cmd.append(args.second_sequence) + if args.data_threads is not None: + cmd.append('--data_threads') + cmd.append(str(args.data_threads)) if args.mode != 'predict': if args.label_name 
is not None: @@ -120,19 +134,28 @@ def main(): cmd.append('--label_enumerate_values') cmd.append(args.label_enumerate_values) - cmd.append('--checkpoint_dir') - cmd.append(args.checkpoint_dir) - cmd.append('--export_tf_checkpoint_type') - cmd.append(args.export_tf_checkpoint_type) - cmd.append('--learning_rate') - cmd.append(str(args.learning_rate)) - cmd.append('--epoch_num') - cmd.append(str(args.epoch_num)) - cmd.append('--random_seed') - cmd.append(str(args.random_seed)) + # cmd.append('--export_tf_checkpoint_type') + # cmd.append(args.export_tf_checkpoint_type) + if args.mode == 'train': cmd.append('--save_checkpoint_steps') cmd.append(str(args.save_checkpoint_steps)) + if args.weight_decay is not None: + cmd.append('--weight_decay') + cmd.append(str(args.weight_decay)) + if args.gradient_accumulation_steps is not None: + cmd.append('--gradient_accumulation_steps') + cmd.append(str(args.gradient_accumulation_steps)) + if args.epoch_num is not None: + cmd.append('--epoch_num') + cmd.append(str(args.epoch_num)) + if args.random_seed is not None: + cmd.append('--random_seed') + cmd.append(str(args.random_seed)) + if args.learning_rate is not None: + cmd.append('--learning_rate') + cmd.append(str(args.learning_rate)) + if args.mode == 'predict': cmd.append('--predict_queue_size') cmd.append('1024') @@ -144,25 +167,10 @@ def main(): cmd.append(args.outputs) cmd.append('--output_schema') cmd.append(args.output_schema) - cmd.append('--restore_works_dir') - cmd.append(args.restore_works_dir) if args.append_cols is not None: cmd.append('--append_cols') cmd.append(args.append_cols) - cmd.append('--sequence_length') - cmd.append(str(args.sequence_length)) - cmd.append('--micro_batch_size') - cmd.append(str(args.micro_batch_size)) - cmd.append('--app_name') - cmd.append(args.app_name) - cmd.append('--worker_gpu') - cmd.append(str(args.worker_gpu)) - cmd.append('--weight_decay') - cmd.append(str(args.weight_decay)) - cmd.append('--data_threads') - cmd.append(str(args.data_threads)) - cmd.append('--gradient_accumulation_steps') - cmd.append(str(args.gradient_accumulation_steps)) + if args.buckets is not None: cmd.append('--buckets') cmd.append(args.buckets) diff --git a/easynlp/core/trainer.py b/easynlp/core/trainer.py index 50249978..df7d10d4 100644 --- a/easynlp/core/trainer.py +++ b/easynlp/core/trainer.py @@ -214,7 +214,6 @@ def set_train_loader(self, train_dataset, args): def log_train_infos(self): args = self.args - logger.info('=' * 10 + ' Training Start ' + '=' * 10 + '\n') logger.info(' Num of GPUs (all) = %d', args.n_gpu) logger.info(' Num of CPUs per worker = %d', args.n_cpu) @@ -254,9 +253,6 @@ def log_train_infos(self): str(args.save_checkpoint_steps)) logger.info(' Distributed_backend = %s', str(args.distributed_backend)) - logger.info(' Worker Count = %s', str(args.worker_count)) - logger.info(' Worker CPU = %s', str(args.worker_cpu)) - logger.info(' Worker data threads = %s', str(args.data_threads)) model_num_params = sum( [p.nelement() for n, p in self.model_module.named_parameters()]) @@ -523,6 +519,7 @@ def save_checkpoint(self, save_best=False): if not save_best: return + """ if self.args.export_tf_checkpoint_type != 'none' and hasattr( self.model_module, 'model_name'): # If the student is pre-defined EasyTransfer AppZoo model @@ -531,7 +528,6 @@ def save_checkpoint(self, save_best=False): (self.args.export_tf_checkpoint_type, os.path.join(get_dir_name(self.args.checkpoint_dir), 'model.ckpt'))) - if self.args.export_tf_checkpoint_type == 'easytransfer': 
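The reworked `cli.py` above only forwards optional flags such as `--weight_decay`, `--gradient_accumulation_steps`, `--epoch_num` and `--learning_rate` when the user actually supplied them. A minimal sketch of that append-only-if-set pattern; the helper name and the sample values are illustrative, not taken from the patch:

    # Sketch of the "append the flag only when a value was supplied" pattern from cli.py.
    # append_flag and the example values are illustrative assumptions.
    def append_flag(cmd, flag, value):
        if value is not None:
            cmd.extend([flag, str(value)])

    cmd = ['easynlp', '--mode', 'train']
    append_flag(cmd, '--weight_decay', 0.0)                   # forwarded
    append_flag(cmd, '--gradient_accumulation_steps', None)   # skipped
    append_flag(cmd, '--epoch_num', 3)                        # forwarded
    print(' '.join(cmd))
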
exporter.export_pytorch_checkpoint_to_tf( model=self.model_module, @@ -550,9 +546,12 @@ def save_checkpoint(self, save_best=False): else: raise RuntimeError('Invalid export_tf_checkpoint_type %s' % self.args.export_tf_checkpoint_type) + """ + # This is a hack if torch.cuda.is_available(): torch.cuda.set_device(self.args.local_rank) + def contrast_learning_process(self, positive_negative_examples: torch.Tensor) -> torch.Tensor: # compute the exapmle emmbding original_size = positive_negative_examples.size() diff --git a/easynlp/utils/arguments.py b/easynlp/utils/arguments.py index 86892bd6..2e9cec7e 100644 --- a/easynlp/utils/arguments.py +++ b/easynlp/utils/arguments.py @@ -106,7 +106,7 @@ def parse_args(extra_args_provider=None, args.is_master_node = True else: args.is_master_node = False - _print_args(args) + # _print_args(args) return args @@ -273,7 +273,6 @@ def _add_easynlp_args(parser: argparse.ArgumentParser): group.add_argument( '--micro_batch_size', - '--train_batch_size', type=int, default=2, help='Batch size per model instance (local batch size). ' @@ -286,7 +285,6 @@ def _add_easynlp_args(parser: argparse.ArgumentParser): help='local rank passed from distributed launcher.') group.add_argument('--checkpoint_dir', - '--checkpoint_path', default=None, type=str, help='The model checkpoint dir.') @@ -364,12 +362,6 @@ def _add_easynlp_args(parser: argparse.ArgumentParser): default=None, help='Resume training process from checkpoint') - group.add_argument('--export_tf_checkpoint_type', - type=str, - default='easytransfer', - choices=['easytransfer', 'google', 'none'], - help='Which type of checkpoint you want to export') - group.add_argument('--input_schema', type=str, default=None, @@ -415,34 +407,6 @@ def _add_easynlp_args(parser: argparse.ArgumentParser): default=16, type=int, help='Predict Table Read Thread Num') - group.add_argument('--restore_works_dir', - default='./.easynlp_predict_restore_works_dir', - type=str, - help='(for PAI-TF fail-over)') - group.add_argument('--ps_hosts', - default='', - type=str, - help='PS hosts (for PAI-TF)') - group.add_argument('--chief_hosts', - default='', - type=str, - help='Chief hosts (for PAI-TF)') - group.add_argument('--job_name', - default=None, - type=str, - help='Name of the job (for PAI-TF)') - group.add_argument('--task_index', - default=0, - type=int, - help='Index of the task (for PAI-TF)') - group.add_argument('--task_count', - default=1, - type=int, - help='Number of the task (for PAI-TF)') - group.add_argument('--is_chief', - default='', - type=str, - help='is chief (for PAI-TF)') group.add_argument('--worker_count', default=1, type=int, @@ -483,7 +447,6 @@ def add_model_config_args(parser): """Model arguments""" group = parser.add_argument_group('model', 'model configuration') - group.add_argument('--transformer-xl', action='store_true', help='use transformer-xl for training') group.add_argument('--pretrained-bert', action='store_true', help='use a pretrained bert-large-uncased model instead' @@ -873,6 +836,7 @@ def add_finetune_config_args(parser): def get_ds_args(): """Parse all the args.""" parser = argparse.ArgumentParser(description='PyTorch BERT Model') + parser = add_model_config_args(parser) parser = add_fp16_config_args(parser) parser = add_training_args(parser) diff --git a/easynlp/utils/initializer.py b/easynlp/utils/initializer.py index e3b1d9b4..0f3b1d4c 100644 --- a/easynlp/utils/initializer.py +++ b/easynlp/utils/initializer.py @@ -44,7 +44,7 @@ def initialize_easynlp(extra_args_provider=None, 
_set_random_seed(args.random_seed) #this env is for predictor - os.environ['TF_FAILOVER_RESTORE_WORKS_DIR'] = args.restore_works_dir + #os.environ['TF_FAILOVER_RESTORE_WORKS_DIR'] = args.restore_works_dir os.environ['EASYNLP_MODELZOO_BASE_DIR'] = args.modelzoo_base_dir os.environ['EASYNLP_IS_MASTER'] = str(args.is_master_node) os.environ['EASYNLP_N_GPUS'] = str(args.n_gpu) @@ -151,9 +151,9 @@ def _set_random_seed(seed): np.random.seed(seed) torch.manual_seed(seed) torch.cuda.manual_seed_all(seed) - else: - raise ValueError( - 'Seed ({}) should be a positive integer.'.format(seed)) + #else: + # raise ValueError( + # 'Seed ({}) should be a positive integer.'.format(seed)) def write_args_to_tensorboard(): diff --git a/easynlp/utils/statistics.py b/easynlp/utils/statistics.py index d0771bd5..50839126 100644 --- a/easynlp/utils/statistics.py +++ b/easynlp/utils/statistics.py @@ -62,7 +62,7 @@ def output(self, step, epoch, learning_rate): if 'loss' in key: logger.info(' {:10}: {:.4f} '.format( key, - val.item() / self.n_tr_steps)) + val / self.n_tr_steps)) self.last_time = time.time() diff --git a/tests/run_all_tests.sh b/tests/run_all_tests.sh index 94eb4d2c..232c7a8e 100644 --- a/tests/run_all_tests.sh +++ b/tests/run_all_tests.sh @@ -23,14 +23,14 @@ fi rm -rf *.tsv *.csv *.txt -echo "================== Test MegatronBERT classification ==================" -if [ ! -f ./train.tsv ]; then - wget http://atp-modelzoo-sh.oss-cn-shanghai.aliyuncs.com/release/tutorials/classification/train.tsv - wget http://atp-modelzoo-sh.oss-cn-shanghai.aliyuncs.com/release/tutorials/classification/dev.tsv -fi +#echo "================== Test MegatronBERT classification ==================" +#if [ ! -f ./train.tsv ]; then +# wget http://atp-modelzoo-sh.oss-cn-shanghai.aliyuncs.com/release/tutorials/classification/train.tsv +# wget http://atp-modelzoo-sh.oss-cn-shanghai.aliyuncs.com/release/tutorials/classification/dev.tsv +#fi -python test_megatron_bert.py -rm -rf classification_model +#python test_megatron_bert.py +#rm -rf classification_model echo "================== Test DKPLM ==================" if [ ! -f ./train_corpus.txt ]; then @@ -42,9 +42,9 @@ fi python test_dkplm_language_modeling.py rm -rf *.txt -echo "================== Test TorchACC ==================" -python test_torchacc.py -rm -rf *.tsv +#echo "================== Test TorchACC ==================" +#python test_torchacc.py +#rm -rf *.tsv # echo "================== Feature Vectorization ==================" # if [ ! 
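`_set_random_seed` above seeds Python's `random`, NumPy and PyTorch (including all visible GPUs) from one integer; the patch only relaxes the check on non-positive seeds. A self-contained sketch of the same seeding steps, with the guard kept as a plain condition:

    # Seed random, NumPy and PyTorch together, mirroring the steps in _set_random_seed.
    import random
    import numpy as np
    import torch

    def set_random_seed(seed):
        if seed is not None and seed > 0:
            random.seed(seed)
            np.random.seed(seed)
            torch.manual_seed(seed)
            if torch.cuda.is_available():
                torch.cuda.manual_seed_all(seed)

    set_random_seed(42)
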
-f ./dev2.tsv ]; then diff --git a/tests/run_vectorization.sh b/tests/run_vectorization.sh index 4d717d88..80101b57 100644 --- a/tests/run_vectorization.sh +++ b/tests/run_vectorization.sh @@ -26,10 +26,9 @@ python -m torch.distributed.launch --nproc_per_node 1 --nnodes 1 --node_rank 0 - ./easynlp/appzoo/api.py \ --mode predict --tables tmp/dev2.tsv \ --input_schema label:str:1,sid1:str:1,sid2:str:1,sent1:str:1,sent2:str:1 \ ---export_tf_checkpoint_type easytransfer \ --learning_rate 5e-05 --epoch_num 3 --random_seed 1234 --predict_queue_size 1024 --predict_slice_size 4096 \ --predict_thread_num 1 --outputs tmp/dev.pred.tsv \ --output_schema pooler_output,first_token_output,all_hidden_outputs \ ---restore_works_dir ./.easynlp_predict_restore_works_dir --append_cols label \ +--append_cols label \ --sequence_length 128 --micro_batch_size 32 --app_name vectorization \ --first_sequence sent1 --checkpoint_dir bert-tiny-uncased diff --git a/tests/test_classification.py b/tests/test_classification.py index dc6a9edc..ab026fba 100644 --- a/tests/test_classification.py +++ b/tests/test_classification.py @@ -118,7 +118,7 @@ def test_2_predict(self): --append_cols=label \ --first_sequence=sent1 \ --second_sequence=sent2 \ - --checkpoint_path=./classification_model \ + --checkpoint_dir=./classification_model \ --sequence_length=128 \ --micro_batch_size=32 \ --app_name=text_classify \ From 3949c66abdb844e835f176352ef8c03e5bd8e221 Mon Sep 17 00:00:00 2001 From: chywang2013 Date: Wed, 14 Dec 2022 06:24:21 +0000 Subject: [PATCH 042/101] update params. --- easynlp/core/trainer_vanilla.py | 341 ------------------------------ tests/test_classification_main.py | 2 - 2 files changed, 343 deletions(-) delete mode 100644 easynlp/core/trainer_vanilla.py diff --git a/easynlp/core/trainer_vanilla.py b/easynlp/core/trainer_vanilla.py deleted file mode 100644 index ddf1022a..00000000 --- a/easynlp/core/trainer_vanilla.py +++ /dev/null @@ -1,341 +0,0 @@ -# coding=utf-8 -# Copyright (c) 2020 Alibaba PAI team. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import os -import time -from ast import literal_eval - -import torch -from torch.autograd import Variable -from torch.utils.data import DataLoader, RandomSampler, SequentialSampler -from torch.utils.data.distributed import DistributedSampler - -from ..utils import (exporter, get_args, get_dir_name, get_pretrain_model_path, - io) -from ..utils.logger import logger -from ..utils.statistics import Statistics -from .optimizers import get_optimizer - - -class VanillaTrainer(object): - def __init__(self, model, train_dataset, evaluator, **kwargs): - self.args = get_args() - self._model = None - self._optimizer = None - self._train_loader = None - self._start_epoch = 0 - self._start_global_step = 0 - self._start_time = time.time() - self._current_loss = 0. 
- self._current_epoch = self._start_epoch - - print('log: set train loader') - self.set_train_loader(train_dataset, self.args) - - print('log: set_model_and_optimizer') - self.set_model_and_optimizer(model, self.args) - print('log: resume_from_ckpt') - self.resume_from_ckpt(self.model_module, self.args) - - # print('log: set_tensorboard') - # self.set_tensorboard() - self._global_step = self._start_epoch * len(self._train_loader) - self.evaluator = evaluator - - @property - def model_module(self): - if self._model is None: - return self._model - - return self._model.module if hasattr(self._model, - 'module') else self._model - - @property - def learning_rate(self): - return self._optimizer.get_current_lr() - - def set_model_and_optimizer(self, model, args): - if self.args.n_gpu == 1: - self._model = model.to(self.args.local_rank) - elif self.args.n_gpu > 1: - self._model = torch.nn.parallel.DistributedDataParallel( - model.to(self.args.local_rank), - device_ids=[self.args.local_rank], - output_device=self.args.local_rank, - find_unused_parameters=True) - else: - raise Exception('CPU Training is not supported.') - - # # Build Optimizer - # self._optimizer = get_optimizer( - # optimizer_type="adam", - # learning_rate=args.learning_rate, - # warmup_proportion=args.warmup_proportion, - # max_grad_norm=args.max_grad_norm, - # named_parameters=list(self.model_module.named_parameters()), - # gradient_accumulation_steps=args.gradient_accumulation_steps, - # num_steps_per_epoch=len(self._train_loader), - # epoch_num=args.epoch_num) - print('log: set SGD') - self._optimizer = torch.optim.SGD(self._model.parameters(), - lr=args.learning_rate, - momentum=0.9) - - def resume_from_ckpt(self, model_module, args): - if args.resume_from_checkpoint is None: - return - meta_file = args.resume_from_checkpoint + '.meta.bin' - model_file = args.resume_from_checkpoint + '.bin' - if 'oss::' in args.resume_from_checkpoint: - local_file = 'easynlp_resume_pytorch_model.meta.bin' - io.download(model_file, local_file) - meta_file = local_file - - local_file = 'easynlp_resume_pytorch_model.bin' - io.download(model_file, local_file) - model_file = local_file - - with io.open(meta_file, 'rb') as f: - meta_data = torch.load(f, map_location='cpu') - self._start_epoch = meta_data['epoch'] - self._start_global_step = meta_data['global_step'] + 1 - self._optimizer.load_state_dict(meta_data['optimizer']) - - logger.info('Resume from checkpoint {}'.format( - args.resume_from_checkpoint)) - logger.info('Start epoch {}'.format(self._start_epoch)) - logger.info('Start step {}'.format(self._start_global_step)) - logger.info('Start learning rate {:.6f}'.format( - self._optimizer.get_current_lr())) - with io.open(model_file, 'rb') as f: - model_module.load_state_dict(torch.load(f, map_location='cpu')) - logger.info('Resume checkpoint Done'.format( - args.resume_from_checkpoint)) - - def set_train_loader(self, train_dataset, args): - if args.read_odps: - train_sampler = None - else: - train_sampler = RandomSampler if args.n_gpu <= 1 else DistributedSampler - # train_sampler = SequentialSampler - - # Init dataloader to make sure at least one cpu core is loading data - # Note: here num_worker=n_cpu - if getattr(train_dataset, 'batch_fn', None) is not None: - self._train_loader = DataLoader( - train_dataset, - sampler=train_sampler(train_dataset) - if train_sampler else None, - batch_size=args.micro_batch_size, - collate_fn=train_dataset.batch_fn, - num_workers=max(args.n_cpu, 1)) - else: - self._train_loader = DataLoader( - 
train_dataset, - sampler=train_sampler(train_dataset) - if train_sampler else None, - batch_size=args.micro_batch_size, - num_workers=max(args.n_cpu, 1)) - - def log_train_infos(self): - args = self.args - - logger.info('=' * 10 + ' Training Start ' + '=' * 10 + '\n') - logger.info(' Num of GPUs (all) = %d', args.n_gpu) - logger.info(' Num of CPUs per worker = %d', args.n_cpu) - if args.n_gpu > 0: - n_tr_samples = len(self._train_loader.dataset - ) * args.n_gpu if args.read_odps else len( - self._train_loader.dataset) - n_tr_batch_size = args.micro_batch_size * args.n_gpu * args.gradient_accumulation_steps - else: - n_tr_samples = len(self._train_loader.dataset) - n_tr_batch_size = args.micro_batch_size * args.gradient_accumulation_steps - n_tr_batch_no = len(self._train_loader.dataset - ) / args.micro_batch_size * args.epoch_num - - logger.info(' Num dataset examples = %d', - len(self._train_loader.dataset)) - logger.info(' Num training examples = %d', n_tr_samples) - if self.evaluator is not None: - logger.info(' Num validation examples = %d', - len(self.evaluator.valid_loader.dataset)) - logger.info(' Train. steps = %d', - len(self._train_loader.dataset) / args.micro_batch_size) - logger.info(' Train. batch size = %d', n_tr_batch_size) - logger.info(' Train. micro batch size = %d', args.micro_batch_size) - logger.info(' Train. batch no. = %d', n_tr_batch_no) - logger.info(' Evaluation batch size = %d', args.micro_batch_size) - logger.info(' Sequence length = %s', - str(args.sequence_length)) - logger.info(' Saving steps = %s', - str(args.save_checkpoint_steps)) - logger.info(' Distributed_backend = %s', - str(args.distributed_backend)) - logger.info(' Worker Count = %s', str(args.worker_count)) - logger.info(' Worker CPU = %s', str(args.worker_cpu)) - logger.info(' Worker data threads = %s', str(args.data_threads)) - - model_num_params = sum( - [p.nelement() for n, p in self.model_module.named_parameters()]) - trainable_num_params = sum([ - p.nelement() for n, p in self.model_module.named_parameters() - if p.requires_grad - ]) - logger.info(' num model params = %s' % - format(model_num_params, ',')) - logger.info(' num trainable params = %s' % - format(trainable_num_params, ',')) - logger.info('\n') - - def optimizer_step(self): - self._optimizer.step() - self._optimizer.zero_grad() - - def after_train(self): - args = self.args - - # Save last checkpoint if needed - if not args.is_master_node: - return - - if args.save_checkpoint_steps is not None: - logger.info('Saving best model to %s...' % - os.path.join(args.checkpoint_dir, 'pytorch_model.bin')) - self.save_checkpoint(save_best=True) - elif self.evaluator is not None: - self._eval_scores = self.evaluator.evaluate( - model=self.model_module) - if self._eval_scores[0][1] > self.evaluator.best_valid_score: - logger.info( - 'Saving best model to %s...' 
% - os.path.join(args.checkpoint_dir, 'pytorch_model.bin')) - self.save_checkpoint(save_best=True) - self.evaluator.best_valid_score = self._eval_scores[0][1] - logger.info('Best score: {}'.format( - self.evaluator.best_valid_score)) - - # self.tensorboard.close() - - logger.info('Destroy Process Group.') - torch.distributed.destroy_process_group() - - def save_checkpoint(self, save_best=False): - if not self.args.is_master_node: - return - - # Save the model - model_to_save_prefix = 'pytorch_model' if save_best else 'pytorch_model_step_%d' % ( - self._global_step + 1) - - with io.open(os.path.join(self.args.checkpoint_dir, model_to_save_prefix + '.bin'), 'wb') \ - as output_model_file: - torch.save(self.model_module.state_dict(), output_model_file) - - meta_data = { - 'epoch': self._current_epoch, - 'global_step': self._global_step, - 'optimizer': self._optimizer.state_dict() - } - - with io.open(os.path.join(self.args.checkpoint_dir, model_to_save_prefix + '.meta.bin'), 'wb') \ - as output_model_file: - torch.save(meta_data, output_model_file) - - if not save_best: - return - - if self.args.export_tf_checkpoint_type != 'none' and hasattr( - self.model_module, 'model_name'): - # If the student is pre-defined EasyTransfer AppZoo model - # Save train_config.json, model.ckpt.* for EasyTransfer - logger.info('Export tensorflow checkpoint (%s format) to %s' % - (self.args.export_tf_checkpoint_type, - os.path.join(get_dir_name(self.args.checkpoint_dir), - 'model.ckpt'))) - - if self.args.export_tf_checkpoint_type == 'easytransfer': - exporter.export_pytorch_checkpoint_to_tf( - model=self.model_module, - ckpt_dir=get_dir_name(self.args.checkpoint_dir), - bert_output_prefix='bert_pre_trained_model', - appended_val_map=(('classifier', 'app/ez_dense'), ), - appended_tensors_to_transpose=('classifier.weight', )) - elif self.args.export_tf_checkpoint_type == 'google': - exporter.export_pytorch_checkpoint_to_tf( - model=self.model_module, - ckpt_dir=get_dir_name(self.args.checkpoint_dir), - bert_output_prefix='', - appended_val_map=(('classifier.weight', 'output_weights'), - ('classifier.bias', 'output_bias')), - appended_tensors_to_transpose=()) - else: - raise RuntimeError('Invalid export_tf_checkpoint_type %s' % - self.args.export_tf_checkpoint_type) - # This is a hack - if torch.cuda.is_available(): - torch.cuda.set_device(self.args.local_rank) - - def train(self): - print('log: train loop...') - self.log_train_infos() - args = self.args - for _epoch in range(self._start_epoch, int(args.epoch_num)): - if args.n_gpu > 1: - torch.distributed.barrier() - - running_loss = 0.0 - for _step, batch in enumerate(self._train_loader): - if self._global_step % 100 == 0: - print( - 'Worker step %4d, batch %4d, rank %d, l_rank %d, master %5s, loss %f' - % - (self._global_step, _step, args.rank, args.local_rank, - args.is_master_node, running_loss / 100)) - running_loss = 0.0 - self._global_step += 1 - - # compute loss - if type(batch) is dict: - batch = { - key: val.to(args.local_rank) if isinstance( - val, torch.Tensor) else val - for key, val in batch.items() - } - - label_ids = batch.pop('label_ids') - # loss_dict = self.model_module.compute_loss(batch.pop("input_ids"), label_ids) - - forward_outputs = self._model(batch) - loss_dict = self.model_module.compute_loss( - forward_outputs, label_ids) - else: - # call user-defined loss function - loss_dict = self.model_module.compute_loss( - batch[0], batch[1]) - - _loss = loss_dict['loss'] - if args.n_gpu > 1: - _loss = _loss.mean() - - _loss.backward() - 
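The removed `trainer_vanilla.py` reduces to the standard loop: forward pass, loss, backward, optimizer step, and a running-loss log every fixed number of steps. A minimal runnable sketch of that loop on synthetic data; the toy model, dataset and hyper-parameters are placeholders, not values from the repository:

    # Toy forward/backward/step loop in the style of the removed VanillaTrainer.
    # Model, data and hyper-parameters are synthetic placeholders.
    import torch
    from torch.utils.data import DataLoader, TensorDataset

    data = TensorDataset(torch.randn(256, 10), torch.randint(0, 2, (256,)))
    loader = DataLoader(data, batch_size=32, shuffle=True)
    model = torch.nn.Linear(10, 2)
    optimizer = torch.optim.SGD(model.parameters(), lr=0.1, momentum=0.9)
    loss_fct = torch.nn.CrossEntropyLoss()

    running_loss, global_step = 0.0, 0
    for epoch in range(3):
        for inputs, labels in loader:
            loss = loss_fct(model(inputs), labels)
            loss.backward()
            optimizer.step()
            optimizer.zero_grad()
            running_loss += loss.item()
            global_step += 1
            if global_step % 8 == 0:
                print('step %d, avg loss %.4f' % (global_step, running_loss / 8))
                running_loss = 0.0
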
running_loss += _loss.item() - self.optimizer_step() - - print('Training Time: {}, rank {}, gsteps {}'.format( - time.time() - self._start_time, args.rank, self._global_step)) - # save ckpt - self.after_train() - print('Finish training.') diff --git a/tests/test_classification_main.py b/tests/test_classification_main.py index 42d8942a..d0110699 100644 --- a/tests/test_classification_main.py +++ b/tests/test_classification_main.py @@ -12,8 +12,6 @@ print('*' * 50) print('running local main...\n') -# from easynlp.core.trainer_vanilla import VanillaTrainer as Trainer - if __name__ == '__main__': print('log: starts to init...\n') From 773777f5782c1978b293350e18c5380febf167f2 Mon Sep 17 00:00:00 2001 From: chywang2013 Date: Wed, 14 Dec 2022 06:27:38 +0000 Subject: [PATCH 043/101] updata param. --- easynlp/core/trainer.py | 1 - easynlp/utils/arguments.py | 5 ----- 2 files changed, 6 deletions(-) diff --git a/easynlp/core/trainer.py b/easynlp/core/trainer.py index df7d10d4..353bf229 100644 --- a/easynlp/core/trainer.py +++ b/easynlp/core/trainer.py @@ -216,7 +216,6 @@ def log_train_infos(self): args = self.args logger.info('=' * 10 + ' Training Start ' + '=' * 10 + '\n') logger.info(' Num of GPUs (all) = %d', args.n_gpu) - logger.info(' Num of CPUs per worker = %d', args.n_cpu) if args.n_gpu > 0: n_tr_samples = len(self._train_loader.dataset ) * args.n_gpu if args.read_odps else len( diff --git a/easynlp/utils/arguments.py b/easynlp/utils/arguments.py index 2e9cec7e..0bbedd8c 100644 --- a/easynlp/utils/arguments.py +++ b/easynlp/utils/arguments.py @@ -101,7 +101,6 @@ def parse_args(extra_args_provider=None, else: args.n_gpu = args.world_size if torch.cuda.is_available() else 0 - args.n_cpu = args.worker_cpu if args.worker_cpu > 0 else 1 if args.rank == 0: args.is_master_node = True else: @@ -415,10 +414,6 @@ def _add_easynlp_args(parser: argparse.ArgumentParser): default=-1, type=int, help='Count of GPUs in each worker') - group.add_argument('--worker_cpu', - default=-1, - type=int, - help='Count of CPUs in each worker') group.add_argument('--master_port', default=23456, type=int, From 9e240d9c78d0deed1b28bf7d3d1d44db48d23f63 Mon Sep 17 00:00:00 2001 From: chywang Date: Wed, 14 Dec 2022 14:46:04 +0800 Subject: [PATCH 044/101] Update arguments.py --- easynlp/utils/arguments.py | 1 + 1 file changed, 1 insertion(+) diff --git a/easynlp/utils/arguments.py b/easynlp/utils/arguments.py index 0bbedd8c..aea1c51e 100644 --- a/easynlp/utils/arguments.py +++ b/easynlp/utils/arguments.py @@ -284,6 +284,7 @@ def _add_easynlp_args(parser: argparse.ArgumentParser): help='local rank passed from distributed launcher.') group.add_argument('--checkpoint_dir', + '--checkpoint_path', default=None, type=str, help='The model checkpoint dir.') From 9e790cf8f1bb3e3803054d619841a1e700751c54 Mon Sep 17 00:00:00 2001 From: chywang Date: Wed, 14 Dec 2022 22:14:25 +0800 Subject: [PATCH 045/101] Update cli.py update cli --- easynlp/cli.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/easynlp/cli.py b/easynlp/cli.py index fe5740a5..6fb6fca6 100644 --- a/easynlp/cli.py +++ b/easynlp/cli.py @@ -27,8 +27,10 @@ def main(): os.environ['PYTHONUNBUFFERED'] = '1' + # TODO: Need to modify here - os.environ['PATH'] = '/opt/conda/envs/python3.6/bin:' + os.environ['PATH'] + # os.environ['PATH'] = '/opt/conda/envs/python3.6/bin:' + os.environ['PATH'] + set_variables_for_cli() args = get_args() if args.user_script is not None and args.user_entry_file is not None: From bdc8a826f97fab974ac896fcfb8bb68e6f41632b Mon Sep 17 
00:00:00 2001 From: deplay <1150696014@qq.com> Date: Mon, 19 Dec 2022 14:09:23 +0800 Subject: [PATCH 046/101] fix text2video_retrieval tutorial --- examples/text2video_retrieval/README.md | 1 + .../preprocess_video_frame.py | 2 ++ .../preprocess_video_frame.sh | 30 ++++++++----------- .../run_clip_local_appzoo.sh | 14 ++++----- .../run_clip_local_user_defined.sh | 14 ++++----- 5 files changed, 30 insertions(+), 31 deletions(-) diff --git a/examples/text2video_retrieval/README.md b/examples/text2video_retrieval/README.md index becb9002..ed7be4ef 100644 --- a/examples/text2video_retrieval/README.md +++ b/examples/text2video_retrieval/README.md @@ -3,6 +3,7 @@ ### 准备工作 * 安装好EasyNLP * 进入目录 ./examples/text2video_retrieval +* 下载并生成数据 sh preprocess_video_frame.sh ### 数据格式 对于train与evaluate 数据格式为制表符分隔的两列 文本\t视频提取帧存放路径 diff --git a/examples/text2video_retrieval/preprocess_video_frame.py b/examples/text2video_retrieval/preprocess_video_frame.py index 4de78a68..a3231fef 100644 --- a/examples/text2video_retrieval/preprocess_video_frame.py +++ b/examples/text2video_retrieval/preprocess_video_frame.py @@ -76,6 +76,8 @@ def extraction_process_save_path(csv_dir, json_dir, video_dir, frame_num, frame_ image_list = extract_video_frames(video_path, frame_num).tolist() # except: print(video_id) + print(video_path) + import pdb;pdb.set_trace() for frame_idx in range(len(image_list)): image = image_list[frame_idx] diff --git a/examples/text2video_retrieval/preprocess_video_frame.sh b/examples/text2video_retrieval/preprocess_video_frame.sh index c04ceb39..02d75719 100644 --- a/examples/text2video_retrieval/preprocess_video_frame.sh +++ b/examples/text2video_retrieval/preprocess_video_frame.sh @@ -1,25 +1,21 @@ # Download data -if [ ! -f ./msrvtt_data/MSRVTT_data.json ]; then - wget https://github.com/ArrowLuo/CLIP4Clip/releases/download/v0.0/msrvtt_data.zip - wget https://www.robots.ox.ac.uk/~maxbain/frozen-in-time/data/MSRVTT.zip - unzip msrvtt_data.zip - unzip MSRVTT.zip -d msrvtt_data/ - rm msrvtt_data.zip - rm MSRVTT.zip +if [ ! 
-f ./msrvtt_subset/MSRVTT_data.json ]; then + wget https://atp-modelzoo-sh.oss-cn-shanghai.aliyuncs.com/release/tutorials/text2video_retrieval/MSRVTT_subset.zip + unzip MSRVTT_subset.zip -d msrvtt_subset/ fi python preprocess_video_frame.py \ - --csv_dir=./msrvtt_data/MSRVTT_train.9k.csv \ - --json_dir=./msrvtt_data/MSRVTT_data.json \ - --video_dir=./msrvtt_data/MSRVTT/videos/all \ + --csv_dir=./msrvtt_subset/MSRVTT_train_subset_100.csv \ + --json_dir=./msrvtt_subset/MSRVTT_data.json \ + --video_dir=./msrvtt_subset/MSRVTT_video_subset \ --frame_num=12 \ - --frame_dir=./msrvtt_data/MSRVTT_extracted_frames \ - --output=./msrvtt_data/MSRVTT_train.tsv + --frame_dir=./msrvtt_subset/MSRVTT_extracted_frames \ + --output=./msrvtt_subset/MSRVTT_train.tsv python preprocess_video_frame.py \ - --csv_dir=./msrvtt_data/MSRVTT_JSFUSION_test.csv \ - --json_dir=./msrvtt_data/MSRVTT_data.json \ - --video_dir=./msrvtt_data/MSRVTT/videos/all \ + --csv_dir=./msrvtt_subset/MSRVTT_test_subset_100.csv \ + --json_dir=./msrvtt_subset/MSRVTT_data.json \ + --video_dir=./msrvtt_subset/MSRVTT_video_subset \ --frame_num=12 \ - --frame_dir=./msrvtt_data/MSRVTT_extracted_frames \ - --output=./msrvtt_data/MSRVTT_test_all.tsv + --frame_dir=./msrvtt_subset/MSRVTT_extracted_frames \ + --output=./msrvtt_subset/MSRVTT_test.tsv diff --git a/examples/text2video_retrieval/run_clip_local_appzoo.sh b/examples/text2video_retrieval/run_clip_local_appzoo.sh index 8988de1c..e8bd1afe 100644 --- a/examples/text2video_retrieval/run_clip_local_appzoo.sh +++ b/examples/text2video_retrieval/run_clip_local_appzoo.sh @@ -6,7 +6,7 @@ if [ "$mode" = "train_en" ]; then easynlp \ --mode train \ --worker_gpu=1 \ - --tables=./msrvtt_data/MSRVTT_train.tsv,./msrvtt_data/MSRVTT_test_1to1_1000.tsv \ + --tables=./msrvtt_subset/MSRVTT_train.tsv,./msrvtt_subset/MSRVTT_test.tsv \ --input_schema=text:str:1,image:str:1 \ --first_sequence=text \ --second_sequence=image \ @@ -20,13 +20,13 @@ if [ "$mode" = "train_en" ]; then --micro_batch_size=32 \ --app_name=clip4clip \ --save_all_checkpoints \ - --user_defined_parameters='pretrain_model_name_or_path=alibaba-pai/pai-clip-commercial-base-en' + --user_defined_parameters='pretrain_model_name_or_path=clip_vit_base_patch32' elif [ "$mode" = "evaluate_en" ]; then easynlp \ --mode evaluate \ --worker_gpu=1 \ - --tables=./msrvtt_data/MSRVTT_test_1to1_1000.tsv \ + --tables=./msrvtt_subset/MSRVTT_test.tsv \ --input_schema=text:str:1,image:str:1 \ --first_sequence=text \ --second_sequence=image \ @@ -42,10 +42,10 @@ elif [ "$mode" = "predict_en_text" ]; then easynlp \ --mode predict \ --worker_gpu=1 \ - --tables=./msrvtt_data/MSRVTT_test_1to1_1000_part_text.tsv \ + --tables=./msrvtt_subset/MSRVTT_test_part_text.tsv \ --input_schema=text:str:1 \ --output_schema=text_feat \ - --outputs ./msrvtt_data/MSRVTT_test_1to1_1000_text_feat.tsv \ + --outputs ./msrvtt_subset/MSRVTT_test_text_feat.tsv \ --first_sequence=text \ --checkpoint_dir=./clip4clip_en_model/ \ --random_seed=42 \ @@ -59,10 +59,10 @@ elif [ "$mode" = "predict_en_video" ]; then easynlp \ --mode predict \ --worker_gpu=1 \ - --tables=./msrvtt_data/MSRVTT_test_1to1_1000_part_video.tsv \ + --tables=./msrvtt_subset/MSRVTT_test_part_video.tsv \ --input_schema=image:str:1 \ --output_schema=video_feat \ - --outputs ./msrvtt_data/MSRVTT_test_1to1_1000_video_feat.tsv \ + --outputs ./msrvtt_subset/MSRVTT_test_video_feat.tsv \ --first_sequence=image \ --checkpoint_dir=./clip4clip_en_model/ \ --random_seed=42 \ diff --git 
a/examples/text2video_retrieval/run_clip_local_user_defined.sh b/examples/text2video_retrieval/run_clip_local_user_defined.sh index 0237c240..c22ddbbe 100644 --- a/examples/text2video_retrieval/run_clip_local_user_defined.sh +++ b/examples/text2video_retrieval/run_clip_local_user_defined.sh @@ -13,7 +13,7 @@ if [ "$mode" = "train_en" ]; then python -m torch.distributed.launch $DISTRIBUTED_ARGS ./main.py \ --mode train \ --worker_gpu=1 \ - --tables=./msrvtt_data/MSRVTT_train.tsv,./msrvtt_data/MSRVTT_test_1to1_1000.tsv \ + --tables=./msrvtt_subset/MSRVTT_train.tsv,./msrvtt_subset/MSRVTT_test.tsv \ --input_schema=text:str:1,image:str:1 \ --first_sequence=text \ --second_sequence=image \ @@ -27,13 +27,13 @@ if [ "$mode" = "train_en" ]; then --micro_batch_size=32 \ --app_name=clip4clip \ --save_all_checkpoints \ - --user_defined_parameters='pretrain_model_name_or_path=alibaba-pai/pai-clip-commercial-base-en' + --user_defined_parameters='pretrain_model_name_or_path=clip_vit_base_patch32' elif [ "$mode" = "evaluate_en" ]; then python -m torch.distributed.launch $DISTRIBUTED_ARGS ./main.py \ --mode evaluate \ --worker_gpu=1 \ - --tables=./msrvtt_data/MSRVTT_test_1to1_1000.tsv \ + --tables=./msrvtt_subset/MSRVTT_test.tsv \ --input_schema=text:str:1,image:str:1 \ --first_sequence=text \ --second_sequence=image \ @@ -49,10 +49,10 @@ elif [ "$mode" = "predict_en_text" ]; then python -m torch.distributed.launch $DISTRIBUTED_ARGS ./main.py \ --mode predict \ --worker_gpu=1 \ - --tables=./msrvtt_data/MSRVTT_test_1to1_1000_part_text.tsv \ + --tables=./msrvtt_data/MSRVTT_test_part_text.tsv \ --input_schema=text:str:1 \ --output_schema=text_feat \ - --outputs ./msrvtt_data/MSRVTT_test_1to1_1000_text_feat.tsv \ + --outputs ./msrvtt_data/MSRVTT_test_text_feat.tsv \ --first_sequence=text \ --checkpoint_dir=./clip4clip_en_model/ \ --random_seed=42 \ @@ -66,10 +66,10 @@ elif [ "$mode" = "predict_en_video" ]; then python -m torch.distributed.launch $DISTRIBUTED_ARGS ./main.py \ --mode predict \ --worker_gpu=1 \ - --tables=./msrvtt_data/MSRVTT_test_1to1_1000_part_video.tsv \ + --tables=./msrvtt_data/MSRVTT_test_part_video.tsv \ --input_schema=image:str:1 \ --output_schema=video_feat \ - --outputs ./msrvtt_data/MSRVTT_test_1to1_1000_video_feat.tsv \ + --outputs ./msrvtt_data/MSRVTT_test_video_feat.tsv \ --first_sequence=image \ --checkpoint_dir=./clip4clip_en_model/ \ --random_seed=42 \ From 9d530c78657bbdcac22178dff0e1b80e30f73f81 Mon Sep 17 00:00:00 2001 From: deplay <1150696014@qq.com> Date: Mon, 19 Dec 2022 17:17:35 +0800 Subject: [PATCH 047/101] fix text2video_retrieval tutorial --- examples/text2video_retrieval/preprocess_video_frame.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/examples/text2video_retrieval/preprocess_video_frame.py b/examples/text2video_retrieval/preprocess_video_frame.py index a3231fef..1dd47eb9 100644 --- a/examples/text2video_retrieval/preprocess_video_frame.py +++ b/examples/text2video_retrieval/preprocess_video_frame.py @@ -89,6 +89,11 @@ def extraction_process_save_path(csv_dir, json_dir, video_dir, frame_num, frame_ with open(output, 'w') as of: of.write('\n'.join(output_file)) + if 'test' in output: + with open('./msrvtt_subset/MSRVTT_test_part_text.tsv', 'w') as of: + of.write('\n'.join([o.split('\t')[0] for o in output_file])) + with open('./msrvtt_subset/MSRVTT_test_part_video.tsv', 'w') as of: + of.write('\n'.join([o.split('\t')[1] for o in output_file])) print("Finished processing {} videos in total.".format(len(video_ids))) if __name__ == '__main__': From 
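The text2video_retrieval preprocessing above samples a fixed number of frames per MSR-VTT video (`--frame_num=12`) before writing the `text \t frame-path` TSVs, and `requirements_video.txt` adds `decord` for video decoding. A minimal sketch of uniform frame sampling with decord; the video path is a placeholder and the repository's `extract_video_frames` may sample or post-process differently:

    # Uniformly sample frame_num frames from one video with decord.
    # 'some_video.mp4' is a placeholder; the repo's own sampling may differ.
    import numpy as np
    from decord import VideoReader

    def extract_video_frames(video_path, frame_num=12):
        vr = VideoReader(video_path)
        indices = np.linspace(0, len(vr) - 1, frame_num).astype(int).tolist()
        return vr.get_batch(indices).asnumpy()   # shape (frame_num, H, W, 3)

    frames = extract_video_frames('some_video.mp4', frame_num=12)
    print(frames.shape)
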
6fb952e8b019d633d42026c4e4e99e62b7fb9853 Mon Sep 17 00:00:00 2001 From: root <815451052@qq.com> Date: Wed, 21 Dec 2022 15:57:19 +0800 Subject: [PATCH 048/101] fix decoder-only finetune --- .../appzoo/sequence_classification/data.py | 2 +- easynlp/appzoo/sequence_generation/data.py | 3 +- .../appzoo/sequence_generation/evaluator.py | 22 +-- .../mg_seq2seq/eval_utils.py | 4 +- .../mg_seq2seq/evaluate.py | 19 +-- easynlp/appzoo/sequence_generation/model.py | 48 ++++--- .../appzoo/sequence_generation/predictor.py | 12 +- easynlp/core/trainer.py | 18 +-- easynlp/utils/statistics.py | 2 +- ...n_train_eval_predict_user_defined_local.sh | 28 ++-- .../sequence_generation/main.py | 6 +- .../run_user_defined_local_en.sh | 2 +- .../run_user_defined_local_zh-chat.sh | 128 ++++++++++++++++++ .../run_user_defined_local_zh.sh | 2 +- 14 files changed, 216 insertions(+), 80 deletions(-) create mode 100644 examples/appzoo_tutorials/sequence_generation/run_user_defined_local_zh-chat.sh diff --git a/easynlp/appzoo/sequence_classification/data.py b/easynlp/appzoo/sequence_classification/data.py index 2f07b8ad..c5720ab5 100644 --- a/easynlp/appzoo/sequence_classification/data.py +++ b/easynlp/appzoo/sequence_classification/data.py @@ -48,7 +48,7 @@ def __init__(self, label_name=None, second_sequence=None, label_enumerate_values=None, - user_defined_parameters=None, + user_defined_parameters={}, *args, **kwargs): super().__init__(data_file, diff --git a/easynlp/appzoo/sequence_generation/data.py b/easynlp/appzoo/sequence_generation/data.py index 2ed2a1bd..d81480c0 100644 --- a/easynlp/appzoo/sequence_generation/data.py +++ b/easynlp/appzoo/sequence_generation/data.py @@ -158,7 +158,7 @@ def batch_fn(self, features): def generation_convert_single_example_to_feature(self, src_text, tgt_text, tokenizer, max_seq_len=128): if self.decoder_only and self.is_training: input_tokens = tokenizer.encode(src_text) - output_tokens = tokenizer.encode(tgt_text) + output_tokens = tokenizer.encode(tgt_text, max_length=self.max_decoder_length, truncation='only_first') if tokenizer.sep_token: input_ids = input_tokens[:self.max_seq_length-len(output_tokens[1:])] + output_tokens[1:] # input_ids = tokenizer.encode(src_text + ' %s ' % tokenizer.sep_token + tgt_text, max_length=max_seq_len+self.max_decoder_length, truncation='only_first') @@ -186,4 +186,5 @@ def generation_convert_single_example_to_feature(self, src_text, tgt_text, token 'src_text': src_text, 'tgt_text': tgt_text } + # print(features) return features diff --git a/easynlp/appzoo/sequence_generation/evaluator.py b/easynlp/appzoo/sequence_generation/evaluator.py index 939f1323..735c67fc 100644 --- a/easynlp/appzoo/sequence_generation/evaluator.py +++ b/easynlp/appzoo/sequence_generation/evaluator.py @@ -47,6 +47,7 @@ def __init__(self, valid_dataset, user_defined_parameters, **kwargs): self.num_return_sequences = int(self.user_defined_parameters.get("num_return_sequences", 5)) def evaluate(self, model): + model.eval() y_preds = list() y_trues = list() for i, batch in enumerate(tqdm(self.valid_loader)): @@ -64,17 +65,16 @@ def evaluate(self, model): eos_token_id = model._tokenizer.eos_token_id else: eos_token_id = model._tokenizer.sep_token_id - with torch.no_grad(): - gen = model.generate(input_ids=batch["input_ids"], - attention_mask=batch["attention_mask"], - num_beams=1, - min_length=self.min_decoder_length, - max_length=max_decoder_length, - early_stopping=True, - no_repeat_ngram_size=self.no_repeat_ngram_size, - num_return_sequences=1, - 
decoder_start_token_id=model._tokenizer.cls_token_id, - eos_token_id=eos_token_id) + gen = model.generate(input_ids=batch["input_ids"], + attention_mask=batch["attention_mask"], + num_beams=1, + min_length=self.min_decoder_length, + max_length=max_decoder_length, + early_stopping=True, + no_repeat_ngram_size=self.no_repeat_ngram_size, + num_return_sequences=1, + decoder_start_token_id=model._tokenizer.cls_token_id, + eos_token_id=eos_token_id) if self.decoder_only: pred_tmp=[model._tokenizer.decode(t[batch["attention_mask"][0].sum().item():], skip_special_tokens=True) for t in gen] else: diff --git a/easynlp/appzoo/sequence_generation/mg_seq2seq/eval_utils.py b/easynlp/appzoo/sequence_generation/mg_seq2seq/eval_utils.py index 1355be30..73c0d137 100755 --- a/easynlp/appzoo/sequence_generation/mg_seq2seq/eval_utils.py +++ b/easynlp/appzoo/sequence_generation/mg_seq2seq/eval_utils.py @@ -92,8 +92,8 @@ def metrics_func(model, epoch, output_predictions=False, summary_writer=None): if output_predictions and torch.distributed.get_rank() == 0: filename = os.path.join(args.log_dir, name + '.jsonl') output_func(predictions, examples, filename) - if args.mode == 'predict': - continue + if args.mode == 'predict': + continue total_count = len(predictions) single_dict = {key: metric(predictions, labels, examples) for key, metric in metric_dict.items()} output_str = ' > |epoch: {}| metrics for {}: total {}'.format(epoch, name, total_count) diff --git a/easynlp/appzoo/sequence_generation/mg_seq2seq/evaluate.py b/easynlp/appzoo/sequence_generation/mg_seq2seq/evaluate.py index 823dc471..9cdabe56 100755 --- a/easynlp/appzoo/sequence_generation/mg_seq2seq/evaluate.py +++ b/easynlp/appzoo/sequence_generation/mg_seq2seq/evaluate.py @@ -177,8 +177,9 @@ def rouge_metric(predictions, labels, examples, metric="rouge-1", duplicate_rate # dealing with Chinese en = any([isEnglish(i) for i in ref_list[:5]]) # en = [' ' in i.strip() for i in ref_list].count(True) > len(ref_list) * 0.95 + if not en: - predictions = [' '.join(list(j)) for j in predictions] + predictions = sum([[' '.join(list(j)) for j in i] for i in predictions], []) for prediction in predictions: buf = [] @@ -194,15 +195,9 @@ def rouge_metric(predictions, labels, examples, metric="rouge-1", duplicate_rate buf = remove_duplicate(buf, duplicate_rate) line = "\n".join(buf) pred_list.append(line) - # if torch.distributed.get_rank() == 0: - # import json - # with open("./results.json", "w") as output: - # for ref, pred in zip(ref_list, pred_list): - # if en: - # output.write(json.dumps({"ref": ref, "pred": pred}) + "\n") - # else: - # output.write(json.dumps({"ref": ref, "pred": ''.join(pred.split())}, ensure_ascii=False) + "\n") - + + print('Example of the predicted sequences.') + print(pred_list[0]) rouge = Rouge() if en: scores = rouge.get_scores(pred_list, ref_list, avg=True) @@ -388,8 +383,8 @@ def evaluate(self, model, dataloader, example_dict, args): else: text = self.tokenizer.DecodeIds(text) predictions.append(text) - if args.num_return_sequences != 1: - predictions = [predictions] + # if args.num_return_sequences != 1: + predictions = [predictions] for uid, prediction in zip(uid_list, predictions): local_predictions[uid] = prediction if (idx + 1) % args.log_interval == 0: diff --git a/easynlp/appzoo/sequence_generation/model.py b/easynlp/appzoo/sequence_generation/model.py index bdec0f73..05f0ddea 100644 --- a/easynlp/appzoo/sequence_generation/model.py +++ b/easynlp/appzoo/sequence_generation/model.py @@ -5,6 +5,7 @@ import re from ...modelzoo 
import AutoConfig, AutoModel, AutoTokenizer, AutoModelForSeq2SeqLM, BertTokenizer, GPT2LMHeadModel from ..application import Application +from ...utils import losses def sequence_padding(inputs, length=None, padding=0): """Padding the sequence to same length @@ -136,17 +137,15 @@ def __init__(self,pretrained_model_name_or_path,user_defined_parameters=None, ** self._model=AutoModelForSeq2SeqLM.from_pretrained(local_path,state_dict=state_dict_without_prefix) - self.loss_fct = torch.nn.CrossEntropyLoss(ignore_index=-100) - def forward(self, inputs): if self.is_gpt2 or 'bloom' in self.pretrained_model_name_or_path: prob = self._model(input_ids=inputs["input_ids"], - attention_mask=inputs["attention_mask"])[0] + attention_mask=inputs["attention_mask"])[0] else: prob = self._model(input_ids=inputs["input_ids"], decoder_input_ids=inputs["decoder_input_ids"], attention_mask=inputs["attention_mask"], - decoder_attention_mask=inputs["decoder_attention_mask"])[0] + decoder_attention_mask=inputs["decoder_attention_mask"])[0] slice_len=prob.size()[1] label_len=inputs['decoder_attention_mask'].size()[1] if label_len self.evaluator.best_valid_score: - logger.info( - 'Saving best model to %s...' % os.path.join( - args.checkpoint_dir, 'pytorch_model.bin')) - self.save_checkpoint(save_best=True) - self.evaluator.best_valid_score = self._eval_scores[0][ - 1] + + # if self._eval_scores[0][ + # 1] > self.evaluator.best_valid_score: + logger.info( + 'Saving best model to %s...' % os.path.join( + args.checkpoint_dir, 'pytorch_model.bin')) + self.save_checkpoint(save_best=True) + self.evaluator.best_valid_score = self._eval_scores[0][ + 1] + logger.info('Best score: {}'.format( self.evaluator.best_valid_score)) logger.info('Learning rate: {:.8f}'.format( diff --git a/easynlp/utils/statistics.py b/easynlp/utils/statistics.py index d0771bd5..9d328e14 100644 --- a/easynlp/utils/statistics.py +++ b/easynlp/utils/statistics.py @@ -55,7 +55,7 @@ def output(self, step, epoch, learning_rate): """ logger.info( 'Epoch [{:2}/{:2}], step [{}/{}], lr {:.6f}, {:.2f} s'.format( - epoch, self.epoch_num, step, self.total_training_steps, + epoch + 1, self.epoch_num, step, self.total_training_steps, learning_rate, self.elapsed_time())) for key, val in self.loss_dict.items(): diff --git a/examples/appzoo_tutorials/sequence_classification/bert_classify/run_train_eval_predict_user_defined_local.sh b/examples/appzoo_tutorials/sequence_classification/bert_classify/run_train_eval_predict_user_defined_local.sh index d7ec90e7..fd06e853 100644 --- a/examples/appzoo_tutorials/sequence_classification/bert_classify/run_train_eval_predict_user_defined_local.sh +++ b/examples/appzoo_tutorials/sequence_classification/bert_classify/run_train_eval_predict_user_defined_local.sh @@ -9,7 +9,7 @@ if [ ! 
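For decoder-only checkpoints (GPT-2, BLOOM), the updated `data.py` above builds a single training sequence by encoding source and target separately, truncating the target to `max_decoder_length`, dropping its leading special token, and clipping the source so the concatenation stays within `max_seq_length`. A minimal sketch of that construction; the tokenizer checkpoint, example texts and length limits are illustrative assumptions:

    # Sketch of the decoder-only input construction from data.py above.
    # Tokenizer checkpoint, texts and length limits are illustrative assumptions.
    from transformers import BertTokenizer

    def build_decoder_only_input(tokenizer, src_text, tgt_text,
                                 max_seq_length=128, max_decoder_length=64):
        input_tokens = tokenizer.encode(src_text)
        output_tokens = tokenizer.encode(tgt_text, max_length=max_decoder_length,
                                         truncation='only_first')
        # drop the target's leading [CLS] and keep the total within max_seq_length
        input_ids = input_tokens[:max_seq_length - len(output_tokens[1:])] + output_tokens[1:]
        attention_mask = [1] * len(input_ids)
        return input_ids, attention_mask

    if __name__ == '__main__':
        tok = BertTokenizer.from_pretrained('bert-base-uncased')
        ids, mask = build_decoder_only_input(tok, 'hello there', 'hi, nice to meet you')
        print(len(ids), len(mask))
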
-f ./dev.tsv ]; then fi MASTER_ADDR=localhost -MASTER_PORT=6009 +MASTER_PORT=$(shuf -n 1 -i 10000-65535) GPUS_PER_NODE=1 NNODES=1 NODE_RANK=0 @@ -20,7 +20,7 @@ mode=$2 if [ "$mode" = "train" ]; then - python -m torch.distributed.launch $DISTRIBUTED_ARGS main.py \ + python -m torch.distributed.launch $DISTRIBUTED_ARGS examples/appzoo_tutorials/sequence_classification/bert_classify/main.py \ --mode $mode \ --worker_gpu=1 \ --tables=train.tsv,dev.tsv \ @@ -31,14 +31,14 @@ if [ "$mode" = "train" ]; then --label_enumerate_values=0,1 \ --checkpoint_dir=./classification_model/ \ --learning_rate=3e-5 \ - --epoch_num=3 \ + --epoch_num=300 \ --random_seed=42 \ - --save_checkpoint_steps=50 \ + --save_checkpoint_steps=500 \ --sequence_length=128 \ - --micro_batch_size=32 \ + --micro_batch_size=16 \ --app_name=text_classify \ --user_defined_parameters=' - pretrain_model_name_or_path=bert-base-uncased + pretrain_model_name_or_path=roberta-large-en ' elif [ "$mode" = "evaluate" ]; then @@ -59,17 +59,17 @@ elif [ "$mode" = "evaluate" ]; then elif [ "$mode" = "predict" ]; then - python -m torch.distributed.launch $DISTRIBUTED_ARGS main.py \ + python -m torch.distributed.launch $DISTRIBUTED_ARGS examples/appzoo_tutorials/sequence_classification/bert_classify/main.py \ --mode=$mode \ --worker_gpu=1 \ - --tables=dev.tsv \ - --outputs=dev.pred.tsv \ - --input_schema=label:str:1,sid1:str:1,sid2:str:1,sent1:str:1,sent2:str:1 \ - --output_schema=predictions,probabilities,logits,output \ - --append_cols=label \ + --tables=nlu_text.tsv \ + --outputs=sentiment.pred.tsv \ + --input_schema=sent1:str:1,content:str:1,pos:str:1,ner:str:1 \ + --output_schema=predictions \ + --append_cols=sent1,pos \ --first_sequence=sent1 \ - --second_sequence=sent2 \ - --checkpoint_path=./classification_model/ \ + --second_sequence=sent1 \ + --checkpoint_path=./classification_model \ --micro_batch_size 32 \ --sequence_length=128 \ --app_name=text_classify diff --git a/examples/appzoo_tutorials/sequence_generation/main.py b/examples/appzoo_tutorials/sequence_generation/main.py index 379fa978..5edf4072 100644 --- a/examples/appzoo_tutorials/sequence_generation/main.py +++ b/examples/appzoo_tutorials/sequence_generation/main.py @@ -40,12 +40,8 @@ try: from apex.normalization.fused_layer_norm import FusedLayerNorm as LayerNorm except ModuleNotFoundError: - print('APEX is required but not found. Installing Apex...') - os.system('git clone https://github.com/NVIDIA/apex') - os.system('cd apex && pip install -v --disable-pip-version-check --no-cache-dir --global-option="--cpp_ext" --global-option="--cuda_ext" ./') - os.system('rm -rf apex') print('*'*80) - print('APEX is installed. Please run the code again.') + print('APEX is required. 
Please refer to examples/appzoo_tutorials/sequence_generation/README.md.') print('*'*80) from easynlp.appzoo.sequence_generation.mg_seq2seq.finetune import main from easynlp.modelzoo.mg_utils.pretrain_glm import initialize_distributed, set_random_seed diff --git a/examples/appzoo_tutorials/sequence_generation/run_user_defined_local_en.sh b/examples/appzoo_tutorials/sequence_generation/run_user_defined_local_en.sh index 631c3824..64bc74b8 100644 --- a/examples/appzoo_tutorials/sequence_generation/run_user_defined_local_en.sh +++ b/examples/appzoo_tutorials/sequence_generation/run_user_defined_local_en.sh @@ -31,7 +31,7 @@ if [ "$mode" = "predict" ]; then --output_schema=predictions,beams \ --append_cols=title,content \ --first_sequence=content \ - --checkpoint_dir=./finetuned_en_model/ \ + --checkpoint_dir=/root/.easynlp/modelzoo/alibaba-pai/pegasus-summary-generation-en \ --micro_batch_size 32 \ --sequence_length 512 \ --user_defined_parameters 'language=en copy=false max_encoder_length=512 min_decoder_length=12 max_decoder_length=64 no_repeat_ngram_size=2 num_beams=5 num_return_sequences=5' diff --git a/examples/appzoo_tutorials/sequence_generation/run_user_defined_local_zh-chat.sh b/examples/appzoo_tutorials/sequence_generation/run_user_defined_local_zh-chat.sh new file mode 100644 index 00000000..68017279 --- /dev/null +++ b/examples/appzoo_tutorials/sequence_generation/run_user_defined_local_zh-chat.sh @@ -0,0 +1,128 @@ +export CUDA_VISIBLE_DEVICES=$1 + +if [ ! -f ./chat_train.tsv ]; then + wget http://atp-modelzoo-sh.oss-cn-shanghai.aliyuncs.com/release/tutorials/generation/chat_train.tsv +fi + +if [ ! -f ./chat_dev.tsv ]; then + wget http://atp-modelzoo-sh.oss-cn-shanghai.aliyuncs.com/release/tutorials/generation/chat_dev.tsv +fi + +if [ ! -f ./config_ds_glm_large_generation.json ]; then + wget https://atp-modelzoo-sh.oss-cn-shanghai.aliyuncs.com/release/easynlp_modelzoo/public/mg/config_ds_glm_large_generation.json + wget https://atp-modelzoo-sh.oss-cn-shanghai.aliyuncs.com/release/easynlp_modelzoo/public/mg/config_ds_glm_10B_generation.json +fi + +MASTER_PORT=$(shuf -n 1 -i 10000-65535) +MASTER_ADDR=localhost +GPUS_PER_NODE=2 +NNODES=1 +NODE_RANK=0 + +DISTRIBUTED_ARGS="--nproc_per_node $GPUS_PER_NODE --nnodes $NNODES --node_rank $NODE_RANK --master_addr $MASTER_ADDR --master_port $MASTER_PORT" + +mode=$2 + +MODEL_ARGS="--block-lm \ + --cloze-eval \ + --task-mask \ + --fix-command-token" + +TRAIN_ARGS="--lr-decay-style linear \ + --label-smoothing 0.1" + +COMMON_ARGS="--save-interval 10000 \ + --log-interval 50 \ + --eval-interval 3000 \ + --eval-iters 100" + +TASK_ARGS="--length-penalty 0.7 \ + --select-topk \ + --eval-batch-size 1" + +TASK_NAME=chinesegen +MEGATRON_PARAMETERS="--deepspeed \ + --deepspeed_config ./config_ds_glm_large_generation.json \ + --finetune \ + --task ${TASK_NAME} \ + --data-dir ./ \ + --checkpoint-activations \ + --no-load-lr-scheduler \ + --num-workers 1 \ + --model-parallel-size 1 \ + $MODEL_ARGS \ + $TRAIN_ARGS \ + $COMMON_ARGS \ + $TASK_ARGS \ + --fp16 \ + --overwrite + " + +# MEGATRON_PARAMETERS is only valid for megatron models such as mg/glm-large-chinese + +if [ "$mode" = "predict" ]; then + + python -m torch.distributed.launch $DISTRIBUTED_ARGS examples/appzoo_tutorials/sequence_generation/main.py \ + --app_name=sequence_generation \ + --mode $mode \ + --worker_gpu=1 \ + --tables=./chat_dev.tsv \ + --outputs=./chat_dev_gpt2.tsv \ + --input_schema=source:str:1,target:str:1 \ + --output_schema=predictions,beams \ + --append_cols=target,source \ + 
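At prediction time the decoder-only path generates with beam search and keeps only the tokens after the prompt: the sequence-generation evaluator above decodes from position `attention_mask.sum()` onward, and the chat script requests diverse beams (`num_beams=15`, `num_beam_groups=5`, `diversity_penalty=1.0`). A minimal sketch of both ideas with the Hugging Face `generate` API; the checkpoint name, prompt text and length limits are placeholders, and the exact EasyNLP call differs:

    # Sketch: group (diverse) beam search plus stripping the prompt from
    # decoder-only outputs. Checkpoint, prompt and limits are placeholders.
    import torch
    from transformers import AutoModelForCausalLM, AutoTokenizer

    tok = AutoTokenizer.from_pretrained('gpt2')
    model = AutoModelForCausalLM.from_pretrained('gpt2')

    enc = tok('A short prompt:', return_tensors='pt')
    prompt_len = enc['attention_mask'][0].sum().item()

    with torch.no_grad():
        gen = model.generate(
            input_ids=enc['input_ids'],
            attention_mask=enc['attention_mask'],
            num_beams=15,
            num_beam_groups=5,          # group beam search
            diversity_penalty=1.0,
            num_return_sequences=5,
            min_length=prompt_len + 8,  # lengths include the prompt for decoder-only models
            max_length=prompt_len + 64,
            no_repeat_ngram_size=2,
            early_stopping=True,
            pad_token_id=tok.eos_token_id,
        )

    # keep only the newly generated tokens, as the evaluator above does
    predictions = [tok.decode(t[prompt_len:], skip_special_tokens=True) for t in gen]
    print(predictions[0])
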
--first_sequence=source \ + --checkpoint_dir=./finetuned_zh_model-chat \ + --micro_batch_size=32 \ + --sequence_length=128 \ + $MEGATRON_PARAMETERS \ + --user_defined_parameters 'copy=false language=zh max_encoder_length=512 min_decoder_length=8 max_decoder_length=128 no_repeat_ngram_size=2 num_beams=15 num_return_sequences=5 num_beam_groups=5 diversity_penalty=1.0' + +elif [ "$mode" = "train" ]; then + + python -m torch.distributed.launch $DISTRIBUTED_ARGS examples/appzoo_tutorials/sequence_generation/main.py \ + --app_name=sequence_generation \ + --mode=$mode \ + --worker_gpu=1 \ + --tables=./chat_train.tsv,./chat_dev.tsv \ + --input_schema=source:str:1,target:str:1 \ + --first_sequence=source \ + --second_sequence=target \ + --label_name=target \ + --checkpoint_dir=./finetuned_zh_model-chat \ + --learning_rate 1e-4 \ + --micro_batch_size 64 \ + --sequence_length 128 \ + --epoch_num 3 \ + $MEGATRON_PARAMETERS \ + --save_checkpoint_steps 3000 \ + --export_tf_checkpoint_type none \ + --user_defined_parameters 'pretrain_model_name_or_path=alibaba-pai/gpt2-chitchat-zh language=zh copy=false max_encoder_length=128 min_decoder_length=4 max_decoder_length=128 no_repeat_ngram_size=2 num_beams=5 num_return_sequences=5' + +# alibaba-pai/mt5-title-generation-zh +# hfl/randeng-summary-generation-base-zh +# hfl/randeng-summary-generation-large-zh +# alibaba-pai/randeng-title-generation-base-zh +# alibaba-pai/randeng-title-generation-large-zh +# mg/glm-generation-large-zh + +elif [ "$mode" = "evaluate" ]; then + + python -m torch.distributed.launch $DISTRIBUTED_ARGS examples/appzoo_tutorials/sequence_generation/main.py \ + --app_name=sequence_generation \ + --mode=$mode \ + --worker_gpu=1 \ + --tables=./question_gen_dev_small.tsv \ + --input_schema=target:str:1,source:str:1 \ + --first_sequence=source \ + --second_sequence=target \ + --label_name=target \ + --checkpoint_dir=./finetuned_zh_model-bartbase-question \ + --micro_batch_size=32 \ + --sequence_length=512 \ + --export_tf_checkpoint_type none \ + $MEGATRON_PARAMETERS \ + --user_defined_parameters 'copy=false language=zh max_encoder_length=512 min_decoder_length=8 max_decoder_length=64 no_repeat_ngram_size=2 num_beams=5 num_return_sequences=5' + +fi + diff --git a/examples/appzoo_tutorials/sequence_generation/run_user_defined_local_zh.sh b/examples/appzoo_tutorials/sequence_generation/run_user_defined_local_zh.sh index 0302e147..fabcae1b 100644 --- a/examples/appzoo_tutorials/sequence_generation/run_user_defined_local_zh.sh +++ b/examples/appzoo_tutorials/sequence_generation/run_user_defined_local_zh.sh @@ -58,7 +58,7 @@ MEGATRON_PARAMETERS="--deepspeed \ --overwrite " -# MEGATRON_PARAMETERS is only valid for megatron models such as mg/glm-large-chinese +# MEGATRON_PARAMETERS is only valid for megatron models such as mg/glm-generation-large-zh if [ "$mode" = "predict" ]; then From a0fdd60455d4a3ecfca56a59c0b1188b0f60584e Mon Sep 17 00:00:00 2001 From: momingTang Date: Thu, 29 Dec 2022 19:56:19 +0800 Subject: [PATCH 049/101] add code for XtremeCLIP --- examples/xtremeclip/README.md | 113 +++++ examples/xtremeclip/XtremeCLIP.png | Bin 0 -> 350378 bytes examples/xtremeclip/baselines/DATASET.md | 178 +++++++ examples/xtremeclip/baselines/chartheat.py | 20 + examples/xtremeclip/baselines/main_dtd.py | 212 ++++++++ .../xtremeclip/baselines/main_dtd_BINOR.py | 239 +++++++++ examples/xtremeclip/baselines/main_dtd_FF.py | 201 ++++++++ .../xtremeclip/baselines/main_dtd_LLDR.py | 223 ++++++++ .../xtremeclip/baselines/main_dtd_adapter.py | 218 ++++++++ 
.../xtremeclip/baselines/main_dtd_binorm.py | 208 ++++++++ .../xtremeclip/baselines/main_dtd_bitfit.py | 206 ++++++++ .../xtremeclip/baselines/main_dtd_mixout.py | 219 ++++++++ .../xtremeclip/baselines/main_dtd_self.py | 210 ++++++++ examples/xtremeclip/baselines/main_eurosat.py | 212 ++++++++ .../xtremeclip/baselines/main_eurosat_FF.py | 225 +++++++++ .../xtremeclip/baselines/main_eurosat_LLRD.py | 253 ++++++++++ .../baselines/main_eurosat_adapter.py | 229 +++++++++ .../baselines/main_eurosat_binor.py | 230 +++++++++ .../baselines/main_eurosat_bitfit.py | 240 +++++++++ .../main_eurosat_matching_ablation.py | 214 ++++++++ .../baselines/main_eurosat_mixout.py | 240 +++++++++ .../xtremeclip/baselines/main_eurosat_self.py | 223 ++++++++ examples/xtremeclip/baselines/main_fgvc.py | 212 ++++++++ examples/xtremeclip/baselines/main_fgvc_FF.py | 201 ++++++++ .../xtremeclip/baselines/main_fgvc_LLDR.py | 223 ++++++++ .../xtremeclip/baselines/main_fgvc_adapter.py | 218 ++++++++ .../xtremeclip/baselines/main_fgvc_binorm.py | 208 ++++++++ .../xtremeclip/baselines/main_fgvc_bitfit.py | 215 ++++++++ .../baselines/main_fgvc_matching_adaption.py | 212 ++++++++ .../main_fgvc_matching_adaption_vector.py | 211 ++++++++ .../xtremeclip/baselines/main_fgvc_mixout.py | 219 ++++++++ .../xtremeclip/baselines/main_imagenet.py | 168 +++++++ .../xtremeclip/baselines/main_imagenet_cl.py | 166 ++++++ .../baselines/main_imagenet_matching.py | 187 +++++++ examples/xtremeclip/baselines/main_itm.py | 452 +++++++++++++++++ examples/xtremeclip/baselines/main_itm_FF.py | 433 ++++++++++++++++ .../xtremeclip/baselines/main_itm_LLRD.py | 447 +++++++++++++++++ examples/xtremeclip/baselines/main_itm_V3.py | 450 +++++++++++++++++ examples/xtremeclip/baselines/main_itm_V4.py | 449 +++++++++++++++++ .../xtremeclip/baselines/main_itm_adapter.py | 474 ++++++++++++++++++ .../xtremeclip/baselines/main_itm_binor.py | 437 ++++++++++++++++ .../xtremeclip/baselines/main_itm_bitfit.py | 437 ++++++++++++++++ .../xtremeclip/baselines/main_itm_embsum.py | 404 +++++++++++++++ .../xtremeclip/baselines/main_itm_mixout.py | 445 ++++++++++++++++ .../xtremeclip/baselines/main_itm_probsum.py | 426 ++++++++++++++++ examples/xtremeclip/baselines/main_itm_v2.py | 455 +++++++++++++++++ examples/xtremeclip/baselines/main_itm_v5.py | 463 +++++++++++++++++ .../xtremeclip/baselines/main_oxfordpets.py | 212 ++++++++ .../baselines/main_oxfordpets_matching.py | 212 ++++++++ .../baselines/main_visualentailment_FF.py | 222 ++++++++ .../main_visualentailment_FF_1000.py | 222 ++++++++ .../main_visualentailment_FF_2000.py | 222 ++++++++ .../main_visualentailment_FF_5000.py | 222 ++++++++ .../main_visualentailment_FF_8000.py | 222 ++++++++ .../main_visualentailment_FF_9000.py | 222 ++++++++ .../baselines/main_visualentailment_LLRD.py | 225 +++++++++ .../main_visualentailment_ablation.py | 214 ++++++++ .../main_visualentailment_ablation_vector.py | 214 ++++++++ .../main_visualentailment_adapter.py | 253 ++++++++++ .../baselines/main_visualentailment_binorm.py | 228 +++++++++ .../baselines/main_visualentailment_bitfit.py | 228 +++++++++ .../baselines/main_visualentailment_mixout.py | 260 ++++++++++ .../main_visualentailment_tipadapter.py | 211 ++++++++ ...n_visualentailment_weightedLayerPooling.py | 270 ++++++++++ .../xtremeclip/baselines/main_vqa_BitFit.py | 215 ++++++++ examples/xtremeclip/baselines/main_vqa_FF.py | 216 ++++++++ .../xtremeclip/baselines/main_vqa_LLRD.py | 235 +++++++++ .../xtremeclip/baselines/main_vqa_ablation.py | 238 +++++++++ 
.../baselines/main_vqa_ablation_vector.py | 238 +++++++++ .../xtremeclip/baselines/main_vqa_adapter.py | 240 +++++++++ .../xtremeclip/baselines/main_vqa_binorm.py | 215 ++++++++ .../xtremeclip/baselines/main_vqa_mixout.py | 227 +++++++++ .../baselines/main_vqa_tipadapter.py | 204 ++++++++ examples/xtremeclip/baselines/mixout.py | 95 ++++ .../utils_eurosat_adaption_vector.py | 143 ++++++ .../baselines/utils_imagenet_matching.py | 148 ++++++ examples/xtremeclip/baselines/utils_itm.py | 213 ++++++++ .../baselines/utils_visualentailment_FF.py | 230 +++++++++ .../utils_visualentailment_ablation_vector.py | 230 +++++++++ .../utils_visualentailment_adapter.py | 216 ++++++++ .../utils_visualentailment_tipadapter.py | 231 +++++++++ .../baselines/utils_vqa_ablation_vector.py | 213 ++++++++ .../xtremeclip/baselines/utils_vqa_adapter.py | 215 ++++++++ .../baselines/utils_vqa_tipadapter.py | 213 ++++++++ examples/xtremeclip/clip/__init__.py | 1 + examples/xtremeclip/clip/clip.py | 238 +++++++++ examples/xtremeclip/clip/model.py | 443 ++++++++++++++++ examples/xtremeclip/clip/simple_tokenizer.py | 132 +++++ examples/xtremeclip/configs/caltech101.yaml | 31 ++ examples/xtremeclip/configs/dtd.yaml | 33 ++ examples/xtremeclip/configs/eurosat.yaml | 32 ++ examples/xtremeclip/configs/fgvc.yaml | 32 ++ examples/xtremeclip/configs/food101.yaml | 31 ++ examples/xtremeclip/configs/imagenet.yaml | 31 ++ examples/xtremeclip/configs/itm.yaml | 32 ++ .../xtremeclip/configs/oxford_flowers.yaml | 31 ++ examples/xtremeclip/configs/oxford_pets.yaml | 31 ++ .../xtremeclip/configs/stanford_cars.yaml | 31 ++ examples/xtremeclip/configs/sun397.yaml | 31 ++ examples/xtremeclip/configs/ucf101.yaml | 31 ++ .../xtremeclip/configs/visualentailment.yaml | 26 + examples/xtremeclip/configs/vqa.yaml | 33 ++ examples/xtremeclip/datasets/__init__.py | 28 ++ examples/xtremeclip/datasets/caltech101.py | 24 + examples/xtremeclip/datasets/dtd.py | 79 +++ examples/xtremeclip/datasets/eurosat.py | 51 ++ examples/xtremeclip/datasets/fgvc.py | 54 ++ examples/xtremeclip/datasets/food101.py | 24 + examples/xtremeclip/datasets/imagenet.py | 221 ++++++++ .../xtremeclip/datasets/oxford_flowers.py | 67 +++ examples/xtremeclip/datasets/oxford_pets.py | 125 +++++ examples/xtremeclip/datasets/stanford_cars.py | 48 ++ examples/xtremeclip/datasets/sun397.py | 50 ++ examples/xtremeclip/datasets/ucf101.py | 51 ++ examples/xtremeclip/datasets/utils.py | 378 ++++++++++++++ examples/xtremeclip/download_dtd.sh | 4 + examples/xtremeclip/download_eurosat.sh | 4 + examples/xtremeclip/download_fgvc.sh | 4 + examples/xtremeclip/download_ve.sh | 4 + examples/xtremeclip/download_vqa.sh | 4 + examples/xtremeclip/main_dtd_matching.py | 212 ++++++++ examples/xtremeclip/main_eurosat_matching.py | 223 ++++++++ examples/xtremeclip/main_fgvc_matching.py | 212 ++++++++ examples/xtremeclip/main_visualentailment.py | 213 ++++++++ examples/xtremeclip/main_vqa.py | 241 +++++++++ examples/xtremeclip/plot_images/plot.py | 31 ++ examples/xtremeclip/plot_images/plot_heat.py | 68 +++ examples/xtremeclip/plot_images/plot_v2.py | 36 ++ examples/xtremeclip/prepare_flickr30k.py | 45 ++ examples/xtremeclip/utils.py | 172 +++++++ examples/xtremeclip/utils_eurosat.py | 143 ++++++ examples/xtremeclip/utils_visualentailment.py | 230 +++++++++ examples/xtremeclip/utils_vqa.py | 213 ++++++++ 133 files changed, 25068 insertions(+) create mode 100644 examples/xtremeclip/README.md create mode 100644 examples/xtremeclip/XtremeCLIP.png create mode 100644 examples/xtremeclip/baselines/DATASET.md 
create mode 100644 examples/xtremeclip/baselines/chartheat.py create mode 100644 examples/xtremeclip/baselines/main_dtd.py create mode 100644 examples/xtremeclip/baselines/main_dtd_BINOR.py create mode 100644 examples/xtremeclip/baselines/main_dtd_FF.py create mode 100644 examples/xtremeclip/baselines/main_dtd_LLDR.py create mode 100644 examples/xtremeclip/baselines/main_dtd_adapter.py create mode 100644 examples/xtremeclip/baselines/main_dtd_binorm.py create mode 100644 examples/xtremeclip/baselines/main_dtd_bitfit.py create mode 100644 examples/xtremeclip/baselines/main_dtd_mixout.py create mode 100644 examples/xtremeclip/baselines/main_dtd_self.py create mode 100644 examples/xtremeclip/baselines/main_eurosat.py create mode 100644 examples/xtremeclip/baselines/main_eurosat_FF.py create mode 100644 examples/xtremeclip/baselines/main_eurosat_LLRD.py create mode 100644 examples/xtremeclip/baselines/main_eurosat_adapter.py create mode 100644 examples/xtremeclip/baselines/main_eurosat_binor.py create mode 100644 examples/xtremeclip/baselines/main_eurosat_bitfit.py create mode 100644 examples/xtremeclip/baselines/main_eurosat_matching_ablation.py create mode 100644 examples/xtremeclip/baselines/main_eurosat_mixout.py create mode 100644 examples/xtremeclip/baselines/main_eurosat_self.py create mode 100644 examples/xtremeclip/baselines/main_fgvc.py create mode 100644 examples/xtremeclip/baselines/main_fgvc_FF.py create mode 100644 examples/xtremeclip/baselines/main_fgvc_LLDR.py create mode 100644 examples/xtremeclip/baselines/main_fgvc_adapter.py create mode 100644 examples/xtremeclip/baselines/main_fgvc_binorm.py create mode 100644 examples/xtremeclip/baselines/main_fgvc_bitfit.py create mode 100644 examples/xtremeclip/baselines/main_fgvc_matching_adaption.py create mode 100644 examples/xtremeclip/baselines/main_fgvc_matching_adaption_vector.py create mode 100644 examples/xtremeclip/baselines/main_fgvc_mixout.py create mode 100644 examples/xtremeclip/baselines/main_imagenet.py create mode 100644 examples/xtremeclip/baselines/main_imagenet_cl.py create mode 100644 examples/xtremeclip/baselines/main_imagenet_matching.py create mode 100644 examples/xtremeclip/baselines/main_itm.py create mode 100644 examples/xtremeclip/baselines/main_itm_FF.py create mode 100644 examples/xtremeclip/baselines/main_itm_LLRD.py create mode 100644 examples/xtremeclip/baselines/main_itm_V3.py create mode 100644 examples/xtremeclip/baselines/main_itm_V4.py create mode 100644 examples/xtremeclip/baselines/main_itm_adapter.py create mode 100644 examples/xtremeclip/baselines/main_itm_binor.py create mode 100644 examples/xtremeclip/baselines/main_itm_bitfit.py create mode 100644 examples/xtremeclip/baselines/main_itm_embsum.py create mode 100644 examples/xtremeclip/baselines/main_itm_mixout.py create mode 100644 examples/xtremeclip/baselines/main_itm_probsum.py create mode 100644 examples/xtremeclip/baselines/main_itm_v2.py create mode 100644 examples/xtremeclip/baselines/main_itm_v5.py create mode 100644 examples/xtremeclip/baselines/main_oxfordpets.py create mode 100644 examples/xtremeclip/baselines/main_oxfordpets_matching.py create mode 100644 examples/xtremeclip/baselines/main_visualentailment_FF.py create mode 100644 examples/xtremeclip/baselines/main_visualentailment_FF_1000.py create mode 100644 examples/xtremeclip/baselines/main_visualentailment_FF_2000.py create mode 100644 examples/xtremeclip/baselines/main_visualentailment_FF_5000.py create mode 100644 
examples/xtremeclip/baselines/main_visualentailment_FF_8000.py create mode 100644 examples/xtremeclip/baselines/main_visualentailment_FF_9000.py create mode 100644 examples/xtremeclip/baselines/main_visualentailment_LLRD.py create mode 100644 examples/xtremeclip/baselines/main_visualentailment_ablation.py create mode 100644 examples/xtremeclip/baselines/main_visualentailment_ablation_vector.py create mode 100644 examples/xtremeclip/baselines/main_visualentailment_adapter.py create mode 100644 examples/xtremeclip/baselines/main_visualentailment_binorm.py create mode 100644 examples/xtremeclip/baselines/main_visualentailment_bitfit.py create mode 100644 examples/xtremeclip/baselines/main_visualentailment_mixout.py create mode 100644 examples/xtremeclip/baselines/main_visualentailment_tipadapter.py create mode 100644 examples/xtremeclip/baselines/main_visualentailment_weightedLayerPooling.py create mode 100644 examples/xtremeclip/baselines/main_vqa_BitFit.py create mode 100644 examples/xtremeclip/baselines/main_vqa_FF.py create mode 100644 examples/xtremeclip/baselines/main_vqa_LLRD.py create mode 100644 examples/xtremeclip/baselines/main_vqa_ablation.py create mode 100644 examples/xtremeclip/baselines/main_vqa_ablation_vector.py create mode 100644 examples/xtremeclip/baselines/main_vqa_adapter.py create mode 100644 examples/xtremeclip/baselines/main_vqa_binorm.py create mode 100644 examples/xtremeclip/baselines/main_vqa_mixout.py create mode 100644 examples/xtremeclip/baselines/main_vqa_tipadapter.py create mode 100644 examples/xtremeclip/baselines/mixout.py create mode 100644 examples/xtremeclip/baselines/utils_eurosat_adaption_vector.py create mode 100644 examples/xtremeclip/baselines/utils_imagenet_matching.py create mode 100644 examples/xtremeclip/baselines/utils_itm.py create mode 100644 examples/xtremeclip/baselines/utils_visualentailment_FF.py create mode 100644 examples/xtremeclip/baselines/utils_visualentailment_ablation_vector.py create mode 100644 examples/xtremeclip/baselines/utils_visualentailment_adapter.py create mode 100644 examples/xtremeclip/baselines/utils_visualentailment_tipadapter.py create mode 100644 examples/xtremeclip/baselines/utils_vqa_ablation_vector.py create mode 100644 examples/xtremeclip/baselines/utils_vqa_adapter.py create mode 100644 examples/xtremeclip/baselines/utils_vqa_tipadapter.py create mode 100644 examples/xtremeclip/clip/__init__.py create mode 100644 examples/xtremeclip/clip/clip.py create mode 100644 examples/xtremeclip/clip/model.py create mode 100644 examples/xtremeclip/clip/simple_tokenizer.py create mode 100644 examples/xtremeclip/configs/caltech101.yaml create mode 100644 examples/xtremeclip/configs/dtd.yaml create mode 100644 examples/xtremeclip/configs/eurosat.yaml create mode 100644 examples/xtremeclip/configs/fgvc.yaml create mode 100644 examples/xtremeclip/configs/food101.yaml create mode 100644 examples/xtremeclip/configs/imagenet.yaml create mode 100644 examples/xtremeclip/configs/itm.yaml create mode 100644 examples/xtremeclip/configs/oxford_flowers.yaml create mode 100644 examples/xtremeclip/configs/oxford_pets.yaml create mode 100644 examples/xtremeclip/configs/stanford_cars.yaml create mode 100644 examples/xtremeclip/configs/sun397.yaml create mode 100644 examples/xtremeclip/configs/ucf101.yaml create mode 100644 examples/xtremeclip/configs/visualentailment.yaml create mode 100644 examples/xtremeclip/configs/vqa.yaml create mode 100644 examples/xtremeclip/datasets/__init__.py create mode 100644 
examples/xtremeclip/datasets/caltech101.py create mode 100644 examples/xtremeclip/datasets/dtd.py create mode 100644 examples/xtremeclip/datasets/eurosat.py create mode 100644 examples/xtremeclip/datasets/fgvc.py create mode 100644 examples/xtremeclip/datasets/food101.py create mode 100644 examples/xtremeclip/datasets/imagenet.py create mode 100644 examples/xtremeclip/datasets/oxford_flowers.py create mode 100644 examples/xtremeclip/datasets/oxford_pets.py create mode 100644 examples/xtremeclip/datasets/stanford_cars.py create mode 100644 examples/xtremeclip/datasets/sun397.py create mode 100644 examples/xtremeclip/datasets/ucf101.py create mode 100644 examples/xtremeclip/datasets/utils.py create mode 100644 examples/xtremeclip/download_dtd.sh create mode 100644 examples/xtremeclip/download_eurosat.sh create mode 100644 examples/xtremeclip/download_fgvc.sh create mode 100644 examples/xtremeclip/download_ve.sh create mode 100644 examples/xtremeclip/download_vqa.sh create mode 100644 examples/xtremeclip/main_dtd_matching.py create mode 100644 examples/xtremeclip/main_eurosat_matching.py create mode 100644 examples/xtremeclip/main_fgvc_matching.py create mode 100644 examples/xtremeclip/main_visualentailment.py create mode 100644 examples/xtremeclip/main_vqa.py create mode 100644 examples/xtremeclip/plot_images/plot.py create mode 100644 examples/xtremeclip/plot_images/plot_heat.py create mode 100644 examples/xtremeclip/plot_images/plot_v2.py create mode 100644 examples/xtremeclip/prepare_flickr30k.py create mode 100644 examples/xtremeclip/utils.py create mode 100644 examples/xtremeclip/utils_eurosat.py create mode 100644 examples/xtremeclip/utils_visualentailment.py create mode 100644 examples/xtremeclip/utils_vqa.py diff --git a/examples/xtremeclip/README.md b/examples/xtremeclip/README.md new file mode 100644 index 00000000..6dbcdcab --- /dev/null +++ b/examples/xtremeclip/README.md @@ -0,0 +1,113 @@ +# Tip-Adapter: Training-free Adaption of CLIP for Few-shot Classification +Official implementation of "XtremeCLIP: Extremely Parameter-efficient Tuning for Low-resource Vision Language Understanding." + +## Introduction +In this paper, we introduce a simple yet efficient paradigm for low-resource Visual Language Understanding tasks~(VLU) named XtremeCLIP, which involves very few trainable parameters to improve the generalization ability of the trained models. +In our XtremeCLIP framework, we reformulate a series of VLU tasks as a unified open-book affinity-matching problem. Furthermore, to handle the insufficient supervised signals in small datasets, we adopt contrastive learning to utilize the implicit sorting information of ground-truth labels to provide more supervised cues. + +
+ +
+
+## Requirements
+### Installation
+Our CUDA version is 11.2 and our Python version is 3.8; our torch versions are as follows:
+```bash
+torch==1.8.1+cu111
+torchaudio==0.8.1
+torchvision==0.9.1+cu111
+```
+You can check your CUDA version and install the corresponding torch version by following the [PyTorch official website](https://pytorch.org/get-started/previous-versions/).
+
+Besides, please install the dependencies:
+```bash
+pip install -r requirements.txt -i https://pypi.tuna.tsinghua.edu.cn/simple
+```
+
+### Dataset
+We provide the training, validation and test datasets for Visual Entailment, Visual Question Answering and Image Classification (i.e. FGVC, EuroSAT and DTD).
+
+You can download the preprocessed datasets for Visual Entailment by running:
+```bash
+sh download_ve.sh
+```
+
+Download the preprocessed datasets for Visual Question Answering by running:
+```bash
+sh download_vqa.sh
+```
+
+Respectively download the preprocessed datasets for Image Classification (i.e. EuroSAT, DTD, FGVC) by running:
+```bash
+sh download_eurosat.sh
+sh download_dtd.sh
+sh download_fgvc.sh
+```
+
+It is worth noting that the downloaded data will be placed in the current path (i.e. `./`).
+
+## Get Started
+### Configs
+The running configurations for Visual Entailment, Visual Question Answering and Image Classification (i.e. EuroSAT, FGVC, DTD)
+can be respectively modified in
+`configs/visualentailment.yaml`, `configs/vqa.yaml`,
+`configs/eurosat.yaml`, `configs/fgvc.yaml` and `configs/dtd.yaml`, including the low-resource settings (i.e. few-shot or the number of training samples),
+visual encoders, and hyperparameters.
+
+Please set the dataset paths in the config files before running.
+
+In `configs/visualentailment.yaml`, you need to set `root_path`, e.g.:
+```yaml
+root_path: '/PATH/TO/VisualEntailment/DATASET/snli_ve_%s.tsv'
+```
+
+In `configs/vqa.yaml`, you need to set `root_path`, e.g.:
+```yaml
+root_path: '/PATH/TO/VisualQuestionAnswering/DATASET/%s2014_4_clip.tsv'
+```
+
+In `configs/eurosat.yaml`, you need to set `root_path`, e.g.:
+```yaml
+root_path: '/PATH/STORE/THE/EUROSAT/FOLDERS'
+```
+
+In `configs/dtd.yaml`, you need to set `root_path`, e.g.:
+```yaml
+root_path: '/PATH/STORE/THE/DTD/FOLDERS'
+```
+
+In `configs/fgvc.yaml`, you need to set `root_path`, e.g.:
+```yaml
+root_path: '/PATH/STORE/THE/FGVC/FOLDERS/fgvc-aircraft-2013b'
+```
+
+It is worth noting that for the first run, you need to set `load_cache` and `load_pre_feat` in the configuration files to `True`; the data caches will then be stored in `./caches`. Thereafter you can set both properties to `False` to save time (see the sketch after this README).
+
+### Running
+For Visual Entailment:
+```bash
+python main_visualentailment.py --config configs/visualentailment.yaml
+```
+For Visual Question Answering:
+```bash
+python main_vqa.py --config configs/vqa.yaml
+```
+For DTD:
+```bash
+python main_dtd_matching.py --config configs/dtd.yaml
+```
+For EuroSAT:
+```bash
+python main_eurosat_matching.py --config configs/eurosat.yaml
+```
+For FGVC:
+```bash
+python main_fgvc_matching.py --config configs/fgvc.yaml
+```
+
+
+## Contributors
+[Renrui Zhang](https://github.com/ZrrSkywalker), Peng Gao
+
+## Acknowledgement
+This repo benefits from [Tip-Adapter](https://github.com/gaopengcuhk/Tip-Adapter) and [CLIP-Adapter](https://github.com/gaopengcuhk/CLIP-Adapter). Thanks for their wonderful works.
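A minimal sketch of the first-run workflow described in the Configs and Running sections above, for the Visual Entailment task. The config keys `root_path`, `load_cache` and `load_pre_feat`, the entry point `main_visualentailment.py`, and the `./caches` directory are taken from the README; the dataset path below is a placeholder, and the `sed` edits assume the keys sit at the top level of the YAML file, so adjust them to your layout.

```bash
#!/bin/bash
# Sketch only: point configs/visualentailment.yaml at the downloaded data and
# toggle the cache flags as the README describes. The dataset path is a
# placeholder; the sed patterns assume top-level YAML keys.
CFG=configs/visualentailment.yaml

sed -i "s|^root_path:.*|root_path: '/data/snli_ve/snli_ve_%s.tsv'|" "$CFG"

# First run: set load_cache / load_pre_feat to True so the feature caches are
# built and stored under ./caches (per the README).
sed -i "s|^load_cache:.*|load_cache: True|" "$CFG"
sed -i "s|^load_pre_feat:.*|load_pre_feat: True|" "$CFG"
python main_visualentailment.py --config "$CFG"

# Later runs: set both flags back to False to reuse the caches and save time.
sed -i "s|^load_cache:.*|load_cache: False|" "$CFG"
sed -i "s|^load_pre_feat:.*|load_pre_feat: False|" "$CFG"
python main_visualentailment.py --config "$CFG"
```

The same pattern should apply to the other tasks by swapping in the corresponding config file and entry script listed in the Running section.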
diff --git a/examples/xtremeclip/XtremeCLIP.png b/examples/xtremeclip/XtremeCLIP.png
new file mode 100644
index 0000000000000000000000000000000000000000..8af1ff1f3129da2b9e83bc103c45a8dd39284590
GIT binary patch
literal 350378
[base85-encoded binary image data (XtremeCLIP.png, 350378 bytes) omitted]
zJHA$OdS<(94$IEOnjpC(R&MQduS(NEFNgTmi-VxtDd@S{#{Rnfq%^E2Dlizo3Xh~N zl+|L(kg+sy0(@7WmC>9x$ilS!bl{|nvw98Q%1ihdn9&n+5FAB-9K}rZ@|MkE-p3X2 zXuJ-~vKd=#Nb+kE>LMOdLK}=uwgt5mldw5Kp?Z zRyNmeK8Ke5^>^%Y*V;0)_CCm1&gZ=9#Zg*lkEZyq`+)ln4_%sMgKyE;`P_`f*RIkO zGpmx{&ZR1#%Bq$F>moL(WG@I}?Rhphpg5qIaQ+Ryb#o3|XY?=Dnj)n?3Wx7dw_%)7 z%~nIyKO9AWl&BrQ&~mHuCA*!!GUs^y`^{f$b6C(VH{D*MYsXM3+S1P%T;S^3=uqSP zY;`pz1KuaJgvga(ub?fA@_Ua`uxf<2=_en#AO`v=v`jC-1tDB%|eQCyjc>t86F10bJ z($$q!mUsvSHZ}YJs}Pq_O)hjh?I)FAalb)aj$d{>%V{M5F&yx9hj={OEsx~I7dV`R z)q&LaNn?rI)uY@j8OTwlR*Nqub@-Q7UyAJq13`^QZ{R=5>@fjAfLXpj)FhNeGCd2hv{Zra!i|UHw(gTZlj5b1o8{cCsTN{oi1Odh(p$cPK@1hA5;&# ziWVLO!r#Z3(|`?ukUvUp_~Z5db;O1v<3Gi)4;Ho^cJQ|V!QBU3FFSq{LjFl@W^uB| z)5HqYtPvQqPQn4?CXd^G&w-yDFU<@itAi)>CACFHzi;zBiO@(wTWIrWv*1%hB&+u% z_icX#h@K;ND$SA(W+2x4#c*6wZbsRRYCbofd*NntePl4Adc^^P^JvPi-&)+(k>OaV z!~@XDp2rMuZ7a*HBYrm^p}ywfEk(@wd!OUvyn_J9c11FeK zV&=4$hmXT-W-~5z&$An2Qq|bL<;Nv(o{PYA+hu9oPUv+^=(T*#zwe@xO$5|};@c~I z*QIfw30J~K%8P1ge)nXQ?HuZ&C{8w#MW(hicY3L zY+yX`q>HtOJ$=-$ZQJnqo+7|*Hw@Qtd3nN1U`X-mUehW)9u291iTrcFc>5vi+I?t6 z$w^GKzG;|*^pQKX)Q)L|XMV5MdWODm!35q#js010Kn_>Imq>Asy-1m*93GF;Z746W zk=j_Bn4wFc{e00B#)C7@n0x?bnM?|(LVp0UR8jk+F0um$b%T`MfQ0u^Y_g4}P%`J$ zot-yK?ir!#Gv5ChpS8^;A5|hip;W6Snvte}3z3%{>TtZ0t}Z7cD6+GG^eaQZ)oc}P zmlCMSIV41?f#_gotG_;r^oterK+t7jidxS;#Xu>bv1@*t&}t(_skCopKRs_gj-%); z``cRYD{@J#rwGm(^eBC4E^anC(7&r@JyhZN`KJR(^I7@9f_AQu5;i4U3KfNQ~uEG+xWh5Y2V>sk2iVrPuK2oH!Sh> zbOE)^R~Pd9<3V-T&yujG zuE%muUy@*)!7@#objZeA3|3aAxzwv~=cT#9w)RNr>I}TDjz%v9QHMbz8M>)n^{i9K z)_o2=t#9y2;4hp`i;C-4#6h|+MlYS@v}Z zZU_%yN6h_u6x%IAyR~a9E?WFkgezUdi9a+r0Vsv84M=lSZl%_zN*W%_oC6+ic88Q zd5{sSvW{d{=B%sU@t2ZRtV)D}-N^IdQmcafe3ABA46u-EU31_2eMtp*eCpc-a>ezb z>HY6#1LmBjQBOp-f+KzUzB70mm-t!TN6k;g$xF7rO!Q`#7*-YzLhcq8Vis{{U5no; z%O0%@NGIVey&Ym>4;Jju7UU<=Y5O~Kd7aM{b92~EI?*ut
  • ao`t6=e3o1_WlU`BkVN0Q+u_GJlabC#xRRWfA1awHKev!zK{|5qfEhBmF`BL z@;Rsk{bVmoe6#fH+;*j^tIheCHpBjMARmylMJ&XV>6LZwpT9P()U@t4v@TKvW)%yc z#28axGrNy3vSd3%dZ15rJ<9Xh*2@^nmD? zW>9d^z>epO5#Da{t?Jzo|FI39w3f=&|2URm<<)nPcXV^)?eifK=VEU^cXNO77PPsa zHz1+jRcClvT$P2dYYGd975;^j615jJTTrddXR4p~DT8=vlTJ5qKI^^_m(8yQ_2{bNbq`cJmrYkm5AIwAs>aXR*B+{b6C^{01kmdmzh^ z&Sb2N5n0FoC$iWkINvaz6F1E4nT%Y9f?n)H?I~zfp;MLvFKNI+mz%b+nEtY!+{U4) zq~R%1|G62Qrst>4x3b6&j6m~5cT|`KxcZlk*Lz@pLX!{@U+7SEim+8%a;V{={G<0HIIwtiE(D(1bZeA7ePPR9bn*ihCq zdEJ>xKZ_{eTf>B0Q(xEomBK~oy#|I>xeQ(W=>=UT!&TMtbeEF?)i-rF-C8kmF0uVB z(#%S9WN6n?LrCuralu_h8gXpCLpk5~<`X)mBsg}-CFj-wb*?F%%McJ=dmHl+C}0FS@1{jN3hQWW{}8xs_y5z*2%%c;8?(Z+gna-54LxE*f z+LvtdyiuDI64fl|te)3>brU(|P;H5V&kuQqKMC??Xf~wyzgN{u2!4+sa~d|YYf~4( zx9Ys1s;IMf6)a7^GvS3=f^%3i}#OQH8hH6J@D*5_^ zasFllM4oNE8BnDku_H<8SAIx*vT{|a{nw+B0r8g!-i&P+6EveZGutFzfmQt(aUJ_AQEg9;Sra5!MFvPu^6=5;L4$i<0I?5@jH?b{77|!9>Ahz>_ zo1Bb+`IygPPKJf>QzWlM|96|g(}Pp2U9V)iQu zt(G4z|L#((LYMz#@)|DO7&?_$5n6e-p?xYtUJh$*B7U+!Rn0ny5n;&_SOA7l(uSYh zbTq6r%bKpgnWmc*sd?-i0q|maStY}xfVW`>P7!r%`IQxu~*pdZpQC#G^{32 zGyp|_tr|Nd#~jM9_qB38rIom{qS!94w>s(8I(zm)zC(i#sKpw={woV{K(En3&|XSx z^4AGV%jrgxRnexd26FJK2h(zg?Fav)74lVZvKv>5g`~>Ow8?UZCN4<~$Ps4@QMpct zbJ>(J4Nktviel)1hi+Oop4KDCG!q+HKc81|-}7s+n7BMycXFIRn-kNNbg8Bc3K_|k zM^vGP@=RNDw1uHV0XTt)lbtQDo~`HiM!j$fnvAJr>q-HPM+!y4*aB(K_c<;u$9o`g z(x$n^wY_GQ21ELDQRr0-f$}ZGW7GV9t1l5T&=*-DlrvJvnjP~zAK16dG)p$gs)oP( zFW&ow2nvwcy9(I9I}lyxovM~LoVF2;mZ{F>cj)N7xu2hHYgJH6?qg;-B{))Kg+nSG zC0Gdmb?l#VvQ?>)R0qs_!FN|N77ct^liU&L%$Hk1nFFs(bkOIGx39Ihfs!cPOZx}4 zU&S=E0Up%p>{tz2HT-Ry$!8H61zJ{3NOf5Gyy0SHN_0idkO{UGZG0ACW*5Pyts1hN zXY_`V6_Q9s^$gj_+6`$?)W#6AwhJb{p;9r|9=)3OPnBQlX{Z2}*^ zTEfkn*uWXE!E4u{#OKbO7rdi4Ta6j2Igo)n56oX^+7{^q*@|nU%jIVD{+jxdF4_$cn zO;Q6+oVptlQLOa6y#~MjM0P~}{!uQ5RIgVe8?9*qab)^Q7UzbOnQXfKe1ys1pu4NLloY4glBnv>c6kpNeUWPg1`~X{HZ3h&?L$YcG5ssDC9lDy#@`8O2D`T z8!6aRi|a^{hTvj`!W_72)ad?Dk|00?kQ@=AP|%`TNs|(VXc#!*{rUxQDoV3ph$QQw zBpdu0-!N)We`3sqida-2bdz^;`;fe12X6>=g@O|}%^fO68-`avJA80()NUh5H5n91GsiM%Hg|x+ zpn0-hm8HQE-0Rz!j}zp;Rv9+^dpyNr5K@III(xx8m4pQznETnt4N&a)Vnn}t{oA?G z_I2HCm=Ln@0?}31^2fL5shYi-*nW&CcIqs>pnlTQT2{I*v3sGkqRGT$Hg;wew;>=^ zDq_`o!=M&(WzWK!Dw+Nss^0vv8H*|D!g8{(Fp(XlvOWk#_S9M&a~h^Bk%4s7HDQVb zJIuY!*l&&^ULJ-v0CICXfEMc|h#`>SXOh7>H;#S<3EauFw1? 
z>sTj|X#VNi!+@@CB!OAIuKerT_4y^Zu2`f`1eZ0>Rughe?IO*&sI8D9BkZ-g^JM!T z(g&ljebPe+Sf(?()AmcWgg$kp!TlWzFZCXDelDbqcC}-f!C1NX$nWA$kUu+&vf4md zPUY%C@F@OmM@>jjy^h0O>pzRocA4WgS-dwJ;=N0{KDTT80>r_Po?FF=`-y{+gAnZC z2{#e}AsZ?+2vfBG(TYS||#@^=e(NFO_(GG~k8lOwdJ4=O}EG(d#jnQL-q zu658N6RK!GdGwgMVUh@2KezIhfNg}LaKG7DYS_Qd;x+^&q$HY?@zrbD5#vT=wrG{Y zD&pIC^TEy`mZ6Gqrj7((lLa;hWrJXt23H50Sz9f+G%)Gx@VG^NdxUMo<4LYEl*M5<`xb%#y1*9d|e4 z13+&NM1{@FMR|&V`Cx2J?^Qvc0omgD=m)f3^ycV@==w6!3{q<=RHm6LSaP1pP=OpH zFYjF+q1J>|4F6LSph-dgZCXuSL&-Kf&6v0KdvdtPM`vlGtajSmh0<#hh+*GeyLUY$ z^hi6iyV!a)1A(zLmA*_YSbF{;J^5|RWJnHC79uOKj$+nWHwltZnPPeyo ztGZ8UM9E86+xk8_6k?}|t{Au$f?h^Xe-ZYv6ln+`aW}q>UiX+1a;a7`R&z=Cm;ak`N0N3e2HGu#xIUrmDu9*#Z zG2nn6r)?S#l(PN)Q)927oBqH~tR~5EDFwN9zY0Hl#L%`EST*6B1#rfk>K@~AbgjLr zD(HC~vEMwB{IqQ(5OcD-{{4=rVrX(muP^_${t(}0vZ}CGB_Zy05qcDFxOAs<`$S65 z_|du_-SbOgN^l9;X4P)xiN!d`s^2KS%C+*-D!)QWo)V=>0)2}Z`}ov*=rJdP<5GsQ ze{4#8WE7hlY?q#)HC69t^pug4l3HJ7Mh6F9i5}{2pjES?St4C$C~+DLOEFUvdp{C09r@T6PpR7L`=0d==!t z;P9AeC5JLmZk*%;la%9ZdV~Nmg#W81@C3&|JsnEOp!zSU z6P^hw-(Uau@~*7}b)CQyYO}Z=*emfS?CtLXURh50(vk`Fw|b4CW#?C-g`8(`*Nkj7 zZ=H|GH|2q%-1AE=Sm`Koy44$gQH5^9L1t(@HP$a)XzjoM`mN-+72gpd(B)d2|37X@0&t; zeAoF5!@Epr3cEGdp9K#BtJqo=*LiPG>$i(%D=Ad8&lVRymoBC7L6*W;|Yl{^kV0lV{c z>QXQ1YkEF(832(iGER;gOQ!T(0jeLDb9G0E8zlDRH-E3fHInrne>^*N?rWZxZwvPo z7Wiv^W&;^P78k=7H8rmG_9R5_%5%i4#A)HuaW=E|hJ_vflH(uhb(2v87|fY6E{2j^ zv-D;t8$6UKVLf!U?Mk1`xc8~?DIuW!bpsGTqrL<;;**?Ry|ob}LQDh6#d)RKUH{l` zVU|Rt<5jPpgPLW4J|&t?4fs3L+@~BTUvepOR<=9NZluNHNSxyWSwd8dYTe^amn0;y zljHAaB5=K?OR2>`SLLbpb3SR!ajAazCTKtczSJMiDICSBfd6WRZzB8dBx6Nvf7c88 z{>Khk2$4|(#}LDbR-Z>+IEfr*i4C?k)Egem0(xSi8NK386j--`1lTR zoZvl53QT#a>eUkADyh8im@A$0A z$q8To_D~sQ-qyC$)9*!-$0=qfIS6!{8s4S4w{Yt>8*XSm$>(OZy0@&Oy}TvjKg{lO z5&@Ib0(ji*n_QL@>ABUMIaeDua4pza%{8x16A?K&(h{wd4DyiN+Ik;a_S4*CFLaaX zIp0c!hB;9GH zUy6$o?HA7Vbj!EuBv-ebLQ8EB!CO5YI+Y@CKZ6dyb)h2`;->e$b z)R>E(T$59lwM%v>Cgy1SXCyh?knB8XlVUu1c&pqtxz>)V35USgB8T(UTzF+9$B-m- zz2#Y3S$L9%xcwO%N&qyw>@VkBy549Jh~ph&-!Xz$IM)y{uqTck8}-B$rec@Jf9ej2 z6-Wd!1nyKKEhYXt#n`~#2>tIXH+MkX=3xaQ@HTkgacgA zGJCetn|cB@*N^-w_%QAeKT&CUY_~oV{N%AXJsxl`YuI zVV!Sw*0iak5zE)|u@)+-k=JJZOP)6CJcmZho|7~B&jB6Gi5TfD;p#w!A?H7Gk#&u4 zhAlmTJyd}v^{Bc;1cf*4sI5CxOv#jRu{Aw!-d11lRhOK>do?ENe!LT!*(3T?*DRaS z*1{gJG4akeglRMal6MY%5y1q{ej_~NE|nVC3$LV^)Od1W*M{&y^P96!WULx!PEr+U zOj_#+Und{;mX)STx8&T*dRks@B_QuS9u95sy^Pcfjx^H|eVX^?$6kGpqf*4NYO89l z%RehA5yzyRWx~)kXR$3lhD&|}Kftm6nAY}lpnGhzH=I#tztVr)(ET@JHdt?D1$kuk()bw!+ z+jo2~@YoN-h7Zivn9JfA-=La6%%S{#STuLEw+rVfYH6iSy2foqUr?(y4Q-bGd)B*~S1U>V>In>?Q$hipUp$TVZOP}gCo4gej| z3&H&cL`>I(f+@eA(PASAfy<40Y1*YR;nlGdBW4ads;CaRrB6u(rzriUY7J^O^WaHcInGY1k1_L@{ z*sm!e;B+b10Qr_e$hqb)2eN>TyzJ&fq~7L!qPRqj>&h(P@fXe`J{!=nuJ6vfrTz2o zQCj_ifdhDRjkLJ;!Jdgr(IB5m0}$}d%OoR-fAA0%rm=nFts{FwP5Vpw$P@5$xnr1N zkU*@6<3zr;ZKC=e+(BWI)@SIlJP08RD(`FzpO*5YYOvn`RV3r#4^s32!I7iRxI`&S zX8tSTj_{usVXLU6mRGU8?AA+Y702kT^o+RX0%E2UQp-T5v)EtI#vXaD%$W(z_8MsNSjFcP zDlKZwmb?>2vEBOHZqEPL_0-f9WwskoTaxtemisYq*7fHelIL(^4gUuIZp*m^WLWgg z*PlGu_1)vICs7xdN8l*qHfhJ~%*@T(c?}`28&jIB@aN3oU^wb^$2EV)rce4+lgR&f z0VzrCvsd9iD1QGr`rFl1xm$Lf4l@o>kjCC=w?3|{=H%eGy=t8Q8cew7g>11l{iiqH zO2F1J#b9i;YnO{Ea;8rXSd5W628|-%@g?X+uQ0Rmy@PcbM-Xgk0U`7YR!tpOJ00YW zmSoYY?y3o`FmCZ_>TF-LN(+7a<#e|Rp}w;t=6Tumu~S0pD5=a>iWm zmP_%TrIXU*SgH9nK*sQey4=X{F7ZrGom?AO5xImu)f`U=F&Hvb!{ECmutdN?G0-LK zp))GKFj6&Wo*QP}zswsjxN#>%aa_kNT{}kgs;EPc226yt5IkM$O(r=az46 zMI3L>s)%~dBDJC+A;H(vVRihgF13ytJT%6+>u2NwDx9k6)C6X{tLnhc&o; zxK{SNa7+)hq!=%G5Db%2@~2dg_LNwLU#83Ea)}=+eX;*Mn5CqMCgig_Y6O*Qb>P~} z1aq&JjM!oKd92WX_2Q2iVU1mlzEZ58F;SjpYH=Rgml7LPQujkwQ3&cV(5tn-WmVwc z+Sq0-oWv4BR%4%#7!0lPTi2uy2ywYU!_X>LiL 
z8|WUW$eU>D7m~DguM&F@qlSi!ZI?|AQg{=}&Gt)j;a9GXGccxRr|5)^^1lO)@|T5{ z4=S|kP!<$@E-4@KMPI-wgYvnNi)OZ((nP@nrKTItb^jcNt7Be-954*6iP$m0n9@dC z*RQB|e>$joHUU-hUx92GN)iZ8tGoj1a-3|RAJ$c zvU}~w(NkbTPokB9xi``^>m8ww@Cwuu1{B|mL#{Ek1G*UQGsJT>W2O@30sa?#Q(3)-w;RLw_pcRZ58GFELQHE0OKM)J=_5S8P-g*2 z>l-$9bTth?_C8JxuM)Cnris+aer47Wd@nK*wIPLkpuZQ*Y9&2xjsap!J$b*DySL!T zPs#|mJ`;`z(SkyH@vP-|_8`)qfk(MY^<|qn4N9^QVrXuMt?nG09w(WvpgU6ZV!97g3Ar zv*t%6w&;1^ye9CxsPhDBvqb8D7&$mF-%{_|W3h))Wn1+1OUy9(+`{UWDCAVv;ey>` z%!wBf+xH>;nhzangX=dSA`Z4W5#>rEqlBObHl9c#$d3JDmy0Vo*J-z&(^MB2Q>}R& zH53ec%RKCll7bqTxW#1~mSv@b+jkKhgW=;>dG;Z{?qC~Y4$0Kx$|MDu)P-wT3m>xcmTHcWD6C$rAGb>$s9>swev+}gA%LB(F*y8XfN^-O=`<9&Qm|2 z&vi)qOk0Cb`VK;}V#hxkyxhLclOzgFPW)8uxuji(hm(K4^TI5t#VdRzUsQq*koOx$ zub@MHUW5DM*~Vv!_0hwPjVodBG&!g~d7L?az9OjFem6v}Zx$md_H>nIkd!QVCYh%@t`Fu=A8#s9*x(OXTb^iQe}U3Bbj19tI~TUzPPbu+M+3Edb%bC z%iZ>8z_20QdT1|Tb)NnLYx;RQ`}fEi0Eots%q%T68xDlDIUUFMzV!Nob^N^^ZUn9W z1JpGOBQl3$1pqy%UH<-c;vk3-2_Q0~qM|ZrQrLl<_?nxWn=O#QL`Nrf|4-6F9No)D zT~)QQu@R5QHR@NKR073pt9G`l=Bo%{3>I_$45^VAmoR-IDMbSGA6TCXovI~^`QoxX za-d{YC<6Z3$%&s+8n64^XdSs_RmrxwIwiM5D{Tr9HHJz)?COqr7_H%tR1+kny+;pD zyRCoDxdcoYhV}N`IxV)r&ACjV@DYbgPz#@zcD`0@IEI`H`%DysucH#_>FKJwkPui* zV3Fbl|2)1vURxb!7TzAuIc+yacl5n(#Z;L@fcNLL#VddyFYX z08T+xKNs_}=k)$-9Ws!Ce%`U$A(0OvoUy4o<~=9cH-||a%7XesN-=xnc!xfNhunmH z;mbeqM}dTf|EqV$ZG^gJqMGD^+_X;*e1&j)4-+LTQEt7?Ksj0$xm6VF6J4Kz^xx@| z1M*RWL+@;KL6ZCspeEp)*?^D%88dUr;t30Q_6anCFuqFb&Xwjp@prWbE~(EI-|QK| zSMOhYms=aRh=;n_VZJ!h^-YzU%e9Z+@8`b#@k=}=)F9Lpc3f&R=2(|{jabHR@c3@9 zpYoNrKZ*%9b@YQ2l;Xc|CXezB$u|5;sC3^H&+qgb>Au^1kISuTr0|jpOkw6plFLno zYHnS50xTAW{W?#)aTfsXL}+)tK+;yH&2qJOC{bS$gp||7CFk#Dd?=(?;jhGQ zIZ0Yzh$BYoWxNNbm~=e?x8Vx4!W0;R@UEl@W5CJCLp$GhTxT#-JSOBgN#oUelgh1{ z&O7N7xC4U9U2GCBkikKqU9(-sC5Jhz`i1Yy0HJ1Y3?3JXB^ZX^9o{xD6#zSH;+$uoJ=<5cwP1_ zi(`eZ~Y(4VaK7-P%gIaa0SdN(h{HkVpSqMw+0{0?ba<7h}(%BM(Yb!6%2B!MOAj5MmlmA zVanCZ+f`LowVuMxzI8!nWQ|fdS;(=hTmf}0*j8VPuKMTEzBMq+%g{g+{WmMdc3BRV z7*q6K4QH{p~qW2(Oj?VC}c1{V8j5=H6FBM+Q$E)lhKe^Ir;L>_O8fv!#5+*zn} zKOpUJ+N9~~8^?Kjek^Qsb z_6|m)ouc4-Gp=LbKZRmz84poaX2UDtSZj~@S!iA!-RHJWb=JDCPE;7J^Qn1T80%C4}40-;%d^21S$o+3^<* zexb0fr^Oky_l3xVM0gSsf)lkrsDyq6jPF0K+nJ4h-sgO0l5?9Ga%z4E0L5i<-Zzdl zs~dK{bG}~*VQNQ9xIN%O`LGs0pSKY`g)tL@K>ef@+0m|@)38`+eN9C%A0XuXRD(ZYcSSAr&|3tVAvQFzo(M(hL$-5 zbW@G#2J46>{C*iJ5b7s7#0#`rvtMoeaK#>6Y4^t?{mVUgjD z$S`)XS+6juwf4RAJXYB;b4aEUi-dIE^uvC?)qF=}{ZuD9VHpPC!=gojaEG_p%c_Fc z^?W_n$RmGg&!CuRwb{p3LwTwyoMoq(9VDQ{n9nI%s5xvlh0~@)hqUys3>nOij zPc;TZC0V6Yyi$8nsSuq7X`1plfTJXg7sn4mh42K9>jHa={X#bLNUdy0k|%>0tKnW+ zQ52}%_QS>;YqIT)v9az`#UB&VCxz;Lvv|x=x2AaF9}>ZUMtYJAk<~(703vGtpG*g* zU;8ZQ`MWpWA?l^@K09~wiUk~yr}!-Ps<-#4Va_w0*Rp5a3p`k^412K zE=JG%5cVJ961gFet=DYsGI?`L09C8ddZ966;p{KtxevXX4=E)VuJbt0l%CaP&PJ_PazG z%naxr#ZJ0%PGxpBv21 zO#aFe&Qwf4u>21zOZfqc?=bMyy3t4D0+8tZT%PxxVzR2C2w$?Cwo1z_UL&bE}&;gUTpoaNJ|dqlM9)0kwHaN zfO?nPd1Srtg1N%KhDzNcVM3PtQZO7*P>cdf*-ggL$`#|82TYg@>`g6ce1!zz_rdSQ zA~BR8sbSdQZ%{x6)kwh*4iG-jN*ez+aF9=Aj|^m?vM5x|_XP;Pj6UsbHF=ET+3^dK zpiC!L)OEGS2DZdx>hXSOo9h5a`%}#C!>Ww}$=pkRpW}7x2qTD`%ZL~lkoXCd;ytVz zx6Pr^GMQw2Qk!wiCndU*O-ye+HI|9lhg0;Wg*yiw=FSrr?)OWQZK0&9x^~mFXxE)p zvz%aWBCH^Eei?IqTtiMK4}-Bu6dd-SY~Tx!=avCPnl50-HB{_jFQei3D@qziG=9#7 z>AwY)iOvn&Ym$xg01tjX&@+OAeV_!@yWZEHgd+JQ#y`EwEFlu19s-U)qoFH4S@kj7 z)v13(2!Y&S2nWG0f+d3FaU!8g631pT;ln&w!bAGg*mKgr$MML(vP}?4m4Hf!Fbj^l zB^ zuwS7RTQcaLi*EaqIgu@dGR}AtWJY8J!|?;^?EZ``&vTXP8X`M$g1Lg@|C}j*WAp|0 zbjwpA#Y?OUJF=WRkA^p1N%qsUiKb+-fP=L=a~L2}t5HyY0m3W^3rx$h?h3wVIa z0y&#DTzN&LlZ#DFcX=(bVQ&Ulb^smO!d@u)XL(5&vp+>H2^`dfPZT8*DX^ZwzzE!z 
zJOfThb2nn)^|gTs3#qx5E-Ji71O*9|m&~Z&Bv>;g>mBlPSQW$uVHV1SEfDSsU?N+F0tbAJm#(g4<6fQV!ziUp3urm+%2=PlgGv9j##O z=&E-mxUAQEz+JB^V(^2+0Qt}`yCtm~N1{D%=r|dUfEjlT2LKKSAuJsgDuGSSq(oU}>c3E)wk=tRC29Z#0f?~?a zijAQC4!nPTU@#k<9}W@-r~g_&bSWykwAc{5-VX~%gp|trx*#I5prA^$KY0j~eK^^T z6qWz-P_TYvIpRHmr@*X)qSLA)S^5tKz5vuJ)RArcWuQ!e5Z4YwE*R5=pHtsXAhU>O zsXG44Z^4ugpkDU@{rR^_FfJQxAefWC6Qw95FDzJ~UoSXX7vu=B-wq=d3KSE;8wpFW zAc_kwnyI*33X2fZa51=g@Kvo51XX~Qftmq#>DSACj0m8&nF-VNgleRA;QHexRA7(+ zjRiu97(PDJ1Sk_vb1L$82W+-r4v!0O=C5EJle}MlaN;gd@C$fK0|#CS{O`wYjafH4 zwole>R1a>FBXIfueyzf5$-n80ok*c5_8DK3b`X~&_308&}-TJgWLxO8@Cf+Xv-XZOk=M5+vMNgm;z*HFH3}95MtnZ5qqgs zxE;~aP_?d@df7_?B8)31`G~Edyr3yy)Wk!>B>9o7Jm7!vsTg&uSA%GyLq7{M=Xx2# zFDaO6YJ8xi;8>0qHqHlBLLGKzlASAX>Y>C??^3jnK9)9_Zu-9AuSA{ zS0SMkc9zy3(c?#=b|@UVqEc3xo5a6kMU2AHXa$8NOv;%HH@1OTZm92t14YKK2Vbyp zNWtA^q+zaULB|U@|Lq489Lj5j@4Rd~-O zLLUpITX4&^|79SWPe6lH=PvqDMB7;_5-A{3bO4?YDiQz-pZZ+|^GMV(7M{!}ey4(t zBc8e1-eLmn1)wQvQ&GbUB1;g*?}6q0L@soIS_K5anxc%VWNYKz-5|Az%D|4ITH^5q zecAwfitGG2U+*F_f+B(6!tjXv#pZmv?*^uMk10uU-=-N4q;p{)p(Z=gD$Fn~Q2L_Z zsB#5^TLHy;W1n6*qTy6&r-_Xv@sUi??~yJ7gG6fswV<^?OUPj*kEYYGu5ei#NMGxT z-)7S~B_vl(+ya7)fo0IEu#K?HVT7%{^tN*3qSNk7+5h1US=gZWQ+~67s@Si$?1tVcmA7M@H(n!}YMg%? z8&l$LI(!o+`J|PUU5RfUyfgGV{@gW?bY$NUz|K{j`&|F%)be!L%~yn{kaokK>>T*$ zmiG<^Uev=|ZDX0OmNw)5LsX?_hZ5Km-kq>m<>qhc7_F~W+Y6`jbcf8Hm6(Om!fyp3 z_-YvF*e2Y|D(nZj7QZ&k0ieb{82J?_6zw`P8LFF*q_fkft-}&1d~nn0KP(WO`e`$P zLg-aLZB;=hq+Tsvbx~9UlhV!io!Os{xPLQ<`3+qDIUUqXakVtdRH<82*8{HpBgNE> zVXd0JjMDgwyBtm;qL|xQA&2uX&3Ezu^s-oz;li*4OA3i4{`A>pjHj8#BJQz*ZiL1R zQJ)4_vvv?p`Hw+aBKifYibAU2Dcn#DIF=Zh)LNkt1&kvD+dED1Yy12`iYPUC3BQQR z!SSlosKot0*1;Dv2dq5upN@26eYUW2M5>@IBybvhLzv&qn#p@V%bkP%OSsa`HrX3b zJ;)VJMSQ6HycBC{A)lpiWMBC0;TL#;q_x0TMCuG+iq6-8XzB33La)HTcXmL~*|SV( z2afDtiiY$V*XlCbz}6XN>2s0BHj+%<^TWDhh*HqTjveN*uGXWS!tcgZiIs+AUpe#5 zli2an4N>No%^kZ~VfqTp+H=EFI+MQ3^^WL<1uf;GmQQj_Zr9o;Jmgose& zvekZ+%EdE9NB&6}q+`@2(B~qMcjZdloCoVpgo-cAKJagmNINBP;GunNmA{1(r@@VE zBBSO731yCmwC&I*8A$HsTL+IQun&Uui092@9)cJC+qCe&*v3b=Th6}M?LO(hV-8v| ztN8QUSg&RMW`ssnu&HQAV;)X$tcYPP+a0VG4uT0wV&RTv8Yz9|0*djy0|Z-8#~-E9 zzw@yi1goQE-+_b;HH|3Wm@wh;ji*(r3#EslQUX+h&Oq{_)Cr8T1M0_nlXt5l^!!)) zxyU_~)lm1*=w2{^v&XhSrJNlyHh#F6vsg^~s&MDTseC*7e9?oi(WXu_sg21l8q%$T zq$@47KRXnG)hgDAeYCX*XbK9VnVNZ zv$hbOUeq;c&RtQwCej5Ww1&XQ0eHz*$buGC=Z>Y*MFTlrP;`dnohg_p=~ zd8y{r;R1N5BF#s5qtKeEF8AbRNONLic>8T5n;R6!o)*KD?O)<@b7D)e+H$#K`C>n_ z8PP->NJ!Um<+m1JgV=nOB=PyD#qn+9K0YeRYgYM7+oxdb8dL%jn;-890tS=)Qpmu- zwJ_8`{3mrK>RxI0cV#F`bn57xY-pXs0)fO^<8DF$e@wQTUergjE2XZ7C)dv-gUxd)*)=hhtFn9RH%JmzZu(3vMsC=;F-_t*7C`#j={tCL<8ppD)g77^<^1 zujlVfcENXYH1-MUy4hIoSN(Uy!h6r0pG)X;y{2r>Y+dRMiSsvmkF z>TFkLTYcSR#i5dvwIwp)gLy%o<(#Pc14N-Od$Pe_E}Wlvmy&Mr`8h6L&!fcfy&C?G z4*I3Rw!_ml!wue)dl!}njkA~54`eLsIYB=Agoa_pK=)UvgR3h>qZG@3Zlm+7CfWVI zE7kPPc;63^aEq&trK-`fTPnZLXPi9d8L0FTTnF{gQkYs;{}{b%n4ZI$Q_oc$|7o3V zI`s6w+h2aDS)>f~b&#o2)1RQ~*;Bb83HXU7HJOjc-KMt&M~vtVS_%pqJ-|Cn%KBO6{$Ozk%DKjcEy`Kq`v)L zHqLQBSq_w^V#3`(O8G~vI6wnFhQGVPcivB# z{R6G!7ji=Vw6|K{(@)0SGQ)d>O~d@3t$LT}mIX%6aPa}AET*^K35Ha9aXdrgtQ|*y z;E^E5U|>~e%oI5UtSd`{JIG3TXoE=VQRg@q1N4QsDZlrC`z%9HZ3ULASzV3Q>rK#9 zNfVoX@$b{T3&g3vRN2ym>J^@s&V+i8`xKJR40bFYIi)Q5@(~b@SIRk9`&72&l7 zR$+dOd`AcVT@HO`+JMw#G?9u-=_}ubfVfucm;NqBMNFFA#JUAr;{6SwzF>N{LzELE zab_Axtz=t19J}SyET>?L*+iA!G&Z{jv*kk8^=%O*yyQ4D)O<(V%wJ;@J`7`s$NA>O zYN@>kegCZuAM${2k-r(dzI&Qv@0TCC*zMPJ33Pb<9(1XEVYz+o5jAzYX1T`KDM3)m z_%L~WvYM)pI$qF9(WGHiTC~@}vjoZkU1b{ubk?A@Ky}=7AkiT-cBTo7%VvuR+)RsG z(B)>${-d{Cfd0i()&CIUcLed+k}mcRh7aW)+kh-1x=i1w9Tsc-YiSu2iVw-?sTx7O zu?cf+O+Rh#*O~;AxYoiH8_{wYEIuR@w0$__3Cn!W7eiNVzcA8eT?>zfr;I(QRPv57 
z4Y4QVLP(IYoFb}e?2E;i8>{jHZ#ul6k1`Ag;s8LGlUBNEIiQJR_4njzOL1bJny6<_ z^s(=^Y{hyq%3m~H^dq{EU5kU^#>S<$bjzRG3ha&=N{na?!?s%)&WeBUG+#-6dXyLt z*$5(wt2z&ab9lsZl7wur<7lwK>MsCYtWZJ~vuicn%SG8T<-=CWhOowx)>Z3GSgXkX z5S;ggQNA_*uQr?eUY~uh(3+r&G-JdTJN;^A`+ZxJR&Oaw_{69nbw)C?IwWc}Fc}gc z`m?XTD1X>s9hZ{n^SDYIon@Z2g_r_=$^UNfehaOC5B2g6q%(r2y;&8Z9%huL2wa73 zGqR|(Cu5Ik@M_=9r8q;6hMlvQ34H;U)VDuFD!uZZ%?F=mFVXwMZGQP^pjij&7*dIM z{JxzI(}@i4u{Ykzhz+HPd3tAvtyt6adDVA4-ibW6m-vlg;#ZyI?y>Pjea*irEz>S- zra-n5#{KmawnN8(gVqDjNpkpwPp~0+ag^PX@43y$>eCYy%Dc)#Y+;{ahJ=Kwk3#D# zyJ;8)m5(f?C`n$a^|4%2M@Pf#On`=t2?)&H6`;>Wz;c7C3|SUNJRb2@6==Iz03Zty!#Gt zcaP5e(YRjjlkyH~}wLb|4pWN^4O4_Wg2yVf* zCXM<0E4CXpCfcm&KmRD4|E(Kg=*r)h3u}CWu>kC(*|(%cuLD*fy3IK0@unsw$~Na* z%xjRYjrRZSeuG0Y*T0^qF!ETx5=)EbkF2Z)&=Y)r9K7+dS{@jgRX7 zHWRPY?r%VR;y~bEJoDI%-z1qe<%IKnct0vSj!vts zO{+0lFKhvDk6n2U;q9va1B|B8{Nr73#}eHY*2 zv)O-YWU{$`a-YCgmZ34nOIJ@Ge}h6WT}yrAAUVQS`0~v3OFi}If|{Ag$ZHp-(AUoQ zvP4Z4WrpaCe#uxqAp-8SuNZh&NvSyUW^q0d%-lA~F!e#%-OFV%_{&J5E%{imtw^x~ z0h?rCA1WxV%_mTwYG?Nwtvu zLBBuF{rhlS{B6-wEx#q@peR``>s!Y8@6GSNcdvIstm}H-zQ=tjFxNAV>tCNbZ#(|< z>YYzdq&wOB?e&3J6s`n&MZNBN4r|&>;0HfF6A1`?`Si4NpZT*c_@H-eX)58U?wxJkFCQ*YV}E;SVsZEwleOfxgu6J)L7_B=B|n_jz% zha*d`vy0Z&IXceg`iRyyP_;egoP#$PnwYy@?T+6r$jzC~w*2I3qB|d+ z#>@pCuGZ7$P?pIm_@tWJBYJE2WBO^&1^+g^H8nZ&@HuX$OxU--y**EDAEv8Dy@~Z) zSq|25WZI5VIE5E2_~p}j8M+|HNp~^0K)4?Qm~t9%NG89T3wbJGhh^J%XMg4&H(K-h zCT@Pg4u8nxyJ7|HpF+WU-Of}_IE!%Y$Wh26W~&{M9xh!}=SF(;P1fwIVVY*je(19< z;rJ}6UHbM>V}YH&4$xd>rciI~P|WP4zL?vy9!1~LTyucjT9=k#GXg^x7ph~mrNWZ^ zapf*PY;DuU=jpIu-TGG1;vnebDQ_%}SKxN5(i)D`}d{15K=q9EsPT9nIhW8<~+qO!SbX*fLfovS>{EwR31Bu(o+PvtlvXETzyQQZR)P}bYqpA5S8mv`*etItg*k7G|ME^IMC z0#7kbEHM_6Pj6#&BrCp)?^lc91+qXJ0$CWK+s5so=pLo*1@9YkvW0wJTMH*kSZxAHi( zLG@WuR`VV({1sdiI%`r3tc98l$)udwB(igWdhF@CzGTfIWF14Y%1c*EZ#3%MUXQka z(LOWIu|UY9iiEzZdX7?g}>tD2Bs37b%i+_fVL)`MzCXU4MO zC8kI0!%fHg)9AZiQ0UpUFO3mN(C1`~6>@JiQx6-(JM-1;00q58JlHCe)=3MOg6Tfq zt;JGx{@Rl-XXsjYT>dD(?fGlZ1}YxID92bNPwX{kK@u}Pz-?cooCC}#m*-mtu=DQR z=dEh}GJ|C0W~;&z*C<%ci{C{TgE%X;qRKxWMDx7|@%cL1zcrvVo@qOXS}-KQ$?{hn z6W*B!9?ZVFZu*`6tO#w?I$KQlHEvkJbd-R8Fq>_i#Dd0#by91m@KI>g%OAOA{fi(# zF_(}6jA%oIqf(Sf*VVJ9rgD^5;^!n+q2Ip@gx$GfEUp%Id#iRYjj7kRG9=(X)1!H1 zOwUm48$yqk2qjT-4bj=ed}RHAt*WSd{RJCNLHppzhd8M}MX`8=b3`4Vth`20X|Xvy ztJOS2fgOfD1D3IzTm^Y=y}@AJ`JTwb9=kd9R-B|Q@8i5+E6BhcI4t{cJ@viK&A$uF z-oSjvs5ty>Kw9|GKx*>sVx^^CVYKW1;ALv}{w#fr0eW%q!8c6c>HOj5apfs^tjxjC z!iWI_6tqEf)_Pp@E;yFMS_wMgYBRxCasYVtZk?`c4X$Z$z3fvIr!I8uEp$*eB`%7h zVE4`oKaOU#mXxpGpD&wJqW~{3H=zwD`#QY7a%7RWZEW;3%o|w+@V?_^wQ5q3;%GbCFFLbTjvBhEU7wm*JFdQU zFCB0MWVgq>nw}D^KipGZB9i>R@On*Z9HIVEo+_oa$4D3kcHTBKma2gFac(Ac>nJ^ z0I$}%5i}k6A5lo?c3?R(3z(ddEo2ay6*bRb&CGPS&}G%YxHw5Y`95+w^nLg$Q$I#} zr9sdF5KGLm=ee5X06|YL6bYs?2JFg^c1=wYz(wCI^_`ahFVV8Qc$OU#W*tdjy+?I` z%^2RGB=^Gw6(&I`a{__@f-Mnh-(N$^P|#@Z()nL6HaJoI{%c;1(6HF}o=X#lJtu+e z$ywqmKoQ8|1B+y6VAGD&n0fThw0A;RA*U{OQ#UqI9?N)+yak+MTJ>euTe#)oW7KS$@(jTFjZ%CHB=sKYO^(i zrlRl2SYZ^5?}Lu>C8QL;<7eBn!p(=l5~!_6mETqyRQ&$coq^aG>X-lU zcEvcLaCE|09+6#X!hStosvlItb9ZyU8DocjyKSif9kzi~!9%m$*^apA+{7pC;!4opuJLZ6uogTiTKqM6?D$6NvQQ0Nf~9{#nc6V6 zq}EkVOualjFAL9LYg+yFUGqmHxh|9fb7ps5EAz#s4I}B%9Q?>JFgXX$Sb73QSj~e= z5dscTH_1h8mqF-IBPUnqx#rdPW*~ws0AXM_kQj~v<_0rga(N6d#QrP6$3n<3CREow z21!h7!L9_MIT*_9=-~>JtwIB%pCXopWfjmGBwxdFGFY2H8=ukR&B6gcb^Rb020spc z0}SuOa#VyQYbl!E$HK(1oh0C!{bXWhL552}`@2CQ?`Y`2NgUJE)bn&YY`Ho)diA`x zNTsBdBg~yWz-}6jm>6OoQHdelEl-LF8H7N=a^wYwfJKUNV0->F6bX83NfiLp;-N?L z=q&11?$U?Mg_ObUVmXPD8Umy@-oEARs_OkJ^hF*7+E&D%Pss-PEh45{NmJVtf8s5r zd{As!$B+0bRntUpFR3?U#AGQwvqech8l7^gV4%}thKbmG(Q2Z#QP(Q{&u8P20p^z5uvNP1-p@aulhzNdpnqFz`ahqJM-H79`P%~{ 
z?5drWzHQE1IOY32qKuW>NnJ-U_ubEy7gPO#-!S1bF>P2x{gFR zd;%1YlsQ!7;sd7UMOr1>7MCE6h3$7Ax!YE5E|Wu|HVO19Fay0%KBq2p#_pdb5w@y!$VqnfuX z;v^(?ghiqq13SS^YdoNtq6Xjj#kklQULHdV_68#%4ExgHHU5dZR?w@ zqF|9EHf+hM^Kio9jN)I&_x?StBvg50od1cLvR9q=ilnhS<#Y{^GNgnA8Q19cOEj0Q z@VwUBfbg)juHWyqT(0MZf|qh5$-#+%rb}l57!edzDD**HQU2FzB6Olu05yOk^bq|J zY#r>+ywbfHG=h%BUrL;vZ_QzpJ5&9lg%AV<DTX!3P5*Ma51kBZM!)qNte0YT!Uz!pJ>@eE%eQJ`c^*%wZbR#;!h zSl@u^Pck4&eXt3RRcb74Jbdj~eWk)9hRYt@+@-AxUrirfc}(4Y)1;k4(h4+l02ROl zC=YKKY|FnhJA=u3TC|D|`Mnubxj^#?iSGkp^xdhT$4$59`z#2Qd??nb6(1o&Thp4r z^DoBy$aYRn4;wvw3F~NHP-Q8S3%UWZfO8xKWcD;19OUV)IC^rIRt_{lGKr976zJid z#z&$bUyLcN;g{5kazyMl(#bbdFl`;d2?70=jE^Ss%ONt#l#58cQG<+&(u~X|N0RCd zVz?}Hm1tbV!;0gra_oYqq=vv&kib4DKI!VB<>AkOvDJO);uUYsD#@~>$jKnL|5%f2 z;C}|+okUUH(6I3S!Xst>IxkU9o1FWj((8UeYU&)>IKTE#!7uW^wR`p@wQFZmPT>=$ z`x@R21_qe3F_kt`6U+3)Z-&-t=8`V=;r+}NX2UsofcgB{O;wAkI3B$R-aJM^lnO~1 z?*`R;a5|6}ogFrrAdgv=m%+qt89f43k8|=inVDQ=a!|Mxie-}QkNUTr+>QKAS@CH% zlq?vBYiRorY{=?O*5mNeXkd|va5Z~@40j{fp38cV>D)kE{W=*FigeY!T?{SJvA+Jd z4?sdk#PUAsDlEK`JpAeM+)GH9Z_~NrDd*TF18j!4sf=&;zy?Nt5t~Sn15iagRLX;O56CdHdYTG*56Dd_N15mqsY1@;Q?w7T z2vOUjb9Bk6iqjd2Jb7|e#6X=u@Kx}#pH=gW?tj^S4|B*b zVbE^JazPOn(;iTtu~_VnwPKhFj9-n2$&Pq2c6)GK+@acNXL2aBpEd-(7GZ#|%Wb(z zJcsO`mF5g!*Ba0m-_WAJPq(n<#+n-vQ2mufe2GzVL| zslg- ziQIfTR1?M{!YBM8$z}l32fgax0D;8T`Y{Gd2dc_%o|(2ob-IMK8a$h?CKJxyT1E2M z2Ewj_p@PT;94^G)?lG_yD*`RpRu~$RRI9_*>J{$;TP|#|00=aM^Z64DRLon}j(90d zVHlW39*8qKj+D`a`zSNGdZhgBd8tnP%9&a-A0TG;z)*-*{8{H6?uPBw(tGCSis3Pd z0jAx>kq(dV{Brs19E zDyo2mGbmwegu*@Vn>VkfNySXsPkhswA)YU`3oV4Zv12%$k;vypGKm{_5DVJ=dbyRi= zs20&tke$tQy7m=D1}4S{9Jy_c(DJv|OsiOe0_BZ{ER;A3Rz+izEDg;+_<7Hyk~j!E z`~6(BGzK9Vj*KE7;*EytirI(1o2OW(Lw}1@CTJyWYDB7ha1L&iqzkPsojPXFB- zhS`p|6vP%>jx-?#p25k!4jlnwDkR%WeL;t&K>kh#RS8qW-g?2)Jnr;GB{G7C97NpB zwL+8Gn@XG51;_mXBUvQ41$>{Ww;Hy&$#I%DD|pa@n7kLfeN$G%J)bxe@)2n0tt0S<^y}7o% zm9Wk+hK6OUb1r!v)To=dY0Lw8^!12*unqcMs)gMfQV(_EeHQ{<+WmZK+gMZ6+0ikw zLEqZ@OEhC*e@@3-BGq(Q?htEqrqqv#yR)Ao8j~ix8!|`14+*O1XFdtvDHI9v0GPwH zQyTqHQFh8Op^$sP=^SW=h)G2f^<9V&(5eO9Z;HT(S5hoRi)Eso`AZ*tTb-3-Zc$!6 z5cNnOmWUQL{;TUqBaoHUbGhJ1tkJkBj$36(`#31f#{?JrO^hk=OxClK<4ZC8tHg7v zR*?P2Z)EfwM&t`4(y#^dRJ0T&rT=P+E!hc|$7r3!}Q#DWD+t z8}P#S$xUtC@1B8)qPn{J0!#Q3i#00GXfQZrk}mRpyc;80bOO&PYXA`-|)q9PmFVO57TF6D5-B@QOmBn`|F8EXunzQ7`p zFyKaqh9H7LK*ol?<4xCtHQpOZw6c6=0K#8D2K2D3s3yt)j>=bTI9fMJ9|jP+UplyQDnB6z~2}i zi;-9%w9NUHmJvAslAYG}AAb-H#X-E-_c!F`7rC}^4!X!GSIK80BSe<60n;qY*;*+P zqAsL2iNn;(_>I39bBY;Czt3`uDK94w5*TEgf_EI2Lqm^?n)xq^A>nS7=Z|G90lgwN z9w@qBmJNu00JL(MTXJPtoi~_%OT7O;h}x}zu3#(LNB!>$lXz-SnhfMLsJ%cD7uaz; z{Q;3(z-Wv-|G3}=X^g7;s}4&h8np~$!`R3~LJq$vbrn6OwgG7Q9Whct1mwUgr3!O7??BkfurSfDv=R1B$HevEO9(f7(Q$dMyMg~N>_i=rSsb~`*S=bO_E zoReh6q(zBR4&g1lQ)+@cbWb=wWyJpWp?UO+g=*~|3*A=1tbG768o5m(L1 zU_ya&{&3}M=uI*cMk)d!ow=fJ!XBor%%BFgVz!zk?1I$XY&+)(kfjim!BSy{H>7DA zu^6#ynBZt?O;N;Z@{mH1XC8qi`vr0D3;7j{ABRT0cAcgGbEfC#-7OZ0(H_fsjkGH@ zI)gqcndSb%zA?y!abD!{_BRjKRA>pR$|n1oVQ%O#qYEFzi?hebQ^FGpM@gvw+vKi? z=f{}vN5iaSMrXe^Q9*U!Jr0PiTN{Fq?YKP2j~6TKb#w67cm1!S$HR#-so(wP+g0GY z-`ZN&{tBA+rtr&6i4exhf}wloUTD`%-KR_cH`cD}^FO!i$l-k!!Y83`H`@NU+HZYm zU06g9c(1cl&XQNDZ*vaMXKxqVZm5~Mx$&B&hcr_UW; z_Z-}SB%j`HKlwk*5w|_d3p?%=cRckZ%8M>H_sC6&2K98-69iH|oW<8BVB<9Zfp^)? 
z%Jo6V;xtcGflkd>%A+krrGqYyPwDm+=+3If1`!VHVkkk&P^2x%O`Xg?8gaH!$G4OC1m^go_y%v9sGdR z?k+-*r5r_Z+P$E&2M9@AM6*zY#WwXnCH<8gb**5O{hL>elhF~W{qfZ z{5TmblI9pLKZ)omV7H0s0hNfs6yvnjj5UtlZLlVPSYK#mVx<=w!wWSxIpGe3vpe@cuESA@@3*wOYCBsb{rBcpF0qD6t_cJ{eH+-xxD1`W9=V@w zX~)V>N0K>eE&vLQUKy7je9EQX6rN~8_@wesQ^?;cXu>szqp{F%Bu8?u`kxpK`h%ZB zX*@?=RkFkQr%QsEthlT2iP=*f1FQS(zb#1a7zVE)rK$8QAy#XZFBSuqw$Vzhga2A+ z@svt8E1RSat8Jz)zJxq|ho>U?h5Z*PZ3i`1C6wB)}sh(%0ytYZNg-xBeG;BnyZa6@3#cTl0#DC^ljVk!B@`}KEv7Ak)dAjn4ouZNop+IC zz=@z;L3?-%k+=u2W=oh0*roA&+4sedA0l&nH)adnyOk+F+)S9k5c&4Y7zgt{{LJmk zo#Aa|&zYQajyAwNc~Q!Z%-M-;dFRX^xQ?8qN)XgtsS)X$ zHX4+{Y8eEKuYtx!3&b8oi!igW__pA~q60~pCLSFFxR<5-X`PLyc*Y#b9{`|CV1h`1 z3$NI+p(%`-7P`G-trvV^@^j}v$m;x_@ehQMl|n_ZUAWu*+Ui$yqjW;uul2OJqn9;? ze?s;0G3sGeaG+dsJ!5|X;}oyJwV7i0JWiP*2+yOtHrXa$b!{g`F_>^DFd;FGB4x#2 zo5;Gg>A4&e(FJgaX+8n0P_}8)3ae%GJfl;=Vubv#W*8=PLhUN(yp4t?z*tZZ$|wTm?Trsh_w(At77VB>x~v zOnzw2m4jS1w0!xn&u|;qI|TcC!|uXy zRm;)dx1Yk^ZZE$c2lwuH zsYOK?n3SkDHIRg`aHt-l2qE+dI&xTY^qRhdjJWpcA)vGmS_D|8ewc;=f-ahmZ=9XW zo7_k-5$Y#=-%2r8~&tDZv5bGxp?dJ_h0>KLzmL=3uDKV&6>kgm~WQj zw(j=4ugC7cF7(vv$<&{}oqsQh1o)GlyL@gAX1F^J6{F+#+g(gbrDKU(F510k|EqG0 z(L!5HehYYA!7Na|2YV#H zg5d5az|S>O3@cxi$p^jS$1g$TS@M7=iVS1H1GuN1FT7)NV~iU%%gy^tr7*}M<50*p zDz6kFBv1rqYMxdB6jdnqlpX<&5v(GG25{5k0&Mu0+ySeV2VqP3>_F221Ac-?HC;P*(3T#I4FNjfUc1KLc0-y9%6!0a=@}w+~wz!1RvHrxuH84=Zo?vQ}+r0Em@gHFT*i!=*x z2B9fgZV2*Y<8&9jCJjU*a6tKVr;$xDrFSmiW?8U;PwsTa9xQi&nN-4J6vod=Jj7;$c9}Y#h-I1> zBJt-UjQbyf;=U#4spW6`zTo=X%lcH+={3PXRZiR32^^34ca^mM^xd!4dM~wLZBjbm za}(Q3#pg#^a)bgj;Er*8FY|}NF))^ z#kf7Db=(7h-m2V#*$O>_uE&3h4W-l>OUnVxBFyTFtOy+_$kHW$0!WJ6^NfPPwzlze zHaN5O2CI!fUMPw|k>OE@Kex~#QDM0wFsQk5!*gx3g864;uBsQTPPRFkztQc^Ty*;_GA+NH@L7cr` z;b*`TEga@xtk2v$_B$&hSZE7M%^$O;>gX2~?Lo1meD(fp2g3Ik;_>F6UboVW{Ua_mEgmO?`4hK(Ri z5Ss&RgKIEgLT=4QtWPllQVnJ$s+APdx$#Ajoi|!7fnGuJ5qW*I1tJbqy=vdH%0EXK z!6Bk%p+%Tg69CI6YOQ(UJ)z3cD}tH&s!Vd_NCK*9LW@T>Sr`$~5>#;UznXu{NX=xY zy|S~8j#fk|q^P`z*vF>&AyVsN9Q}$;3*(nROwlVPDFll{W7UHqbzrNx`puGWf7eAtK+?9e-gV9SR zGx#T-wFE5a3z+Dn5&^T%tS-2Z2CR| z_OB%^9X+%NA<%bjAYLqmUb8z%1I-aER9x3C45@`yRz&6!Oki$MpQrDK>5PhrV5NsE zB8bh}aK`Z$Z9y{wAiL0k#Lv6U3Mrd4K(L=oBSB+5pJ$mgxuD_5jgEr=t{Ri@P#HFT zT+P+ktG>8BGi8c|E@9+>R;UJOuw8JyLs0G`pN0>}vp;r1`@p9n{p7-S{5YDOvCU&F zz93RzY$p|@912#0RVUsH7VeS6H95rLjYTxsGf{#3oQdDYTfz*%Lt^uK2hk z?#zb4-@B_L5Ebk<Ne`AZDsg>Z zZdaq*?{a&_x~=(^+gxE&*+oT66~cXrgUm-5GWgN$ZSwF)4j@TUjZ{ijUWlLF1RE}h?O}m>BJn!o~AQkXN!)Lu6nf2S=_&ty72l?z! zl0Ce<`yPG!mR)8C&#@TpKli=-zKz)7Jh45-7mQt3jBEo3JS`?wDLDpUT~-giFjH~oZ*_-8mh9(~C|YA_Uz^sC*#kYfaoeV&x10eCBG%{qlR)qSu#8$#C`leMmxNDT|4yn⩔r05bUz)ggQ9IGELGGvt+HM;@PyD173;HzyJplESD-1F0DspZs9R&*tF{k)2Nk4;#Z&5)20id>c4*pA>NnAfv z?&t&ZHy;3E8sEHI-T|YrvM;7+Ar=CRIAxMFZhWFwe071co-B-}fMz-DArvpqC{zo! 
zFtwgSB`h@dXC@}Z_v0|A1QeoagCr`AV{78HN01{YQhB8vU(!v+j>tpb768wIAzPTM z|2STecpJr+cdTf~}7tnXBIlSH-x5#sL38&6G|pk~n{SH0FCK9eSmtEnHPo zV|D%4qj6W8I&cyqOWdxe#<#V)<&VuHHHQ+00Yr2mh_9s~Ob4(@A;?r%aj}P-E!Lk| zVa4c9yw09mI2C8jwZe?Qg8l01t6O;)r3w=2{Dc_iVw`;Z%sT%%R#(@`=_cD$b=X_k zIpD)m3CU`kB}w)sim3%&M=qW-6*tGvoX3oFw%x7T>{}g?dONzfAgwA{)7`N42dG|Z z%6|e_)=7ELWX6VoiLIz|6G%$5pd4!$u(n-+2bcVL_-e%xz3954IvV&J zuy)I~nQ4&9C%RtQ~Y8m}Mz8UCddGcx(6@+TAp ziVk+Jqb50B?hGW8WxjJK@?ifn+K86WM*#+Pq4)WyV;J*ymGSE+%-w7OqY*uRUt}pT zi5{w1nA6Rb+jd@bwcuBCwKP$=2K1#hJzYdxrjew8jc`B8f*|oilplY$=RG5dAtzGH zq~-F8s+f3vCJnfNa)aVsM*A#048olLBZmWI3^lXAetc%z!mZ&Eip>%QNNQWKo8rZE0xb+{Yzd(08^Xl4Kzrfj;JlE zt&8ZqV~hSMGfg2D#x6G_POc$@tw^b&nEpL;1x*EZcrxkK=e>Qc(O7w4ddij;4Xl1q zfre%{fvnP}Faxo`PC#ZCItAQl=-KGYy*+aq{yYzaRW#nBK)uN zVWTeI>d8IXwr$(CZQHiZsmZo6HQAgz)nwatr}z7=@0{!W5&PQrz4ltag%+!ihhSf= zsJZ7|6jLKEQZETdI`P%yhh1}9Xk@bfx%Bd9_`p2xmz{jp2(@bqlg{c&p1$vcGB~~! zcbg?lC6S5%(?2Anp)_3aCE7%RRX?(&+GpUZ1Z(J0=tIJWb&7@a&p}(cRI@xwm~kU? z+nnLKO$M2TWG9(QlG5nu(7sky@EM_6qSfN&cQK_kI`;~`3aJ+6#W@7}<|{1#Cd=Z* z)4Q=4{wk70QqX<-(M`(3pDafTb9Tn_m132+d<}OERNCe-VooEqXQ%@T2XPcadW;nz zbSq)1(tAdYZk|PCydT?T7eBkci#7KD9&!mb;9P3o*GGZ6hyK4VxbG3N125p{dH+T^ z&pdrABE)HCesiPI*YRCgw{K1$FIo97S%%NyhS1Kqe#(UZIpE{QqM!hpt~rQ5c{uR} zL6AfB^%+y^O%-+}NX>aQz$4Htxl6?S+MGy}(rL1#6pb|PfYn~&4v?U{*YwLqB{K3! zU;`jF!3RmiAnE%=FnN_pGkZeB%P>}OTk%~ZhWW$rc93M@+UVA>1{T?V*5+7=tTrAA z8gF{Rk2+4;yk^->{(~}icuJH#AZ${>wrV(#n}lR9s%{usRz$N#nb5m`>-2Y{qp(WE zu!V1Axxg#QLHZ#@2Y(3Zflj=Y6>7AdE!q77)W2Yzg1Av;(kt^vfSpRDHxO@O)NDh0 z5v~wE=5`>l8zg~(Z0y%UQ&>*onUDdPnylj!f<1>Jk$;i|HxkE%Mc;tkNN*5TD6zFb zX@fD6PKdy#Hj$bH1rK~#4Uw-SE7~!U_}5qDt1R0ss{YeNLrX(yO3a|@K%>PVld;^U z2A9Mpvq5#L`aNk#4evGF2_BQ^o>p}c)jdmC20s#eMSpxG#KhNc#5_yU3?PPZIFt-R z{IPI(a)#HsfFK@$3S!K9-4#R%SOF2^=RgC2OAwmgQO@XnD-|d3Doq$d`ebT zGB`AlP7b38viLH!Htpc<#*9NTHa#6Zl7>f>q z41M{52x>QrRgXv<0(-<=6JPpp91qjnKng)z0!BQIRm02Fi=C5fD5z>X347nL{B18!n5MS;;HOD8dz!GRKFpC{p;j*~L&T@Hht5h1U0j#mKMbD64&ECjSFRLHJ_ zeYpliqS8+3aRfl@jS0sJH-&HTDplT#Sh^3lMPzjwxMTX)3gFoE9UZ03p>c7x9_mD% z8eU@Z0nS=wk0dWs2*LKsUo*K&;3tBm*tHfz5etJziy(y?naw=R(g>vnrQspw18HUt zI{+&YS~uRFzCUMH{3ZC3HI8M1Qr%E2ksjkfHMH%;k|R;U2>?p4ZbAFltS)=N`BIQD zl}$9g=vfdW>NEu!pc9*;3N%cXPg^BMIy!i!L(r6TNKnf*>8S_warLJhEn+`)u&t;? 
z!_kaYLkY-S@VXKg6NS5;ofIt($N^}OI#L9_JI5dF-r7E!E4=p;Vd!hUi=ssk;5;En zw-ellG50pFzKes-}^Jk)ard0n2-*xIf5p zDg|3yArp4_nxy`*f<4SSHe^bsoVg5!LlHdkf{iKLYCftMvA(Z+2%n}m(?kAH9Hcus zb5L3&rQ{qk#58J``qqb7+^R#8EwqO7aG8D?&f{&(VJ%+ywyT$u{7-8Y zwat%9&)z6%n*YwVQH_L+%lf0_6$BN-^>Pg6k`&HumTW_@s9j#4QxU(q3R*y`aw>vo z`^rFrz*_hYZkKzGjI~z?i4_#7ZztM#gkriqgY!|Ynp1Ac&Iu=d0psA{pgiz`Aiwvp zg3uvHerWO0&=Ok71i-?=278A=Jua~c3k6q%t(K=xWz8OKAMZJqNK$p@-!6D|5fstB51(m4oQAEu*Z>hq)wi zF|SC^?2l`P)p->W-Z46qf$@@26gaZ0!l z*Yah!e_gX)MHE%~JaM($nUs)?wWQn|_kXIVtLv-!KI@~1DQ8UE(?WlIcC~*kHlm)b zHHS(j3@os%Y)hbNGD8j8MD4-pP;ONAHS4o{8Iu^Z%!(UCY74XBnx3B@*cpr*AWDjYPKkA?5|6P0U_;U1lOd>)F?X0( zbc0#%y#2M4$jZmZfNz9?@a-Bo|CfwSG7@vDq%Fuq?G(m!Eke%3FY`$xbIPGWs(|*m z!wGG1tlA6>nvTHWdTH_dEOX3s>L&?xiTw)#A4OV@JSY)*gFU4@9)x+&LB$+{>JlDW zx^T2fyAagFk6i2EDhd`_2%eZ-0V+2lf#KnPyYZOfk*b!;My`XJCD~uO6PL381sW9N zKs@88D*rd#@PAX~)FBZBOMT_L^;U5iwmjmhX5aOV-Wu_~?|Hin^?VK{dzD=+9yRbEkrTbDbys=)}GWQT&wj!dVNhI^W-TF5c4%6__B1P>bgsvsy z`6wyMvD+^>0ck~L3)V7^!Cwi2dh}v--h_U-tdB87Mk}lHZdcwDe7iV_zwn6q+bJ~ zK$Lb63$<4wIcaadl$YQrW5t|xTEYJ6aUp&CaukJVpIzjKndfS$cEIZPVLbKwt!As!5><(# zA?so-;tXN%)@b)q6b1IHYU)~Q)WmBGqH&1F+9Lvdp^~{GjL4I-#zEy!q9Q#(hFP_s zwBau1%3y_I^ie2LD}`G*mc|E+kp&7`_&&fRRpP5F?`>ng?+ys$Dxu$*ve#DH1V&8j zV78#}uqH73EIlnW8W7=g6xAR_pnb>mLpo6wtCh_Iw)Weu1Wft8IkF>A&|UC(yvI+P z)C<2`_^|G`qr_%2u)Mp<;sn zJUon>!774(iBj^7!s%bxW!x7*OAndxA-+B!y@)RCORFsFs)UJM(jsg&9A5EyTJYnJ zUen(!o$uV?UhbYYa1Jzk@X12i#>U1zPTV#P!qN?lyTzUo_ZRxx`f8?<33PTSmq|0{ zv;Mw#@xU0heS1p9VA))SmLwfxzh$eSJNum$E>EG0tG;fzs?$`q-rUsG-h!lhFw}q7 zwv#7QHldj%&xD9Qi9_*6N{;wqcr2A7PlU{>kr-N;0mc3MWGLkzB?f~{9wb#zqV;3( z+pC^hD4DDYCu}lz%c{8x-vb#eo`4vwR^f=moxHk_ZXqc4ysuGGrp!UnmTst`3ag_> zbQUj$`6|Sr^*19sJc?TOpxrrC$Vup^u}Xau3N*tD-T6n?Rh1mD)uiHl@HUJvbTg>a zm`3^bnpFYr*Z(f8A!sFt$2_RE`i-qC7nMA$%uDtE&HVnix7xc%uKN)KlF;7P96B#L zeC^l2&#EPs#T)Hsb)5ycUY}sV6ZCqoVa~ui&cJN|p^(un*fq5xZ_$P&=d1=ufD!m% zka&T#{Wg9<5khJSE~E3iq+f{<^-{cZZTP3! z)hbSVb9-3pfvSPFOvDC(XCqTO!*1vSh20HZ%Iy!!olS! 
zxe5ACkz;mn9YGU)?`tf0tV&W&D3sP;SjY@rS^)j(2P-H%2w{bVsV8U43Y2Hc8@mll zp&v?&;WRQWQkV{BRV;Jn3*H-}x`2xnyA3<2VF0El?P$G&W8Xu1O_f}GTSKlBJVqix z8?Xk^K#P(I8EEHQasgzp_r{Go4c{jKl8E>s*mv87Es zSSQ&*_yH4DBG*}(j>1ivs5RjpLWHC{XfQPtI{kh#{P-I1R}3an#VXNR1Mu9^$`yPFG+n8Brm@`LOCS@)e|gghitY~&?k$-9?6aP*^Ad; zw{=FWzOh|FW4UGR)?j{2=Vn)U#bQbrLg27Nb0Lrf4iMN1bQx8Xj^DKD6$Z_z^ktc?ccutEjiGRqv6=; zhDuy7A0-T%owSLkq?ZN(?q3LGM4hrcq?(jK95)#f*mkQMv=0*=$3NF`qH7wXFe_l% zCrM}u)>6u_I5{wkH|-i z=7cc^IR|qPxnFFB7%nF#eL)3RhY4@M*@Vk2EunnyH+M-A=n4vnfPE!=j}uz$4bB6E zEtEUp5J!2;ULX#P4P|jly%qzUQ9HxvFlCB4#{6^+V;h_1A817JwGpy6#tNrUepZGN zs}1-*|5WaAmk&$+`BvxCdB;L`btGH9O63syQ++L-fLjzHL#CS&b%p{_3)2vqq!-L zhI~?G2)QJ{s7+++Pgmt`*eS14_)ixSP|d%DyD2QiOLWxA5HQTqTfZWV5kFFB&N0Hl z3FtWVOlL!foYO_h$mp17=m3L*&YsZ2MAho=F5MMvMP*s$@**L?7v^UVrP^O*I`yg( zF3Xm+uD%UPNVIdTWOxF2cr{9Itg@c8)j*ytE2}JiFTw; zX97czE{rSG^VMVt{|Z6ZI&CC*#IlG>)S`N&p1yy6TuCIJ6$pRy`<#nFvrv1Aag=-J z5pLY6GwhcwFoS>CfKS_%=D+VQWjpBhd+wF;MAU8l-}CU>d_+zV94~#-8R+d-c=b_T zuiN1x$n>O4_w;Q1v9a}m4;D}nf0UWpAIw2$|2h>9q%U5+aPA{;T)nf-A0w z&09yKlJyRggFtXz-@E7A`e4O+pB5NAPLUTYd;koNSf>q(e;05iX*F$t0!tlW=X`WYuZ8$0xfO;2&3sM~$vHA&A94)0XDaPCe zG_^>r7U;lwKidVbAxsAE=74;?u<2`+SdT)smY#U=5IjWejk7V0PZl6|piEmYGf>YK z@LSeE9(!g#fi?yg2Ve7{Ga<<(y$9D3)W?z>sdQ~&Otd)3*`t#0Blh+7oiGdOj1j4o z96KVjKVx~7cR}*82k9F*v=yDzfRUsun7rDg@1sqz8q*T-PjHtNiwgijtsO#G!hKq9 z(<`XvFwtbJY*mJB}SwbeW zKygmL8L<)(wOl+m9nGw03P?Cvg>coX@igk>-&7;BFiL`HN}!`Av(Q57-H2nM`r6)# zHYE^^VbOe$5LwlMy5!sLNY&%w)^f=sD&4t?7_#M zz%pFwdxO9udW3qD_KE2gP3`2%>c2*o{ohRk6MqJ&kM$$F5G?RD{KiA@8CA?7vEEzH zTG;lmsm%2D`O^=W3_b8APVru@|=csE?n0`D2HLF?>fJl<&6RK_jHcsQzf z^)c_SiO=0NYni7ENav5^zCJfAp@WgH%N=I5!0iUF!3aU2DhvsUCFjJ9_XE%dZueJeSh{g{$U zX46i`^KDT=3cjepq0jsu^AX5EJfk3&_a6PEqxXdlX8iw8^B9f+(cMJMjh?Z047~ij zSgX_bq8SZ#+AAjfI8J;YORTBYUl06e_&k7FvG4VVec6ZtHp813D#~v!@$)!Q;Ok{Z zaNK`S*#)z5NJ2BsAvBHAJEtv#;g2y+LKuM|6PssLp5+Du4`dbN!`y9xp?X&u zK&ILVaiIn=$I86hTfE9P2qGXuww&GGsW7Vu`k6C<144j#9;_-EQbZ;6*nqe$3zHnV zOoe;j#~TsJ7r{c$frW(Fr=Epbd=awUhj!wz!mcAIXT7^-G{v)yOz2hhNk@oc#uS9LgozQOrUv`y*QGShHMdQ2!n!LN6vIsJF;q0E{jmN~XV&ZHqV9Kctv3BseQ<*mWTEz(aHulD$As!1 zkYmW5|ATk8>aTPrB6Zl6V;8DqK?o*UQ^@(@lYGx-Izt`%Y}yX zPzM5`6-o5x2w2*iM>$@APvBP2rOi(M!Q3ez`&R`b#OMBi2_nM2l(^_xw?oJ<6ytcO^^OIHMClMmhL^%}PBkpc<&{kyYGlxyTO81IMt`6?j4QYcDxVc(O{{dP zqL27Um8)IaXa^0t zBkWb=I_LZb4TF5);QmLl?KDJ8B-}1WcF3I~!VGW;P*tcJh66~gI1Hs%1oHrJUly`xX212HDd4j}d zRcmVyv0(RYL4OKb!(2?uI#K_X+o9Fs`{~@kqADvDMVETHzsuA&YNb7^0}_-405S

    DQq4yW5hTCvKWiKr?px)1=fn-yh_NQg?OPE) z7^~2wnhRMqtTRz7rIWb32H5h2TM<43@t}2ZCWKU zslVJ)a12sj{-`w{Vf0bu1gm z(24y29+P8nf_vXb@1FRW0tEDh>ijK4v$in_6!*C#zMeZG9e&$vxiY3VEg-}sE|e>V zLK$)9ZMXrB=IZ?~Y`^`V2TU@*4Lmp88;G;Bvk?WJi74VQeOmAKkLQYgAJ44=4+7^Y z6$7IEOgmQN1m7nQUYFA%Z^aBmB{g5ppFSlYY4ZIJhr8OA>Mp<5z7C`<&jmVnI`+-k z`gSmogUA12KmpQwkW(ta@Sx5>lKazF^~6z=e^4QVD6XKBuv#_Hk);w4i!1lZaM`(( zGe4_w5v0N+fGg~f{4S1zTE>MR0`Eg36g3Z9eBpzDa4qw2EX|c93?{Ws?`Y+fv=u&- z&YE&1t(*sa?oQK%((P(sTq$TW&9RhVETE}oW^BML;1MU)h2`U>MpNe94*bOunpEov z#6&VtB16q6yQjJi`4K5@$>fpdWVo^p&+_Xmx8*V+(LKL}8zS*@|TCY1z<0NGAv^(pjbKAi!%02qQ;5ODGw8VXIn!ei{dJ z&{D&VaqeMhV%EUj-b!A}Cqox_C@KHHWYZQBkZq>tOdjm^)^?hcN+T+1@Y#gYXfA9! zy{NKx?rn-8GiXs!Pa)myv|xn@ZBn};ZncRY@P2W$e4#YeN@gyqpeBP6V?!t`amKXc z00>A0bS4FDg7O4H2rC<#R1$uKSW;6^*{*i}H z*4md(q)km0Xa@Yww~JX1)%Bg;#%w&UHaxjA8BcZez#oF&9V}$hO+3dtgu;1K>YDg` z?O%O|W&x-7|5A_IB0{OCow(5N#ED)*4e}@vbJu=O9Ud)FdggH7t@1kXx}{$r{?JYM!eGWzgU@*gAe=+}%(MibKwKOifwW1(g zhCNa{-antr-uVJ#SevPcxu~H3TpYUaZfA?KYX-m$Mwdm1r<>7G$*bOY5d`1~jC9Y( zC~;&#$h_j#_KUdK9<5^?S5ckTFckFf6s^BB=p%X3s{;lfLeTo>AO3q?P6~iYacnxB zsZg%BpHo(y8U5cqK<^S+2!T6(ZA<8@l;7MJo$P6`d53G|R`7TE^ZIvT2EC{MH%D8M z|4@7L<+~;Yd_w5HG4Lzku`T~ms*U37Iq~cHDd6gx*>2aH8wcnznfZrh=v14!Tq1~KZOmIm*Fj1GP6W{BA%z|uqC)MEj&16~Ow}$j z9eKcM(Zj!S`UH8G1sbBFBWy9XlI<|%9~w|J(6ZE0!?cw-YztG)I`Q5y4axOEs5fmVNvs z?8XB3?M%qEqUBNhhgL4V|J7yjhIcN0q3MPp5y3=wrJ{mD1P^kO((Kd`$1%(7_qz;5ze=H(4rqgsoYtqpv{p_i{}%ie zfE>w2=N@hp!tGogR*CYaw!@BG0qgv4BxhFCxez;q)TGgfFPU7*YQ3Nww_W_Y1y!;9 z312Yay#MX!#BOU9mZ^>G@W($S}r`Bx0@ruk4=oPybM3Mg}{{iJTwcF<3=e161l&=xLZJ0><{Cdv%(W z?ZY$@6?{?>#LJ<0>1KS=lnW&NRwezzKi&z$r|#&{|EPN)H8Uh5rT=L5%h22_S&2LQ zW9=<4zQ>m;5GGzpD2s&0>n$V)VlTBbXnQD&O2Qvc0rzQxV7Yu3T5~0)R&Q+0%r2#y zWDtE!h6sE_tc4ticm7v1b5|R7gu20^E*)2i)%Nl~3h6m17 zbv!UbyWE`~?RBIt|4!x{;hSPGECEvF;+jLK1<(7Ri+=Av7ybVpAg;kAw*UO94>-HK zI}&gfcnSQx9jbhw9OI+{I804KV;m>FF7uWbw51 zGO757)_Z_?=Dmo=*Y1<@Yi%K*=j}Ue_LC2Yc2IVvdcdASNOV8}PyQZi7#7lXLFS-G zm}107$htVUJ8+n@pNN7Ov|hwi(%RAb7xDcrTgx>?WF7#RhD*_l2|jTs*CqpqqV8*h z{Ef+!1Kz^>$RqtCz&%TqR>6SMi0IjxGCd$LYxDxAF=G>tVI5%t-j)D;E%IwJc%WWI zsg)AU(89OXe~Re$J2Y`56LWgOS1V)Qalwgd${>xFl2LAZcO@qsA*VzKITt&|A5kYD zT|=vF3ORvXvieg4h!hJj$Z{kb!LUtt85ghUD8DSV9M!(nNTG`Cw|6WOsx*QS^=IA{ z32=^QXezlX5kM4zttNx9%dRLzp+)7$Q;swls;Up28}k@P)TJ7?Cka6sm4%d3Z`cW+ zE!34AJx~OBMXt`GXBw8!9V@6D#-$MxG%*^>GZVrJ1IEGXg&~mjRM3R12o-F_ai5c# z^b;!pSipG;i2YLOB^4G_Cx&&L%22|pGAIffI_4BrJCMdp^U>OVCG(halr>BM>C| z{-z?k)_`I;r85wb$0|D1Iz?VFuZ)Wx4j7f-240?Sm+_5klz^cSkN6{LXs4pR) zpC&J;4Kq97teGAQaG#7cOQXy%u^I#M6!md5iZ57?1a@rIv6zIfqmQ*xaZzY=iRwe8wtTJ}E2UE&0FUEIO{bqS-A5!YXqXjUiF^*ijc6mwr7dOS7A`!EMSOu4c z&EKf|ikG@bUxZW?L(x71Jinu8jBn6Wsi!NI+~*^c0x0?*|KEABk3r1Hey;bWYUj=x zx_%_62qKZ6V8*tF9-`24!)@?yNxah9!wuiCX{6xZ6TS!FaQDWV+6jB%B#qOU6Up=a z(D9Q(G2j|1@cO%hH@Meply_5j?w|kLpE;fESvNaGk4nH^P#HWIx zAf*gDpeo3yzLL`u2pj$(WWbo|uON2U(g;m!4~K7@Ro zIE;%CI?lY^EE7U9gmjVCc3aPG=Z*BYIekd^-^+r}rP;mL!!r+1-TIwNs~_}q5*<3* zn*F$?N@f_zj!<=Z>apSJ`ag2HJkIM#BR$zqVVmu4T8>*B{U%fs@OZWH$d_Aui7IArMbF%SnQj3$g!nUP}qRYGVo_qQhCx%_K zFKQK<*Bs&@`O#v}a0A$|+wf!F$x`Vo$ZJ@D@E#mt%E}h$vA};xMZD`;A%5}CD=g5Z zTm$clW1&6o#sMF@I4HYX)I;Pkc;ZVM`=Q>^{B=kt!QoD1o#V&D9V8JVk>wFEd0`Kd zX^tndrgJ{z&ZWul5!?V~7X>EI9G?+8dmv*sQKp)-Y$;x@!hXjTugWp!E2gB={(VNh zIXHnhvX$DcV&@|KL>7z(K83`j4R`ml#-|?c z1i1ALfVO?)Kls^*$M^LU`oHKjF9RI`j%rf#dkPG?BtNpX z(u^A@t$|!gCpMKeSldyZunLk6d9A0opaj&#C>cnaK#;?7m{d(ou`!3wm@=o6%oAy4X>Q1c zcWvEs+kgtRURueyld#P?P#i9j)&e-fQAAJjhm~%k-dzK3g6~`E+ZeonvXMTdC?S0Y zD;UL~(1`k!_mF{_FzDRZgrqz6_c%GRcMCmI@z;WupjXqb8glES40?J#pxB~=4mNlW zh;=QoDQP&vBfmfJyIvAp8h;1;)3LTE*QXPG(za;&CY}l&Z~5Op&iKEomM7qEw)T{L 
zUfVExpMF!8JKtHBcZZ@`kM^_e+DhH^mMR1v#+T1rEwgpsJnF5-q8P2(^UBsS(;VD& zrnnLw%w5gS;v*RRcgIc`q`@GgqvL2%!gcOTcg*%vBo!BS&vIXm$nP!^xDE=b>t z5QeIe)^?2pb)X!p%+-c;2+!3ql}nyla*23qZv31ZjWSttZ;Xpz*fn%Ub3YQ68qS%vA>THu?bN)3O>#E=6=_WhLLOR8W;5}RO{KO?99|ITX$^79?^ zHv8v%ss@HP6zQ7D1yV4d2>ZG`6tw>oa|R(CsjYgu$U(BAZEY-h)f_7;EEaWJ?A)_{pniC?csnqg=Gcphi`7M zk^`9ud$gcY4ae)723|h``yyi0pS^9m;u_*q*s1Aal9E|GXTk)jFd6PYD`bGL!Mst9%Ye4J^Ntl*3Px>LV6#6tzkFS zr{+W(${<7^9-foGV?KlS`#em<<9CzFl6>$m69tS+*a!4tMNMoPwq>rlNRg9u38ocf zMp&aT(L!z@_ceH&#+t7=9oYNT&*4fpv=+kK9A2tRJEcwpcIHn87 ztKxOgHcu2c8Wvge4vL5rJM<)Ew_~xMc>#PJJ}DB=$<6G@V5$tq62IRdNR93t ze`kpCguTRkRNX8wCDubzo$=}JVj$n94R*t2T@T!p90$v8bJq^^MQc-Xq@FG!j>r|GT)x$HzyU0Y{*OR5wdg(n5caw!__-%Gt>`_WnuM?DGoRXX^j+sN=VLPI z!ARN=*T(|(LkoQAES%7L7*&Jj#rF;;Hski$?J9k9n($s^HDE!W^J6mbg|XK0;J*xG z5^#GC_gf`7R!)GL@fu;J>-e_te#7qH_9+FkbdC5%x6WFn>|QpnJ`4^E0`DaQ_kFo` z1(v&(%M1;!13zcJULwD)2^rLg*UsJ{684^J1snofLmfSSZUhOI6}vzjLK(ZHyHuiW z2HMfd)j+xiaR$l6M2jIyAW3^J;y$pJ{xxZKP0%mELvVp904Moj3St&42db=%+TfQd ze_QIt!VG~g@@PV>TG)S;Aj{$b+fn<+p;m(#3TP632$rchVX5P92cERI}`(a(#PUsF|>ve7AMFJPF^(lo}CtCrJ070}T$!%@(q{!q5#db8jN z{ZJ+`N)9AOFT)B*Qf3T!lC;raI6P1?y9*)(^73~$J2auFgFHQ<_*b}! z)l3(N^e%;l@>dOdh@6fHxQ`O%2U)EWJ%NEKo(X%M*^zJPxZw*LVA4R`DCgEK%my+o;#wfF3z1QH56r2QbLrXz>Dil-N?ds zYT|jP6CvPJqty5Q*W{Az0=D6F{{@r++f1(i>VhNP!JVaUN_d&AwD&A-%r6msWgg1y zz}NMW!;@xQaH`#ka*MlRsXx##DVTLcJ{<mY%h%l65Yhy-2CExJaw0Xuu?7{ ztwc?`A4v=vU-$Radg#bR)vuz=vGf5u^Ua>5(Pz%rVZHgn@w+#@F-s&os@>=5Eh5pJ z1Y67r+Roes%DJx)iAjl{$Y6|1%t;oxb6&A|Hf)pVX%50mbLS;e`Z^a1u?FTQHBrJV zpl*6AWEC|e^MG`@Txr~b=m8#_g zx%R!5)qen5Eo5Am*gs<5vx?u2uGW8>d=&AC>g~5NdVYN^f`GGnoZV1ew%?3Z^H`5)(&^gm)mxJ}q^ll$igRRvN7x+KbbJ-Q2B+>)d1kz+|+{=&l>9W5f$pZq}Lcv6MTVs0Mr7JSXwMedes?-pG zvF=PXWi06oOsIA1pZc+V!gI3vLyfAGin(0f zt>ogQf9(|hq9lQ`W@kMy{+Km|d54#j{aXm0Y*yI&H!+vnZT~*6b!4+T-|hBzBT_NX zVpxSX-RI-;pN8w#=4AawZ~l78KbvaTIJabG;tf+<*R#b+))y6RXS@P`zMMS1)*UJn zT{#j$`Z!&YbfFPsoqVTE_qrCeS6MZpl(N)n^=i#hbcK_svA$#JqqjZgGMm=9?_asq zcY;NZ#6#Pf;KIVz-n@J3l!NAh>PGc6?zcP?HJUAoTIO@iUY4q*d*XUnfi=sC>~9ot zfCdqK8Q7OrF4rQiF;=lV*tMlUeS5+&v#`Q5Wu4+Iz8m&$GrhGAoob+-sRLl6S{3m zY#IP%Mdw=A{xWn6L2)jj1R1n3?~tghsmGzzyMLSwb1&yw?7mRz7}E)BZstp!i@F_- zR(5!Mufvf@1bko+nh68Hl`^45#1np?^Dwwi0ki$>gy#Q4(>btL*0n)8R>vLNwr!q{ zZQHiZj%{^pbZpzU)3Ke*d1t=)1!rI9+H0@1o~pX5`>y@G#v55u%=Y6dJPx04`Mti! z=Q4QJr)}(j6>Rgs#dX_>;`!^b=W8WVxek}0O7;_lPf)KOH*XmTL8|68wf9mxNonYVplEWwI$jeUj-A zri1E*x^-A&Wl;>ATCqZLMUxnciRP%NiziBY)f*^JkrEw$sVXZ}CZbXBtI{n^)KNAw z@|yD&P4!v08&^y86l9td5IrnC{!D0W1>S1V>lNcIq|es2j*DGEl(pjm?wvo@x*r$Sq${``r} zk^yfP5C{eSfb(gD~5^prON(sZA6<+UVNt0#D-m-JVwrv$XqpG3lu$Yz_N?O z>T)sPB5p)B3zP>XN*CNMK%B${M7pU&hLi-v!-PuZikdAd<$_Ct&womNPLTQ{=HbtO=g1UWk5<4! 
zNfsg||2TUy`u(rZ%SD!q^u_542G;~At##->e4GegbMMF$+`RXE_XWEXc0E4v#$|IY z;gk{V;()9vz83E)DYxg#Bp)2*v*2Mg7KL1qi!PxqBQnf0@t(U&W|?QXtIx2h$?JK( zZoAbu?~dw*Uw6HlT|q4eLCo%QvX zj}w9C2i#iYHEd8gw<7c6+k0DEn`P_fsYcw2Z)CMrzjB{QbMvBl5oSdObj=w=2tyFp zmRpd~NGBx&-Jh0*d)PSTMnOfqF~O`jP^CMy54rr*tn5>+8(1dBo;I&2P1D#>Hr#~bE zNXUvp;)`pNt2i5eHxZzI1n6~?qQ%4UwQ(GfbYdw@Yy*vYpM?K zvG?@XN#{&?(IBrieV75~Ix|P80B|~twD`)l8|L_Mc z>-3Xj?2rOzkM{opPT|F{2}1mCG5zPr#((I)h{U=R$xCjN3iEyJHmgQGvh9C{X5;KZ zNgM-TsCRxM-O)GR?&;r+xa9>VZz43*MU?5*vv_6Ea1O_T4SgOebZHtARB+73C-DH) z@dq#kn6pk1VyQS}g(e<)HKZe_NJ1!Pk&YFbG`Luq_H_%Ds45gNke2o-C#7y1b=&OB zN2u}O8#F2PbK(V<0Ng7)g$}1`xS?a~^K&2MiX$zbl6Whc8PApl!ufi28urF>8Nmxy z{bPYK2i7~D#beNGFyA2;9<87f@s_C|P=`O)vrsmpbx77}Kj2}(Kv+SiXf!jj>ZxbP zZCdco?|G$O(&Q?Z?#Pxnl`jFT*kdz^4u(V{@}_96un^oscXeU`AH$Pi{aPdyPG;uL z{GwR~A3diU|zF# zV0L7Jps3XU#m7Jc8xsZ9B6p#`o`_peA*eWNv;r)@SEU_nq`xvLbsn?reV!iG^&dO@ zUutIj?vy3vvaQ|TE!jf)<=0%G{yMpQnY%U6j{M1H$l^LzPa;mMl zc4J6`=?hGM0p4o`X`dMMhYx~0q0qMw>c8*wzb5{J<)a#EyIW_;k z0|0Ocj~@xrunP>>hC-vy5FDZ-ll`PjKLlms5U z8HuI*W(-U5D$cp2-Ni?FBFWP12Xg5(mT9yTM}U|aXmGiEFla*|5w`H%ueMcD!Usv; zx+)q3$OKUEr&4PGUGY&P(_MDV(Nig<08J-Jw5YgrnSS%QqD;+%zZ4}E)vgN@^C6qO@HQR_jCVzAz_ zunKKe?KZ6@K{1707dH}#B7=R1NM?2EwJ7M&8V0)ynD zg<2$B>%6O9CI5s~aYh!5fABdLs200q$~!4Q)`a?vx1=Y^Ki~uU5f=s# zHOxO*;6}C@kYg=9#*Sb_}v z>`#$0f^8tvQwHFj(BK4rq+gqf@pvGFC{Q7i6p0xN+@H<(ezNvFVei~zKX;xZUCx$Z zxoe;4SXORmYU-!BHaVJ+CehcsNGHa+wA3vL1dq(d$>s96`}nN8>e??>HYR>xhJ-P& zL7%NNVuvnBnu#eh=c?jVc06B=C~rFL1|V8mg#NXnQi}o4lFR(;YpXt9V>Mq^w_0s? zsviDjx_sr378#N@W}@CTN9cy-?8d>__zGxGwv%f)lGthVLjnIjnsjoM48wRiZF6P`~JTF+MggQ zk69nJBk4}3%pwa@EmVrbP4JpY`m#VvDT*%~UzZ}+(ym;>O{thpKU{j)W1B?#yOCwu zz8@+rmP^y5g00}RS2bH&r6fk)-Y1lUhKb@{3h@X|U4-sy@Y>FW(&_?(sA*wV@O>!% zbvWX8G4daB<$wzd?qg8?BV_FiD0BzIeE$bf{!i~h*<)NqP=R>{1#aDD_oT=KoH+0> z7b69``(o!4-t2$U{rG{KvbpXwr3?w})EOi}g2TZ*w3d@nYJcpHm{q6dOPJ0JQ^ti? z{>98%ZH=}6^t0hB{&UTlbv^S}v;Xe-|M%>_7ukEC``jrH#4fXlp?P-gf!!%z^{8@1 zr*$5Z!$cU5{LWYr*7lgcnZr#Eku3U;n*O{R9GvL1X}s4Z*zlnMlRCHrf7R9F>ydiF z)B$>ty^6z`Xhg|-g-BrzwU7d+!BV9h014{H^MLCKsGhD`rmFCPi*2OXWfnHe9pmV@ zJSI}LMk7mo(EdpQ`M5M+mUAVmoHJdQHil#ttVL5zXK6m0HV?M>`%RZl*q_NRPO{QE zcF#kBT1_&FF8$OreAV27hhB0_Bdk6t#GcA}UZ-XQw{{5!V0=tbBv&eHZKygZQmrHn zW-U_JtLHW83H}*y!z7%Iv+#tOY#LvHx?R#hLLuF~Nr>n{I8JEDw?>s>^u)#gEE_XV zqY)u28oA`+r6KdYibxjukn&U}87<5lq=z*FWw7nAJOdI3nL$TfuoLu6MrtDKn6;4F z3QmfF&HvO;)cC1qAms^`OrV$aU_rJODqj-S^l3TDpYA7gMKgXJb>xG|nxKxjT(7jr z<0XpC68}!6o4H1l2BglQqM=1wGmokNNDs=;q4PwtNV0@m>q~Z|@fz~sJdc2srok+- z=23ov#bYJg@(64Y)`Z|)MB=(MY9~4ujY7J!ICbR!A@D`3Qy87T>N z-*kMp{o9;5rPdhQTp!iXVMCaWmMR8ei&vFlF-@JQf#wYs+l9-!ucE6?Q$)lXB#}`% zs6h+3TYnl{OX_&K@Z3+z9aMceo$Fv-1A9BzO;B!bNbC)^`wL#Sg_>=Ta{%(c*l0V?|wU`J9%P9jr)6dOE%i%6IUGc*$b^l?5l;5GHcFHw5<_Rknf! 
zijuKwvScI2*Okpr`Im$^|Ksu%ec!F-A(C6cl`Na~jl_6nQ*)j_z4=4JX%FvPPZ*{W z-dt;4&v3k(UFI{o;fVIOEiq{PR!TyAVMGJiMd^$2CE@c(L;Pt267%lJ;)*V#q^ugE zl*X=pxP97U4;YN#*q~fJRP|_Vq({h)`K3NlVi>^+lP9SR@J=QLhI=~t(59Zs45{ik zvCSz<&CR;j=hlx}_~O;SG0+GsXnbiJ|E`C`@d3UjV$F7 zb&8zsVKn*Z;-IvO!64>b0;@MTPUyW5dTxRH`?vcm%Uilia?dU2SC|a)F^+&;X=S`c zqFj2jo&v6sC9T0s5y;Tj0J0W+!TRjyGaSul6y@N%6xMuRqeR|91x%(SR7`^gERF8#E4R!&n?(U(vZomY+q$w%V=Q zTIHYjnAf|Qy-bTc3kD8EsISK0!aFFv?|0qqX8*1mx`_tASUPMo4Q*Iwy-lYXVQ?tO zX@)9OqsBL|lr+3F{+!Nm?PVXtI$LwlqDj-Rh>_&qgz|&K{~h*(%ci-%k17kLPkcrX@0 zJ5(VBKmJ`H{6JtY)xCiKsD;c%0*jL<1J%oQ_JjIyuBja;|j<~ zjsoXGU1mYX$KZd$ry_O8sL`v;bRBaL5(baGO%iEAg9WGgc=jh zPL#k#AE%FliW@tu@CQ(OsFnBrT-OUJQ`o)m&r2=s>g9@+9f!{!dJly=FOT{oZsq$< z=aQNeFJG@^m1BXh=^YjWn*5`+9Q%hQ&&1&_vUTsys zxPG&Ojk71j9p`treq0f&6%cq)!0JY@lSWs<-G$Ij#jA=F*4Z~JhBt9LeCAf>*16B* zP8nG)+9G-7*ws!IsF86)N7iF%C;0VDyC1Zl*LK}ci;l$QAc`wHZPm(=p^W z#(<8Fq%zBdDj2KB%Vq%DTK;LGKQ=Z@lN75}h9y#BE}2Z`owtW3+_e=`6v)8VFMmWm zdk^{L@;Z?G;>&ht^%B(Id|7#Eu70A|Pm_XRfD9V2-TPNr`lW~!9+vjR zZV|MVs8?F~=#W*aG4uAvF()qC@Jh?u`prl8^}E18Y7|755&pripw+n|Nfg4x#eayo z{`p7m|2s?6V+MA4KY#Tiee`Z@KXF(ocT4g1|7%D0zLcsegAW5s$c(o@z{4^ z0<_))I8?($z)!v{1IlIk&mAD7<=$lQ5*=4;9-6w95*X$(%uqS9Ln|WHZm7%UlIO`1 zrE1H%tgKU!i()dK*f@Uyf;0=L^b0`(5-Z%_@WZfmL_li;TFNH@U3${g#w zpU>b!^?_*|kn>uUm8CsI=ZdJ;N#{-%Wg}C$Etsu6Q96i}fomyn!es(qBzax7*u3T} zvH*8T+Y?GuuQc7vd+>5HN8_rWia8?Pl=1^by`x~3l~ZK~AOudfrSukSsc&+KE*Dn7 za;iDt)e_r*u%L~1kVVPoiySKmro(8D7mqoBC?dN@#p0GYJ5+)-!^<}5{1$H{#Q93V z%hE};BIUx>@Zw5-Vv z-jAmAXJL$1Np^0`v|YA!g0czAR$m>oMO+w5=qSu6XXch)_zB%0<9(>|Es;|}*rlU4 z6W7Ys;;)iNtrRoj((B$U#bz=+7LyxYI2e?zeDzbVAmhw)IRy`X-|0wsik0T~j?Bx4 zgbvMG9oNrhS2sp!H(E|>J3GWuW1Rwr(SPZ3k|{Ls`BRm*%(^5D3sY)%}+TXYtEZSKT*&0eaGmf~?n7K5e;tO()?CSj&Y zuz_%*$gzEAGO#IO+@HV}1w)+&DisLH@{Mk=#YL5Sj%@eXG<(85n9Z!+8|{~yzhg6j z!HMFh>8*|8;11Xx(A1O)ctHaqzXYD@;|w=z+Kw}acsO+Q^`Yx$F)%Xs^?ZC*I<{RW z=>HlpST(N$yElaqPxyWm|9wHYo3r~q_y4S2B*cn!?>xZ&qyG|u68G`n;NcpGsl)8t zg^V7cM|8aIxjKZGO$iNt=(aL!;f-_MeymRte9N@H?#eq#G=*Rw1N+`3{0QE8&DrUr zFhDl3S%XkL>C5Caz!oT zO#%Y@anPb`V}y@j&&px}utrislSM9$<-3~>i1<8BUExPr|NCYzKb4+Mas67P1l#gg&*`W9f&;eaD zaLoW|L9rjN(93`ZA-9I>pLf^<|G&aHp^m6Ef z1dy*#iW$UwFv$btPgG4!u9=ML!q&?Lp>0`|T$sHX5TdMrp{h5ES-U=9HYi*5mub@W z9a4WDNEyfT#VPobITHg|L?6gmfY`UFRDP@jW4R`pb;+2NdB;JGay1nf?r*XKk#4{+ z^U*YOsxQkC2hJ#q4_;${`>KY_g7&BI3+5#4R=d@WvrMJASqk1394HJUS}2F8l+iE5 zJWy%Iep(gW0J=VqYV2mn=hKJP=FFyP4P-qKMJS zbD*$>8iTHfzl=TFy2cV-M8Fe?#bOo>Ho$RqikcLAkv6R>Myh>3sNu}fIZ z-^+t3TXLN)e>y~z#u19{>eky6xI^3id&xN8{qPBLcvAOh035EsJ3y$#P-q1Lc|cPSYONm|hU(ptq^E=}36YIrqC{ zkJSwb?zzVdF<(ky3R<1{VfSgOOx*V7@5ZcJw<%PsplT7cQsI6YKF1oU>A=2n4QCYK zH-`ruz$iurO7RY5uM>aa=%o?f=*Qc}oqfvC!sA6LN_!=KPaiZ6-<^;Ea`YFKRHbtOP-M7`ir%W?zvTu8h7JTqUY8{;$5=bkOM4S-&>J1lkt4DeediLLcz+Or7 zx(c-Os9)j4(7Syjq}1wKh4vD9=ge2>c+XYuIRv$Rys6^*Xe>UrgKDUv$DM{(pkl`7;wX0RL07V~tbBgiIr*mg4+z&GQZ3}Wrsyl|;$^+A8I zaaSFme1eYB~C0jNi*umTm27Rwg$ijROOr04f?#cmjY zD$mvznc(&X0;jYH+;|m)A~I^iW-<2}XK9yDdoQ=K6hz*HB+h`&N{td#tR#iyUzvJIF^sO13Nl6;h~uY-xf9I9<7nkYtJISLW^x5Y$WhP*&l z;&DC7>Kd$>S(Q-Za`5_85@s~{J~%4sqG<(@-9hdsG#5T(C8{hEBE-{VaX9#r?Mvx# z`sidA$w?&J$poHTG19dAejhoVLJl_=`8Z^_Gi4A+TRyx*a=#)iSLG&r>g19fuxeNX ztRswwvLutiK8@)s`7{7S*fj6}v`lwHg$IBL{x5<3C{F_`XEqOHCqTM0gE7{$e$59G zUsOxE>I9!n;9I1)?^wO_(6nQDWveeM>$2mj+dFfr-2Z|~w6MtYI&AxlRtKF=90`3P zYi{VqP#B?=sv?yq`gHg{r$bH6CBFVnuyh8CT)nDR(ok=O>4DM^-P&cAhDdy0urN}l zBAf964$uG(b%wpLx)GPXiBnQjwYU)cRkyLh=jvqZ>&kVIuDw`UL3`fX*~cX#2@?pw zZb;Bk^&dAe8Psu}N?D&>Q#v9qCPDGx*5UdA5uU(ppV>I#ZNV25Y#h_cRIjH}zjSa) znhnORI*YL=H7%q5u!2a^;><(+RT>>?E1@Ij8#`Yvm6gCa#nQ|k7+YC8kXCV2wb>K` zE!TM})nv&PMME1(S0hJmMy{sdz|02i6<#JqERrTRmp01dq(FmcNdvorU|J^X)99n? 
zgu<1#%cNcDizL`*_+gBI;m_Xdp(F*lf0q{hLSoXM)Mo89aZhSddH)H9LHHTk48pWI z=$w0xw+Vd>BKV4htUNWZTKsnv7G}g9Wb!`7NH%|tc#{zWiR#YCBaJ}7M+0|Suu#HC zs4DE8%m3B4+duoTxby5?|E^iyf8SSPw}A2EigWXB*8PrZD;N!x{}RZ>B}X;f@P)c7 z7AchTLZ)n%S`onqFlkyOKbX(-#W?-VvHl2}fMI>R3EnP~E=F$10|{S%uNo&&zW69I zM*5x;e3MimK4I)c+s0k|-uZs+NE7*$Y76u+?8Wtxk1Fwh2)C9=vMm*4M)rJv?R^n5>mFDAZEUw_EOIi!I#6W{Sp+6@u_EdT&Zi|IT}Q-Q9wx#{ZE z0@)*~HC~V`Cauz+?C_&Bg})_VjO5CY!Vct0v`jq9w4};*3O<|0NkaWNU?+1}r4hLP zSS(>99G0O<6kW+wFMW}v@&C9ih6*7t>jU3uRFX-y&aiRyovTuQ2uC^yffip_I9KVG z=?0fal)ytM1x=Yn{&ACa)m*wYnGOlY4N**yCLx|eHfEd4FG*)K1Ckv4$P;X&*#PSP8!t;{Mg2!o8se@bTj~pj|4aKs1XF4D6(Mkd1qI zr2lZDK=t#1QyuLk)ig0AMi7ZFN_q$@4<6xE8$XYQ;~3w&G$#2D)*6JHPZ#;_GvH*e zOhNs^N)N%1L?5Fpy2b4FIM*s^`fga#>q)X@CwF7RscaDFmk|5G(xz0sxJS>kcscQQ z*a;7)nWs{(w5p?kw(An-z%&wmL*KyuR`P_vQZf9b^G0NfCA{f?~VE ziYl(sz=>aV|+Ki?Tx({8Z*3pjGWI|ZL}H`f|vqnp!0 zt)AUB`1%2JZv}*J1()QQU&A-A52b?D_P-9+x5rzF64$>@B?&`s+4b+)|Em|DIS%Fz zmo)$5uEr!p)~MkxoMvxt)W#1~CZ7Jr?cyHj<`hZRfli42Ul+46FIXP9$TSBA9ahsS?_rOJ%Ilmysk{5`IR)k^)QsE+b zWbuovRI);^G0zn|GXz{oSub&T(aM=BH-(ari`mo#8a%WX*aM22N@AbODrjV)&4L*J z1!rc?8A6M~j{)UaD^rt9E_R#;KIV#Up7P{zAO7PE7-m^Ul>Ev45YsAV#l3rA2uWkxIc+xT2T{Ih&9vu|(>dAIBE`D& zH0t>#5s1Ni_3V;TAn8Xx&=oW&U@(a1NhWE}jwXkc&4Q)NwMBBP2z|L3kMFQqsJGU2xp)v{>LHltaIN^)=9i4J~g8oaSy50cShU1&Yg>0K7 zz)61f?>D{R8%D@P2I~nOpWnqG&`d0yFQrb)b7Y0iY*KwjRqGt$M4F6T1PeCdxmpf& zAZ39;Q2{YVTfm_ZY~t5W2FIFaluGc2rIe$kA*04hUb}X==vIC7i(qy^_p0zSuqqDFz4xa4WWXb|0)o}gbDE9qR5B?2v#^NRCL)(@GmT`A#4sA|0fiL;PFX+Dop?Zu zA{f6f3#(W{<)we$E3wHDVnAwsvf?Q)@AK5Y(sk9*ecs^%2|>bJ{MSZ4o$=|o zcBie;d}bYKwXN%hz@9^^XlAq9^VQ0-`u0Wq_&H+i?jSfK0J`lzf${qz@7oz-1n7T? zxaS{YY0DS!I>6m|A##goV+y)Rb-hww&jy9|O>z{bd<5b@9zL)AdrWE|I}DSCVA`Ks zBr;aI(iqeD{78r*VEmjWNaQB~pzN&{@;Q;OHz0j1gm zt1q2+fZ!Z!-m6R!c~jwkp#RP6jGwwTzk_|`f74siP#j&_`*)0Cxj$Y?=KLb-O2L$a z^N(X1W&UI&nJI4u(>V{lP_z0SmU?EOmyF6Y$^Ibq&y^GOK zW8m$j@Wk?sP90ewu%ko3d+DEwcF^6D9+(X5=#2Y@gcd+vne%i@UJUwrEy-*|P(*H< zY*k}Gc*5idxlZ8)s5F60q+-@p^q{COA=#`leeFbpqRG)A&nOrb+DZs`c3XL<0rKL! 
zUub$xY^%KK<7;ghPYNRct6fX+5?Jw3&)Yttqv*X}?r|N0rEPCd)P{NONjNSoEQroK zq1W}$;zY+_9_~y@p-gc^y6Rl>jElogJavFx!(C1V~kqwK!6 zAZ-%<(koHK?85a(HhPbkxYGSm6AZX(V^e5 z>F^!CLHN%8ftg0FrzkxOFdMQ>L#a7LW;Tbm1^o=}gNM$ocZu9QI4%C-z@E5whUT73bDfY;I@@AdSS(e*LQV!wPCga9ex0du8u7;%)Q z8V^FfnxG~4IQtfYC}&^Z%-F!tvZWxbR!o`Nzitm`(c4n$uhH|{zLg{_l5B#lV-k+{Va?1iw^$t6q+;5Z5LpS)!tKuuZ z-bF_Yn&78E9w3j**L_RYw%F`sPQpvo4AOO6Hrm;;s-$b_gI%FWo@=;FgQy_oPY@j9 z4_xu{t!m@h*zQo8Sko2UC4Uc@7c3HDWnNfZH#M|RD^e^8Qq)9j_e25&$4Dwvca1RBd$>FeEDM$vXU17+XHYW6x$$bdB}ktAdREh>d0 z97)lzfmq7wF{rng>ebLCpLDr0o|1fhIEO%9;Vr3K@ohk0cVGV2X7#faql2YqC0D>h zQ14MO%(kJ6i&X7Z_WRu~3!|39jr9n9+b#xfc~Zo7yaU6t-e~fvbvo2U`{OXKZAa}p zPU+axN6XkNmz49VnICrJ6vGihsZKX|i(NrYuO#%7iAnBk72o}d<*wC!p%I_p&*voy zhot?HUrKmQM(c1kQ9{U`86-HCn+s6V{L+j!0$`&7GUCS4i1~@OhhTDeU8D%Be0j~m ztxNtABPOsUreFcaj_DgdzHy?vf_eoa&K!J-5`yID*Yhr4-jxbvQ{LYy-dM^D?mPHu zAG`pk(z>VNtw9*$6yUaY4~=+{G)X|5(blrynuX0OP*ID*s-CV_(8v2ouLqmQUi+t$N*ziG*(IyJ-1mAIl29eYM2es-d%7!?)7P z92bkFJ~@f6j#iS^wTT^v5sVnG+&|ZgGdqmbBBA#bp>1?$tDl~xHM^s@UgCn;Rm*(r z$hMZ>pxp1KmT)z{%=O!lUDVhNLmv0gnZ3sciePYXqT&~j`l8lwD*1Ubv^)gMIoW7* z(_}<9Y^0_=4Pv;!u~T0l*NB<9Akmd$g+q$7_=Uy?QWu@ARE9hw$qbtErzE@V5N*v? z(NB=yKIX|!1QvrdJX^UYYvNvpnUkYok9*-z9VM$H!#^tZ&RF<>q zFEGGS38|pj0Yb+$lG|Ab$cNMU4pSC^0aK#{=}uedjq6Sys}AEK9cEdfYGNG;Uc5L| zs?|x4+47>3S^1eP>G5Sg7|!F1Ft<`yEtdfgnm1vWEtfaN4s?lriL>5+j!KeoJ{P^X zT2gxVRwmaW^c+5ji<5_^E&I3c+|yb8Jt_jRugRx*xKpgf^&S~ioh_l~*1ri)n!)S& zV+oHPmO?KAfA#aPsb0>X?^S%Pr-ZS2*`anDPTg}lvh5G&+`t<1ox`b0r@dbr*?to) z!@JbFzuyvq1_9&ozvE-e1|y;!07~Y2Gwykj?(%PJ9$kdf$zn|T>i~>y$IbYPb4FoY zAboLr*@O4h(*b3M-Dv!flCk_Xd2xnf3p^Hw_v+tVT|yosaUmA*pDjN_`%6mxJ)AFk z2ms2CO8UutGnGiGg6TKVdF>p0bZ{OThQx7c@3ZXVC^46!3hiRqm+f3zSw5GSS(i)~ zEjG=I3aRoFLzx4(_Z)WccRJ3cK-+Rk5nSs8M0M2bWt)VOoX=R189n&1j!T2;nSz`l zQdL&cF!z~CO(uj9#;$Ml#I?2-Iqlsi-wR04)Z7y9?qkJz)-H|axKdHzWE#R*-ha9G z0n#k-$Gc52zG8YL$=HcIukhuA*t>~Sc^KPLwdAJ)p+w~Zg0|mLbv&>7RP)DAh9ap7AteeGxfkaki{}81f@9nU8d~*%u7OOi z{?hzS4n-|lz4h(5v%~O8$7?wWFAVMO%FdAsoTESVHK7~b68 zv&97btoocR8yL24kpynL$Q@=ZQN!pr4(zol@$!k=yMGamdu8hP+qf9(&n0FMWqzp^ z^5y!;s6HZgI69yi}eBVvbYGvmr{H}!h{~=V(=^a27NVQnB^Tn*66bO&vm?k47It|p8_`MT>{p# zGBnM0M+|l=?2lkbcsSLZ@tcO=VkmN5R&c58M4r_m*4@E~}a zc~F?ZGo^4yTcRD6_59DwYjEr*2x#^FE7u`WBBXt2J9Ts()p)H$_z#E3v-;`4BD~CP zG76DBhLDDGo*KVAh}q9{1{S;U(WG9!d@?1Ty4fQ>Mr!1eT%dD$q7Dt-?~!nU@=J3? 
zYfoK9!pXcLrS+%H9yQSpHnaAp_E(FvFW5DW|5%+uKP-yt0(HUm4?6~b2pycRR4Tny zi_FMZzib&F$HdFx8xw!Z^jo6S4O9m+Pv$rD zma-LLLf>1cWd6;DZ%BLvXHvT@Wz1>F$*Id|olPVC$3#39!G?4HTPk8)!VrkT)A-~0 zmM`i;8vh1C)B)i8$9H}W$Vyf(9cx^4Kb(Z1LPG#wIJHemG#11-amn=h%DJaY2?z~) z1DaMWj$HwGmx?Kz`k?vp>2f)tebF|jwv=hW8XSU0KVl*8nl(fR#AMpykN&8ae{jZ< z1OL$9(YezK9Lq`!Gn=`blS`=^g_jz{vSzaM@iUv7pI=Tz=gXH%dD}V4jg$Tl<;YCR zgvu{C5^i+!*YK64qugDk>vG+yc5JaT@nStxO&H=&vw%evIM$Fb_#hllFc}0V@+#CE zm_7vQYsL|yDz5_Mc~8VG^lG;RCeF_t#`Oe*BL&;qkt;e!tiEg%%yB`3?sDlgy;omtM$s1kGR7T)&z7oEm0W z+FzT*OQ>owh7{ACQ!_hG6np=>2*!n;0N)soQ?O~#;=%1YIGK6Tsu^{@Ho<*zD@NdcIl|b% z)W&~5DD-vJ#YL2Y-9!HEW#a0EAMaG(@&0Eauj5CX;efZQHt$_F)P=|PRZs^3N5#@y z!%&3781KV|Eay?rSN_Krd$Eqo)mq)(cL~tsK)9)=HaEjx{jVc64g&bGP}WVWl;bJy zC7-j?|7QUN?djS3(7dQKN`z>&oBAY+N0fzgtd}pZd8|YMr8-z{78miCsnC`Ba~=#oFN3?lfb7jpT|Z2s zRr##;oB0~2W$`D(xPSu>stLFccgi)wv&hswi_ zqmbJM>v;aE_PEpTVbjsq@8$78z~TKWX2{oknu>ejy@{;bv0P++*MWg`?KkJ$q_weR zTGjfO$4T;T_Q;2~-F~pBqu<#B;^NPXCSW)Y$eJ)~e>IclgO&sjVs^q9#B@GQ5*bP4 znMI1Pq4?fEih*{6VJs2aH$eZ!6b2U$d=93g)>*FKEUZRe47&8VdDHgi zPwYRv=HGja+HANj=+g5W|k_3T=}_ zmMESnR^HXAFI(t&9$#1z?x7r_Rn*)?@!WjcuWE`V(%R(*>jDuJ3KmX8@tjQ@=FqbK zS90wbO(RY9y+@>I^ZxeA7oRWU@2h-2QIYGS z+C)j-5b4`mW~s}?>i{_dY$xY=G~xbwAb?${m*$i>BugUrnRj)kNkx2GS+n7=Dn(?W#~SJDa!t}Nvs6h2@eDwDC^kSPY1geAl)wigUn=U zC}p$L&^WBTQQK@wgt7Us70mnNclYl|^{b%L@^r6h*e=zFz^)WqY zBuh{TCy!7we<@bk7h%IZX)W=m$bmdkbDz8IUMbuR zg|LWq%$90gXO|S%X0Cg@dU|}M&Z;mYeH6%~&sQsnegElNMivNch>nMWMwBbNot0y84pWcs8`%F17Oqgs0#D}69Pu305v%e zyp^ZMBO3n^)Z|OgA5(voyTKSze5UJ!?AWSbKU5}V;cx{H_20&{7}-Uriu!gs|H|Wu zJ5Y&221R;)^MnlpZByKA%0?dzyF^VzO7yiSvHappdZK?l)QCWv7i_o-7dH4z5Y(;K z@#yJnxbyXpjZBM)l?^TqgEIdj81V+B7eb&J{ z#a1|&zWi&1T8n_5uV?2>0X@G}F?wS8o3;_7>XKE>iI2J zET5CjY?$ji^fy2MO%=$lYGA=G+DA=jka=WxL~47hG@?xx4jQIV>^ZPxB5l(juY`qK z;)*_>TWGX?PE|9eFvZjV`r94Ml@gCcPN03IfKU~VMtX2UMI3p_l7kq|Wo2Jybo<>1 z;}lYx`vcCcUso7?L$B4Ehoc44<~e+e@GYzUs+za)MF-GD%kjsF)ys9fgB{>DUG8h% zoz9iYW_SfUHR$_Dpi)HsWwIIWrH(6*JzdN;Od8it!eh6}CyYE@E{AikN8rYgfdX|R znJm8WOn?5GdcRf5rqu4RlK|0h^Cw$H+hm=N-^l=(^7BmiA_ElmRl>+VUD%~8{ZdUv zC|URKdQydI_w3wudzet?$L{Y$f!qU#L0V10=O7 z71bgJyFM43{XWm5*P=mblT@n@L+0EGXR)dVfSu!9wId8IRC@Gf0$vA7>Fx<`&Sb1< z5?>1ZW`$`;)nPL62i$#=uI8&*Qc(w3-E!saf}iz?jznn^SpU^Y&dc>H z_725OwCt8-@V0!o7iO}r>y%Vv+&>l_yn8PX!a+O+RWqAiQmfnAk&~bYT zSRUA@GxvIV(Dke)Us$ut-Rcp$J$gCIQ|^|n)1HePSdb02+ouy?dI*gZejyVVmHGUo3b&&7=AjRFG6B6ob6fgHqIsDGzL5ltN1RQBE&67 z1Z0knouBdepq$Jrkb2Su)Jc5qp(?qy;SZ;EgW2|+W&i2C!RTj&dMj_-DsX%NRy_k` zhUv%CBow}*Cg}?o69~R9@^Do)T6v$i)+^+p8s3P;g!!Wz2^XQ-Fv9=YzJUhK6>t60 z1(C&?M;T&o2T9Sud&GybC#aImPlAJzq31T(-N#_8O%=E=uecGr<^-$^h2sO=o#hyR zoo?A=ZoL%4`IP`c5>YWcV@H;rKPwOutPzSEizzx`J^dqO*9gdmDbe|LSL1Qyu_DPf zmX?33hE&{o)Kmob)U0URi^TnKc>wT4E~RU9-TT6`-V{6+goO7Bo*PlZhvkn*lqWTkv?~Ne{O@T+X|u> zgb=F@JfU7uQ3@q&SDp`6uC18tGvU8!uuTVZ2z!+Tn5AXrWASFG^qK>b%aa3!eE-gu zX*WC`(w#UKFB@O*50&ByomLlg+uo09A2vJ|_L6k!x&m>(YC1Rf#|1vEx4(^RVy1cQ zx85M~ybkUJ!ecM*tBoC7mkqb8fCHPixHC-MUq{YYw7G~c2QUND89c`2PV*~< z6C1ZD{DroC2ZjUfkKMHkx(<6lBeZOOux5I(P`D-D?`F3=V}BPPBB#x7e7l3s<#g20 zov0mDF`2=|Kk`sNla*iUetUA+$03!faXp=KE;I9e{F(~#3m|dJYXDawD*E=PW%^!n zsQO;hUxut&24L!&HdS4a;W}kOafzI-)as`U`OFwyBnBEJhTv&k_`w)mbKPMG7i2z= zEzAEh_w>M`DRP|CZ=Vio-~o+Dk&uq+6GtA2)k>3mWs|oifLaB+_}VHy*m2V~776jO zGbVWPhbv}W-k<-0S(dYp=_Q6*8V8v*nl2t6Pry-#OWjG)v+9HX>NApW=Rv+*^f~$-&Bn)`*pqI>&gF>q;WY)w4oq>Bp79b3CFwoUOd%FX-6vwAdf9akM`k_cCOo2O4#(=@Ke|lF_P3<*I+WM^hgoHZgIxnT-TXV)^n@FeLyV! 
zF&ve7)+9BNu|ckS$l{T?m#C}%_=|Mc+G{6b0g!|?TW8`1S&s}$j^rH-3J9)oKD9fs zN~dI1T#~jD&57|Fy6ua`g`E%JU;<7V-p`VH!J?;zDv5+PyR-|0_EZj@o@Ggxn=B05scN^_^y~c@1$fnOS127tvAO*l5Rg%)1DI;bC5)eL3I#ayz+uT|`sgc|-tw7X z1IK;sD`IDDXu|(q(lo_*gMW}jnolR#39EH#KMSR{13M{xA{Zh>qjkIGF*{`d8%6eL z7ciDY-op)rMZ2(jVGVp0As74xzlhLuf&}sN;O45XXsOf#(83)rJfoMP?)b}I;;YT> z9UlJQZS+NWHt;m^GYJBua_UT5u&@s0jGU;pv>+hgl>+Ij53`;QMsoBXW%*uvi{u8p z+s6l0AldpLg#tY9=9ud;XJPuL`zn4YZV&6` z1)tsOaaSU)R$@Ir)Jvu!BXsa_Yq<2PzW8XB8r4iXgR@u(b5{4@?03UVz(KmUlF~^S zPNhIr4pPzaZ2rxwl#JIyb84mKU0*sB!&3DE9vU9{+|c1Ce0Fpjur5yNbbN8MT*b1w zHhSSPkRUsPV(=>i+vtLi?(3Y#c!PeSk4VAkF1^~Jlont17hN|1N&R|}WDjLCk$49Y z!*|TRF^F+O*MEIQ)0AiXR9(dZ3+pNss_0p>3GX*nx@pvEjS0=<->|+b#;;-OWJCId z$bWYlRtU7dr(W(1oOzd)ag3^VtF>ZZpA_uKy-YKCTZG*rsob8MG5H@44J8O_ZfeIt z<|j($22P+xNXzC*>CFR1RhbbgV;iOe6Lx&tdS5)xmkM8Qj}~&pCeW~N?$mV7r%D$9 z)I15&m_ZnQiW)6zdto^eFk3J81R@@1(MOtt^UtrOjcvIk*& zO}f}|uDXHW?jqYHS12Uk&!Ek@*;!X6!ocLE=oMezPd70B^EO#4Og*N-YnSykvv2&=k+|UZoLpYulbaI#!;0GO^KTCt82yuB`v1rP0*{(EsY>Ee##S63p)nHdQ%AxjMfCS)=q zWnS@(dZ`5DxpFUuHn+@3W^myBa>5LDN+Q7n-OpcGCT;0@Ga4Nryh~}b zmRnuRMAQr+j!@Lr{ftF5Z-U9$R1PzYBU)`X_7p^o7 zsC4-vX$gFAqM{Jl*iujmx4Bne5uI_APtvB3prmA6q`qDLyjFSfiPM((ST&XSVle3= z&_@EiJAK})Q`MjS5HV;y%|jpNl56@LCLlIj(Kcx=Fuc9oYuXJ0&y_dJa6mS~Hp9qJ z;KSRp{NMGT@6Reu#ptPFeuuVzUHyf?{dA6Fec8&d{vgvfqhAe+oE8sc>Gy}j^@il^ zP~*DS@AS-5jhtaA4QuPwh?{X)dyYjnRLbzi=MDP<-3Hhe;`9Ec4?g43MH724&hZEE1+!8Gm{1=zR`frhc142)M&r@d~)-W{j|5_pu=JJ+ib-}+enj`T_d z#6e;1)*Ho_*B|(6X%j`nf1t!h>-AfHb2w;tIzJz6GaYwn$6yGGG7eZXpDCK-e|f4T zAt9kw6EZ+JnnN(yn_P1}YwSRbrNweIz%i-U)X>y;zsrnL$u4QpcjF08#6wmAnf+3= zWse-D4Gxck?1j@UJ1NpNnoJkSj699oK3OHSxENfv3}-G6Ll+k626{CSd`huIJq_wo z3?c5g!+`I4T_XXvx`z=RD)PXf%VT&f{HR6L6f$zu3l8x@N8#{=8I_`T@1uROWWK)g zC%Z1S+T;;9{l>Z7>^#T&hT00zKOgD~Z5zAmYq3U%O9*G;h7%d~LmML_>EviO36dBR z$CHGdAAZ@S>wW+dRk1>m9&`=RmBYNYZY#NptZAxxxG zpm|50_vd}Pw`q;h$1D)?2y?oH2FNJ$D9IrqBwhmSp`Tu>&4*T;FBnV9nqB(3ugs~> z+edG^6xX~Q2`HL zU$ozOIDIatXkfRTp4E}OyFv@=`Psa$lPxf~Rdwa-c|3b$`}!1RQVYW*Noa}neQ8l^ zeMgQLHAivE+!4#-I{q}1uVcufT2DWmZD#Q30O{5rmRj;3vro6))VaSMu1(a82zeF) zci&l7lQwvym+9B62W~;*o6hCS#dM)d1Z)JAZAkF1%t$Igl}u|YA?Oj6Ui3BFSi-Vcu;iHfzayT zwfwLtQ`g*Me6Dxd3$M@r{{Zv+3=>y^%R_B8$l)^o(^n*vjs(-T80*>7e(lWF*cYIQ`89omVW!~w{VF^$dAQ~ z3H>%?&d<-!&CTuhdVp(8oH&sTdJ1U6(p=eg;afx1Mft=P@@Z?kHP;{}+VvVk(Fx7D zZ+H>up-Y}|T8;YrgX(iTC1YkQ=RATHzPY))=CfC26P7AxZJ@FFgV;2xGFDvFp8KYw z=tS~=ea@EM{@D^HI&FCeEcL~vx*{rrC7*ncY{C-s(`hvs%Ffw!4IY;ZhhQ_(SX__C z4d@5J2i^lHX53F>aR%2o-PQ*1#pAHjd3UwwY-Mfze?Hm#^{)MJQJFP$;@H%ru+R`d zu9vTr?Kyaq%Vt0M(7j+}bZfvH6UU_{F=z}`aPW_xeCTJtef@=3|7fw=ByfINv1m$G zCZEp%baG=UGKI9jsIY*$z0px}Tq{dESibw=UVDsn9b$DA7hPsBnE)M(218;}N_=8c zuZ8-2l1hbSY2L*PEH+SVs>N(Z%#o<5X!dCQaZ#xa4Rw$yp?SU7Oh!X^ctk{0biXKL zeKy=k$_P*ckwm3oBdJdV>q{rJ0oH~Gu$0R|IH!0W5WSu$1FMrPHA*aifrfrKlvY5! 
za(IH$MxqbWgfBfSgR;x88CI$qJ^XJd2RH~Aa z5`o~_yKAG-_{JM=y#N0DpmnTWyVi$pAo0ET-a}$+Y%J0X3JNfnlFQ{$QBf!kbmMP- z``c(iH}E)gnmVWmuE3WJ?sp6X0aMK-?fG5S)*5k2_N`ZLZK-03p?YbsO#=>DhX8?Y6Ts{W7UF}ss*YBp?oVw;~1qA(S>uSIH;uELS30549SX7Lr4bhaI z-t^-cv*+XHfO%0xS=oEA4#6%&4#cJWgm{uM@&=%sa9eVH%AfYZFt~kYaE0=zYiydj z=I`FU^_t<{Qn&pL<(vma36lsX!|pb5@>JZ~O;vfmlhSc>p}1ryIqT0WOq>+^=$~0K z6`f1G40jsasy_Wy>|?KSf@6tC#Vu@98a?vfUzi$-9J%}ZTaDA8tN8F|$v=4?&LjOT z+e`lDLm#+ZcKMW5$U|drTo%1AGrWX?XWVZnKHcI)H$=%(;~xJLPB{!Z&8e@eKYz8C zItT5P$8D-Bto`Z_(n-tja&$whZP)19mdu+8QH-eYYcCQ7eBPvt^oFM9#+KH;n|<%0 zV`Y`q1wce9YJwHA^(&Xm$({lZ$iF`NA~ZyqmK?uf)$-7g;J#N#<|HjJA}j!Cqq)?r zu3}{@r!?N&#~ywb;G-d|Nk@A-!QG}(nwy&$OxA>~$+vzv;5D$=Y^}Q0Y%-#9;2P?d zmWKMe*w}b3?W1tjvFIfNYhZpvoF=!?XaVo=Q9;xIu;KCkM&KDCkV*+P2cy$Cn7|0|Lf&mN z5tI;b9xAQ-@pU`wPLs~#bRYwb%^?UE5H>@d7Cq!MD0o4}6Qd8faYMXAo3lq>a#A{W zjzgto@MT0lopxBjZ8KAtES5kEEG0}EtGdFpW(6^VNA9-*kSH)TZ~jTmnByCE}X{f)HQG# z+np9YoyB8{<-SvdS)f8fbl3!{2onbMcGzLiwjBS`RF!M*(7;>_2!i0z)m={1aN|aF z#zaitA(p`qNPQVFf-bAcTw4U84bU_I?JK6=+oddx-PzU7bda|zE`%(2N_%c+rvW`~ zo36oSF@fTU!6lSRFxmBZY}$JK#W>U00(_&u^}xfG6B0v=GQ3;b8=WQtiZDgN41tdg zTP(J*W0IbEWW%!gvq9FFJ|*-0Pq)1P#n+JZ0Akyp z%E-K=1xADg;FDs%w2u)#m6ec9X*_n*KDD$oZQHWBwWV1im0+Oe{z=!DI&W7Vi^cTD>wip19fOAwmc#&qj7Eb{ zD7x;sdAmACw%eNpdW1HJ2Q*@)fVx3I8E{)5Kr3R`3v&=bHpp!>S^$s+y6pp~s5AoP zaM39=JkY&KUM2?#aE!)*CHs5_TVPn{1R!pJbK%wPQ~cr=zwpU_f_(A%>#qlz7%yaw z9656I=+T#7ei~hLsQ0t4&T3V}|dJUF*;yE1s?0MtmO&K)-~~S_U79`za-@dRbIsqqXV=v0%gzCSkd9w~cV~w3 zTuXhCRoe*ET9}e8Nu5sTVNP~^Qvrwe_EvN4<+g&uf~fSM$t&T&(zN%VF0;P9@ECk& zFcxUhP;$oHSc(-pmXzZ03na-?1D#epE(iK%D!*W?EtF4QrAOxaB90WGy%ip z6Y;WYYRtKXrs{l_JUnF1qwJt?%oN=YtG@V*rKQ4Y&?0&nKYFYvWja$7)SiE+Y4<;H zQEmAL+0sz?wAChuBFyhG4Ql-Cg5^xnh7%W-lVz`;0Ey$f1%FbF_tAXZgb9U0% z!W+?zF11-TwY6XU5r&Oe_c8;jqHedL{6g!A&j2$4g<(ro(yXPznDMZH`ui=Grc%7t z!;rCrGWD4q@b!s%{B=09bX4SOzTfP&8eI;%C~=}9`#xYImgaKJnH`pfQs7E%yF-*V zO`I~bZu{$Px%&XS=u6Jh87xWGQc?WGE?v4y4#y-XW{pp;tgF9VT!Ijj3RKHB2kjXN}r#=7Fqp6AUzS=|r!A~|m0tnJ(bKLpv^!+jxnR%30 zfRIb{cnE0YW}C@m(rGpAZEZr47`J4ZEXZ!ND??P{$7N;+{2#5$l^v+@y*TbJ}7tH#Ie4-K$cC;T4WZDApSd!3yPr4?Pwh z8An`a?&(H=y4657fgo3rOi{d2y;5dKn?=LA_0z3HLhf5?{EtYAG!g|;?${Ajg5_PHkmMC0$fqLH7#xuEiEl? 
zzWHWSQqn^YJ@naUpCQHXnc^=3ydy6!4{KV8ilwHeV&xDNbd$&+Si4#q%Bnv31uuGR z7jy#{f%^0}^;=&HU-=@PBW&LPA45fM;--IlIT8Lgp_*XpxN^F2*I!xk2rQD@E*@w- z{#oR@SAr%j?a4No={pVFTF#@^PXTKoh2ZT$A%w$@td`1vlI$f74zu-O2*E+T75VFf}+y6`ya-#Gn9oJ;SB-hEr1L5 zwgwQ3-44|0#3CFmO2^N|1vVf0P=F?Y3ELx_Wq&INF=F zXSSQ`uEcNr8$$$|2C<>%PD_W&WYC=63JvRs)h|%#td2`ZtH1c4pvkKQvEy2gf6;XC zgYbJ^P%Yf(b~{;Oxx=VNHvpbc@b;{SoES%6{l32lhElircXop&bjc4bP34Wd{|3hl8e3yXE(w|L`jF@G!M^G-K?nE!9q&vFbn1+1eUopLj!ees}Fx zzr&lNkOfaPefv(!u}_tA9_EC^H}Cs5OCH9LXk&0h-af!EY((P&r6{mVch!D+YT`p{ zSA4o<=ke3$gB9|G=x82~Q&C-eKJPMz&6+wfBPKGu?*>7L%Jugwd1&1|klP3>h1=b_ z<%=4dTjB5$m`?7K78n&4pob*#m;UUgUByd_f{Pap9N33Quo9UpIwn@32ySd_Fd91G zOA@d^xM@w@XDGTcC* z%Q33Hql0O*QC=5ti_SX?792n{z}mnKhvwxMgYt}5Yc4PAmIOFP`9PE1zq2ebeCP(k zog6rD0M^56mV55GrzbNa+qP}n3JVM2cp?^yeYzWd?uM@vCQ_h*R8>{s9S$@&;E>W& zZGq{yGl7K9kI&+PNpbz=cG@g;h1{?tECnHlp*z3V(bfbIp$ocU(>3CyI?#ylwf_yD z2&O~{VGAro?QPA#Bj7(0G;tZ=0Eb@J`0d*rg~#KbehXZW+AV~-Wb^~ScUugVpZo$6 zk{}U?k|y&b(#(`A2qPjHJGcGPk+zEmLGh5J&vF<$tj(3}1&77yb3*1k?8Gwu+%8$h z0?GJ=cxs|i_0i94>Ket2`=Hr@$QP!X%ZS7wOqyb`XdQ^I!jH`Wt;5_{46Fob4p<4O+*Dm4o3I#c z4!c2z*6BQ%ef@h~-hM;P zC287BU(M{`n79?s0(Z!Nd3O7yL*Xl)by_XeU;LUaj|f}-EGp+krWO8fMg2E#!sf~u z_qXRCgpY=D_5&*&Snl^PM1`pj+G9TblO14MZ&Fk=F!K zWAJI*^A8v^bjhZkO+uVOM3w5#Ue%YJ<^;$1b2cCNz}i$!7f6(I9>v>%y08D&Sf1N* z=>6yie#?*Y8rki(Y3ssQ|J-3{1#^TQ5{K5!O{MJMC~?Yk7d)QM?E*p+JZpo#_%skP zOJlh#YncL}YB)lEL`u-Kwdzw}!M#W{W;ReBf4g_#4xL6{GH+Hyc<8qWkDWZ1D?#uD zUf8&_lqWW!>rk!UO_ZLJ7#&XNh~AiuMx%!+m0V8Gw6Yt^kSU}EZnp(I zR-?PUfgaMQa3}B{ExJ-@2T93pKXvlB*TaJ1bT|BH3b|57R6!O@yL&Bem$7N(eJ7lK6ofQ*aMeqj(I7P=8LN3SSW^_7zP7YU<-dqfa7)!6xlXufj|okAG+Za3PsyK?nYoI{QI~!)YjI%_10TZ7=@I^$&)8B zXTsybue;&PNknbDd>J?hvh^e*3YILw&4Ms8cI?>B`e}52#F*sgKcYes;`)uw7AR&t zXsNwysx8!)pLIDbbSei_h3j)sfJzw3a^a1_kw=0$0dE3?+kpFqqrJ&kk&6q%x)=91 zNOA}hr*ITecn{+fWx%qNqlmC;8@M5HSk1yIqf5=!<_fF229Lw$BcB5A$9)tlX9!Z@ z9t!saI9~|jCyEjX4+8k4fc;_bXaUgxE(~Z3pbT3XW7pMV!4Dr9t}-5z2DnSG#7Ziw zn*|k|9RS?aZTk}-5?=IpZU{gGfn?l#AC%Buc+`rJrO@vHQ{x8576dv0M}T1wfNcRn zhtp;T!-4_vi(ZvMF^7TBDM$xhuG)79=)|E{+cfnp$G^a%gUVnC5+@U#xxP+Fub~nG zHCVV2Xa!dlr!PHYJMxjaDqnInE(|m~p=!ZlN5U}&ZW?$h8LM&uCrQRFAe7rE6bzgw zIa_mPtNzL<04st0={+Zvajo;(^ScQGMP$0KxN_D5z!#vJ0dym20-=Vnha8ck|#o0DMjUsw9eZ}v1bw?2IDs`Qj3CUf|1xz}}qJRmL5PYXbC z!#BoGF(J9B0wy$j}#xL!<*&$N5epSLzWvx3lMVw zD&cTMMMyqC8#D^d>7jf1XwXB0$S8iTZv0@oXK+v%Kt@mnNFV*4R1D)ql1-5oxYH~! 
zd{b>qdLWho`vKjEjEwXd5{U?dg0~jFf@iP3J8P5$YT^D$s#IA6h~r z-TOB9>eJ3&J)|gsuqfv~8l1h}SA*Bv+3hmvabE>q+_|#F9TeIMR--U!YUJAgCUhl; z-k>`+=vFw5IvRtCPiM8b_`ue$HYOQ27p#Gfg2T1j|Ad$Mp-Z1-2S)%6!4taa@JG0I zxcNYb^2Wp5!tphTsT~?EL4AB{Ir9oM9T?u9z5G%#p!}o0A zsN!H2r=hLO1@In-y@7aN7qAr;mll^-n=IBx*RM)TOc03#qy|TTdC1)nYJoob zr1*M|P$_t9Kpn$rceoI3S}up?v8GjBT2iEIZxe|6_iMV*B0K>f;g;c}il-W=Rm#v% zU7NPFxCo--fZ~az<>=W2BB8-x=xA>Pj}_i1;gQjGwN=nzwV17lv@v?FW1v0txE*zu z?U(jLT8j~rH5$+j;@d^cC|rOx2-xBpw1Ee(7quW3w=NekGX$vNBt8TbfcUZZrm;^zng8V%ZAqATK#GDgs9aiU^$BSPO+#Dq1pKBT%#Pl7-sC0ta zL7XEu(gLIfhQ$KIhi>36C!*d%f#`25Oj0H#m zW`%e$(6;Cow1I-YcKMO1!j$RWnu$LwQLxoo?KeIl-hpoi5zpo720jnJGeirL&?q}7 z($U_6WKdKD(dnoct}6@{4wey}?_E>PC8t#nb=ctSd#^oSJBJtl+@()kfTu+z z74LtsW!vuWS~OY~lYw71pu*(%*ry-9Z`I=Y@*udp_0>4v>_7C{U*Boh>jgX>onZQ0 zD`+xVs1*0NhmRW!rU%z8pEY%|P|zK6cXfoxe@P46UJDRDDY{ykJerEgS$*4tVzJBP zx{!0uY&JRUwz#-03T4jOGg^%rt}a|IA95A@cJJ0` zS{fSaCuB_;MQ$m$!Vp#1;RE~Y>uVLt5THUyNh$yN_dm@>Lv>B{#!WvWR#D_OvMd1o zbb^;bFf!Z@U>4rVk#F)wur_?q2BvLP0@tuZc#9xzAg4D_C?<+{tau-^;^?Hi*fVcO1Rw1p#mnb2usmuek7oCEi1MzX=4yfg|Fq%LypflPS3bOX!_gl+|X0gsZ3iVBrV1sLL$S6(@O{CGh@0niP^M)~{S|E|~T zA9>`FNs|a%0O4SPUtkC85b!>H=myq=YPA}kDY$V!QZza`I?xKpsnKYV{=^ecAS_GZ zDdHWtwo!fJOQuu>J&V9oP{+_+_!etOgy8^q;pl+Uq}{G-VF=}cTo33?aKlrqQ2IQx zO)+&HR@eHn3m&^!riuqZV5+^01*Cl1Iy7V`J8x~R6edi9n+kCY>|B%k7>vXr!K$Fq zTsE^^-wKZvNSQK((zd(H;xD&>#GD7}^nn6iS&0GK$+e)FpuUutY%&feg_`c;P8f z(*V)|Q?P;}`4P#)HKMBoW5WA1AZ7p{W}@5PLn0Qe_w9%G5bh^<0@x9C1rU`Y0&!s> z5h{pG7bZ;x=+%7mV_|HT_xa%Ob>|K`hATpp5Hx2fIR~E_9xj#$XA9CG+V8XyTJ~8? z5SM|Vs@8@QP#7R|0!ogpshlZNK;{FbU?r4-Qw&1k0-)(7r@tRS6U6DW0SvWWIwTo8 z8%P;~xnLe6%Usl%2gB=xo@kFVRM32UIemS`e$@J3`s){unPoj(?Y5ZQR(y;uyAehf zCQd~Lv_fRVr4de^of{sU;xRK2UKhZf`ow26#-q4!&Dn1NRMl+$L-^8XVxIU*^(Vj3 z7oP<3g`z;{e08|YZRUnDzDJi)!bJjdXaBxye z%9!+VRTUL^7juG@A)_nK$g0)r{`uCMl@;Y{*Ka^j^+%uFIC<){+Un|aIj8wtJ_&Bz z^vf{Z8HhzJ9-fd0#VaQQ&cN-Rz$@^Oz@wOWAVWYDhz1-|yoiR&L4{(v2SHN^VNvgt zhgk3;>s1Qra9;u0-heIO9XJd!@CyQ^!VD1pMl^gEm=r)>m>Uz26}}Mo5Kb%jR)B8{ zcNKhDh_4F}5pQ8H78W=y7;uNkf}{mV3-rMP!-sBwgo&W7XU?1fsPWt1{x&;1`}FD4 z`}XYv`64VV3`9+Ydj0atFTvUN@w)LhKR>^&t`1iL;szW{o_gvj+%6DN1vvqB5DpYn zDl){z#`>=m7)oYl=9n>K_U_$_L}c)v!i~n(T2sIMPrR71UC<4zQ8~&O2uwEYc^gqY zpe}(Gs`mUYtfir*>8}CsfnwU)x*cycfA=5Y7kH9kWf-~Pf3W74PhDHL^|j`M??X%! 
z>Xo2Zgf9KDQ?EAFU2)s3)|MJuYc&7@tG1qqX3|s!1rTsF;FO^~yAu&W*veQ*`rOvz zpV#mB6X#S6mgfAZvAnRQc6BX+!UAo9+=eiI0+!`a6@`ii6gPC|c0pp3DGY+4l(Qa$ z1Swb>hKlprbGre}5Ew+^374@AN+JkJD~QXy<`|QJ761T107*naRLkIYAwUTz6TI*Q zra&r8o`$Pn%Ofe&Q?1{BWvI;67k&@CqvOi=GzN<~^FEd&6pLQOF+p@#)q*F*W9LFe zwPEL9+b@0xjvh=G{or!^wKSALpcI-N2=WSy##~nn`&e3Qh(lQzK}B)d5h~THspE$W zKsNxg(0bxa28$cE`oAO@i?pY|22@VIq3yPyf;Ft%Zs zIVTmTbyBsJ!Xhe+SQg*xAv7FeSnB9_Pp- z@a5phqY1T?BOgQeF=G8Iz2x+N2e6}B^bFV#7&uAVECwR8p4cpi%R=Qi1(XCM1%Dzp z(~7XW2-C}wg!s#PzX_F)P4T6G0c5T#f=DX*?9UUjU?X%@ZMp`{xo_Nd0tW#o1F>7c zM&{}QA{$&$+FNa!I*5C2(hWN(47x3de+E6r`|PN&c(~Q1%;XEX&Y7p zl$aJ89E`6srXr2a%?Jv6skm%%O&y}f_R${GvobFil{~a=0F)wgpPq$d>hK~c>I@t{X2k25Q*w&Ye$UR#Kh#3)G?{)>2kR;aME%cI!88n zqXh_T1F6Jn71IpgUguu|vOstgAG871h6Ah(LtFaRP5$VV(jM$^@jeI+e0 z+AT1A=mxl>GiJ>A^Pm6x(MKOa@6#9YWZAN1ix)4(jR16vty{Okkp$T;UAhFbLTh9D z_U%9}umQ+8d-g2o7+A?yS64$*W7e!$$iw6D4j(>@41E~}VkIt+h*GlQM9?KaBqTx> zZd4!p6j4+Kv6-r+KLm9GScWKtcxUe4@Su&$;haXCR@FmE6s5CCxHdD7afN-Dw8M0!W&nFr8O4n z+z{Y0)Q-~AU||4R;6vpBF?35$^TJZiHJ7pE#&X(FaRGb~Li?lgBG@IenH!*f8ou%c z$b|q7a9H##p+dEE6DTTvvj^^AeGaaQRb7jqHsX}2p(~#Ukfh7qPvh{QMS{qz7)l7E zawF0}6w#d9$%`BlweA&2cmOhJ*!y>n!zrDxG;GliAc=}4-Am5E zJ^*HldF1~!?tQ!Q+kc`tFnVxEz$FxlRb7t=uLJ{yLIFnx!AcNx#$OhyCF+x3;}Bm$ zUn1Zn)uN4rH`CP(U=1SdBI-)(@y)OaaG%gcPYYtk0nll?bfmMO*9gJWi59Uy6(>%P zSpAav`^`{ap+I_7m7rSuG=>j!9!pDk{hq&5DRhM930?kUJYb<~8@lp^wwzrQI)fdW zEKZ(=z$;9i*ewsc+aA6R%`NgE>5rdyc;1Yu{uKqLAYrPIm5b)@IdG(=uA%Sf#?0)A zZ3ca-R;xD{73=^{A__$=lj3fMo7mMszCHiCS}SY?`>f}-u+rvJSzP?32^Cb1h{_RA zyd7TanN>OA~j7%mC3hIp5 z-S?_)%z5cjK4dolZQ!R`R(fU9)NCvmBO;=dO4W^-M|jHloU}m zhSzRH1>`oGE#MtNur_D}!iC|bx(JR2#chYwDDnCbAO=C|py9P2ahYCHhZof#k&V$j z>}20Zrv-)&-GF`Y4s)e$uG6iXdCBWUs})C}$h(Xp`Pn8*JNQ+V}CiMw|AJufQf zKISraFyN@-T^A8~9{z*B38KQ}X@Q55v>Abig6MJaKYABROCS>G6cEdVwWU zLI>Anv%uxbR}UyNs`*a@&w3Cq7oZk`;tYW#>XFwV`_d^=+ie?nVj_pdN==N9i;3#; zGk3Y%h<*aKQG?Oc_v>aeSt}RLRcmxcvsqtd^cTb=7sND!$qc7Cf8y=ku@@dh2W%I2 z(Su_d2$`fx#9x{;oFsJI-f_A8t%JCssis(rtQl(u(3z&j#_#s;hVK>1B7RCtTtb^p zmztI)kz5D%4WMQe(P&za9X@0-8wm&UYx3N@nOXHdEzjweb zA9rsWb@G(yM~@x^d1G{`hulU3-p-~waSpGe0YDC{a|!N-8+;AS9=#tIQmHOt0qk{L z8DMA8fUd*XZs>dlNrZfvND%m_a=2VZlcgR1TkW7Rb2w~d5b!wIA7GPYu|8P%Tna>N-dVmsHd0r_xm1tS9=B11rW;4Ym-#BB-mnDCuNbT*0?>A+!^%O_41xdTo(EICXYc z3n01fl5d_C6BS-v*LbP$ijdD2@_7J}5Sj^yMomK_Vz}1THz}6Rzj-ZJbF_8nPoBGQ zKJQX}W3$mr#Ig3Bfy%~}(uxIh`u!&SYQ;VNOXz5LNw7WU=yHPK%3im*S}XCP6nu+R zKcR#4Te;3gch7;}CuPQ?10Zg|P7^0jnKWsNGzd?hz{gq_3*x>IxJK)8w#UPa%!#vS z&*uxSTi|ucjfXw}b03V`<(;ru95!-voW%+VX*+5 zdLXXhG~8IIR6G(V0IXHu}Kv)BpK& zOJQk6Vr(>cVTgEATvmDb#Oa!brpNAEost;e_o@*N_J98LRh z$1TAcon{40ClLu6)VhPG^VZGJj!*_c&IadDx3<0GtNkZG+jCUF=g!U=qmT;soxFJX zY<`ePIBP;W5yWG7H?^*#rdb^tEDM%Ne4Xq|`>@r;l1kt;$uZ%q0Hm)kWp95TV&E|t z*zg*|E2!@koxSdd=&&nRtXREz z^(g5++0AY&;4mAsIXk(LDctZxec^GAJe-INL%##Rz1>{==uC234d%n|3`5^-oKQ;0 ziUBkf6&?nq&wqXN#k(JGR_kT{~o(9_u)(>*pLG4sCJ=7TxxMMvn$I9Bo; zCK0`;lZSG%qNC|N#=5Dg2|>pJe8US8hfssU3OOn&3QrBN0BUP%@nyi_Xu7If=qt2W zm}3^Ryv)9VRRd$@=H{^3?5v5CIUIr^7`VaVtD(OBO3|gz@QC#OqZ|0*p8o!LA`vQO zJQtDFuBYE45L;D6*~Q%RGiT2m`RInz?f!Q6H%peS3{i!4!(;JK>Kkgm-LvzNCpNNK z!@g7v95ZsiCkx=~h+78E`u_JG`alg|vhPGsrTdZ&2NS78C>8R9B*N(#Dao-BzEXOl z`RfCx-rsg0HbVKcN7qamlYrksXmF6z?TU#A5ed5f?)uW!E#}#F_{^K1?D*Bw_pO>Y zmBH!jH&)Z$@#L#-#YL+A^~xp?Pj0af!(E(+h=O|%G5myg5_Y`b1bin5fP;vq)8z#R zk&)a;3y>BVHVfQ=>qD7L_V(Lv51R$ahK9ich)M(D%wcGVY+|r1KxOgR83O?w`r84f zlTGQz8vnZL$EGH~^rMXl(NTL39jk9{p;KwWilBKjrmeqcF_<0KWxd$}ok0(l%YO3I zBayj7iI;@tN{UTa)siM0>R@yu^tshz*j`A}uZ4CtQx~r@+jmj2K#Y#eACnBAK zkV!F_bXBrX-KLjPqw$o@W_$C^HxY8{rI%hpn5~YE4n*?8`uz3RUk?ut2h#D&U;Ywt z%K!40zd*ZsG+tmo`V37(yBVr03{_G;O5KnZ4yPiKxajgFm%|wq9U~BmSZugnQk^cR 
z#bi=z)CGCD3Po^yeBup7Z=S%QGXQMloIBI5(?v!_^MxWdi_mPsFUxE)HMcb7oIO2b z_S`{HFui$iZY>9s!BDDHhY#%^KQ1dMSRvr^nG6=BznxBpQE$+;sgE8yl$kZj=Z0`= zO^wXbR2nqP zP=&7qYCN5P<+HuV7!2CO%jWu^4V;>sp2%gf5~2uS)W8i(aJ|{0Y17l`v|y>2&0^|J zX2^K}Qv;wN)n`B~vTq1&|m8Q+m zVK4~<+z^?RLBHk@0#O@Wh~8+j+g)NIPo|lR1FRY_XWl7I}~?D`RY; z!2th}lT8j1bXO<)HdaT_*K|WA}#uYQb*BHAP$P`@>hp0Mz@64#n33TJV_uj+b`Sa&z zWMm*{*T)}!%;j>QdFB~-GM1H$`=agG&+o|)oIl&L|BlCQ>MYlHx}$m3dAW~F6Yi2TjAj% zlLrgoqC%zEZT8mIW~;@v@}AYHsbg59M08;Bk`*6+@b0Idet@tta)pA=<>BS8#bQ&d zo2x1+vS-ZBnlzbyr~Uxx4Az)MjHpZsRK*dUdQ@+SpMnjF8}LveppC)Y8d|jtE!yPR z@bKWys6$Y6I(MnGyr${#)$?agNcFFx6Cy+L>C!e_vm4dAqdAujo+;pRnD;N9sgz5O zpTB%6zXYf5U6?&@@;FV0VavXgC-aMvqr;wDy|A^d9r=+VHZ=J8NA8IURRUJ1sBb)W zzOcSk+p29hTWyol6Iafj9IB8);pReN`SwGn&*T@===43u&z~(QMK{(h%2owQyDf-5 zfQ!miM-N2k_-b_b&tSWb47x=l~AE1WAsG0AO*swCam}Cp(O$ z`xnhnDI^EZBKCo)9d4Nt0z$@+7MmN4E#dth>FtT(gL^F0#=*t`!g5b`rwmer*iBL2i{X*QiU$1B_%w*VeRsT zvlT%%CV*quFBjyOloYw#E|FLg zAD6IX>GIgP1Y`)v-tXOslcuJor56}J7=vW8q~tN1o_#JVDz=MjTYu^v@p-`w3rV2hb7ySC z8rQef^|LiP4{zoApy2^@qgmTNf6BPfV5zUF%jLD_3d@3n#F@!4Q03^oev8cxJ&r?X z@)|U{vg$^}SW!wPr8Ny;bL5v)f%pJM!^Pr?{L;$On)*{0i@|XyEUyEO0MsBv)~SqB zTv_+OA8swKu3s~EO7{5FZHG_&{++MrRQml(XNdVcZJWMct+Ux3sj-n!p$Z|77ZxmI zGl>Xv-4+a$2Q8kSX*62f3?_A32Lg?aPmU3JF?xwmu=D5{KpSdZ`^A!~vPkNCJAHac;zv`#b`=I#Ge)Q2t;vt?7 zfVu*J-rwS=+W{{R{a?3k-Kgt1+2N63fm@ntjD!my`#@TNv;b*=0a&24qUt}NZ8`G& zS!Hlgd~|d~xQfeWgM3k4SO5MOUqdNw!>VOGUSHocf3agvMO95vX~ntR0-@jqCy7->Jz2&X~HT))pzsdXWzmNHo&+$YmmG0QF1IuH`Mj%g8QqrD1 zd$0t(#nrgKx&H2On6#{5nmvCHBPxR?2^}ho!|#(4mC@fG_L4tXsa&#jB|g368Sp7S zUodrQHa-KcVdNA9QiIRP=?~eHL9hU^%I)h5{9c+vq*o9(;ME1-tTi}PDo3Qj}!`c|K9R#j8y#GgZIEE z#aD_+r~l#EhstW3pZVR}XD*dm?G7x>#UehGJJaGLvr>|nbjI5G*&BZGr&D>wAMQB# zv&YsfoR+cm!1q7>%m3wEE?+w@8|cP~+`?b}{gb$G24PH0T#SY!x;%G=Nh{q8r0z@H?y_65Y7N29&%U z(gLIf`ecFJg2KGZSJG1wH$Jp}#iDtNAYzUMK5c1b6?8_=UdSImCV9fxF@35F5mlMY zhUONTR03|S#d2L7m&h=cN*N4i`#x4RMA<;oU__3mOG54v-;B0Fhm%K$J}nOI1BXT)_qcj(>P>Xqy_GH3&0Zu+>KC$ zEKDi$aziK(qixl-Lx%zi8Vc#n-^q&v`~_3TJDsjWIR$_lz`R&7dlK*thus09L~WCr zMyD;9mH`f9v!>1Eb}yfiwP<=4h#~Of@R|4Uxq=Ht6_F|hBD0_f67iSGptl=M(9Xai z9+y|D>%iRr+9Kp#yMMxcrrUyW6+uHNm|IkVQkf}nkOJ|wg};xjnvcH<2>=alNp-!= zW(Oc~v8b}cV1DM_r73aYS!3c*s!833Y^&!@ojYkPhs6Z?1BIFvEpZ7TZoKpLezfx9 z+*P25fT*(l@M+j8dwfcdDmHK^f@=xk(n4@ChI4!Uxrc_XIq0N(vG2rR|N7UVYhtKt zK6dQbuYdjPp>Bn&|4y~QZJu!7sb(dwi?je~fe~&2@HSa2#)dV^A6&bV&F)mnfa5Nt zHy&BPYWv;;bq!5Ebi?6rV&>C3Jb`E_2alaxIA=!IxHKRgT}bjug)j|?V>!)bT6R=BD!P*ZQdQu1Gy%jq=e z_5Q=b-GM>}Dzw{dL)QfQKe&DFr8b0b8@dZYyMr$D(95-ur-s=A4J{gQH{gE}7ToC| zX)u`qZMd8+$aV&aZ{n6TeN%L1UDs^##I`!NZQJbFwr$(Copfy5>DV?pwt4b?|2UWH zZjZJ1u2r+vteX4Rv?5&CTFtu}lRqbNE@Fst;}rNSY`Ze3fgFzNaGJ7R*m zzgKUIPiDk!|Er!{&`9xTWiG=<` zW>A(DES$YgCBlEDWe!A zHf#_~gax=1=I3q2|Fn)jPHFaGM9dk4Q=Ug*17<-}guhU>$;Xt`fve$|SK%TF9Q09@ z^vz0|Wy^UOsTM~k2*P8U;lvtQ>4=8HX#5QV;z|G18{DwmW>uyIQZnvsuNvfb?Ms#Y zRPcPi>;NBNpFbk)?|(28)YzH?e1NdbQMDyy7ATFEfMfcTB4q|i4(^N z8M2BXd3YH8$#Q6b z=$OxcXeSo(liOmX42!ZR`QH+J4PcLH(13{n1D$kW_yGj54QO_Aw%+>G`K=+U>F6z~ zLHg3CXT1Ri9Gb`TsGu-og%-6I_|gz2pWPy)!pH1$HY5|FpscJrx42oDJaJ%D$g!;^ zALI>Z?BW&#=1me?I6mnvHb8$A)<8MX7&kYUEqK%s=l7ZF1*TimaWM2 z1Gj^0_M7nRBW9^Z6-{Gtg~Z)<(msmvO1Fr6$MDGbcEcQ)W-dUgZxAp9ccc;^A8trA zPn9C&q{MSGa3hEokIS3uBuR)e=mE+l$o|ycBdWAhH4fCWCo1W`rdLxXoWEAW+~6IE z5t7mjB^c-&>0b@x z>^~YQGG;b^!i$!jj*id}I(0=;YX{B;XjWFBBKV<@vUpjSVFdZVp~)l+Jf?nlnIW;z z@>Jg6-ZM7$TWGytz-X>s@X_z>Ay6+E^5aln<@_fv0%E`$Gk%8hTc<-POPNKx1Cu_U?)^@a0UIa`LD)*b~0 zNFOFv7%#45(ECtlZqROv#E-rV*oFnY{a2(q)`Zvi(&eAFlQDMR`+HmYEzf>}8lIeuWbBIt5ch<9xQl2R3C33&Jg`w)<3`BXgB zfh0ueNi`PNKp9Ad*;)uWHMZtD94_9TiY!9Wgz{l?`}UlBL?u7MZ?bBTO}*jr5M)VL 
zSzvy=z;~gJolq1H9ZW16=p{C1Fc~`txcqP7ePvCjG zCWM56J%tc}mA-**S-=DG#HpJ6-nP*clVg`Jm5#m=Ka_e7ymb|oA{$G3ux;5 zqx+jW+(Dl5qp(IntUBXA_?5tU0WwF#R?W&OariH5A>&imr$d6TNMmHR!?B`=Y49 z+Lz}Vgms6aXh(xgm&I!0?dU2fb(M1W+=@FX6B7hE96M-cfoGXawOSdq_E;!bRc<%8 zZ)f6jU(o1b7@+@L-ze5-v+y>o@ds47f<1lj8{itsBoP=-s`GbdTnZwe(+A%QTiHv2ja5CQueNQA?6k4Z#~`Yf|7M*dc1UP+KJPv6^L1qrFpUtk1+8y;|4 zw|=P;AN(@L5S#+0eYY7zjYu$pAk)<1LhuGfK^4y(a%V2vc-&q6UKTCuVL8P^z*_Dh z_IXt{yYx_ zpGAun@$*mpGnLC#S6KnV6Td2%K&a7p4H9SjaRQ24sKnHC2Z^wIG7@-H)S}-caa8?) zz49%`B_35y3Ri6{ZQ|=sUR5F3CY_f_EUz6#%6MrAN#cnZ#zJ7Czdn9=4YV54k5t}K zF#MAVqxIBgyb?r>wEv*&dm9lI0-$53WODYFqWO^OP~PXRvLoG8BoaEj!CCV-%w98v zgGuH8-i_4IIAy5~MLiT%G9n%l!V9Lgt=iiLTL^#$LVx|2>f_Bu%}e+;hjyQ61oag{ zf4}G;S!pWIm!k)ujZ0}Hug&0JDpOu}5)AByJRTllg>gX3k0~A&{XFS0ncM3R00Pyn z!G!e`le##lM2CD`Dd+VdO+g$KDn@H=`J9?@C3=~tiZII4ZB(F3c^ShMkk9qf9~SS7 z`?;_ga3dpS+yCQ*J|y<1!$rE?vl}mR0?da7T7IMsi-Ppkl|h$mD!)RRC9wbh)U{}- z|6;z%ao8Fb{wKB-f)x@Eq|&8}md&G2PO}jwXK%@s{cgvBPn#%Rc=s{N=Czp*>6&1R z`pMzlFC@a#>Gk|hxGs~O1T_)v`^V1Snn|l~n1pQ+AlL|`f=JTwEx){ZS^DW`R5mH5 zfJUV!hT?3A_%sj8OY*z!Y)O|XzU)61)uw|Ei45~?wg9r#0$&dm{GHD3K_gO|V;ko` z5!x%+%IOn2^iRjRgQh zz{b+qfI&$5c3TAXn;+Y+1uVv8p>Nh1A7{2J5kx>27r{Kt8nAr6pMTzXd%TXjoapXq zh=hgsp9o8v*{O3am^%6A13%jKYKGO?QU8hOZd81^r3=6PiRz&0t zl`OI+fvp&IBAq!c=qQ^iEKg3xrfZJ^8x#H?^(Y1F*sc~cr61S*?>6iQvFvFDismq; z{@Pmq#^-UZXljtEo)etrl|f6y#Lme^Bu|sLCJeEUD{K?K{YBQ*V0g`%|_|GdV306lp0Ck{d;;f_Nbj5#?VSNiSgC zw`SGp&WS|QO=n1l^olk>_(E{l$xiG_=~=X4UW|^$9Anc&0S|^`)Px7EGvu5}5rUzE zQ$X&;B}#Ucv*=N+;u!b8gWfox&udiT3}X%Bzy169i%Y<=<^GjWn_T;Lcs#p_KY$Z& ziZNJ;3?j|qN9rTc1J|dMj7f@b(sa8pL0m30sip(hVu05UVKk`NNI|MVUbd z>x@&z6GXtnfgjfY6PZ4268V$7HaY=h7GzleyYh?@xkvm@K9jkGeVQr*UADgag)?Pb z;jTg(4T<6nU|_dEofnlbYlP@jE;L;wHB0JYO5- zd2vv%d_QF5;+2_v?t&8=Dv~(c(;}sQLJn6)OVw!7o$B*Xwo_pKO^qEpwfA_Cj1kz| z5&jeNX(G(HM5h%naX1xB=;O$S3;D^!yNVwDL__}195Ez$I&3$b_nPWfmrp=U^)wZk zKbB-w){_xAxkL{W{?a9WozZIg#gd#D5kmZ-#bzbs=_un~Clg3OqqZep)2W@J{q&s2 zf9cqsK!tku7R&CgZL4X6Thy{SE?qDWnTFarC`wCj+~)icuK=wlQOCwX7YSpgjT(?O zzZVe6v5&feT|sHeJdDDU_`BfTS_JQZZiXcQNr%G{^5;jIlu?5TRjInSWqOUjwdtWI(1Y-h$FSLrP%o^*X>1dn5tPHx?EgC}7QDV5okD~>}wK}!Hs<=)g*qlPB;fAm^K%8BflLxgW zY`_^QEd0ulRWVx*xwJbYX8iuAZY=z-ZWJgIDuFT_CFOYsPU$af?3xb9p}bZ)ojh0c zy#r1eIg!CqP%k>^WJ(mX+036*ZN$Rn>XftY4OQQ{TK< zS@;Et!y+N|>fKOv?bSi7-F@tCUGb0+<&G917?Hxcj;9*%AZ1)~FOV zxAFqGrq#`g*M~|wW2SKw>}7}3;CKTKm8oqI@gU=QlY2AK;1KweKrR@S#pQ+*Sd+Q| z2zbq^y*?P^sMF|aij-ExZ-%r_=ZTv2(9mz6qcUx}IgfIvkN1O_SDbAlgsKR5 zm?Bqb0(hsgvE!dhHw|X5@`ggTN?gnIjjwbOY!I@Yd>GyT%(OAF9>Q7sVb=HDW9mb%^qrc#wPiH zy+co$1e|niw3G$y&CMM(f+et~?rP#cv19MC+2xgXGIHTDXERb4-oAUc3!Fa<%d;yd z<7i;lrbrl1_cookzFJ^tUJiPqytcIkW6R>TU<6J5S)Jo;k7zPmgoAr*mhZ?+PpUPo z^(AP!lzW7P<-w3`66I&mMa0Acp@A5yK9#9N;Rk(ZtsA)CZ@n`phJ`D%deJW;<)9LO z!}M%n5+haVbx`pta9r&7BA7YjwtAQM938s9DD~}sAarGs-TP*3rIx2RBD#U4?ixxX zvdMo30LsyS+?F`%IS^!l6R0HVw$2;FirShqfUabZvDX0w8WiB2Z`TG14gtUAY8W;_ zD#F$E%-9yUC0NYZ>oJD=6EIu#U%+fEuIAmqe~5K54A8(j{jqj@TaIpbhs8z(e-yuP z7}xx<WQWTJ5%@6tG>U9X<2{f|9)eg>eN4YWyGVOtE*e4j^Cl)y_cJ9YXM(zJ-DZzasvfhKc6;8V(Op|k z$4E(7Nt#9o3C{eS0n@o7cJ`0dE-~LGs6*_&5wTo0A3id2mpTiGEL{2ihN}7c&}8-p zpj#Io7mW@b5=q19XrLVc>^EHz_GPtz;zrjYSaMgwVu6`5j)#txzQNXGJlMJeP>vKQ zAt<)lRF0guESTFuj}q6CzvZA5;sP@;t^)@I0uP~`gha{Jd$`zdD!1lH?L{JGgh;S! 
zQuG;XtdbMPZtxcyBmB5GhX4E4FsKte{@b=6L$E*V{9B529nePo_3oU~zd$s{dI1JH zQc1!j+6;cD)2PG!z1y0dq$8WI%bk4r97C9X<-Jp3Sx&gT)!oMK{zP zjYWX3dS66(d|I~3KhAr$dgKOPvFgWtxrz4Lc4jm1-dQm9#dh%qcW3?*A$H`n9b=iK{Tt>u|NRi39)tr+y> zISwZyqkJD8B?^DFApo#?{M3MZ#ir|~CLPKX92G(R?EtTecGu0>srON}I+&Mc+ zs@7d7z+@N|M?u)eF2=Ro}ch-FHio?Ok{bU-~U9} zyzb}G=&D~kaL*JZMVou)?anbd{)t~=S?!%i`b(rJlbc$45oT)WN=GNQ@r_j^A*#6W`I=e&3nNE#^% z6dsc1J)`Y4S+zq%A*vEpwp@cX*PuVrScM{biX&rkOo4s)v+XaLLavbkuXLV_C|E0Q z*M%=t&u_Xvrac|Dv{~2Oqw~X$yy>RbvQRSgQ0XTl55Eu^SxZ4>ptP}|SX!F`pq=Le zU)8ljnInEv(rlxQFWTvOR~NVGe_dFn-D}fawO?vCT0^Pob(7p|gGwtfO!jG@(p9!3 zv(gt@dMK|7l|WExksrlMYF3k_+*w8*Co=srAi*w@QIN9rIqsnh;3olIKSMsQRhzGe zLP?wrPn*&YH`34#d10Ze621JRYo zER8-}OHD@%7PG7lE^Y`O$8JTREMVop#BAVXYnRp%*y#2C{1#XFS9Oe zz#HT@uzu&BF-1h7NRA3BhI{B!y*Et(o?R(c2J~}eVlgWlg9o4IaRP?C0m&puDQFGn zf7{8*UThjB1C@@P9tc_&-DNZrptj@7KEpl&0$u2g=x5q+nWM7{!Ry*!b6WUDnJkhx z`C~VvDG{0@k`W%;NBkooIer`6oY%|*yCv^|T1iW4Dw>kW+L7k;px6RUsw-FKv6T!G zZ8qkTLFSvpy2`_UKnm0mBSBAUwl>gSWkqxjEAoDKx+lHcJ?|%T#WkcxC%8Yccx!K5 zT+)pq292S|=RYRUgMh!ANI{!~hQNmV+5i1 zc7rI2NM4WyltRLi&IbE{q=SXC>8}IBz$3$}0?$ww9bhp^mWU}+{(*mB9{gQ_+HWXZ za!R$SW13P5I;{pcMtJllH7#W))tB1+l>kJP9vnst8^{(Hk~Bob5RQPA3S5Gp4xsDF zv){rktTtGdQ=eR{Dldo1kTX8@-A*l})!05OrR+_-3%rmiL2goM;e6ZA)I-yW&Ma-? z7}i1?pQ556^kZjN0w|^UlbP&!F`@MVBAZepq{&+-3*gy$4XHFr*Td_bGh|c~&}ug> z?N>*pbuxSB+%N2%qUAQTE3lpf(MJ6EsNs*N%oG zls&(1*)&xDsimO5bw?N8FiyfJ>4pz#f`2ivz*a5D}o$Fu>~;@i4K{{LX+K zHl`$#Er~#&EX?^6B&^9MtW|0`%{eTVIsV_5-iCwRmU$vZlz{o`mm(@DJ^jr2V=nHr zEb4G>E?#!)#$^ByRIik(suHDrv^)h&aWG7c)8COp6m-X!>_rwzBsn+S63pq}x)bZX zX`Ol%>5|XQ#qV;5tlJ~%IF@WV7krtg{HJ=J81TQnE{5nljQcvxL5w*08%F*#@o;Y@ zS#yuIe_UmfqB7H3Lnv*DFZhrnv2LmIZ0Jg*VvVxPCNP26I2r~cRW0k28o3yQZ`gz^ z9YB?K6-my;hpw6)VCybag}BLPFbFLSN$`)ubx2p{G*8#%gB*PJ)a8)EU`+n0si2}i zYMUb`oQ>&*$a1~dQe&lh@6@N%MIYmfYG=>&pnrpXrf|m{$4KSF_&FC_ur&iL={oydTE~Z3Pu9MICQ7RrR!2 zW0#AtBy`z4?rW*CS^Hiy^u@+*_y0{C=zV*6xB43emjo^PThHBW1xCyDmgd@Z)e~Yc zv2Xpd-eHBZ8x^GvF}|ghlsgqMD${ICVPPS?_Z|#7Oq-7wyM3t5deiAY{_l1I$ca?i_d|022$(hN zClw!TX_s|90qmbaXx|+!=W^U$I(Ww5+@a4sObyqp<;z{<`kj*};ERt5L3Y8eJZWb< z`?;OSwSBW!{{}mK^R77e{^$^&oQ%wJ_lCD#HK(ksLs!sUo)a#)urz(HjrFf=y0EuX zVF=3io*Cg;ZKH4BdiCq^bX}|TqtN)A(Bm2bg`vX0*}j@oR6=2bPC+B)?9$JX)E{oo zr(H#^$)oiv%;{=GUPuaZq9N+^fphQ;1H&cHKL(E%x82&IC4pmAJ_>9aUzlg`RuR^%JNT7v~B^V4ko2e^*wGw zWD=Dg&!#cUSyNA-wJ~WfmMK|~5-GHZxHPb_h%vf|EiQa54M^}*lh-5RVl2(3?ts|{ zi$XLE5hLrbqOY1f=S)kLEKk5AKt*{kmLd7Y;C$1lrj#vu$`2kL6j1c{Ba1HG{wKdU z@8aa_{e^skYPwb1dct~*~J4V#xK0J zcDL{P)TbK>kwlWpVdSxy$}F|Us``H0W^FpMz|#f4Mp89ZfDJDQdFLCC?fbaxX-c&h z*Lzr?NAAe$l6?2gvfuYt_3<)Y-e|Y6u8OqTHB6Rk_jjsFV`NZQ?7JPm{X?gG?@5ye zzxPT~5?nN(j&nL;H@yThD2Z0V=6|KTg8+83_Nw+3Fsu<-M9IRsXcxSgu+N4Hez$gap_f8)*vYIn z40cm2s5GJqUei}NfV1LNlXhm|HvaO@!K#E)qG86qnto{YIIispM#r~gTYBBFQ@@*l zN!tQ>A?W&aQ#6kMCCvV|^Wtzlf`3@+o@X^JC96Udw*AZdDs4N2UB14*eOy3)yc5t= z2;cLGI?WtdeKv0A2(M?G>2!UNQ)9Y6Hm#ZSqOEoUp3dZt&2dpCh>eJm8W=V(s4%(S z>4`C!<75jg=c%$V5Zx`0jtEOiAz?_xhXY|&uB2#F7*j@@IU}S-k;#)>zK?9HIxKJ# zO(nf9km{-+Lw=B~&ZbwgwI&ju5?pzfs;H~`b&jDs+;CT%)7=IWA}ZSJyXT@SpwKhX zFqO%A?wAat01o|eM1f^8zQv1+FMp?^v}w#Kq-E^ScNWO{mDE6F<=oRMS>vypNIbEUv(z919z{{xBs675H;nM zd1gh0*Yi12=)GgL@qEeWUD#9lu@F}kkT(`^fGe)mc~qZ&#PH{3uF(!QSd8!URQy`A zbJCz0hiCh1{&>rU6%EhF07#~gzWW^yC=ibN7habOE02^yYoB8!`ZVYA_WSWP2t=G4 zkMGQzJg@VH1kKX?oD$|u|MWT)r29E-d5!(L^=iMWF6v8C*tN)$+0J^;`&dMDx+r0u z-vwWmXgMW)7m#xh>KR;lg-S$aRjSFRJ--13yEquFGLSPi4danNE`%BBzr8S_GRj{p z8vdHjWbhnORL~**mK5S9DrsRv^ndO%^%Kd`v;M1H)pP-Bs9Sy7WhEs?*I!rHYApZ)0(1npdT;Y-yMqmG*W02|rZVY_oOtk;AG}shYk!Y*9GwS$WrGz(tj^hl zZqglN|9ZLER%-+y_ps|XJ^>%R>j~8wxb99r;|%S4HT%1Vm&I`J`gsplDCJi1tWHZNL-%vhF|e% 
zD}B#bJp#Y+LnJf~`Vo$u$a9nocO!BoTQ$P&T*y)8pkkL|9P? zm~Wde%Vagr+#I>2<8Oj%VYiy z680}5+yi(ZGWF4W$8|)EbW?vQe9dN%1jzlq*GAE2YSCX{mX!SAQ`3~tdr{90keQT7 ze)q5(p;7e8^lSa9u{N(>DyiqAft&NrHVmbNKMDP6AhmL4z(`Z65{P6twPz|ZVKEPy zexEaW82@1VuUd?h+7-(Cg|R4+Zt*ed_W@zezH5CKA@9B44}l-t$!*Tm^R`qA9@PKS zHx~Ht^yK^fzGG)@esXbvgNcbL&c5q)mk{R-I`T_ESl9EE)EM}q-gq3j&XMQsq{yEw zGAe3xB10bn6)Ym0$jrSnu0H#yGzvx-cP+tJtWNz-3QTVglyo<_BK+3gH6OR5o>ZOA z9R|)CU?QDRFsZ=JqS8j99;CuDF<=WYp*k(DHgS7ulM`bqkYmkBF8n0$Nw0%3_h8#34($lv3N8&q0}#OI$|>?hFIy^tqXVD( zq7oIYFS-tG;h9?B++EbCb+y-;Wl4f^iP*zaNvL`?LHC22N^ntoU|%^3#yenQSkEsm z%m{Fu?nzc`P5`MblF_=&cAjCha)7%g;6XYKP-WaKo2#HzHyNkq+DI3Qh7|~nja;`b z7st6e+I;O}jgA$rXRlCYiXfpeET-WaZr2}Xc%uV!ikQ|V1!9M{&T6VhXxG(YZr@9p zPtfP2!2P*0wW=Ga^W@+@3T9FsW@%$CK0!pp*D^pmG$H}%LE&z|deJ;Jp-XM}4w#*b@}dUd=-{+=Jg0prCC~cx-6Mr+stZ*`1HvsGjq3y!BK z=hwCB5D=~AX(bBTlQ90<4J+p?#sX#35_ItAov5<2zY8%u;zLW5OKJ*gjb1mhL-;fWVyyrpRmj) zTnqRYZ!0xAYI?f6HtqbcDP!hOkiF8>q-vX-&S!kh?xwj@5*+yjKkilMs*1vgo(*>Y zZuhR{8?kx}aKc>Ds58Hp#f8rxVYCubv-^aYvb;asnN*J~lCpM&Z~ZG(*?+pz>o2dc zARl4jce=WKML)GMMR4q{fw52ihB%%7n+{A7u~PH%GcT?nIdLUShRoo@K#Z^t_%HPoQ+D7R?beekWz$t4An{0kQ_32w65q%N0$549M!VAN=IfOWq>2K zKbnQpNB{n+)ep9&2Cy zJ3S?@R8BgCJ9WpLS?5&4I2I{ip!KbQ(1SKvRw00ZpZ?x_sX>j$LuV6TR_h^n`DV*E zT}9!zqX6R%n?uTZdPrZns?g)De{~wa`<~oibu`xxW(}Xj8>#B-P00(@N8@-hFYNr~ zzW-swx07068(m|2Zhcz7CW_JUH&+5*6%fEqu-6;umi&|N(B{*Lr~G?b-eq=s)#J;& zvwLWiFli9mmos{gh%!L=+rm3|+l`FaX8=v$7vilS2TFEdy2;^g*6B1^8%kt7p+#Ho zMFBksOgKw7w$*wdR2U9cd?VENyjM5OGGG*ovxySyV^|5%X8o@{o7s{OBJWUEtAW5? zq|mLm{!-9Yd!=*DEVKUQoG>PaKL)X@A%wTevG0?(BN=1YH7i4qfL|=m`XLwv)5OpE z(Rjj^!`JLxj=|Nfzhg9eDTalc>nDX2uJ>8!;)PCt%0v1(7I!NX?+j7+zJPjPFs&t@;Tx8fm}pBYnzIE-C*y^EY81~$Atpi8uMV1y9m3hLSitYJUf z7dG2%58SMT-0^U$Q7LvGhO-qwP~4;T9VGzLg?1$x*juIlDefOfAbB{uVUFk87cO|u z10Ge$!8-Tv!q!{AA8Gv!yANl(qj~!+OR&olHwYHU<g(tkW9yFO zg;ngS``7j-4-h>d^8pVeO8i#G4?38r?DxObr6C2oYixdGVcz9Hjk(gPHr}pB>`2>p zj@`yhYkOj;M>d4+ckHNI$Uv8A&GOQHkSsqu&X1O1J@6CRF?zb5&xMx4Io8z8u9eB2 zVWPxDrH6Iig@+(n>*8c##QAbYO4HPFe8CKmy-C(S~wiso9nwCpvF& zjBL*(0c`dNS~}WT5#iC{R1Ew6ospRw2T*<$ju(Q28CptUCj+NV0iGb&hLyvS5RLsk zdyj2<8S~r>;l^bqSp&e^tYMjQeR#%egnC=#oWhcd{c(=Qba)YM7D|OM zpTaO!r85E{km6neLHuO;(Y{MTOCvyQa5{kLwbgC>WkHg1o2^#lOpSs0QFniRf9`z4 zIV!Yef86tbGv@{!oQ$+BB61$>_6NChynTM1omuiE>4OB?@bm|B*GYxrY@+hlFK~KM zrbOAAQ$-`a>Aux8dP@3k$?n-d&o1vkz8;wsVrPQCr@f8*=~(C z#NfN2Iuz`t95Drh-Fm}GMOK}G(t#4p9!^x{FJodOLw?k{iVP@oq$6c#`-Pc;o@3f} zX6{Y)*}L3Cxn1U$X{EJ->B0?uQtobE`mav1i5pLXiN_Qe@u!{v&hD1NwJ!GfGhW7< z5qSyjf7LP?IX-ruBEL={MnTt^3ns*pH9JMx@v5^@1;etU9aTOR4vxbKZcDabK9Yr0$w~ zKRT;1)4%{l1-J4oe80tgN?$oB(@41DUV7|l#d~bednFIquP}daF8RjSdtD4S3i?ZB z?v0Z#Q_I;kG+GQKdk_qdX(R|pzp;HWG##wnh4osF(8zcjC~sCmbjU{1q4r%GA7nUuf1u|zfZ zQ2|@Q-nKLyJkU8RImPEF>zg(iq3=&r;yv(+`&A0N>m{#oqedi5VBMm>tT-j(iTd8m zeTU98)Y2vY>T#$iXb27P9S3p@0JRT-pY-OSEk`c9Dn}=0DzXO4uq|Mku8K5q^X2K7 zoz2F~qm%=ahL%!-gb8t0r?o1mM@{1^41}$k#?7?#iv-NrI5DPHV=9fF2irE@GfrwM zi)>s(&Z#jKa*29cjv$PC7-Y!QwIPN!)9g?8YgJ_?(|p?2`{s#<3OKBrYNplE5a;ir z%I)z2`=(02v1^AE4Dem|z%>^oFEv$EGc{Ch7a+ixC{If03e+gWt#YVYhC35fFGD5R zN3M+7&TOsT);u>$m+zWTT|j7&!y!P!f+>fR9+t~G)tqQ)ja~h>;_mVcLd=i=A2fLy zGb}q${sU}?;OpMA=Rksa?kJp=7-X3j!$)^sufp#0DorY{>`)tVszd|iq4Cikth?ZR zQAP!o&C)LuHx}$`W3&td(Z7+q#^_T>i@}PK0ej#yqqM({3Nh8i$tX5}QgBEJ!f7ix zl*g4MW_l&nm8H4mU#H_*K#hD1Vmk73(=+~e6Wj40(gh5Uwr+dE(1LV0jN-bH$ zeY}kFi_xUacpo$02m@PC+{D5P$^WwklIrN}e1CgWSnTq+kEi*!-|i~d37N9}y9Y?> z5$p6Oy5U9YL;9*EWwIZ-JyAA7c+0G~f82cnj{^yH`;Af^(f@}Bge1#oPLUZ^zt@vo zRqDk(+HgWOhm6CzNG5!JpdkoEgg}J(4TIEFh^^8L`F#v4&p7O-DD)6my9|H~YxTm5 zz%t}WJhEtg3^SA}kRoX)>P&r^JbRw4jLKcKq)gMc9TjyALRcjbifsB%1l2;q`SZbU 
zp@lJo^pPSOuTEG7vHK#rLYsScg}iX))l+7WY;plT1o*v1iw>{`KaV^8;}Zb1%YepvBV<$6oqZ_j4}a7u-WE z^YAoXt_XO~V!Ai9S%wdWkk$@q`Jw!~dFp*d;c#|G-vkYmr=cpC9P4i_IUH-Vg0iXy zvwZ=&A%@&+H5cAa?A)zhxLs5`z z!{Bad+~2F^tVB1+g-D8t&GU1)ss7Hn&-ez|3uOC$m~4x(gE*bOEbYjq&uevpyG^%Y z3X;t~uOk?o2Y3N7Tf<@@Ru;JVY1l6qO;?CLKx$V=T%Lf^=-kxURoZR< zz%F5Li=Jft$v`3U`UMq|TZr2?teL}V+!~%G&$X3f6NR@63bmM_yZ}qr{vQl5;Ha+^ z(v<6u$0T+ z!j#{rp{iJ9nWjXU;9_rd>%26p%;>q}Op#UdYtiyD$(Du+b0m=@ArUn32>`*p2;yMQ zj1Ii(O8!-|F37`NGB&^E5~fX#YB63*nl9zlgU_-Y{?9&!k&9D&cdzs5p09$2eSp1j zPwV&1c&yxzp#+KIxhicZgC#7FDK`3~lJcY4Oi$a-;7m;J6MF$d9E0Urjo<8U@Y5{I zQ~ceW%)2KTGhL8NNPE@@?D*X74;X-JZJK4n>^A?G83I zDzPoAjUI6Rk!yHsJHlwUm30eXN$-5E?bq#8gQ38vBH3?4#)jn1)~5}ffPjC%aUN*JU+eGM(Jt&Huj|$3 z+WI>5@_biUS4Rg|G3W|-Hb)&F+*kO5;~cC5UE9&POdECu_uc?#X<@cxaapmEMO)48 z9>hL)A4@I)A_O8YLsk5#mEir$2Xfa2sJ!pZah(5jpQbVhwG+iiJ`89(NEZpHUOXT{ z@JS{Kx~-6)ocx8LAg>5i{MFk^5bW{W&%l8I} zK*^xA%19$Fo5?KV>f_VI0`nT!1=c|KqWx2T!Re)al#YoqbufE%=vC{oRavBRS4TRp zTqu#?=1}Xb72xZMT8R3P=8JXmx3nE#Im4qmy+0kUkYn%{Ceg|Hd;Z|?_d?RXP-BA0 z*2oM%*?PNlpq?;BP%w9=A4fYQnXQ&Lw12l)F10D0X5lck{XYhf%NaUAEC#_rQ7djy zIsdDtNfX{vdBKI>`g zSU72Ad35pbh7g%bhOA@@bfr2;U*Yy@hUZ#tDF4^+O3mrR7|nW}yDt=aG<%P1lr09T z5KUcF8g&l8Z5{08&`@HX?$4Q?JI(JtL?I-ThTz$MfvwJDpOrhkhK-uueAj&nm<*!< z#8U$DKRsoooorQ|44a36*iMp6L<)4H_4s}lNRSYF`rE4wHs1xM(>1znATlN1C&@$Q zH_j#JP0>j_G-IbYRznL7m=Hz$uvZ#emV})9_9}%>rNfM3~oJuJGc`d72DJuV#Lb#D?4b z;xuh%&8-QoKo`H~Gc@34Y%g_MnX9v@2Am9!01iyM!N$*tBo!3g;d$KzeB)$pnJ(UT zvKI99B*Ti(whWHZ&`Wsm`+YzQO;j-a#?8*!&vqe}C+(a**IoZMFtgKE{jQdr`}=c+ zo*NWc(2{=99t8GD-*M1O*`V9kWtJ?=$eL2O+hWxKq`!y)L6*~EL`djzFL#B8E5$s! z5SbuLt&@~h>o42loG<@F>VhpFEF+fNo!(BJQ8pI0!58a)ns|lO?%x9J6Ec5rZ;&la zvmgke0Q1@#ihfCLKs#6A-~h`(P3ILLk7AOyF7EzigikxCJ`V1pxV;#AWPH=^rT62f z>%rq7hMkIvs!Nw%od?hF!vPlpn8|^whA~qXL;{$Xb8YJ|dnp_egV>9G-zPM6O%R2A32+da6uvN3w4;qe-b-h2Bncea7@b=-&^Y;Q~6|aH2_-5b=LATLHZaD zW$HJO3~JRogWY{Jj(gEhbs2OA;t!xv6<~t)1j~o5$sFD_;pluX2V^Bm&IN*zF&t#_ zjEU%*GZ^TIr7G>9G()HzIS1Br0V?qU5M>FNn>8Ksqc0OiRj>oe{GN%sFtAR?1XxfJ z@e%S&=rQ_VKyG-elt9G(JbQ`4911GPEo^RCo`0>&v9b2zm~(My`}u&{ZkdxVjR(7j zN{&9iF>+A*xYisXRcUXRKkH5zn4~h)b#HOnXbfh-{GbMJpAtF8le`LT zt~<==^Qvx`rw3n64sXcJhjv5i(Nqni_lbS-ae;A+UZxLQFd_s63r*+i_D}uJ%NS^g z+z~rS%=RLRN(~9tLaY+a_Q370{vQDEKoGy?TC$u`>2-dHr&tA(=t+r!V3#(rmtxEN9b}vwjoTxy-Nk?jtg4BE+~0_=-OOtzH>Hl@=A;xN->=2Lk#2in5X= zix$9FC6P?tbKA{lFX5h7lNg#UEC}APtnr3rOJa#cS8rc55r@tEP-qy7&F59rG)erS zhwk07V{ahOTT&RTn>QO4C`G}*{da6|oK8|OS!;K7r9SuB&)FtV%$F^k5R71pYHc@t za%KeGp%UP(W>coiM;Pe-}$tLe@6Sryh}e% z!gx=+hjq=Yl8?UcHn3e3z!(8;k}k)`9=g3W=nMMEA$oA%`sD~>0>g^MwbiQ@&T~8M zH?LfjmgLUf!LmaCn#Q`~fDgf9Te}C!3-fPV-3ZGL{H{Ou=zWJywma?C#q+9{)z3BH zjOB~wf+gqX74?XAJU(qRgjA5{{jHzR8twa7({(-fc89m=Z80aLFa0 zD}k51)QJkjb09xIAD9Jp?0DGem<7VVZrir)&;R_-5%dHQ#->f1UVZh|`|i7s(9u&A z$Th&hftV>oy-KBW(G8e*AOXr*xNzYawZUrRx#yny_P4)1XU?1_pM0{Uqy!jBRaF&9 zhFBqyeyDID>C6qBi6|$r=646)`k%rLPZVx=GTyRFFLU(8HC{dfQm{@Rs5Igy-}A40 zfEBEuv5X$w4!i*8Vd@X63z!dpF%)b2SjlIsPFPqNFS_wU&e$O;v8F<|#|OR!2u=aJ z19(Xwya*gv3?mVeWvnfuDZ7p&PVTYzOX1%E9}N5ScZw_Jhvq#e-Xa6K#4IkvqU7GVtb4VeS#Q^H1$1v-@BFi6Wk6V1gG7d8WgT zz~M=p+#Nr*9W}zKgt)`(e!;!qM!OERBw+``<)rIH$jzV-kc=Ygs=ahB4I5p=k)bJD$$Z#?7`2Z! 
zV+tupTk9oZ)!;5>^eA1w`)2(3j=ZIJ72f*szE^)pDjJi6F)l{cx#l() z)4{iYmcQm9bZ~h8>pd@h55Z98edfPM4!@0}M$?b|p?Seoy%cv>c*nD#j2+nYv(o#& z02l~#D)3_>V)Pcm(g(q4;;sA8*s*-(-_s<3Sx|{XbMjKRoXN_zG%xh1Y=D(nT2^H` z1^SFQ6_sHZ#j+l%*rtGFRMi--q|LI@y)5TaXuFlA8BM0(kwV8cwoi5RD>h274r4i2 zOcz-w%VdkAr!~YpxNal>#*L2sd_@BQj;q^eJdLpgir4gt#S7!{6kfYVdjN3Yd;sdx z#ckjaedPW-F&W7Bd#fwU8tdn`w6)Kvu0l9iaEo4C*;hI@5TBzbS`pT^v3{Phw1XDt zp<&(Cv&zTMoV!^&yAr|;e`NEVVy!-R`thq=@jQIS52wDv`hjkIV(RI~QqqjC=z=ni z6)+9|Lg6J#K={((3fE6(xm-?}~0H^x?a1QdEUwNg^chI34f5{f4`<%LG_@UOhI#C+b*ZcfIuFAxNfrc@m?kYTn=un5 zaOKbqIOX1U+iiRI?uB0o2sjYZ^~jMU|L_n0(9zNHg|~eYCBw@DJ}C$nOO$CW zX(=KM{;>;x6T}38>IjNZ;w=)aj2+oBxansw*?_i>m3**HIA+yT3W2a;vY|*B&|z4- zMKJCF$H%}szfh7f_=Y6g_OgyV#740cR9XX-P+Y@%Uhyut5vC}J;tAwM9O|$WADPi! zz|;abPQlF|$y;_0=uR_zCzSYbW~dX^4OXj5NsgdBKtOQ?Q;cQ|g0Cy~QRQ)?Qqp_b2clNyW?Fp*YS6)=M}4pTKY&EQ0aN`gU^4-x2Ky#k>3nb9 zy0iOuJ)hBNMzq;Q$%#?T(1_41y0nys5)&dFim*db7C(G}vIq`%{wf^CQPF%1lXdYS ziH>9)%`}ssGG0|;X{|%Fw@Y@$!g@5#mQ3U^qHyL9;(C^}*>_z6h>YqEo{k$|1+!5xwR|uX9t?#9p10Yo zYgQ~P4ET!+3!qg^p_u8C1har&{O*O`fe_RW7^Sv$blrdZ&4~L}QCf`8bZMp8j&n#r ze-0bC1`X#2qVEz@4GL7!Ag06t_H{j&lrhsD+6Y8Qnw_)Z8DnKnDGPu#SnAZFkLdVp>pduBk!MT*9nsx*zVlzHAgk;FEkpag^c*|qNu^EQd zxKSWLYVf8%0@o(RaQ+qV3-5fk@1=j|{fOvc_pi85aOMqc{so*7K$9`N>v?}4Nrn~GTOWi79yZOh#q{) zQ+E@BZg>~nlIlJVW0UUw7FbV!3>EYr7GEJ%vjjc}gYW)o;LV?aLqU!YgVt2Zx{}?; zfIhHe=mwt5nW476*M0;L0ZaC=@mzC=g_&LtCE-tbH`q)sz`*12i*w z*l{Q`*ov5ep>4kfDGBfNBMbb11~>l_K~Ev8bl-`QJsW#o_&4AwfRjq^{|gYW=8np_5du1Hot2tnp1G| z6NaNr(e0n^ec?M&BGmKyZ-L|j4k^yr%V3Wqc#Fhnf2z9~w~f8LF4c8Jj0|S`T1V1x z$jxXTaEjjNzl9zF2!pE&=xhP^!0ia{Ucns%ngd9KMB52g0>XfN09VON??gZxWC!|| z={ZW#520{GrG8ZH#YCu+kB#R z!JKLg$^IkFFh}>e-8Gfv7e7L~4;(sjv<0N3jSFjCF30a)dUgGpmGf$_ zr7^@J6_06R*g*getFvmxI$rG0T^PfA zNXoI;1FIea-vJB|#!q4gb`7qYB_;QN5g}16fijpRAPg!TR$a9>u>5H-Hgxi$J3nPD ztO5wafXSe`!MkuhO0eV=7Tx|yYr(ASU^@U^D1p93x8kV>d;X%^KY?f$gzp2v6Y|ST z@BKWW6AkgTi)Ta62Y_NNo(;xOgj@x^0ysWFBZ_vq=d46T57aC7u@*a>_`tx zHW<6BVEq%Gx;4O43UB#17V|L@uGz~SRShtaa8@p|g7m@a46c9NF{_S)3GBR;P@W4m zeAHS{g@_~!!`jMfz4bQ}l2JVV=r3(YE=lwUfJN8zk1x*WW43rfpZY=DFChky}j_3kNQ{K z4`d3p3Hha^_kEGPU05D=4X&D{h#dno3J$-vf?37)eE~{4#3g4Eu)KiAuEz?YS%DRbL^{eqhw2Nss@ZbsM|5V98C^{2iD0;Q2nqdfE1VE9NZPvclW zZG6FF2r+jNvIoW~=qxY=OMV4_6>yyr_fS&uW3aHu^tDo|Xeq1)o(29?u)~%;^rg%o zj8=*ORN)?jZxrv$cg|T3Du9;Q5vx=VpwNIUD<W*b?PEBBBgbOYeGC>=~V@*%fz#&RowXlb?dbp5?6Dn7jaS~@EK1^{x>^YlMF&HbWu-9R zxFqFY;X+{5f;HY7mN(}6^RVu}d;g(lUw#e2Urco43J=6ZR!7$F3Eu`(U`}J<*8qDv zynYFd10MHKgbn|ci>$@WeyJotCRjXAS?#P^q*0BEnxwO7I%=Z;NNEvJimEcEZ35*0 z#|K`u_&4!JAP!u^xZ^({EzI0l29qV-DT$ocVXet>pDOc-kACdbfw^~<1fVKz-@g5< z)8+|N0#lN}l|whs8~_tb^$l(#KsNwhz#7wNCXk4~{_DTaZ8D<0e&iz`$vtC~Fnjjw zGg9WJfpJGP8b#h&vu5Eu7L>x_u-ol6DuTM3zxu)4^yB-mi6|lgP?Ct;ftQtg{%R;H z<31_l$?zA0NfDV@JBF+2hjsz3=u{NpAdP(Q@aNI3#c7zPLLggHvvJT zsl0|;y$jYc9G>{(5E?Qn0=EbXN7Pb0)^QS~4ET&!1noL+@twJc6gr!;1y(+wigyBh z(O(pltE$0KUMnR=04I@9FJ$6y_C|&T#3+o-!SdGeWY$1A0>S2{24a$X=qQ~BBM4Zd z06&7Y%lIWoEV$-jIIuvZ#>vT2j%P7;kRD-o5iwUzMu0vT5-=_y3hab|V2!K58cOc@ zEQS@_w4g`9Nwg=|xB~_t=nG^EQxxD(;m3+(#rJ&C&+@=hNqln)0SF&W|QUQmh}i5(RS zO8tKhh$lC%xsRLK#VKV>Toai*K?&Lw4^Ls%gXs_^!4fan^F0{{-yH{7q*Yp}n1z#A zhG!^QlbB4F$s{G-qT(lD!<|Z1GRlBR9ZMj)9TAz5YPO6@xET}RFmE)_R*7=wel3oe zV$(tzcv~&3odf2G+PO7)gEW@mz*%^y8V7O>L}S^uXaAAoEo~j0wR4PGE>-bYv>b>9 z?4kWb!~Q%tr7@r$=X~`9Bgjv}|Nq{J}m|&6_5#~SSnGDlNys8-*bTi~PNstZw z!GBTJOxw&@se&me(e3p);k0e%Bn`tX2va_N_M%b@iBN?9c8bAgQcxqOCGr3x^kp_+)gp*wljbpD9JkIXHJUq-yK(#jjWKl zMJMc>up}~|*|fo|VS%^Bd2V7AB14VFZ+E zsH>eKljw}>@U_(FGchRWw0c}#V<7?<6)~?P=mxlz_ z(%2JnHVpYLH9G9g~ zLT*!8&FbL9Y>5(AWQmi3Y_JhI2@`!-=9iU~nRe{cY~-(8bKsMb-SIT#$m2@uuUxI? 
ztO$=yFT(nd|9qwjqF2*Uq9rzmR*;KkhMix+pej~tqgComD z#8}&SBEe)wryVr_E>hm;y)u=e%`~P2ObJ{P30!OF2JAe`%gev=m9HE;crX%)Aoj{F zx7>1eT$M{AJF}20CIMn^!qA}JM9@LPra`zBui1IS6_cu2(dChVmXzo=1ukRu3`_Tm zR5+u#8EOtofoGJ_G`k|svn5Kc8Cuo$Ce@hch6|b9fyj6k3#U04%>&oAbfzPbjKYpf zq>9ySK9iIbDw;|~M?xbbQAD|}tEhrjJ`UfOgw$awY-~?8aki)^IV+c6n(OEyHl~^Wh z%)ui^%Zf|NN(wP!aXReyoRMtn&OJ4=%1et2jR2WvTs04`s08##8RW~rD~Rf9a4W!Y zL)F8-aujZOP0>KU0AK^xS#=ZyxuOlda+PV6Oi3w3)&p<FhvPmYv=~L4fBn)YuDa*abgUtmfo3m1PhP+CwEuz3F1R zieM>W#P}5)J_epAY*G=j8az=DL50fWmEufFgLjI}l9H`1J_VAUaBL_R1+-BNXanJ~ zF@G{;ejQ2xh8yrnv8{b_`WS8)qmZBH^}5|V_w4WN>02{u~-7xCu9;m(b^7fP8f8|6m-L8wg2+@m+!xA{haDb035lu zv7)H&9XPys`>y#lRpxIzS29;^e>~Ku?tP#EmL{wAM2RJutxnrO+NJ^Ez*$|Z#n(0c zcm{}r5QGHeVL8I=ew}S(cuD68#DQ-(sOU_hqtFJ<>Nf$B7_KYizA%5w*V#Q{cK0ev z;2K0X42j}Va+#<-uJVYP)teG9B`~ceK)X3@jx95k%4R5TgmS>aKHmjzv7cUIt=b0MOWs?7S77Oopkcp3{^<7x z89Z`K051Y-;mRoCDDD|p^60i6#-*7x&Sq;(3CvUpT(juLOdSxj-KGRg3CwT_U}BSR zqkUE?s?rHbh=E)b1RNaGFSERisIQ2kf(a0&MXb}x#6e1i^*@RV$(BCZ9Tda@E|W)# zUS9Ji5?vuWofbP2p=>-72n0cyY+7y1@Vnz;n@?L^w$&fH*itS}ez(hY%Z;ll%S-nk zZtCdjjm47)jENYraIt8pn_oSvOyDojoodOVx>!81=fL6S6K%383p@{(4Oy0>gKn@K_rdqw{qpOZ4m36Q4h+hYOx!II+Fal)yk+(B#+z0zzW^BM@{Fok zDp5`mAB5Hfm4}WB#U3XJz<~`Y9uW{8>9}I-oMwxopS0OBWQ0-&Fcjt|(mY!_w92y>xB-v`^gZY7ZHI)boWB$yq?&zFZ zJD$2MNRxO&i>0V601kv@CMqqU=^QwNoVX62(4?D)V+V%};5*mrD~XJ=G1ia(KJZR~ zT(C0=ZCDYVkl1caOP)A9pZD$C_x$tE-+c4U>(;G9SIo_nz_la+6WzF$M$_zzDFIUg z(?tSNiET}Dv_%St$)FMs(RQ25CiAQ)(Xi^p#ITQYw5#qA!@`rrff&;)Wlu%`U zsv=klRW2p1nO;5>4+QdIwE<}3;!x^l91EY6)%zj6G0ESh6rY#ajSvfNzy1q5X2uudCI(v+6|< z`e&Yb=DXkhF8H2JbmJ`Xm?vhu1Wa^e#t)&{0aF5|1g4<`2(ef)YLl2!OSZr%XA-ii zDhZL2!LqN?5sL4pof^kEC^43iCDz7jT45%psG5@H(+U>>*TzUF9f=waDJHZr4Tt!O zHVHfvd1bs{yP}m&%gUfm*{s%S*)+3J*R%vcpr8XBV2>p3D2fCP9}&RER`9RszMtMX zz&e_kAvh1h!NU2%+{jG;%MAwnLy!r>S#+xnme8_H%MG|NmP|hIfe#=E!?bLm*(g&2 z7bSrigKmJCY$!a8zUD&X#5zvjbo)SkFZc53Me@f;Il)E4unx~8eS-xpF)l7ddPo1BGugD_&oSy-{5 zWW8l1&*(mm$#vv|{TK?@cSkSleKDLp$r1uFizNR@5u`}Huc!nY>e6$P+}KexO;(64 zB?{F0gJ$ae)m!RtI^hgVvQ58jFg%PKK<^V82Z)?BpY+k_r!Gppi;x0YO(55B73axW zeLIH_j1T|-KmbWZK~zVlbb;bJa*b615^Q|~_camdXc>m&NejlZjFvi{jWlR1mAS15JonFc-&%M9C#a9McurKzrdr_{bxV ztXj3Iy1E+X8@qS!#`o>H&ml*YLf2DZ93R-@g6(_cb&&HZ(L|%5u%JE?NT9RGU4! zU&&ljD+F*ygc9i-0$9MD)= zfnP8@cwIKD7fV7M3q=F4fUKyJm<4YcF2O}s;5nTOPy-;qF&xO!2tYw4)7Tn1QAf~FBY!>B%{GI_<6@yhdgiEIdj$@4^ z06;WaR<*1orLs~+%!*j>rSUuhM-i)Nii9F1==-EGJj;P%4Q)o*sv@I0bP75ufhw>E z)CFq{3RjFRP9ddccFKEMBL$xnwf&un}(nn*x)SSi6R z3$gJK1c>E`)gSHwRmN~>SWb}8TC0T@1Reu}GZF~sWCilmg*7?_!5stnA_0lx1w@Jr zj!yhd1_BnBD$2LQs2cAl64*i$6&a%?il{=txMSE!79w0-Hu#I1fxO7bokeawqfA^x z>kRaQ{56s=c2`mKOsF@O^4LaWKRk@CiFb&8Oe8AR>m94IM?9LPW$ zZ&36I?b2~Kqeli*LJA=b9z8-+2{=T~q5$$DCioay%@RLi^v!8^JpTTB^^Tce7bO9K z(MsK?)@_Qda?jAPm0|&N$THQPz|%ptDcK4p%?MIUI?Xu=Cx1RqyE2JbBAiS{0>OX@ zZCunCTpO8XQEYop4YdK?xHdXu_QI6F43>cY>Lu@B4KOvCJ?L-L8Qh@@X*wpg*%+o4 zj9{Tnbc09`b1X6g)Mu73*}zS4>g|6a62AO-1VN~&slnv1rKRO>|MqWT&GFPzPx*Yl z%U|=gP(vgV*}re^+poUde!Qvu*pbLc1Q-WM->-$fT@!tnG3Z7*lYQ--twiQP%<%;! 
zlsrdNP>T`poLEi33y?5B5%@qiS-?9-f7*~tM7tx(rmlbDi|DriMu=LKxYQ@0Ns|r5IXTYKnkK30^LxxcqTJ27)mCS z7K_E{bb6d_yB+Wb-hC?Q&=u&aDw@VXuZ04QGl_UIt0*jx6oNvq0x6RP)*t}iz^2N zLeK}j)uf%Ml}x2bgfR|?7{S9hw4F4Dpd0uJkfxDcBQbW!kAXlVXg7ctf^Lw$lg6OS zXfSSSno%Jxf{tnVKCg*x=mNjQUv7bF@QIMVhov$atR=7mKl-<$9u*5oP zX(W>x90(1K0NOCEHZJiH%*f&ZZJgX8x9kuW-8m!MZ#LSLz_ljuD|{62OfC91URP2kj)r$;}6Pf^bG@$V^oPf_bVs& zs-ZyQTLy~IcPvHMl}R}!AV>sIdv@+J49nOu9NhnMxry1F2|p`VC;?OMOvgG6%a=YvP@4_ z>q%CeAxr)EOG>6;D1s|vHH?-w&Hm_eA)Pt3kpoyB=vV_eOZr3poJs`C3xZ*hi?}ql z#6ko_KmdK@NMe1Q^~2n=Iuu4SX(&d|KQbvj8B^5=1io!tTrA^QM`WI-232D)rVX2aoI?`*7!Lum#yz$Y54BJ?BVhmzz!HLk zBMScg|Lna9fMivD_Fc=ZTWc@VGt;y0z_1Pw*}<>~1ChmqxS*h*h@#QFiAmmUU-BlO z=6g+yk*IGXZ%iU?s3<6?fTBo55H+&MBKyEB%+k}-OLbM<{rf%Vb~ioUJ=5Jo_fS1^ z@61%+y7%0({Lkr{dY<$A|9^RiAkoO`E_J~os4||Uj}QzNb!WK5jy;?6+@=`UB-l_} z^gU)Z)Kxl0H*}A`f&47wUV-;TfOdm}z4&J&$>jAxJC{m@ws67*)aKRTQ@Nl1T z2@b;lNjOKa?VnDwbw6m&K?;CooANN&T$8F1Ylbi!r~C)&izt%A2m_ZkN9ng>%gGkl zE;cTr3Go5uK;Xckna*UWL6RS%ZGhxaMQ~JU+s{nN!UZRjEQ< z5^-SO$ndiLuvs;uW*RiHbeOpa@DVod@zGge5sB2(3f@1-KW>3Fru?)9%r_zgxlEoNGeybk2-f|8%k$mee;>plS$#`vOuX+y7baZ=@Ynu9I-$8(U0Ez<~N^l#u?4k%jW3K zplaj0-+Jv4OD{R=WT*}LwPJDS+b=xp3x?pJ00hU%l`EU0zXxwGW)Hg|q^eD54sa>> z5t#%ZtX$JRya8q?_d#}`HBg>`48VqAsK%2*z`Tzi!<|5YPNAQrr~qG53l182TV4XR z0G{=13p^;s!^BG9hItyeD=-LAp>U%nhyw7prBvafW#x)^wh+jIcnWkbZQ)C=fTK7l z!cv%LGET~|E%{1Bgx<)=t3;6(fWJ7}1afDQ>QTUz77I3k7@#vk*YhG5-;2NrJjcOF z!dJkKd|o5uQHe2VPR}XSL>Ocl4NOX!*b`B;?%WMRAVHJ$7s3L?#Vd|iK;3L5O*w@6b(3li^1ka1C71<0fa?^S~ zmU-av0drsoB^8UT5RQZi61yQPfTdyue9lK%76x2W&96JSzlb|gy{QnMBQ}@eLS!?R z2!sK>R4rO$Tu^(o1O>q*z2PxpWq9ydK`M{{gXxj$kSd@Eu|${&x(J`*;}M0!lOB`E zCqDF^CCu6w$SPPIQ_Df&-(j9l)aqO|jChW#o`)t^-0f&?a@4M%o5O!1vJX#QTyId z9zZ$)N3a{X(A*`6fZ608X;4FD2rcRW9RgH=fK*Bd1|kk9I0iHTs1yPMN9Xs{=cC^>z35*By^)2`y62=ZiTapT@#>kT3&`ZPSdegq9#c z32Fq*hoLr@3BqkC1gHFJ#Ar#hAU+}G243V~4QAY|8ccz2D3k(YM5+)P!Gl>zSdSZe z3I&4zKd0s`O9P)4tQfF)N@!V}2&AMyz9FfHmIBm(q&;zy2zuZM2AhYrF#Qq3!9t|C z(M^bY5H+Huz|acOOjCmP;ncMJs9BOAEpHYB>zTPRD+O{w1J!yLD*`VP)y*mo zwMyxL`lf!U6$*-{i5U(;t{Q$Vrct!~Rf4vCs1uPnD#|6@NGsyytU91J)~!2mqAlXI z+1A#^um7x?gru|6Z-EG<-IgaZ9ZM3Ql<7BaQe$F)!~%&05)15Z0k%6YyzoLCRZc(s z^xf|y?@}9{fBthf+yJ$4@o7hoqBdgdh~T*BthScYZTH{9_w=jY`OcLqS0;vgwJb1e z*bNLM+$jKNa2Pm_@Bd;OK&EU=3&#bZT?&9T*_6(5YQQIWk&7|pGa!_bDg_t>*eOtk zw(gyV+h{;s60ei}ejHGAnUbayq=M)9o*;)B0}_TLCSlJz6%oc#s1;18{c?pkRVV|W zpqE4+1w#@b0c^^{zL&~)Sr4s8R(U+Y%pM|)kcg6gp&%$tbD@ALs2P&N5EVkn3<-^f zJYVP*Jr6^6>Wy((wKMdab9gO12cI0kx4K1Zk;I zK(|5cRD-9DEu@Mu_LYsB?yz%&dd<@7U3WP9PsJL(0OiXl-D$;3EjhN`)5Gm*)}U(}h%mNP#|(ONk*z z$Zmsx1e%4q(B)7+f2oRAnL7~&c!$7`DZ36;L2;PHA(s&`Q7jC@#+mh{K_RnYaZ#Eo zve~>!Q6uY$eiMk2)#Gcpnj#+OpU0Zj!U|!!h)JrYq0~HLK&YO&c!g`@y%xECE#$58 zGFd{)ta-1YS!cZ%l?Mf6OM8~(u4gT;TD7X_mTc0Y!~(C(0%>w#tvNZq|1HjIyXy8#jOB`A2`bCu$=u-_}yT#15X815`41&CJusju&&N1&Or?ZVd^P> zCjtky=s~hdIpRn8IFp1aF^HMDY2#7=YUSBLE(SC%KL$3yea|Efb$#-*QVUQq%KL@D zG8zFjW&Vi#fcbzn$%2FdrrIxus{p35Tdo~`L>u&0Sykj=A?_sZKKCMkHyJ%L0d6v4 zI>_e1emq%2P+{o0KrdaRQjkAm1JBbh{8+-e=i`k+I#q}p+9(xKZ4}E=ZIE+==naDW z7)1;XC(U{wqCwvx@XdtD$%1|h$%`rww?obnT~+GVI-WNoyWki^Rxs_FH8VEGPf^hb z{6%?zM1o05`MB@+Om+aHgz?25Fu8;}5nV#^5(ytf49Fb`hmt67Wr`%Ku?u)1(FeE> zC=jtCtVF71_7u(o@kKgEx?bMm{XvEv6DUOn)at-s97IK8Pgp5h<61Cia#*H5?k4ILgbTcjR+1)C5~j;#%V)& zTU~-&1Pus}!125Sii?~aOxX3NRvUhUfC`}AARt{U_!b=jd<7f%4nZK~zW_+45X}QE zZJ@K+;TH@AXy&uILea+61OQI2+1{6i06zt7ybJmYs?vX7(Jh6;P4Y470t}b)Hu#)| zQF#FSDXd+4Y8OT&JOxj(w;z%A6>3B5Mt`kVbEvpn$X5!4X^;;D1dfILMaUmXfc$0j zw^Dy$O{jODi%?|Hz|TQ@h(IcFeBV%Sd`(1SPQI;Fazp= z46r%4j5J|4m}y1ufIBf%6fZILM78L4STsBZO@Vx-0JUM;WSq5fdAw7gQg8&~F}h)B 