diff --git a/Dockerfile b/Dockerfile
new file mode 100644
index 0000000..6214a32
--- /dev/null
+++ b/Dockerfile
@@ -0,0 +1,12 @@
+FROM pytorch/pytorch:1.5-cuda10.1-cudnn7-runtime
+
+RUN apt-get update && \
+    apt-get install -y \
+    git && \
+    rm -rf /var/lib/apt/lists/*
+
+COPY requirements.txt /workspace
+
+RUN pip --no-cache-dir install -r /workspace/requirements.txt
+
+RUN pip --no-cache-dir install 'git+https://github.com/paperswithcode/torchbench.git'
diff --git a/README.md b/README.md
index f690aa0..4b539cb 100644
--- a/README.md
+++ b/README.md
@@ -1,6 +1,18 @@
-

+

-Easily benchmark PyTorch models on selected tasks and datasets.
+--------------------------------------------------------------------------------
+
+[![PyPI version](https://badge.fury.io/py/torchbench.svg)](https://badge.fury.io/py/torchbench) [![Docs](https://img.shields.io/badge/Documentation-Here-.svg)](https://paperswithcode.github.io/torchbench/)
+
+`torchbench` is a library that contains a collection of deep learning benchmarks you can use to benchmark your models, optimized for the PyTorch framework. It can be used in conjunction with the [sotabench](https://www.sotabench.com) service to record results for models, so the community can compare model performance on different tasks, and as a continuous-integration-style service that benchmarks the models in your repository on each commit.
+
+## Benchmarks Supported
+
+- [ImageNet](https://paperswithcode.github.io/torchbench/imagenet/) (Image Classification)
+- [COCO](https://paperswithcode.github.io/torchbench/coco/) (Object Detection) - *partial support*
+- [PASCAL VOC 2012](https://paperswithcode.github.io/torchbench/pascalvoc/) (Semantic Segmentation) - *partial support*
+
+PRs welcome for further benchmarks!

## Installation

@@ -10,13 +22,11 @@ Requires Python 3.6+.
pip install torchbench
```

-## Usage
+## Get Benching! 🏋️

-This library can be used together with the [sotabench](https://sotabench.com) website, or standalone. Below we'll describe the usage with the sotabench website.
+You should read the [full documentation here](https://paperswithcode.github.io/torchbench/index.html), which contains guidance on getting started and connecting to [sotabench](https://www.sotabench.com).

-Steps to benchmark your model on the sotabench website:
-
-1) Create a `sotabench.py` in the root of your repository. Below you can see an example `sotabench.py` file added to the [torchvision](https://github.com/pytorch/vision/tree/master/torchvision) repository to test one of its constituent models:
+The API is optimized for PyTorch implementations. For example, if you wanted to benchmark a [torchvision](https://github.com/pytorch/vision) model for ImageNet, you would write a `sotabench.py` file like this:

```python
from torchbench.image_classification import ImageNet
@@ -25,7 +35,8 @@ import torchvision.transforms as transforms
import PIL

# Define the transforms needed to convert ImageNet data to expected model input
-normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
+normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
+                                 std=[0.229, 0.224, 0.225])
input_transform = transforms.Compose([
    transforms.Resize(256, PIL.Image.BICUBIC),
    transforms.CenterCrop(224),
@@ -42,79 +53,10 @@ ImageNet.benchmark(
    batch_size=256,
    num_gpu=1
)
-
```

-2) Run it locally on your machine to verify it works:
-
-```bash
-$ python sotabench.py
-```
-
-In addition, you can check your parameters (such as model names and arxiv IDs) by running:
-```bash
-$ sb check
-```
-
-Alternatively you can run the same logic within a Notebook if that is your preferred workflow.
-
-3) Login and connect your repository to [sotabench](https://sotabench.com/add-model). After you connect your repository the website will re-evaluate your model on every commit of the `master` branch, to ensure the model is working and results are up-to-date - including if you add additional models to the benchmark file.
-
-You can also use the library without the sotabench website, by simply omitting step 3.
In that case you also don't need to put in the paper details into the `benchmark()` method. - -## Benchmarks - -### Image Classification on ImageNet - -Image Classification on ImageNet benchmark is implemented in the [image_classification.ImageNet](https://github.com/paperswithcode/torchbench/blob/master/torchbench/image_classification/imagenet.py) class. - -#### Benchmarking Pipeline - -1. The model is put into evaluation mode and sent to the device -2. The ImageNet validation dataset is loaded and transformed using `input_transform` -3. The dataset is put into a DataLoader with options `batch_size` and `num_workers` -4. The model and dataset are passed into an evaluation function for the task, along with an optional `model_output_transform` function that can transform the outputs after inference -5. The transformed output is compared to expected output and Top 1 and Top 5 accuracy are calculated - -#### Expected Inputs/Outputs - -- Model `output` (following `model.forward()` and optionally `model_output_transform`) should be a 2D `torch.Tensor` containing the model output; first dimension should be output for each example (length `batch_size`) and second dimension should be output for each class in ImageNet (length 1000). - -### Object Detection on COCO - -Object Detection on the COCO benchmark is implemented in the [object_detection.COCO](https://github.com/paperswithcode/torchbench/blob/master/torchbench/object_detection/coco.py) class. - -#### Benchmarking Pipeline - -1. The model is put into evaluation mode and sent to the device -2. The COCO dataset is loaded. - -Without `transforms`, COCO dataset returns a tuple for an index where: -- The first entry is a `PIL.Image` -- The second entry is a labels dictionary, with keys `'boxes', 'labels', 'masks', 'image_id', 'area', 'iscrowd'`, containing the labels. - -With [default transforms](https://github.com/paperswithcode/torchbench/blob/master/torchbench/object_detection/coco.py), COCO dataset returns a tuple for an index where: -- The first entry is a `torch.tensor` (representing the image) -- The second entry is a labels dictionary, with keys `'boxes', 'labels', 'masks', 'image_id', 'area', 'iscrowd'`, where the data is of type `torch.tensor` rather than lists. - -You can specify your own `transforms` to transform the data so your model can process it correctly. - -3. The dataset is put into a DataLoader with options `batch_size` and `num_workers`, and collated using [coco_collate_fn](https://github.com/paperswithcode/torchbench/blob/master/torchbench/object_detection/coco.py). Alternatively you can pass in your own collate function. -4. The model and dataset are passed into an evaluation function for the task, along with `model_output_transform` function that can transform the outputs and targets after inference. The default [model_output_transform](https://github.com/paperswithcode/torchbench/blob/master/torchbench/object_detection/coco.py) follows that used in the torchvision examples, but you can provide your own `model_output_transform` to get your model output in the right format. - -The expected output is a list of dictionaries (length = batch_size), where each dictionary contains keys for `'boxes', 'labels', 'scores', 'masks'`, and each value is of the `torch.tensor` type. - -5. The (transformed) model output is then converted to a dictionary with keys as the image ids, and values as a dictionary with the predictions (boxes, labels, scores, ...) 
- -```result = {tar["image_id"].item(): out for tar, out in zip(target, output)}``` - -6. The result is passed into a COCO Evaluation pipeline and the results (Mean Average Precision) are calculated - -### More benchmarks coming soon... +Sotabench will run this on each commit and record the results. For other tasks, such as object detection and semantic segmentation, implementations are much less standardized than for image classification. It is therefore recommended you use [sotabencheval](https://github.com/paperswithcode/sotabench-eval/) for these tasks - although there are experimental benchmarks for [COCO](https://paperswithcode.github.io/torchbench/coco/) and [PASCAL VOC](https://paperswithcode.github.io/torchbench/pascalvoc/). ## Contributing All contributions welcome! - - - diff --git a/docs/01.evaluating-multiple-models.md b/docs/01.evaluating-multiple-models.md deleted file mode 100644 index 485b2b5..0000000 --- a/docs/01.evaluating-multiple-models.md +++ /dev/null @@ -1,48 +0,0 @@ -# Evaluating Multiple Models - -In your repository, you may have multiple models that you want to evaluate. To evaluate multiple models, -simply add more benchmark functions to your existing ```benchmark.py``` file, as in the example below for an EfficientNet repository: - - from efficientnet_pytorch.model import EfficientNet - from sotabench.image_classification import ImageNet - import torchvision.transforms as transforms - import PIL - - # Define Transforms - normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) - b0_input_transform = transforms.Compose([ - transforms.Resize(256, PIL.Image.BICUBIC), - transforms.CenterCrop(224), - transforms.ToTensor(), - normalize, - ]) - - # Run Evaluation - ImageNet.benchmark( - model=EfficientNet.from_pretrained(model_name='efficientnet-b0'), - paper_model_name='EfficientNet-B0', - paper_arxiv_id='1905.11946', - paper_pwc_id='efficientnet-rethinking-model-scaling-for', - input_transform=b0_input_transform, - batch_size=256, - num_gpu=1 - ) - - # Define Transforms - b1_input_transform = transforms.Compose([ - transforms.Resize(273, PIL.Image.BICUBIC), - transforms.CenterCrop(240), - transforms.ToTensor(), - normalize, - ]) - - # Run Evaluation - ImageNet.benchmark( - model=EfficientNet.from_pretrained(model_name='efficientnet-b1'), - paper_model_name='EfficientNet-B1', - paper_arxiv_id='1905.11946', - paper_pwc_id='efficientnet-rethinking-model-scaling-for', - input_transform=b1_input_transform, - batch_size=256, - num_gpu=1 - ) diff --git a/docs/02.adding-paper-results.md b/docs/02.adding-paper-results.md deleted file mode 100644 index 30c6857..0000000 --- a/docs/02.adding-paper-results.md +++ /dev/null @@ -1,41 +0,0 @@ -# Adding Paper Results - -The existing paper benchmark results on sotabench come from the -[Papers With Code](http://www.paperswithcode.com) resource. There may be cases where you are -evaluating a model whose corresponding paper does not yet have -results on Papers With Code (and sotabench as well). - -You can add paper results by specifying a dictionary in the ```paper_results``` argument of the -benchmark method. 
For example, below we have added new paper results for a 'MyNet' model with -Top 1 Accuracy of 75.4% and Top 5 Accuracy of 85.65%: - - from torchbench.image_classification import ImageNet - from torchvision.models.mynet import mynet101 - import torchvision.transforms as transforms - import PIL - - # Define the transforms need to convert ImageNet data to expected model input - normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) - input_transform = transforms.Compose([ - transforms.Resize(256, PIL.Image.BICUBIC), - transforms.CenterCrop(224), - transforms.ToTensor(), - normalize, - ]) - - mynet_paper_results = {'Top 1 Accuracy': 0.754, 'Top 5 Accuracy': 0.8565} - - # Run the benchmark - ImageNet.benchmark( - model=mynet101(pretrained=True), - paper_model_name='MyNet', - paper_arxiv_id='2099.05431', - paper_results=mynet_paper_results, - input_transform=input_transform, - batch_size=256, - num_gpu=1 - ) - -Make sure that the metric names match those on the existing benchmark page on -[sotabench](http://www.sotabench.com) if you want comparable results to other models. For -example, ImageNet has the metrics 'Top 1 Accuracy' and 'Top 5 Accuracy'. \ No newline at end of file diff --git a/docs/03.model-naming-convention.md b/docs/03.model-naming-convention.md deleted file mode 100644 index 1d3c26a..0000000 --- a/docs/03.model-naming-convention.md +++ /dev/null @@ -1,46 +0,0 @@ -# Model Naming Convention - -First, check the benchmark paper leaderboard page, e.g. [Imagenet](https://sotabench.com/benchmark/imagenet) -for the existing model names. These model names come from the paper so are used as the standard naming convention. For example, if you are implementing the -EfficientNet-B0 model then you should put ```paper_model_name='EfficientNet-B0'``` as your -model name to link to the leaderboard. - -If you put in an incorrect model name, the torchbench API will raise an error and inform you of the -mistake, and the valid model names available for that benchmark and paper. For example: - - - from efficientnet_pytorch.model import EfficientNet - from sotabench.image_classification import ImageNet - import torchvision.transforms as transforms - import PIL - - # Define Transforms - normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) - b0_input_transform = transforms.Compose([ - transforms.Resize(256, PIL.Image.BICUBIC), - transforms.CenterCrop(224), - transforms.ToTensor(), - normalize, - ]) - - # Run Evaluation - ImageNet.benchmark( - model=EfficientNet.from_pretrained(model_name='efficientnet-b0'), - paper_model_name='FakeModelName', - paper_arxiv_id='1905.11946', - paper_pwc_id='efficientnet-rethinking-model-scaling-for', - input_transform=b0_input_transform, - batch_size=256, - num_gpu=1 - ) - -Will raise: - - ValueError: you entered an incorrect model name 'FakeModelName' for the paper - 'EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks' and - benchmark 'ImageNet'. Please use a valid model. Your options are: - - ['EfficientNet-B0', 'EfficientNet-B1', 'EfficientNet-B2', 'EfficientNet-B3', 'EfficientNet-B4', 'EfficientNet-B5', 'EfficientNet-B6', 'EfficientNet-B7'] - - If the model you are trying to evaluate is not listed above, then you can add the missing paper results through specifying a 'paper_results' argument. For details, - please read the sotabench documentation on adding paper results. 
\ No newline at end of file diff --git a/docs/Makefile b/docs/Makefile deleted file mode 100644 index d4bb2cb..0000000 --- a/docs/Makefile +++ /dev/null @@ -1,20 +0,0 @@ -# Minimal makefile for Sphinx documentation -# - -# You can set these variables from the command line, and also -# from the environment for the first two. -SPHINXOPTS ?= -SPHINXBUILD ?= sphinx-build -SOURCEDIR = . -BUILDDIR = _build - -# Put it first so that "make" without argument is like "make help". -help: - @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) - -.PHONY: help Makefile - -# Catch-all target: route all unknown targets to Sphinx using the new -# "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS). -%: Makefile - @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) diff --git a/docs/api/01.datasets.md b/docs/api/01.datasets.md deleted file mode 100644 index ba3417a..0000000 --- a/docs/api/01.datasets.md +++ /dev/null @@ -1,18 +0,0 @@ -# Datasets - -## Ade 20K - -```eval_rst - -.. automodule:: torchbench.datasets.ade20k - :members: -``` - - -## Camvid - -```eval_rst - -.. automodule:: torchbench.datasets.camvid - :members: -``` diff --git a/docs/api/02.image-classification.md b/docs/api/02.image-classification.md deleted file mode 100644 index 453111f..0000000 --- a/docs/api/02.image-classification.md +++ /dev/null @@ -1,9 +0,0 @@ -# Image Classification - -## ImageNet - -```eval_rst - -.. automodule:: torchbench.image_classification.imagenet - :members: -``` diff --git a/docs/api/index.md b/docs/api/index.md deleted file mode 100644 index 64f330f..0000000 --- a/docs/api/index.md +++ /dev/null @@ -1,8 +0,0 @@ -# API Documentation - -```eval_rst -.. toctree:: - :maxdepth: 3 - - 02.image-classification.md -``` diff --git a/docs/conf.py b/docs/conf.py deleted file mode 100644 index e211c72..0000000 --- a/docs/conf.py +++ /dev/null @@ -1,116 +0,0 @@ -# Configuration file for the Sphinx documentation builder. -# -# This file only contains a selection of the most common options. For a full -# list see the documentation: -# http://www.sphinx-doc.org/en/master/config - -# -- Path setup -------------------------------------------------------------- - -# If extensions (or modules to document with autodoc) are in another directory, -# add these directories to sys.path here. If the directory is relative to the -# documentation root, use os.path.abspath to make it absolute, like shown here. -# -# import os -# import sys -# sys.path.insert(0, os.path.abspath('.')) - - -# -- Project information ----------------------------------------------------- -import os -import sys -from datetime import datetime -from recommonmark.transform import AutoStructify - -# Add project root to pythonpath -sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) - -from torchbench.version import __version__ - - -project = "torchbench" -author = "Robert Stojnic " -description = ( - "Easily benchmark Machine Learning models on selected tasks and datasets" - " - with PyTorch" -) -copyright = f"{datetime.now():%Y}, {author}" - - -# The full version, including alpha/beta/rc tags -version = __version__ -release = __version__ - - -# -- General configuration --------------------------------------------------- - -# Add any Sphinx extension module names here, as strings. They can be -# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom -# ones. 
-extensions = [ - "sphinx.ext.autodoc", - "sphinx.ext.todo", - "sphinx.ext.mathjax", - "sphinx.ext.ifconfig", - "sphinx.ext.viewcode", - "sphinx.ext.napoleon", - "sphinx.ext.githubpages", - "recommonmark", -] - -# Add any paths that contain templates here, relative to this directory. -templates_path = ["_templates"] - -# The suffix(es) of source filenames. -# You can specify multiple suffix as a list of string: -# -# source_suffix = ['.rst', '.md'] -source_suffix = ".md" - - -# List of patterns, relative to source directory, that match files and -# directories to ignore when looking for source files. -# This pattern also affects html_static_path and html_extra_path. -exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"] - - -# -- Options for HTML output ------------------------------------------------- - -# The theme to use for HTML and HTML Help pages. See the documentation for -# a list of builtin themes. -# -html_theme = "sphinx_rtd_theme" - -# Add any paths that contain custom static files (such as style sheets) here, -# relative to this directory. They are copied after the builtin static files, -# so a file named "default.css" will overwrite the builtin "default.css". -html_static_path = ["_static"] - -# -- Extension configuration ------------------------------------------------- - - -# autodoc - -autoclass_content = "both" -autodoc_default_options = { - "member-order": "bysource", - "special-members": "__init__", - "undoc-members": True, -} - - -# recommonmark - - -def setup(app): - app.add_config_value( - "recommonmark_config", - { - "enable_auto_toc_tree": True, - "auto_toc_tree_section": True, - "enable_math": True, - "enable_inline_math": True, - "enable_eval_rst": True, - }, - True, - ) - app.add_transform(AutoStructify) diff --git a/docs/docs/coco.md b/docs/docs/coco.md new file mode 100644 index 0000000..583c781 --- /dev/null +++ b/docs/docs/coco.md @@ -0,0 +1,343 @@ +# COCO + +![COCO Dataset Examples](img/coco.jpg) + +You can view the COCO minival leaderboard [here](https://sotabench.com/benchmarks/object-detection-on-coco-minival). + +!!! Warning + Object detection APIs in PyTorch are not very standardised across repositories, meaning that + it may require a lot of glue to get them working with this evaluation procedure (which is based on torchvision). + + **For easier COCO integration with sotabench it is recommended to use the more general API [sotabencheval](https://paperswithcode.github.io/sotabench-eval/).** + +## Getting Started + +You'll need the following in the root of your repository: + +- `sotabench.py` file - contains benchmarking logic; the server will run this on each commit +- `requirements.txt` file - Python dependencies to be installed before running `sotabench.py` +- `sotabench_setup.sh` *(optional)* - any advanced dependencies or setup, e.g. compilation + +Once you connect your repository to [sotabench.com](https://www.sotabench.com), the platform +will run your `sotabench.py` file whenever you commit to master. + +We now show how to write the `sotabench.py` file to evaluate a PyTorch object model with +the torchbench library, and to allow your results to be recorded and reported for the community. 
+
+## The COCO Evaluation Class
+
+You can import the evaluation class from the following module:
+
+``` python
+from torchbench.object_detection import COCO
+```
+
+The `COCO` class contains several components used in the evaluation, such as the `dataset`:
+
+``` python
+COCO.dataset
+# torchbench.datasets.coco.CocoDetection
+```
+
+And some default arguments used for evaluation (which can be overridden):
+
+``` python
+COCO.transforms
+# Compose([ConvertCocoPolysToMask(), ToTensor()])
+
+COCO.send_data_to_device
+# coco_data_to_device
+
+COCO.collate_fn
+# coco_collate_fn
+
+COCO.model_output_transform
+# coco_output_transform
+```
+
+We will explain these different options shortly and how you can manipulate them to get the
+evaluation logic to play nicely with your model.
+
+An evaluation call - which performs evaluation and, if run on the sotabench.com server, saves the results -
+looks like the following, via the `benchmark()` method:
+
+``` python
+import torchvision
+model = torchvision.models.detection.__dict__['maskrcnn_resnet50_fpn'](num_classes=91, pretrained=True)
+
+COCO.benchmark(
+    model=model,
+    paper_model_name='Mask R-CNN (ResNet-50-FPN)',
+    paper_arxiv_id='1703.06870'
+)
+```
+
+These are the key arguments: the `model`, which is usually an `nn.Module`-type object but, more generally,
+is any object with a `forward` method that takes in input data and outputs predictions.
+`paper_model_name` refers to the name of the model and `paper_arxiv_id` (optionally) refers to
+the paper from which the model originated. If these two arguments match a recorded paper result,
+then sotabench.com will match your model with the paper and compare your code's results with the
+reported results in the paper.
+
+## A full `sotabench.py` example
+
+Below is an example for the [torchvision](https://github.com/pytorch/vision/tree/master/torchvision)
+repository benchmarking a Mask R-CNN model:
+
+
+``` python
+from torchbench.object_detection import COCO
+from torchbench.utils import send_model_to_device
+from torchbench.object_detection.transforms import Compose, ConvertCocoPolysToMask, ToTensor
+import torchvision
+import PIL
+
+def coco_data_to_device(input, target, device: str = "cuda", non_blocking: bool = True):
+    input = list(inp.to(device=device, non_blocking=non_blocking) for inp in input)
+    target = [{k: v.to(device=device, non_blocking=non_blocking) for k, v in t.items()} for t in target]
+    return input, target
+
+def coco_collate_fn(batch):
+    return tuple(zip(*batch))
+
+def coco_output_transform(output, target):
+    output = [{k: v.to("cpu") for k, v in t.items()} for t in output]
+    return output, target
+
+transforms = Compose([ConvertCocoPolysToMask(), ToTensor()])
+
+model = torchvision.models.detection.__dict__['maskrcnn_resnet50_fpn'](num_classes=91, pretrained=True)
+
+# Run the benchmark
+COCO.benchmark(
+    model=model,
+    paper_model_name='Mask R-CNN (ResNet-50-FPN)',
+    paper_arxiv_id='1703.06870',
+    transforms=transforms,
+    model_output_transform=coco_output_transform,
+    send_data_to_device=coco_data_to_device,
+    collate_fn=coco_collate_fn,
+    batch_size=8,
+    num_gpu=1
+)
+```
+
+## `COCO.benchmark()` Arguments
+
+The source code for the COCO evaluation method can be found [here](https://github.com/paperswithcode/torchbench/blob/develop/torchbench/object_detection/coco.py).
+We now explain each argument.
+
+### model
+
+**a PyTorch module (e.g.
an ``nn.Module`` object) that takes in COCO data and outputs detections.**
+
+For example, from the torchvision repository:
+
+``` python
+import torchvision
+model = torchvision.models.detection.__dict__['maskrcnn_resnet50_fpn'](num_classes=91, pretrained=True)
+```
+
+### model_description
+
+**(str, optional): Optional model description.**
+
+For example:
+
+``` python
+model_description = 'Using ported TensorFlow weights'
+```
+
+### input_transform
+
+**Composing the transforms used to transform the input data (the images), e.g.
+resizing (e.g. ``transforms.Resize``), center cropping, conversion to tensor, and normalization.**
+
+For example:
+
+``` python
+import torchvision.transforms as transforms
+input_transform = transforms.Compose([
+    transforms.Resize(512, PIL.Image.BICUBIC),
+    transforms.ToTensor(),
+])
+```
+
+### target_transform
+
+**Composing the transforms used to transform the target data**
+
+### transforms
+
+**Composing the transforms used to transform the input data (the images) and the target data (the labels)
+in a dual fashion - for example resizing the pair of data jointly.**
+
+Below is an example; note that
+`__call__` takes in two arguments and returns two arguments (ordinary `torchvision` transforms
+return one result).
+
+``` python
+from torchvision.transforms import functional as F
+
+class Compose(object):
+    def __init__(self, transforms):
+        self.transforms = transforms
+
+    def __call__(self, image, target):
+        for t in self.transforms:
+            image, target = t(image, target)
+        return image, target
+
+class ToTensor(object):
+    def __call__(self, image, target):
+        image = F.to_tensor(image)
+        return image, target
+
+class ImageResize(object):
+    def __init__(self, resize_shape):
+        self.resize_shape = resize_shape
+
+    def __call__(self, image, target):
+        image = F.resize(image, self.resize_shape)
+        return image, target
+
+transforms = Compose([ImageResize((512, 512)), ToTensor()])
+```
+
+Note that the default transforms are:
+
+``` python
+from torchbench.object_detection.utils import Compose, ConvertCocoPolysToMask, ToTensor
+transforms = Compose([ConvertCocoPolysToMask(), ToTensor()])
+```
+
+`ConvertCocoPolysToMask` comes from the torchvision reference implementation and transforms
+the inputs into the format the model expects. You can pass whatever transforms
+you need to make the dataset work with your model.
+
+### model_output_transform
+
+**(callable, optional): An optional function
+    that takes in model output (after being passed through your
+    ``model`` forward pass) and transforms it. Afterwards, the
+    output will be passed into an evaluation function.**
+
+The model output transform is a function that you can pass in to transform the model output
+after the data has been passed through the model. This is useful if you have to do further
+processing steps after inference to get the predictions in the right format for evaluation.
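+
+As a hedged sketch, suppose your detector's forward pass returns, per image, a dict with
+non-standard keys (`bboxes`, `class_ids` and `confidences` below are made-up names, not
+torchbench API); a custom transform can map these onto the expected keys:
+
+``` python
+def my_output_transform(output, target):
+    # Hypothetical adapter: map the model's own keys onto the
+    # 'boxes'/'labels'/'scores' keys the COCO evaluation expects -
+    # one dict per image, with all values as CPU tensors.
+    transformed = []
+    for pred in output:
+        transformed.append({
+            "boxes": pred["bboxes"].cpu(),         # (N, 4) boxes
+            "labels": pred["class_ids"].cpu(),     # (N,) category ids
+            "scores": pred["confidences"].cpu(),   # (N,) confidences
+        })
+    return transformed, target
+```
+
+You would then pass this via `COCO.benchmark(..., model_output_transform=my_output_transform)`.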
+
+The model evaluation loop for each batch, from [utils.py](https://github.com/paperswithcode/torchbench/blob/db9fbdf5567350b8316336ca4f3fd27a04999347/torchbench/object_detection/utils.py#L189),
+is as follows:
+
+``` python
+with torch.no_grad():
+    for i, (input, target) in enumerate(iterator):
+        input, target = send_data_to_device(input, target, device=device)
+        original_output = model(input)
+        output, target = model_output_transform(original_output, target)
+        result = {
+            tar["image_id"].item(): out for tar, out in zip(target, output)
+        }
+        coco_evaluator.update(result)
+```
+
+We can see `model_output_transform` in use; the `output` is then
+transformed into a dictionary with image ids as keys and outputs as values.
+
+The expected output of `model_output_transform` is a list of dictionaries (length = batch_size),
+where each dictionary contains keys for 'boxes', 'labels', 'scores', 'masks', and each value is
+of the `torch.tensor` type.
+
+`result` is then a dictionary whose keys are the image ids and whose values are dictionaries
+with the predictions (boxes, labels, scores, ... as keys).
+
+### collate_fn
+
+**How the dataset is collated - an optional callable passed into the DataLoader**
+
+As an example, the default collate function is:
+
+``` python
+def coco_collate_fn(batch):
+    return tuple(zip(*batch))
+```
+
+### send_data_to_device
+
+**An optional function specifying how the data is sent to a device**
+
+As an example, the COCO default is:
+
+``` python
+def coco_data_to_device(input, target, device: str = "cuda", non_blocking: bool = True):
+    input = list(inp.to(device=device, non_blocking=non_blocking) for inp in input)
+    target = [{k: v.to(device=device, non_blocking=non_blocking) for k, v in t.items()} for t in target]
+    return input, target
+```
+
+
+### data_root
+
+**data_root (str): The location of the COCO dataset - change this
+    parameter when evaluating locally if your COCO data is
+    located in a different folder (or alternatively if you want to
+    download to an alternative location).**
+
+Note that this parameter will be overridden when the evaluation is performed on the server,
+so it is solely for your local use.
+
+### num_workers
+
+**num_workers (int): The number of workers to use for the DataLoader.**
+
+### batch_size
+
+**batch_size (int) : The batch_size to use for evaluation; if you get
+    memory errors, then reduce this (half each time) until your
+    model fits onto the GPU.**
+
+### paper_model_name
+
+**paper_model_name (str, optional): The name of the model from the
+    paper - if you want to link your build to a machine learning
+    paper. See the COCO benchmark page for model names,
+    https://www.sotabench.com/benchmark/coco-minival, e.g. on the paper
+    leaderboard tab.**
+
+### paper_arxiv_id
+
+**paper_arxiv_id (str, optional): Optional linking to ArXiv if you
+    want to link to papers on the leaderboard; put in the
+    corresponding paper's ArXiv ID, e.g. '1611.05431'.**
+
+### paper_pwc_id
+
+**paper_pwc_id (str, optional): Optional linking to Papers With Code;
+    put in the corresponding papers with code URL slug, e.g.
+    'u-gat-it-unsupervised-generative-attentional'**
+
+### paper_results
+
+**paper_results (dict, optional) : If the paper you are reproducing
+    does not have model results on sotabench.com, you can specify
+    the paper results yourself through this argument, where keys
+    are metric names, values are metric values. e.g.:**
+
+``` python
+{'box AP': 0.349, 'AP50': 0.592, ...}
+```
+
+Ensure that the metric names match those on the sotabench
+leaderboard - for COCO these should be 'box AP', 'AP50',
+'AP75', 'APS', 'APM', 'APL'.
+
+### pytorch_hub_url
+
+**pytorch_hub_url (str, optional): Optional linking to PyTorch Hub
+    url if your model is linked there; e.g.:
+    'nvidia_deeplearningexamples_waveglow'.**
+
+## Need More Help?
+
+Head on over to the [Computer Vision](https://forum.sotabench.com/c/cv) section of the sotabench
+forums if you have any questions or difficulties.
diff --git a/docs/docs/imagenet.md b/docs/docs/imagenet.md
new file mode 100644
index 0000000..d75a5db
--- /dev/null
+++ b/docs/docs/imagenet.md
@@ -0,0 +1,277 @@
+# ImageNet
+
+![ImageNet Dataset Examples](img/imagenet.jpeg)
+
+You can view the ImageNet leaderboard [here](https://sotabench.com/benchmarks/image-classification-on-imagenet).
+
+## Getting Started
+
+You'll need the following in the root of your repository:
+
+- `sotabench.py` file - contains benchmarking logic; the server will run this on each commit
+- `requirements.txt` file - Python dependencies to be installed before running `sotabench.py`
+- `sotabench_setup.sh` *(optional)* - any advanced dependencies or setup, e.g. compilation
+
+Once you connect your repository to [sotabench.com](https://www.sotabench.com), the platform
+will run your `sotabench.py` file whenever you commit to master.
+
+We now show how to write the `sotabench.py` file to evaluate a PyTorch image classification model with
+the torchbench library, and to allow your results to be recorded and reported for the community.
+
+## The ImageNet Evaluation Class
+
+You can import the evaluation class from the following module:
+
+``` python
+from torchbench.image_classification import ImageNet
+```
+
+The `ImageNet` class contains several components used in the evaluation, such as the `dataset`:
+
+``` python
+ImageNet.dataset
+# torchvision.datasets.ImageNet
+```
+
+And some default arguments used for evaluation (which can be overridden):
+
+``` python
+ImageNet.normalize
+# Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
+
+ImageNet.input_transform
+# Compose(
+#     Resize(size=256, interpolation=PIL.Image.BILINEAR)
+#     CenterCrop(size=(224, 224))
+#     ToTensor()
+#     Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
+# )
+
+ImageNet.send_data_to_device
+# default_data_to_device
+```
+
+We will explain these different options shortly and how you can manipulate them to get the
+evaluation logic to play nicely with your model.
+
+An evaluation call - which performs evaluation and, if run on the sotabench.com server, saves the results -
+looks like the following, via the `benchmark()` method:
+
+``` python
+from torchvision.models.resnet import resnext101_32x8d
+
+ImageNet.benchmark(
+    model=resnext101_32x8d(pretrained=True),
+    paper_model_name='ResNeXt-101-32x8d',
+    paper_arxiv_id='1611.05431'
+)
+```
+
+These are the key arguments: the `model`, which is usually an `nn.Module`-type object but, more generally,
+is any object with a `forward` method that takes in input data and outputs predictions.
+`paper_model_name` refers to the name of the model and `paper_arxiv_id` (optionally) refers to
+the paper from which the model originated. If these two arguments match a recorded paper result,
+then sotabench.com will match your model with the paper and compare your code's results with the
+reported results in the paper.
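+
+Each call to `benchmark()` records one model, so you can evaluate several models from the same
+repository by calling it more than once in your `sotabench.py`. A sketch using two torchvision
+ResNeXt variants from the same paper (check the leaderboard tab for the exact model names):
+
+``` python
+from torchbench.image_classification import ImageNet
+from torchvision.models.resnet import resnext50_32x4d, resnext101_32x8d
+
+# Benchmark the first model
+ImageNet.benchmark(
+    model=resnext101_32x8d(pretrained=True),
+    paper_model_name='ResNeXt-101-32x8d',
+    paper_arxiv_id='1611.05431'
+)
+
+# Benchmark a second model from the same paper
+ImageNet.benchmark(
+    model=resnext50_32x4d(pretrained=True),
+    paper_model_name='ResNeXt-50-32x4d',
+    paper_arxiv_id='1611.05431'
+)
+```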
+
+## A full `sotabench.py` example
+
+Below is an example for the [torchvision](https://github.com/pytorch/vision/tree/master/torchvision)
+repository benchmarking a ResNeXt-101-32x8d model:
+
+``` python
+from torchbench.image_classification import ImageNet
+from torchvision.models.resnet import resnext101_32x8d
+import torchvision.transforms as transforms
+import PIL
+
+# Define the transforms needed to convert ImageNet data to expected
+# model input
+normalize = transforms.Normalize(
+    mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]
+)
+input_transform = transforms.Compose([
+    transforms.Resize(256, PIL.Image.BICUBIC),
+    transforms.CenterCrop(224),
+    transforms.ToTensor(),
+    normalize,
+])
+
+# Run the benchmark
+ImageNet.benchmark(
+    model=resnext101_32x8d(pretrained=True),
+    paper_model_name='ResNeXt-101-32x8d',
+    paper_arxiv_id='1611.05431',
+    input_transform=input_transform,
+    batch_size=256,
+    num_gpu=1
+)
+```
+
+## `ImageNet.benchmark()` Arguments
+
+The source code for the ImageNet evaluation method can be found [here](https://github.com/paperswithcode/torchbench/blob/develop/torchbench/image_classification/imagenet.py).
+We now explain each argument.
+
+### model
+
+**a PyTorch module (e.g. an ``nn.Module`` object) that takes in ImageNet data and outputs class predictions.**
+
+For example, from the torchvision repository:
+
+``` python
+from torchvision.models.resnet import resnext101_32x8d
+model = resnext101_32x8d(pretrained=True)
+```
+
+### model_description
+
+**(str, optional): Optional model description.**
+
+For example:
+
+``` python
+model_description = 'Using ported TensorFlow weights'
+```
+
+### input_transform
+
+**Composing the transforms used to transform the input data (the images), e.g.
+resizing (e.g. ``transforms.Resize``), center cropping, conversion to tensor, and normalization.**
+
+For example:
+
+``` python
+import torchvision.transforms as transforms
+normalize = transforms.Normalize(
+    mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]
+)
+input_transform = transforms.Compose([
+    transforms.Resize(256, PIL.Image.BICUBIC),
+    transforms.CenterCrop(224),
+    transforms.ToTensor(),
+    normalize,
+])
+```
+
+### target_transform
+
+**Composing the transforms used to transform the target data**
+
+### model_output_transform
+
+**(callable, optional): An optional function
+    that takes in model output (after being passed through your
+    ``model`` forward pass) and transforms it. Afterwards, the
+    output will be passed into an evaluation function.**
+
+The model output transform is a function that you can pass in to transform the model output
+after the data has been passed through the model. This is useful if you have to do further
+processing steps after inference to get the predictions in the right format for evaluation.
+
+Most PyTorch models for Image Classification on ImageNet don't need to use this argument.
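+
+If yours does, here is a minimal sketch for a hypothetical model whose forward pass returns a
+`(logits, aux_logits)` tuple rather than a single tensor:
+
+``` python
+def my_output_transform(output, target, model=None):
+    # Hypothetical adapter: keep only the main logits so that the
+    # benchmark receives a 2D tensor of shape (batch_size, 1000).
+    logits, _ = output
+    return logits
+```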
+
+The model evaluation loop for each batch, from [utils.py](https://github.com/paperswithcode/torchbench/blob/db9fbdf5567350b8316336ca4f3fd27a04999347/torchbench/image_classification/utils.py#L189),
+is as follows:
+
+``` python
+with torch.no_grad():
+    for i, (input, target) in enumerate(iterator):
+
+        input, target = send_data_to_device(input, target, device=device)
+        output = model(input)
+
+        if model_output_transform is not None:
+            output = model_output_transform(output, target, model=model)
+
+        check_metric_inputs(output, target, test_loader.dataset, i)
+        prec1, prec5 = accuracy(output, target, topk=(1, 5))
+```
+
+Model output (following `model.forward()` and optionally `model_output_transform`) should be a 2D
+`torch.Tensor` containing the model output; the first dimension should be the output for each example
+(length batch_size) and the second dimension should be the output for each class in ImageNet (length 1000).
+
+### send_data_to_device
+
+**An optional function specifying how the data is sent to a device**
+
+As an example, the default is:
+
+``` python
+
+def default_data_to_device(input, target=None, device: str = "cuda", non_blocking: bool = True):
+    """Sends data output from a PyTorch Dataloader to the device."""
+
+    input = input.to(device=device, non_blocking=non_blocking)
+
+    if target is not None:
+        target = target.to(device=device, non_blocking=non_blocking)
+
+    return input, target
+```
+
+### data_root
+
+**data_root (str): The location of the ImageNet dataset - change this
+    parameter when evaluating locally if your ImageNet data is
+    located in a different folder (or alternatively if you want to
+    download to an alternative location).**
+
+Note that this parameter will be overridden when the evaluation is performed on the server,
+so it is solely for your local use.
+
+### num_workers
+
+**num_workers (int): The number of workers to use for the DataLoader.**
+
+### batch_size
+
+**batch_size (int) : The batch_size to use for evaluation; if you get
+    memory errors, then reduce this (half each time) until your
+    model fits onto the GPU.**
+
+### paper_model_name
+
+**paper_model_name (str, optional): The name of the model from the
+    paper - if you want to link your build to a machine learning
+    paper. See the ImageNet benchmark page for model names,
+    https://sotabench.com/benchmarks/image-classification-on-imagenet, e.g. on the paper
+    leaderboard tab.**
+
+### paper_arxiv_id
+
+**paper_arxiv_id (str, optional): Optional linking to ArXiv if you
+    want to link to papers on the leaderboard; put in the
+    corresponding paper's ArXiv ID, e.g. '1611.05431'.**
+
+### paper_pwc_id
+
+**paper_pwc_id (str, optional): Optional linking to Papers With Code;
+    put in the corresponding papers with code URL slug, e.g.
+    'u-gat-it-unsupervised-generative-attentional'**
+
+### paper_results
+
+**paper_results (dict, optional) : If the paper you are reproducing
+    does not have model results on sotabench.com, you can specify
+    the paper results yourself through this argument, where keys
+    are metric names, values are metric values. e.g.:**
+
+``` python
+{'Top 1 Accuracy': 0.543, 'Top 5 Accuracy': 0.654}
+```
+
+Ensure that the metric names match those on the sotabench
+leaderboard - for ImageNet these should be 'Top 1 Accuracy' and
+'Top 5 Accuracy'.
+
+### pytorch_hub_url
+
+**pytorch_hub_url (str, optional): Optional linking to PyTorch Hub
+    url if your model is linked there; e.g.:
+    'nvidia_deeplearningexamples_waveglow'.**
+
+## Need More Help?
+
+Head on over to the [Computer Vision](https://forum.sotabench.com/c/cv) section of the sotabench
+forums if you have any questions or difficulties.
diff --git a/docs/docs/img/ade20k.png b/docs/docs/img/ade20k.png
new file mode 100644
index 0000000..7a7be75
Binary files /dev/null and b/docs/docs/img/ade20k.png differ
diff --git a/docs/docs/img/banner.png b/docs/docs/img/banner.png
new file mode 100644
index 0000000..d1ad1de
Binary files /dev/null and b/docs/docs/img/banner.png differ
diff --git a/docs/docs/img/coco.jpg b/docs/docs/img/coco.jpg
new file mode 100644
index 0000000..c1939b5
Binary files /dev/null and b/docs/docs/img/coco.jpg differ
diff --git a/docs/docs/img/connect.png b/docs/docs/img/connect.png
new file mode 100644
index 0000000..a7430a3
Binary files /dev/null and b/docs/docs/img/connect.png differ
diff --git a/docs/docs/img/connect2.png b/docs/docs/img/connect2.png
new file mode 100644
index 0000000..b25daa7
Binary files /dev/null and b/docs/docs/img/connect2.png differ
diff --git a/docs/docs/img/imagenet.jpeg b/docs/docs/img/imagenet.jpeg
new file mode 100644
index 0000000..3002ce1
Binary files /dev/null and b/docs/docs/img/imagenet.jpeg differ
diff --git a/docs/docs/img/pascalvoc2012.png b/docs/docs/img/pascalvoc2012.png
new file mode 100644
index 0000000..68c6832
Binary files /dev/null and b/docs/docs/img/pascalvoc2012.png differ
diff --git a/docs/images/torchbench.png b/docs/docs/img/torchbench.png
similarity index 100%
rename from docs/images/torchbench.png
rename to docs/docs/img/torchbench.png
diff --git a/docs/docs/index.md b/docs/docs/index.md
new file mode 100644
index 0000000..6476b71
--- /dev/null
+++ b/docs/docs/index.md
@@ -0,0 +1,90 @@
+# Welcome to torchbench!
+
+![torchbench](img/banner.png)
+
+You have reached the docs for the [torchbench](https://github.com/paperswithcode/torchbench) library. This library contains a collection of deep learning benchmarks you can use to
+benchmark your models, optimized for the PyTorch framework. It can be used in conjunction with the
+[sotabench.com](http://www.sotabench.com) website to record results for models, so the community
+can compare model performance on different tasks, and as a continuous-integration-style
+service that benchmarks the models in your repository on each commit.
+
+**torchbench** is a framework-optimized library, meaning it is designed to take advantage of PyTorch-based features
+and standardisation. If this is too constraining, you can use alternative libraries that are framework-independent,
+e.g. [sotabencheval](https://paperswithcode.github.io/sotabench-eval/).
+
+## Getting Started : Benchmarking on ImageNet
+
+**Step One : Create a sotabench.py file in the root of your repository**
+
+This contains a call to your model, metadata about your model, and options for evaluation such as the dataset
+processing logic and data loader settings (e.g. the batch size).
Below is an example for the [torchvision](https://github.com/pytorch/vision)
+repository:
+
+``` python
+from torchbench.image_classification import ImageNet
+from torchvision.models.resnet import resnext101_32x8d
+import torchvision.transforms as transforms
+import PIL
+
+# Define the transforms needed to convert ImageNet data to expected model input
+normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
+                                 std=[0.229, 0.224, 0.225])
+input_transform = transforms.Compose([
+    transforms.Resize(256, PIL.Image.BICUBIC),
+    transforms.CenterCrop(224),
+    transforms.ToTensor(),
+    normalize,
+])
+
+# Run the benchmark
+ImageNet.benchmark(
+    model=resnext101_32x8d(pretrained=True),
+    paper_model_name='ResNeXt-101-32x8d',
+    paper_arxiv_id='1611.05431',
+    input_transform=input_transform,
+    batch_size=256,
+    num_gpu=1
+)
+```
+
+**Step Two : Run locally to verify that it works**
+
+```
+python sotabench.py
+```
+
+You can also run the logic in a Jupyter Notebook if that is your preferred workflow.
+
+To verify your benchmark will run and all parameters are correct you can use the included CLI checking tool:
+
+```
+$ sb check
+```
+
+**Step Three : Login and connect your repository to [sotabench](http://www.sotabench.com)**
+
+Create an account on [sotabench](http://www.sotabench.com), then head to your user page. Click the
+**Connect a GitHub repository** button:
+
+![Connect a GitHub repository](img/connect.png)
+
+Then follow the steps to connect the repositories that you wish to benchmark:
+
+![Connect repositories](img/connect2.png)
+
+After you connect your repository, the sotabench servers will re-evaluate your model on every commit,
+to ensure the model is working and results are up-to-date - including if you add additional models to the benchmark file.
+
+## Installation
+
+The library requires Python 3.6+. You can install via pip:
+
+```
+pip install torchbench
+```
+
+## Support
+
+If you get stuck you can head to our [Discourse](http://forum.sotabench.com) forum where you can ask
+questions on how to use the project. You can also find ideas for contributions,
+and work with others on exciting projects.
\ No newline at end of file
diff --git a/docs/docs/pascalvoc.md b/docs/docs/pascalvoc.md
new file mode 100644
index 0000000..4c48bf8
--- /dev/null
+++ b/docs/docs/pascalvoc.md
@@ -0,0 +1,348 @@
+# PASCAL VOC 2012
+
+![VOC Dataset Examples](img/pascalvoc2012.png)
+
+You can view the PASCAL VOC 2012 leaderboard [here](https://sotabench.com/benchmarks/semantic-segmentation-on-pascal-voc-2012).
+
+!!! Warning
+    Semantic segmentation APIs in PyTorch are not very standardised across repositories, meaning that
+    it may require a lot of glue to get them working with this evaluation procedure (which is based on torchvision).
+
+    **For easier VOC integration with sotabench it is recommended to use the more general API [sotabencheval](https://paperswithcode.github.io/sotabench-eval/).**
+
+## Getting Started
+
+You'll need the following in the root of your repository:
+
+- `sotabench.py` file - contains benchmarking logic; the server will run this on each commit
+- `requirements.txt` file - Python dependencies to be installed before running `sotabench.py`
+- `sotabench_setup.sh` *(optional)* - any advanced dependencies or setup, e.g. compilation
+
+Once you connect your repository to [sotabench.com](https://www.sotabench.com), the platform
+will run your `sotabench.py` file whenever you commit to master.
+
+We now show how to write the `sotabench.py` file to evaluate a PyTorch semantic segmentation model with
+the torchbench library, and to allow your results to be recorded and reported for the community.
+
+## The VOC Evaluation Class
+
+You can import the evaluation class from the following module:
+
+``` python
+from torchbench.semantic_segmentation import PASCALVOC
+```
+
+The `PASCALVOC` class contains several components used in the evaluation, such as the `dataset`:
+
+``` python
+PASCALVOC.dataset
+# torchvision.datasets.voc.VOCSegmentation
+```
+
+And some default arguments used for evaluation (which can be overridden):
+
+``` python
+PASCALVOC.normalize
+# Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
+
+PASCALVOC.transforms
+# Compose([Resize((520, 480)), ToTensor(), normalize])
+
+PASCALVOC.send_data_to_device
+# default_data_to_device
+
+PASCALVOC.collate_fn
+# default_seg_collate_fn
+
+PASCALVOC.model_output_transform
+# default_seg_output_transform
+```
+
+We will explain these different options shortly and how you can manipulate them to get the
+evaluation logic to play nicely with your model.
+
+An evaluation call - which performs evaluation and, if run on the sotabench.com server, saves the results -
+looks like the following, via the `benchmark()` method:
+
+``` python
+from torchvision.models.segmentation import fcn_resnet101
+model = fcn_resnet101(num_classes=21, pretrained=True)
+
+PASCALVOC.benchmark(model=model,
+                    paper_model_name='FCN ResNet-101',
+                    paper_arxiv_id='1605.06211')
+```
+
+These are the key arguments: the `model`, which is usually an `nn.Module`-type object but, more generally,
+is any object with a `forward` method that takes in input data and outputs predictions.
+`paper_model_name` refers to the name of the model and `paper_arxiv_id` (optionally) refers to
+the paper from which the model originated. If these two arguments match a recorded paper result,
+then sotabench.com will match your model with the paper and compare your code's results with the
+reported results in the paper.
+
+
+## A full `sotabench.py` example
+
+Below is an example for the [torchvision](https://github.com/pytorch/vision/tree/master/torchvision)
+repository benchmarking a FCN ResNet-101 model:
+
+
+``` python
+from torchbench.semantic_segmentation import PASCALVOC
+from torchbench.semantic_segmentation.transforms import (
+    Normalize,
+    Resize,
+    ToTensor,
+    Compose,
+)
+from torchvision.models.segmentation import fcn_resnet101
+import torchvision.transforms as transforms
+import PIL
+
+def model_output_function(output, target):
+    return output['out'].argmax(1).flatten(), target.flatten()
+
+def seg_collate_fn(batch):
+    images, targets = list(zip(*batch))
+    # cat_list pads the tensors in the batch to a common size; it is
+    # provided by torchbench's semantic segmentation utilities
+    batched_imgs = cat_list(images, fill_value=0)
+    batched_targets = cat_list(targets, fill_value=255)
+    return batched_imgs, batched_targets
+
+model = fcn_resnet101(num_classes=21, pretrained=True)
+
+normalize = Normalize(
+    mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]
+)
+my_transforms = Compose([Resize((520, 480)), ToTensor(), normalize])
+
+PASCALVOC.benchmark(batch_size=32,
+                    model=model,
+                    transforms=my_transforms,
+                    model_output_transform=model_output_function,
+                    collate_fn=seg_collate_fn,
+                    paper_model_name='FCN ResNet-101',
+                    paper_arxiv_id='1605.06211')
+```
+
+## `PASCALVOC.benchmark()` Arguments
+
+The source code for the PASCALVOC evaluation method can be found [here](https://github.com/paperswithcode/torchbench/blob/develop/torchbench/semantic_segmentation/pascalvoc.py).
+We now explain each argument.
+
+### model
+
+**a PyTorch module (e.g.
an ``nn.Module`` object) that takes in VOC data and outputs segmentation predictions.**
+
+For example, from the torchvision repository:
+
+``` python
+from torchvision.models.segmentation import fcn_resnet101
+model = fcn_resnet101(num_classes=21, pretrained=True)
+```
+
+### model_description
+
+**(str, optional): Optional model description.**
+
+For example:
+
+``` python
+model_description = 'Using ported TensorFlow weights'
+```
+
+### input_transform
+
+**Composing the transforms used to transform the input data (the images), e.g.
+resizing (e.g. ``transforms.Resize``), center cropping, conversion to tensor, and normalization.**
+
+For example:
+
+``` python
+import torchvision.transforms as transforms
+input_transform = transforms.Compose([
+    transforms.Resize(512, PIL.Image.BICUBIC),
+    transforms.ToTensor(),
+])
+```
+
+### target_transform
+
+**Composing the transforms used to transform the target data**
+
+### transforms
+
+**Composing the transforms used to transform the input data (the images) and the target data (the labels)
+in a dual fashion - for example resizing the pair of data jointly.**
+
+Below is an example; note that
+`__call__` takes in two arguments and returns two arguments (ordinary `torchvision` transforms
+return one result).
+
+``` python
+from torchvision.transforms import functional as F
+
+class Compose(object):
+    def __init__(self, transforms):
+        self.transforms = transforms
+
+    def __call__(self, image, target):
+        for t in self.transforms:
+            image, target = t(image, target)
+        return image, target
+
+class ToTensor(object):
+    def __call__(self, image, target):
+        image = F.to_tensor(image)
+        return image, target
+
+class ImageResize(object):
+    def __init__(self, resize_shape):
+        self.resize_shape = resize_shape
+
+    def __call__(self, image, target):
+        image = F.resize(image, self.resize_shape)
+        return image, target
+
+transforms = Compose([ImageResize((512, 512)), ToTensor()])
+```
+
+Note that the default transforms are:
+
+``` python
+from torchbench.semantic_segmentation.transforms import (Normalize, Resize, ToTensor, Compose)
+normalize = Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
+transforms = Compose([Resize((520, 480)), ToTensor(), normalize])
+```
+
+### model_output_transform
+
+**(callable, optional): An optional function
+    that takes in model output (after being passed through your
+    ``model`` forward pass) and transforms it. Afterwards, the
+    output will be passed into an evaluation function.**
+
+The model output transform is a function that you can pass in to transform the model output
+after the data has been passed through the model. This is useful if you have to do further
+processing steps after inference to get the predictions in the right format for evaluation.
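+
+For instance, a minimal sketch for a hypothetical model whose forward pass returns a raw
+`(batch, num_classes, H, W)` tensor rather than torchvision's `{'out': ...}` dict:
+
+``` python
+def my_output_transform(output, target):
+    # Hypothetical adapter: take the per-pixel argmax over classes and
+    # flatten both predictions and targets to 1D tensors, which is the
+    # format the confusion-matrix evaluation expects.
+    return output.argmax(1).flatten(), target.flatten()
+```
+
+This would be passed via `model_output_transform=my_output_transform` in the `benchmark()` call.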
+
+The model evaluation loop for each batch, from [utils.py](https://github.com/paperswithcode/torchbench/blob/db9fbdf5567350b8316336ca4f3fd27a04999347/torchbench/semantic_segmentation/utils.py),
+is as follows:
+
+``` python
+with torch.no_grad():
+    for i, (input, target) in enumerate(iterator):
+        input, target = send_data_to_device(input, target, device=device)
+        output = model(input)
+        output, target = model_output_transform(output, target)
+        confmat.update(target, output)
+```
+
+The default `model_output_transform` is:
+
+``` python
+def default_seg_output_transform(output, target):
+    return output["out"].argmax(1).flatten(), target.flatten()
+```
+
+We can see that the `output` and `target` are flattened to 1D tensors and, in the case of the output,
+we take the highest-scoring class for each pixel. Each element in each tensor
+represents a pixel and contains a class (e.g. class 6); we compare the model
+predictions against the ground-truth labels pixel by pixel to calculate the accuracy.
+
+### collate_fn
+
+**How the dataset is collated - an optional callable passed into the DataLoader**
+
+As an example, the default collate function is:
+
+``` python
+def default_seg_collate_fn(batch):
+    images, targets = list(zip(*batch))
+    batched_imgs = cat_list(images, fill_value=0)
+    batched_targets = cat_list(targets, fill_value=255)
+    return batched_imgs, batched_targets
+```
+
+### send_data_to_device
+
+**An optional function specifying how the data is sent to a device**
+
+As an example, the PASCAL VOC default is:
+
+``` python
+
+def default_data_to_device(input, target=None, device: str = "cuda", non_blocking: bool = True):
+    """Sends data output from a PyTorch Dataloader to the device."""
+
+    input = input.to(device=device, non_blocking=non_blocking)
+
+    if target is not None:
+        target = target.to(device=device, non_blocking=non_blocking)
+
+    return input, target
+```
+
+### data_root
+
+**data_root (str): The location of the VOC dataset - change this
+    parameter when evaluating locally if your VOC data is
+    located in a different folder (or alternatively if you want to
+    download to an alternative location).**
+
+Note that this parameter will be overridden when the evaluation is performed on the server,
+so it is solely for your local use.
+
+### num_workers
+
+**num_workers (int): The number of workers to use for the DataLoader.**
+
+### batch_size
+
+**batch_size (int) : The batch_size to use for evaluation; if you get
+    memory errors, then reduce this (half each time) until your
+    model fits onto the GPU.**
+
+### paper_model_name
+
+**paper_model_name (str, optional): The name of the model from the
+    paper - if you want to link your build to a machine learning
+    paper. See the VOC benchmark page for model names,
+    https://sotabench.com/benchmarks/semantic-segmentation-on-pascal-voc-2012, e.g. on the paper
+    leaderboard tab.**
+
+### paper_arxiv_id
+
+**paper_arxiv_id (str, optional): Optional linking to ArXiv if you
+    want to link to papers on the leaderboard; put in the
+    corresponding paper's ArXiv ID, e.g. '1611.05431'.**
+
+### paper_pwc_id
+
+**paper_pwc_id (str, optional): Optional linking to Papers With Code;
+    put in the corresponding papers with code URL slug, e.g.
+    'u-gat-it-unsupervised-generative-attentional'**
+
+### paper_results
+
+**paper_results (dict, optional) : If the paper you are reproducing
+    does not have model results on sotabench.com, you can specify
+    the paper results yourself through this argument, where keys
+    are metric names, values are metric values. e.g.:**
+
+``` python
+{'Accuracy': 0.745, 'Mean IOU': 0.592}
+```
+
+Ensure that the metric names match those on the sotabench
+leaderboard - for VOC these should be 'Accuracy' and 'Mean IOU'.
+
+### pytorch_hub_url
+
+**pytorch_hub_url (str, optional): Optional linking to PyTorch Hub
+    url if your model is linked there; e.g.:
+    'nvidia_deeplearningexamples_waveglow'.**
+
+## Need More Help?
+
+Head on over to the [Computer Vision](https://forum.sotabench.com/c/cv) section of the sotabench
+forums if you have any questions or difficulties.
diff --git a/docs/images/01.pwcsearch.png b/docs/images/01.pwcsearch.png
deleted file mode 100644
index 9ec7237..0000000
Binary files a/docs/images/01.pwcsearch.png and /dev/null differ
diff --git a/docs/images/02.image_classification.png b/docs/images/02.image_classification.png
deleted file mode 100644
index 60f1288..0000000
Binary files a/docs/images/02.image_classification.png and /dev/null differ
diff --git a/docs/images/03.imagenet.png b/docs/images/03.imagenet.png
deleted file mode 100644
index c01b4a2..0000000
Binary files a/docs/images/03.imagenet.png and /dev/null differ
diff --git a/docs/images/04.codeformodel.png b/docs/images/04.codeformodel.png
deleted file mode 100644
index b9418bb..0000000
Binary files a/docs/images/04.codeformodel.png and /dev/null differ
diff --git a/docs/index.md b/docs/index.md
deleted file mode 100644
index c113b30..0000000
--- a/docs/index.md
+++ /dev/null
@@ -1,84 +0,0 @@
-# Welcome to the Torchbench Documentation!
-
-You have reached the docs for the **torchbench** library. Torchbench allows you to easily
-benchmark PyTorch models on selected tasks and datasets. It can be used in conjunction with the
-[sotabench](http://www.sotabench.com) website to record results for models, so the community
-can compare model performance on different tasks.
-
-## Getting Started : Benchmarking on ImageNet
-
-The core structure of sotabench is benchmark datasets organized by task. Below we'll describe
-the usage with the sotabench website, utilising an example on ImageNet.
-
-**Step One : Create a benchmark.py file in the root of your repository**
-
-Below you can see an example benchmark.py file added to the *torchvision* repository to test one of its models:
-
-    from torchbench.image_classification import ImageNet
-    from torchvision.models.resnet import resnext101_32x8d
-    import torchvision.transforms as transforms
-    import PIL
-
-    # Define the transforms need to convert ImageNet data to expected model input
-    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
-    input_transform = transforms.Compose([
-        transforms.Resize(256, PIL.Image.BICUBIC),
-        transforms.CenterCrop(224),
-        transforms.ToTensor(),
-        normalize,
-    ])
-
-    # Run the benchmark
-    ImageNet.benchmark(
-        model=resnext101_32x8d(pretrained=True),
-        paper_model_name='ResNeXt-101-32x8d',
-        paper_arxiv_id='1611.05431',
-        input_transform=input_transform,
-        batch_size=256,
-        num_gpu=1
-    )
-
-**Step Two : Run locally to verify that it works**
-
-    python benchmark.py
-
-You can also run the logic in a Jupyter Notebook if that is your preferred workflow.
+
+## Need More Help?
+
+Head on over to the [Computer Vision](https://forum.sotabench.com/c/cv) section of the sotabench
+forums if you have any questions or difficulties.
diff --git a/docs/images/01.pwcsearch.png b/docs/images/01.pwcsearch.png
deleted file mode 100644
index 9ec7237..0000000
Binary files a/docs/images/01.pwcsearch.png and /dev/null differ
diff --git a/docs/images/02.image_classification.png b/docs/images/02.image_classification.png
deleted file mode 100644
index 60f1288..0000000
Binary files a/docs/images/02.image_classification.png and /dev/null differ
diff --git a/docs/images/03.imagenet.png b/docs/images/03.imagenet.png
deleted file mode 100644
index c01b4a2..0000000
Binary files a/docs/images/03.imagenet.png and /dev/null differ
diff --git a/docs/images/04.codeformodel.png b/docs/images/04.codeformodel.png
deleted file mode 100644
index b9418bb..0000000
Binary files a/docs/images/04.codeformodel.png and /dev/null differ
diff --git a/docs/index.md b/docs/index.md
deleted file mode 100644
index c113b30..0000000
--- a/docs/index.md
+++ /dev/null
@@ -1,84 +0,0 @@
-# Welcome to the Torchbench Documentation!
-
-You have reached the docs for the **torchbench** library. Torchbench allows you to easily
-benchmark PyTorch models on selected tasks and datasets. It can be used in conjunction with the
-[sotabench](http://www.sotabench.com) website to record results for models, so the community
-can compare model performance on different tasks.
-
-## Getting Started : Benchmarking on ImageNet
-
-The core structure of sotabench is benchmark datasets organized by task. Below we'll describe
-the usage with the sotabench website, utilising an example on ImageNet.
-
-**Step One : Create a benchmark.py file in the root of your repository**
-
-Below you can see an example benchmark.py file added to the *torchvision* repository to test one of its models:
-
-    from torchbench.image_classification import ImageNet
-    from torchvision.models.resnet import resnext101_32x8d
-    import torchvision.transforms as transforms
-    import PIL
-
-    # Define the transforms need to convert ImageNet data to expected model input
-    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
-    input_transform = transforms.Compose([
-        transforms.Resize(256, PIL.Image.BICUBIC),
-        transforms.CenterCrop(224),
-        transforms.ToTensor(),
-        normalize,
-    ])
-
-    # Run the benchmark
-    ImageNet.benchmark(
-        model=resnext101_32x8d(pretrained=True),
-        paper_model_name='ResNeXt-101-32x8d',
-        paper_arxiv_id='1611.05431',
-        input_transform=input_transform,
-        batch_size=256,
-        num_gpu=1
-    )
-
-**Step Two : Run locally to verify that it works**
-
-    python benchmark.py
-
-You can also run the logic in a Jupyter Notebook if that is your preferred workflow.
-
-**Step Three : Login and connect your repository to [sotabench](http://www.sotabench.com)**
-
-After you connect your repository the website will re-evaluate your model on every commit, to ensure the model is working and results are up-to-date - including if you add additional models to the benchmark file.
-
-You can also use the library without the [sotabench](http://www.sotabench.com) website, by simply omitting step 3. In that case you also don't need to put in the paper details into the ```benchmark()``` method.
-
-## Installation
-
-The library requires Python 3.6+. You can install via pip:
-
-    pip install torchbench
-
-## Contents
-
-```eval_rst
-.. toctree::
-    :maxdepth: 2
-
-    01.evaluating-multiple-models.md
-    02.adding-paper-results.md
-    03.model-naming-convention.md
-    api/index.md
-```
-
-
-## Indices and tables
-
-```eval_rst
-* :ref:`genindex`
-* :ref:`modindex`
-* :ref:`search`
-```
-
-## Support
-
-If you get stuck you can head to our [Discourse](http://forum.sotabench.com) forum where you ask
-questions on how to use the project. You can also find ideas for contributions,
-and work with others on exciting projects.
\ No newline at end of file
diff --git a/docs/make.bat b/docs/make.bat
deleted file mode 100644
index 2119f51..0000000
--- a/docs/make.bat
+++ /dev/null
@@ -1,35 +0,0 @@
-@ECHO OFF
-
-pushd %~dp0
-
-REM Command file for Sphinx documentation
-
-if "%SPHINXBUILD%" == "" (
-	set SPHINXBUILD=sphinx-build
-)
-set SOURCEDIR=.
-set BUILDDIR=_build
-
-if "%1" == "" goto help
-
-%SPHINXBUILD% >NUL 2>NUL
-if errorlevel 9009 (
-	echo.
-	echo.The 'sphinx-build' command was not found. Make sure you have Sphinx
-	echo.installed, then set the SPHINXBUILD environment variable to point
-	echo.to the full path of the 'sphinx-build' executable. Alternatively you
-	echo.may add the Sphinx directory to PATH.
-	echo.
-	echo.If you don't have Sphinx installed, grab it from
-	echo.http://sphinx-doc.org/
-	exit /b 1
-)
-
-%SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O%
-goto end
-
-:help
-%SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O%
-
-:end
-popd
diff --git a/docs/mkdocs.yml b/docs/mkdocs.yml
new file mode 100644
index 0000000..c888b80
--- /dev/null
+++ b/docs/mkdocs.yml
@@ -0,0 +1,11 @@
+site_name: torchbench Docs
+theme:
+  name: 'material'
+  palette:
+    primary: 'red'
+    accent: 'red'
+  logo:
+    icon: 'explore'
+markdown_extensions:
+  - admonition
+  - codehilite
\ No newline at end of file
diff --git a/docs/site/404.html b/docs/site/404.html
new file mode 100644
index 0000000..3270902
--- /dev/null
+++ b/docs/site/404.html
@@ -0,0 +1,268 @@
+<!-- generated mkdocs-material theme 404 page: "404 - Not found" (markup omitted) -->
+ + + + + + + + \ No newline at end of file diff --git a/docs/site/assets/fonts/font-awesome.css b/docs/site/assets/fonts/font-awesome.css new file mode 100644 index 0000000..b476b53 --- /dev/null +++ b/docs/site/assets/fonts/font-awesome.css @@ -0,0 +1,4 @@ +/*! + * Font Awesome 4.7.0 by @davegandy - http://fontawesome.io - @fontawesome + * License - http://fontawesome.io/license (Font: SIL OFL 1.1, CSS: MIT License) + */@font-face{font-family:FontAwesome;font-style:normal;font-weight:400;src:url("specimen/FontAwesome.woff2") format("woff2"),url("specimen/FontAwesome.woff") format("woff"),url("specimen/FontAwesome.ttf") format("truetype")}.fa{display:inline-block;font:normal normal normal 14px/1 FontAwesome;font-size:inherit;text-rendering:auto;-webkit-font-smoothing:antialiased;-moz-osx-font-smoothing:grayscale}.fa-lg{font-size:1.33333333em;line-height:.75em;vertical-align:-15%}.fa-2x{font-size:2em}.fa-3x{font-size:3em}.fa-4x{font-size:4em}.fa-5x{font-size:5em}.fa-fw{width:1.28571429em;text-align:center}.fa-ul{padding-left:0;margin-left:2.14285714em;list-style-type:none}.fa-ul>li{position:relative}.fa-li{position:absolute;left:-2.14285714em;width:2.14285714em;top:.14285714em;text-align:center}.fa-li.fa-lg{left:-1.85714286em}.fa-border{padding:.2em .25em .15em;border:solid .08em #eee;border-radius:.1em}.fa-pull-left{float:left}.fa-pull-right{float:right}.fa.fa-pull-left{margin-right:.3em}.fa.fa-pull-right{margin-left:.3em}.pull-right{float:right}.pull-left{float:left}.fa.pull-left{margin-right:.3em}.fa.pull-right{margin-left:.3em}.fa-spin{-webkit-animation:fa-spin 2s infinite linear;animation:fa-spin 2s infinite linear}.fa-pulse{-webkit-animation:fa-spin 1s infinite steps(8);animation:fa-spin 1s infinite steps(8)}@-webkit-keyframes fa-spin{0%{-webkit-transform:rotate(0deg);transform:rotate(0deg)}100%{-webkit-transform:rotate(359deg);transform:rotate(359deg)}}@keyframes fa-spin{0%{-webkit-transform:rotate(0deg);transform:rotate(0deg)}100%{-webkit-transform:rotate(359deg);transform:rotate(359deg)}}.fa-rotate-90{-ms-filter:"progid:DXImageTransform.Microsoft.BasicImage(rotation=1)";-webkit-transform:rotate(90deg);-ms-transform:rotate(90deg);transform:rotate(90deg)}.fa-rotate-180{-ms-filter:"progid:DXImageTransform.Microsoft.BasicImage(rotation=2)";-webkit-transform:rotate(180deg);-ms-transform:rotate(180deg);transform:rotate(180deg)}.fa-rotate-270{-ms-filter:"progid:DXImageTransform.Microsoft.BasicImage(rotation=3)";-webkit-transform:rotate(270deg);-ms-transform:rotate(270deg);transform:rotate(270deg)}.fa-flip-horizontal{-ms-filter:"progid:DXImageTransform.Microsoft.BasicImage(rotation=0, mirror=1)";-webkit-transform:scale(-1,1);-ms-transform:scale(-1,1);transform:scale(-1,1)}.fa-flip-vertical{-ms-filter:"progid:DXImageTransform.Microsoft.BasicImage(rotation=2, mirror=1)";-webkit-transform:scale(1,-1);-ms-transform:scale(1,-1);transform:scale(1,-1)}:root .fa-rotate-90,:root .fa-rotate-180,:root .fa-rotate-270,:root .fa-flip-horizontal,:root 
.fa-flip-vertical{filter:none}.fa-stack{position:relative;display:inline-block;width:2em;height:2em;line-height:2em;vertical-align:middle}.fa-stack-1x,.fa-stack-2x{position:absolute;left:0;width:100%;text-align:center}.fa-stack-1x{line-height:inherit}.fa-stack-2x{font-size:2em}.fa-inverse{color:#fff}.fa-glass:before{content:"\f000"}.fa-music:before{content:"\f001"}.fa-search:before{content:"\f002"}.fa-envelope-o:before{content:"\f003"}.fa-heart:before{content:"\f004"}.fa-star:before{content:"\f005"}.fa-star-o:before{content:"\f006"}.fa-user:before{content:"\f007"}.fa-film:before{content:"\f008"}.fa-th-large:before{content:"\f009"}.fa-th:before{content:"\f00a"}.fa-th-list:before{content:"\f00b"}.fa-check:before{content:"\f00c"}.fa-remove:before,.fa-close:before,.fa-times:before{content:"\f00d"}.fa-search-plus:before{content:"\f00e"}.fa-search-minus:before{content:"\f010"}.fa-power-off:before{content:"\f011"}.fa-signal:before{content:"\f012"}.fa-gear:before,.fa-cog:before{content:"\f013"}.fa-trash-o:before{content:"\f014"}.fa-home:before{content:"\f015"}.fa-file-o:before{content:"\f016"}.fa-clock-o:before{content:"\f017"}.fa-road:before{content:"\f018"}.fa-download:before{content:"\f019"}.fa-arrow-circle-o-down:before{content:"\f01a"}.fa-arrow-circle-o-up:before{content:"\f01b"}.fa-inbox:before{content:"\f01c"}.fa-play-circle-o:before{content:"\f01d"}.fa-rotate-right:before,.fa-repeat:before{content:"\f01e"}.fa-refresh:before{content:"\f021"}.fa-list-alt:before{content:"\f022"}.fa-lock:before{content:"\f023"}.fa-flag:before{content:"\f024"}.fa-headphones:before{content:"\f025"}.fa-volume-off:before{content:"\f026"}.fa-volume-down:before{content:"\f027"}.fa-volume-up:before{content:"\f028"}.fa-qrcode:before{content:"\f029"}.fa-barcode:before{content:"\f02a"}.fa-tag:before{content:"\f02b"}.fa-tags:before{content:"\f02c"}.fa-book:before{content:"\f02d"}.fa-bookmark:before{content:"\f02e"}.fa-print:before{content:"\f02f"}.fa-camera:before{content:"\f030"}.fa-font:before{content:"\f031"}.fa-bold:before{content:"\f032"}.fa-italic:before{content:"\f033"}.fa-text-height:before{content:"\f034"}.fa-text-width:before{content:"\f035"}.fa-align-left:before{content:"\f036"}.fa-align-center:before{content:"\f037"}.fa-align-right:before{content:"\f038"}.fa-align-justify:before{content:"\f039"}.fa-list:before{content:"\f03a"}.fa-dedent:before,.fa-outdent:before{content:"\f03b"}.fa-indent:before{content:"\f03c"}.fa-video-camera:before{content:"\f03d"}.fa-photo:before,.fa-image:before,.fa-picture-o:before{content:"\f03e"}.fa-pencil:before{content:"\f040"}.fa-map-marker:before{content:"\f041"}.fa-adjust:before{content:"\f042"}.fa-tint:before{content:"\f043"}.fa-edit:before,.fa-pencil-square-o:before{content:"\f044"}.fa-share-square-o:before{content:"\f045"}.fa-check-square-o:before{content:"\f046"}.fa-arrows:before{content:"\f047"}.fa-step-backward:before{content:"\f048"}.fa-fast-backward:before{content:"\f049"}.fa-backward:before{content:"\f04a"}.fa-play:before{content:"\f04b"}.fa-pause:before{content:"\f04c"}.fa-stop:before{content:"\f04d"}.fa-forward:before{content:"\f04e"}.fa-fast-forward:before{content:"\f050"}.fa-step-forward:before{content:"\f051"}.fa-eject:before{content:"\f052"}.fa-chevron-left:before{content:"\f053"}.fa-chevron-right:before{content:"\f054"}.fa-plus-circle:before{content:"\f055"}.fa-minus-circle:before{content:"\f056"}.fa-times-circle:before{content:"\f057"}.fa-check-circle:before{content:"\f058"}.fa-question-circle:before{content:"\f059"}.fa-info-circle:before{content:"\f05a"}.fa-cross
hairs:before{content:"\f05b"}.fa-times-circle-o:before{content:"\f05c"}.fa-check-circle-o:before{content:"\f05d"}.fa-ban:before{content:"\f05e"}.fa-arrow-left:before{content:"\f060"}.fa-arrow-right:before{content:"\f061"}.fa-arrow-up:before{content:"\f062"}.fa-arrow-down:before{content:"\f063"}.fa-mail-forward:before,.fa-share:before{content:"\f064"}.fa-expand:before{content:"\f065"}.fa-compress:before{content:"\f066"}.fa-plus:before{content:"\f067"}.fa-minus:before{content:"\f068"}.fa-asterisk:before{content:"\f069"}.fa-exclamation-circle:before{content:"\f06a"}.fa-gift:before{content:"\f06b"}.fa-leaf:before{content:"\f06c"}.fa-fire:before{content:"\f06d"}.fa-eye:before{content:"\f06e"}.fa-eye-slash:before{content:"\f070"}.fa-warning:before,.fa-exclamation-triangle:before{content:"\f071"}.fa-plane:before{content:"\f072"}.fa-calendar:before{content:"\f073"}.fa-random:before{content:"\f074"}.fa-comment:before{content:"\f075"}.fa-magnet:before{content:"\f076"}.fa-chevron-up:before{content:"\f077"}.fa-chevron-down:before{content:"\f078"}.fa-retweet:before{content:"\f079"}.fa-shopping-cart:before{content:"\f07a"}.fa-folder:before{content:"\f07b"}.fa-folder-open:before{content:"\f07c"}.fa-arrows-v:before{content:"\f07d"}.fa-arrows-h:before{content:"\f07e"}.fa-bar-chart-o:before,.fa-bar-chart:before{content:"\f080"}.fa-twitter-square:before{content:"\f081"}.fa-facebook-square:before{content:"\f082"}.fa-camera-retro:before{content:"\f083"}.fa-key:before{content:"\f084"}.fa-gears:before,.fa-cogs:before{content:"\f085"}.fa-comments:before{content:"\f086"}.fa-thumbs-o-up:before{content:"\f087"}.fa-thumbs-o-down:before{content:"\f088"}.fa-star-half:before{content:"\f089"}.fa-heart-o:before{content:"\f08a"}.fa-sign-out:before{content:"\f08b"}.fa-linkedin-square:before{content:"\f08c"}.fa-thumb-tack:before{content:"\f08d"}.fa-external-link:before{content:"\f08e"}.fa-sign-in:before{content:"\f090"}.fa-trophy:before{content:"\f091"}.fa-github-square:before{content:"\f092"}.fa-upload:before{content:"\f093"}.fa-lemon-o:before{content:"\f094"}.fa-phone:before{content:"\f095"}.fa-square-o:before{content:"\f096"}.fa-bookmark-o:before{content:"\f097"}.fa-phone-square:before{content:"\f098"}.fa-twitter:before{content:"\f099"}.fa-facebook-f:before,.fa-facebook:before{content:"\f09a"}.fa-github:before{content:"\f09b"}.fa-unlock:before{content:"\f09c"}.fa-credit-card:before{content:"\f09d"}.fa-feed:before,.fa-rss:before{content:"\f09e"}.fa-hdd-o:before{content:"\f0a0"}.fa-bullhorn:before{content:"\f0a1"}.fa-bell:before{content:"\f0f3"}.fa-certificate:before{content:"\f0a3"}.fa-hand-o-right:before{content:"\f0a4"}.fa-hand-o-left:before{content:"\f0a5"}.fa-hand-o-up:before{content:"\f0a6"}.fa-hand-o-down:before{content:"\f0a7"}.fa-arrow-circle-left:before{content:"\f0a8"}.fa-arrow-circle-right:before{content:"\f0a9"}.fa-arrow-circle-up:before{content:"\f0aa"}.fa-arrow-circle-down:before{content:"\f0ab"}.fa-globe:before{content:"\f0ac"}.fa-wrench:before{content:"\f0ad"}.fa-tasks:before{content:"\f0ae"}.fa-filter:before{content:"\f0b0"}.fa-briefcase:before{content:"\f0b1"}.fa-arrows-alt:before{content:"\f0b2"}.fa-group:before,.fa-users:before{content:"\f0c0"}.fa-chain:before,.fa-link:before{content:"\f0c1"}.fa-cloud:before{content:"\f0c2"}.fa-flask:before{content:"\f0c3"}.fa-cut:before,.fa-scissors:before{content:"\f0c4"}.fa-copy:before,.fa-files-o:before{content:"\f0c5"}.fa-paperclip:before{content:"\f0c6"}.fa-save:before,.fa-floppy-o:before{content:"\f0c7"}.fa-square:before{content:"\f0c8"}.fa-navicon:before,.fa-reor
der:before,.fa-bars:before{content:"\f0c9"}.fa-list-ul:before{content:"\f0ca"}.fa-list-ol:before{content:"\f0cb"}.fa-strikethrough:before{content:"\f0cc"}.fa-underline:before{content:"\f0cd"}.fa-table:before{content:"\f0ce"}.fa-magic:before{content:"\f0d0"}.fa-truck:before{content:"\f0d1"}.fa-pinterest:before{content:"\f0d2"}.fa-pinterest-square:before{content:"\f0d3"}.fa-google-plus-square:before{content:"\f0d4"}.fa-google-plus:before{content:"\f0d5"}.fa-money:before{content:"\f0d6"}.fa-caret-down:before{content:"\f0d7"}.fa-caret-up:before{content:"\f0d8"}.fa-caret-left:before{content:"\f0d9"}.fa-caret-right:before{content:"\f0da"}.fa-columns:before{content:"\f0db"}.fa-unsorted:before,.fa-sort:before{content:"\f0dc"}.fa-sort-down:before,.fa-sort-desc:before{content:"\f0dd"}.fa-sort-up:before,.fa-sort-asc:before{content:"\f0de"}.fa-envelope:before{content:"\f0e0"}.fa-linkedin:before{content:"\f0e1"}.fa-rotate-left:before,.fa-undo:before{content:"\f0e2"}.fa-legal:before,.fa-gavel:before{content:"\f0e3"}.fa-dashboard:before,.fa-tachometer:before{content:"\f0e4"}.fa-comment-o:before{content:"\f0e5"}.fa-comments-o:before{content:"\f0e6"}.fa-flash:before,.fa-bolt:before{content:"\f0e7"}.fa-sitemap:before{content:"\f0e8"}.fa-umbrella:before{content:"\f0e9"}.fa-paste:before,.fa-clipboard:before{content:"\f0ea"}.fa-lightbulb-o:before{content:"\f0eb"}.fa-exchange:before{content:"\f0ec"}.fa-cloud-download:before{content:"\f0ed"}.fa-cloud-upload:before{content:"\f0ee"}.fa-user-md:before{content:"\f0f0"}.fa-stethoscope:before{content:"\f0f1"}.fa-suitcase:before{content:"\f0f2"}.fa-bell-o:before{content:"\f0a2"}.fa-coffee:before{content:"\f0f4"}.fa-cutlery:before{content:"\f0f5"}.fa-file-text-o:before{content:"\f0f6"}.fa-building-o:before{content:"\f0f7"}.fa-hospital-o:before{content:"\f0f8"}.fa-ambulance:before{content:"\f0f9"}.fa-medkit:before{content:"\f0fa"}.fa-fighter-jet:before{content:"\f0fb"}.fa-beer:before{content:"\f0fc"}.fa-h-square:before{content:"\f0fd"}.fa-plus-square:before{content:"\f0fe"}.fa-angle-double-left:before{content:"\f100"}.fa-angle-double-right:before{content:"\f101"}.fa-angle-double-up:before{content:"\f102"}.fa-angle-double-down:before{content:"\f103"}.fa-angle-left:before{content:"\f104"}.fa-angle-right:before{content:"\f105"}.fa-angle-up:before{content:"\f106"}.fa-angle-down:before{content:"\f107"}.fa-desktop:before{content:"\f108"}.fa-laptop:before{content:"\f109"}.fa-tablet:before{content:"\f10a"}.fa-mobile-phone:before,.fa-mobile:before{content:"\f10b"}.fa-circle-o:before{content:"\f10c"}.fa-quote-left:before{content:"\f10d"}.fa-quote-right:before{content:"\f10e"}.fa-spinner:before{content:"\f110"}.fa-circle:before{content:"\f111"}.fa-mail-reply:before,.fa-reply:before{content:"\f112"}.fa-github-alt:before{content:"\f113"}.fa-folder-o:before{content:"\f114"}.fa-folder-open-o:before{content:"\f115"}.fa-smile-o:before{content:"\f118"}.fa-frown-o:before{content:"\f119"}.fa-meh-o:before{content:"\f11a"}.fa-gamepad:before{content:"\f11b"}.fa-keyboard-o:before{content:"\f11c"}.fa-flag-o:before{content:"\f11d"}.fa-flag-checkered:before{content:"\f11e"}.fa-terminal:before{content:"\f120"}.fa-code:before{content:"\f121"}.fa-mail-reply-all:before,.fa-reply-all:before{content:"\f122"}.fa-star-half-empty:before,.fa-star-half-full:before,.fa-star-half-o:before{content:"\f123"}.fa-location-arrow:before{content:"\f124"}.fa-crop:before{content:"\f125"}.fa-code-fork:before{content:"\f126"}.fa-unlink:before,.fa-chain-broken:before{content:"\f127"}.fa-question:before{content:"\f128"}.fa-i
nfo:before{content:"\f129"}.fa-exclamation:before{content:"\f12a"}.fa-superscript:before{content:"\f12b"}.fa-subscript:before{content:"\f12c"}.fa-eraser:before{content:"\f12d"}.fa-puzzle-piece:before{content:"\f12e"}.fa-microphone:before{content:"\f130"}.fa-microphone-slash:before{content:"\f131"}.fa-shield:before{content:"\f132"}.fa-calendar-o:before{content:"\f133"}.fa-fire-extinguisher:before{content:"\f134"}.fa-rocket:before{content:"\f135"}.fa-maxcdn:before{content:"\f136"}.fa-chevron-circle-left:before{content:"\f137"}.fa-chevron-circle-right:before{content:"\f138"}.fa-chevron-circle-up:before{content:"\f139"}.fa-chevron-circle-down:before{content:"\f13a"}.fa-html5:before{content:"\f13b"}.fa-css3:before{content:"\f13c"}.fa-anchor:before{content:"\f13d"}.fa-unlock-alt:before{content:"\f13e"}.fa-bullseye:before{content:"\f140"}.fa-ellipsis-h:before{content:"\f141"}.fa-ellipsis-v:before{content:"\f142"}.fa-rss-square:before{content:"\f143"}.fa-play-circle:before{content:"\f144"}.fa-ticket:before{content:"\f145"}.fa-minus-square:before{content:"\f146"}.fa-minus-square-o:before{content:"\f147"}.fa-level-up:before{content:"\f148"}.fa-level-down:before{content:"\f149"}.fa-check-square:before{content:"\f14a"}.fa-pencil-square:before{content:"\f14b"}.fa-external-link-square:before{content:"\f14c"}.fa-share-square:before{content:"\f14d"}.fa-compass:before{content:"\f14e"}.fa-toggle-down:before,.fa-caret-square-o-down:before{content:"\f150"}.fa-toggle-up:before,.fa-caret-square-o-up:before{content:"\f151"}.fa-toggle-right:before,.fa-caret-square-o-right:before{content:"\f152"}.fa-euro:before,.fa-eur:before{content:"\f153"}.fa-gbp:before{content:"\f154"}.fa-dollar:before,.fa-usd:before{content:"\f155"}.fa-rupee:before,.fa-inr:before{content:"\f156"}.fa-cny:before,.fa-rmb:before,.fa-yen:before,.fa-jpy:before{content:"\f157"}.fa-ruble:before,.fa-rouble:before,.fa-rub:before{content:"\f158"}.fa-won:before,.fa-krw:before{content:"\f159"}.fa-bitcoin:before,.fa-btc:before{content:"\f15a"}.fa-file:before{content:"\f15b"}.fa-file-text:before{content:"\f15c"}.fa-sort-alpha-asc:before{content:"\f15d"}.fa-sort-alpha-desc:before{content:"\f15e"}.fa-sort-amount-asc:before{content:"\f160"}.fa-sort-amount-desc:before{content:"\f161"}.fa-sort-numeric-asc:before{content:"\f162"}.fa-sort-numeric-desc:before{content:"\f163"}.fa-thumbs-up:before{content:"\f164"}.fa-thumbs-down:before{content:"\f165"}.fa-youtube-square:before{content:"\f166"}.fa-youtube:before{content:"\f167"}.fa-xing:before{content:"\f168"}.fa-xing-square:before{content:"\f169"}.fa-youtube-play:before{content:"\f16a"}.fa-dropbox:before{content:"\f16b"}.fa-stack-overflow:before{content:"\f16c"}.fa-instagram:before{content:"\f16d"}.fa-flickr:before{content:"\f16e"}.fa-adn:before{content:"\f170"}.fa-bitbucket:before{content:"\f171"}.fa-bitbucket-square:before{content:"\f172"}.fa-tumblr:before{content:"\f173"}.fa-tumblr-square:before{content:"\f174"}.fa-long-arrow-down:before{content:"\f175"}.fa-long-arrow-up:before{content:"\f176"}.fa-long-arrow-left:before{content:"\f177"}.fa-long-arrow-right:before{content:"\f178"}.fa-apple:before{content:"\f179"}.fa-windows:before{content:"\f17a"}.fa-android:before{content:"\f17b"}.fa-linux:before{content:"\f17c"}.fa-dribbble:before{content:"\f17d"}.fa-skype:before{content:"\f17e"}.fa-foursquare:before{content:"\f180"}.fa-trello:before{content:"\f181"}.fa-female:before{content:"\f182"}.fa-male:before{content:"\f183"}.fa-gittip:before,.fa-gratipay:before{content:"\f184"}.fa-sun-o:before{content:"\f185"}.fa-moon-o:bef
ore{content:"\f186"}.fa-archive:before{content:"\f187"}.fa-bug:before{content:"\f188"}.fa-vk:before{content:"\f189"}.fa-weibo:before{content:"\f18a"}.fa-renren:before{content:"\f18b"}.fa-pagelines:before{content:"\f18c"}.fa-stack-exchange:before{content:"\f18d"}.fa-arrow-circle-o-right:before{content:"\f18e"}.fa-arrow-circle-o-left:before{content:"\f190"}.fa-toggle-left:before,.fa-caret-square-o-left:before{content:"\f191"}.fa-dot-circle-o:before{content:"\f192"}.fa-wheelchair:before{content:"\f193"}.fa-vimeo-square:before{content:"\f194"}.fa-turkish-lira:before,.fa-try:before{content:"\f195"}.fa-plus-square-o:before{content:"\f196"}.fa-space-shuttle:before{content:"\f197"}.fa-slack:before{content:"\f198"}.fa-envelope-square:before{content:"\f199"}.fa-wordpress:before{content:"\f19a"}.fa-openid:before{content:"\f19b"}.fa-institution:before,.fa-bank:before,.fa-university:before{content:"\f19c"}.fa-mortar-board:before,.fa-graduation-cap:before{content:"\f19d"}.fa-yahoo:before{content:"\f19e"}.fa-google:before{content:"\f1a0"}.fa-reddit:before{content:"\f1a1"}.fa-reddit-square:before{content:"\f1a2"}.fa-stumbleupon-circle:before{content:"\f1a3"}.fa-stumbleupon:before{content:"\f1a4"}.fa-delicious:before{content:"\f1a5"}.fa-digg:before{content:"\f1a6"}.fa-pied-piper-pp:before{content:"\f1a7"}.fa-pied-piper-alt:before{content:"\f1a8"}.fa-drupal:before{content:"\f1a9"}.fa-joomla:before{content:"\f1aa"}.fa-language:before{content:"\f1ab"}.fa-fax:before{content:"\f1ac"}.fa-building:before{content:"\f1ad"}.fa-child:before{content:"\f1ae"}.fa-paw:before{content:"\f1b0"}.fa-spoon:before{content:"\f1b1"}.fa-cube:before{content:"\f1b2"}.fa-cubes:before{content:"\f1b3"}.fa-behance:before{content:"\f1b4"}.fa-behance-square:before{content:"\f1b5"}.fa-steam:before{content:"\f1b6"}.fa-steam-square:before{content:"\f1b7"}.fa-recycle:before{content:"\f1b8"}.fa-automobile:before,.fa-car:before{content:"\f1b9"}.fa-cab:before,.fa-taxi:before{content:"\f1ba"}.fa-tree:before{content:"\f1bb"}.fa-spotify:before{content:"\f1bc"}.fa-deviantart:before{content:"\f1bd"}.fa-soundcloud:before{content:"\f1be"}.fa-database:before{content:"\f1c0"}.fa-file-pdf-o:before{content:"\f1c1"}.fa-file-word-o:before{content:"\f1c2"}.fa-file-excel-o:before{content:"\f1c3"}.fa-file-powerpoint-o:before{content:"\f1c4"}.fa-file-photo-o:before,.fa-file-picture-o:before,.fa-file-image-o:before{content:"\f1c5"}.fa-file-zip-o:before,.fa-file-archive-o:before{content:"\f1c6"}.fa-file-sound-o:before,.fa-file-audio-o:before{content:"\f1c7"}.fa-file-movie-o:before,.fa-file-video-o:before{content:"\f1c8"}.fa-file-code-o:before{content:"\f1c9"}.fa-vine:before{content:"\f1ca"}.fa-codepen:before{content:"\f1cb"}.fa-jsfiddle:before{content:"\f1cc"}.fa-life-bouy:before,.fa-life-buoy:before,.fa-life-saver:before,.fa-support:before,.fa-life-ring:before{content:"\f1cd"}.fa-circle-o-notch:before{content:"\f1ce"}.fa-ra:before,.fa-resistance:before,.fa-rebel:before{content:"\f1d0"}.fa-ge:before,.fa-empire:before{content:"\f1d1"}.fa-git-square:before{content:"\f1d2"}.fa-git:before{content:"\f1d3"}.fa-y-combinator-square:before,.fa-yc-square:before,.fa-hacker-news:before{content:"\f1d4"}.fa-tencent-weibo:before{content:"\f1d5"}.fa-qq:before{content:"\f1d6"}.fa-wechat:before,.fa-weixin:before{content:"\f1d7"}.fa-send:before,.fa-paper-plane:before{content:"\f1d8"}.fa-send-o:before,.fa-paper-plane-o:before{content:"\f1d9"}.fa-history:before{content:"\f1da"}.fa-circle-thin:before{content:"\f1db"}.fa-header:before{content:"\f1dc"}.fa-paragraph:before{content:"\f1dd"}
.fa-sliders:before{content:"\f1de"}.fa-share-alt:before{content:"\f1e0"}.fa-share-alt-square:before{content:"\f1e1"}.fa-bomb:before{content:"\f1e2"}.fa-soccer-ball-o:before,.fa-futbol-o:before{content:"\f1e3"}.fa-tty:before{content:"\f1e4"}.fa-binoculars:before{content:"\f1e5"}.fa-plug:before{content:"\f1e6"}.fa-slideshare:before{content:"\f1e7"}.fa-twitch:before{content:"\f1e8"}.fa-yelp:before{content:"\f1e9"}.fa-newspaper-o:before{content:"\f1ea"}.fa-wifi:before{content:"\f1eb"}.fa-calculator:before{content:"\f1ec"}.fa-paypal:before{content:"\f1ed"}.fa-google-wallet:before{content:"\f1ee"}.fa-cc-visa:before{content:"\f1f0"}.fa-cc-mastercard:before{content:"\f1f1"}.fa-cc-discover:before{content:"\f1f2"}.fa-cc-amex:before{content:"\f1f3"}.fa-cc-paypal:before{content:"\f1f4"}.fa-cc-stripe:before{content:"\f1f5"}.fa-bell-slash:before{content:"\f1f6"}.fa-bell-slash-o:before{content:"\f1f7"}.fa-trash:before{content:"\f1f8"}.fa-copyright:before{content:"\f1f9"}.fa-at:before{content:"\f1fa"}.fa-eyedropper:before{content:"\f1fb"}.fa-paint-brush:before{content:"\f1fc"}.fa-birthday-cake:before{content:"\f1fd"}.fa-area-chart:before{content:"\f1fe"}.fa-pie-chart:before{content:"\f200"}.fa-line-chart:before{content:"\f201"}.fa-lastfm:before{content:"\f202"}.fa-lastfm-square:before{content:"\f203"}.fa-toggle-off:before{content:"\f204"}.fa-toggle-on:before{content:"\f205"}.fa-bicycle:before{content:"\f206"}.fa-bus:before{content:"\f207"}.fa-ioxhost:before{content:"\f208"}.fa-angellist:before{content:"\f209"}.fa-cc:before{content:"\f20a"}.fa-shekel:before,.fa-sheqel:before,.fa-ils:before{content:"\f20b"}.fa-meanpath:before{content:"\f20c"}.fa-buysellads:before{content:"\f20d"}.fa-connectdevelop:before{content:"\f20e"}.fa-dashcube:before{content:"\f210"}.fa-forumbee:before{content:"\f211"}.fa-leanpub:before{content:"\f212"}.fa-sellsy:before{content:"\f213"}.fa-shirtsinbulk:before{content:"\f214"}.fa-simplybuilt:before{content:"\f215"}.fa-skyatlas:before{content:"\f216"}.fa-cart-plus:before{content:"\f217"}.fa-cart-arrow-down:before{content:"\f218"}.fa-diamond:before{content:"\f219"}.fa-ship:before{content:"\f21a"}.fa-user-secret:before{content:"\f21b"}.fa-motorcycle:before{content:"\f21c"}.fa-street-view:before{content:"\f21d"}.fa-heartbeat:before{content:"\f21e"}.fa-venus:before{content:"\f221"}.fa-mars:before{content:"\f222"}.fa-mercury:before{content:"\f223"}.fa-intersex:before,.fa-transgender:before{content:"\f224"}.fa-transgender-alt:before{content:"\f225"}.fa-venus-double:before{content:"\f226"}.fa-mars-double:before{content:"\f227"}.fa-venus-mars:before{content:"\f228"}.fa-mars-stroke:before{content:"\f229"}.fa-mars-stroke-v:before{content:"\f22a"}.fa-mars-stroke-h:before{content:"\f22b"}.fa-neuter:before{content:"\f22c"}.fa-genderless:before{content:"\f22d"}.fa-facebook-official:before{content:"\f230"}.fa-pinterest-p:before{content:"\f231"}.fa-whatsapp:before{content:"\f232"}.fa-server:before{content:"\f233"}.fa-user-plus:before{content:"\f234"}.fa-user-times:before{content:"\f235"}.fa-hotel:before,.fa-bed:before{content:"\f236"}.fa-viacoin:before{content:"\f237"}.fa-train:before{content:"\f238"}.fa-subway:before{content:"\f239"}.fa-medium:before{content:"\f23a"}.fa-yc:before,.fa-y-combinator:before{content:"\f23b"}.fa-optin-monster:before{content:"\f23c"}.fa-opencart:before{content:"\f23d"}.fa-expeditedssl:before{content:"\f23e"}.fa-battery-4:before,.fa-battery:before,.fa-battery-full:before{content:"\f240"}.fa-battery-3:before,.fa-battery-three-quarters:before{content:"\f241"}.fa-battery-2:before
,.fa-battery-half:before{content:"\f242"}.fa-battery-1:before,.fa-battery-quarter:before{content:"\f243"}.fa-battery-0:before,.fa-battery-empty:before{content:"\f244"}.fa-mouse-pointer:before{content:"\f245"}.fa-i-cursor:before{content:"\f246"}.fa-object-group:before{content:"\f247"}.fa-object-ungroup:before{content:"\f248"}.fa-sticky-note:before{content:"\f249"}.fa-sticky-note-o:before{content:"\f24a"}.fa-cc-jcb:before{content:"\f24b"}.fa-cc-diners-club:before{content:"\f24c"}.fa-clone:before{content:"\f24d"}.fa-balance-scale:before{content:"\f24e"}.fa-hourglass-o:before{content:"\f250"}.fa-hourglass-1:before,.fa-hourglass-start:before{content:"\f251"}.fa-hourglass-2:before,.fa-hourglass-half:before{content:"\f252"}.fa-hourglass-3:before,.fa-hourglass-end:before{content:"\f253"}.fa-hourglass:before{content:"\f254"}.fa-hand-grab-o:before,.fa-hand-rock-o:before{content:"\f255"}.fa-hand-stop-o:before,.fa-hand-paper-o:before{content:"\f256"}.fa-hand-scissors-o:before{content:"\f257"}.fa-hand-lizard-o:before{content:"\f258"}.fa-hand-spock-o:before{content:"\f259"}.fa-hand-pointer-o:before{content:"\f25a"}.fa-hand-peace-o:before{content:"\f25b"}.fa-trademark:before{content:"\f25c"}.fa-registered:before{content:"\f25d"}.fa-creative-commons:before{content:"\f25e"}.fa-gg:before{content:"\f260"}.fa-gg-circle:before{content:"\f261"}.fa-tripadvisor:before{content:"\f262"}.fa-odnoklassniki:before{content:"\f263"}.fa-odnoklassniki-square:before{content:"\f264"}.fa-get-pocket:before{content:"\f265"}.fa-wikipedia-w:before{content:"\f266"}.fa-safari:before{content:"\f267"}.fa-chrome:before{content:"\f268"}.fa-firefox:before{content:"\f269"}.fa-opera:before{content:"\f26a"}.fa-internet-explorer:before{content:"\f26b"}.fa-tv:before,.fa-television:before{content:"\f26c"}.fa-contao:before{content:"\f26d"}.fa-500px:before{content:"\f26e"}.fa-amazon:before{content:"\f270"}.fa-calendar-plus-o:before{content:"\f271"}.fa-calendar-minus-o:before{content:"\f272"}.fa-calendar-times-o:before{content:"\f273"}.fa-calendar-check-o:before{content:"\f274"}.fa-industry:before{content:"\f275"}.fa-map-pin:before{content:"\f276"}.fa-map-signs:before{content:"\f277"}.fa-map-o:before{content:"\f278"}.fa-map:before{content:"\f279"}.fa-commenting:before{content:"\f27a"}.fa-commenting-o:before{content:"\f27b"}.fa-houzz:before{content:"\f27c"}.fa-vimeo:before{content:"\f27d"}.fa-black-tie:before{content:"\f27e"}.fa-fonticons:before{content:"\f280"}.fa-reddit-alien:before{content:"\f281"}.fa-edge:before{content:"\f282"}.fa-credit-card-alt:before{content:"\f283"}.fa-codiepie:before{content:"\f284"}.fa-modx:before{content:"\f285"}.fa-fort-awesome:before{content:"\f286"}.fa-usb:before{content:"\f287"}.fa-product-hunt:before{content:"\f288"}.fa-mixcloud:before{content:"\f289"}.fa-scribd:before{content:"\f28a"}.fa-pause-circle:before{content:"\f28b"}.fa-pause-circle-o:before{content:"\f28c"}.fa-stop-circle:before{content:"\f28d"}.fa-stop-circle-o:before{content:"\f28e"}.fa-shopping-bag:before{content:"\f290"}.fa-shopping-basket:before{content:"\f291"}.fa-hashtag:before{content:"\f292"}.fa-bluetooth:before{content:"\f293"}.fa-bluetooth-b:before{content:"\f294"}.fa-percent:before{content:"\f295"}.fa-gitlab:before{content:"\f296"}.fa-wpbeginner:before{content:"\f297"}.fa-wpforms:before{content:"\f298"}.fa-envira:before{content:"\f299"}.fa-universal-access:before{content:"\f29a"}.fa-wheelchair-alt:before{content:"\f29b"}.fa-question-circle-o:before{content:"\f29c"}.fa-blind:before{content:"\f29d"}.fa-audio-description:before{content:"\f29e"}.f
a-volume-control-phone:before{content:"\f2a0"}.fa-braille:before{content:"\f2a1"}.fa-assistive-listening-systems:before{content:"\f2a2"}.fa-asl-interpreting:before,.fa-american-sign-language-interpreting:before{content:"\f2a3"}.fa-deafness:before,.fa-hard-of-hearing:before,.fa-deaf:before{content:"\f2a4"}.fa-glide:before{content:"\f2a5"}.fa-glide-g:before{content:"\f2a6"}.fa-signing:before,.fa-sign-language:before{content:"\f2a7"}.fa-low-vision:before{content:"\f2a8"}.fa-viadeo:before{content:"\f2a9"}.fa-viadeo-square:before{content:"\f2aa"}.fa-snapchat:before{content:"\f2ab"}.fa-snapchat-ghost:before{content:"\f2ac"}.fa-snapchat-square:before{content:"\f2ad"}.fa-pied-piper:before{content:"\f2ae"}.fa-first-order:before{content:"\f2b0"}.fa-yoast:before{content:"\f2b1"}.fa-themeisle:before{content:"\f2b2"}.fa-google-plus-circle:before,.fa-google-plus-official:before{content:"\f2b3"}.fa-fa:before,.fa-font-awesome:before{content:"\f2b4"}.fa-handshake-o:before{content:"\f2b5"}.fa-envelope-open:before{content:"\f2b6"}.fa-envelope-open-o:before{content:"\f2b7"}.fa-linode:before{content:"\f2b8"}.fa-address-book:before{content:"\f2b9"}.fa-address-book-o:before{content:"\f2ba"}.fa-vcard:before,.fa-address-card:before{content:"\f2bb"}.fa-vcard-o:before,.fa-address-card-o:before{content:"\f2bc"}.fa-user-circle:before{content:"\f2bd"}.fa-user-circle-o:before{content:"\f2be"}.fa-user-o:before{content:"\f2c0"}.fa-id-badge:before{content:"\f2c1"}.fa-drivers-license:before,.fa-id-card:before{content:"\f2c2"}.fa-drivers-license-o:before,.fa-id-card-o:before{content:"\f2c3"}.fa-quora:before{content:"\f2c4"}.fa-free-code-camp:before{content:"\f2c5"}.fa-telegram:before{content:"\f2c6"}.fa-thermometer-4:before,.fa-thermometer:before,.fa-thermometer-full:before{content:"\f2c7"}.fa-thermometer-3:before,.fa-thermometer-three-quarters:before{content:"\f2c8"}.fa-thermometer-2:before,.fa-thermometer-half:before{content:"\f2c9"}.fa-thermometer-1:before,.fa-thermometer-quarter:before{content:"\f2ca"}.fa-thermometer-0:before,.fa-thermometer-empty:before{content:"\f2cb"}.fa-shower:before{content:"\f2cc"}.fa-bathtub:before,.fa-s15:before,.fa-bath:before{content:"\f2cd"}.fa-podcast:before{content:"\f2ce"}.fa-window-maximize:before{content:"\f2d0"}.fa-window-minimize:before{content:"\f2d1"}.fa-window-restore:before{content:"\f2d2"}.fa-times-rectangle:before,.fa-window-close:before{content:"\f2d3"}.fa-times-rectangle-o:before,.fa-window-close-o:before{content:"\f2d4"}.fa-bandcamp:before{content:"\f2d5"}.fa-grav:before{content:"\f2d6"}.fa-etsy:before{content:"\f2d7"}.fa-imdb:before{content:"\f2d8"}.fa-ravelry:before{content:"\f2d9"}.fa-eercast:before{content:"\f2da"}.fa-microchip:before{content:"\f2db"}.fa-snowflake-o:before{content:"\f2dc"}.fa-superpowers:before{content:"\f2dd"}.fa-wpexplorer:before{content:"\f2de"}.fa-meetup:before{content:"\f2e0"}.sr-only{position:absolute;width:1px;height:1px;padding:0;margin:-1px;overflow:hidden;clip:rect(0,0,0,0);border:0}.sr-only-focusable:active,.sr-only-focusable:focus{position:static;width:auto;height:auto;margin:0;overflow:visible;clip:auto} \ No newline at end of file diff --git a/docs/site/assets/fonts/material-icons.css b/docs/site/assets/fonts/material-icons.css new file mode 100644 index 0000000..d23d365 --- /dev/null +++ b/docs/site/assets/fonts/material-icons.css @@ -0,0 +1,13 @@ +/*! + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. 
You may obtain a copy + * of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING, SOFTWARE + * DISTRIBUTED UNDER THE LICENSE IS DISTRIBUTED ON AN "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED. + * SEE THE LICENSE FOR THE SPECIFIC LANGUAGE GOVERNING PERMISSIONS AND + * LIMITATIONS UNDER THE LICENSE. + */@font-face{font-family:"Material Icons";font-style:normal;font-weight:400;src:local("Material Icons"),local("MaterialIcons-Regular"),url("specimen/MaterialIcons-Regular.woff2") format("woff2"),url("specimen/MaterialIcons-Regular.woff") format("woff"),url("specimen/MaterialIcons-Regular.ttf") format("truetype")} \ No newline at end of file diff --git a/docs/site/assets/fonts/specimen/FontAwesome.ttf b/docs/site/assets/fonts/specimen/FontAwesome.ttf new file mode 100644 index 0000000..35acda2 Binary files /dev/null and b/docs/site/assets/fonts/specimen/FontAwesome.ttf differ diff --git a/docs/site/assets/fonts/specimen/FontAwesome.woff b/docs/site/assets/fonts/specimen/FontAwesome.woff new file mode 100644 index 0000000..400014a Binary files /dev/null and b/docs/site/assets/fonts/specimen/FontAwesome.woff differ diff --git a/docs/site/assets/fonts/specimen/FontAwesome.woff2 b/docs/site/assets/fonts/specimen/FontAwesome.woff2 new file mode 100644 index 0000000..4d13fc6 Binary files /dev/null and b/docs/site/assets/fonts/specimen/FontAwesome.woff2 differ diff --git a/docs/site/assets/fonts/specimen/MaterialIcons-Regular.ttf b/docs/site/assets/fonts/specimen/MaterialIcons-Regular.ttf new file mode 100644 index 0000000..7015564 Binary files /dev/null and b/docs/site/assets/fonts/specimen/MaterialIcons-Regular.ttf differ diff --git a/docs/site/assets/fonts/specimen/MaterialIcons-Regular.woff b/docs/site/assets/fonts/specimen/MaterialIcons-Regular.woff new file mode 100644 index 0000000..b648a3e Binary files /dev/null and b/docs/site/assets/fonts/specimen/MaterialIcons-Regular.woff differ diff --git a/docs/site/assets/fonts/specimen/MaterialIcons-Regular.woff2 b/docs/site/assets/fonts/specimen/MaterialIcons-Regular.woff2 new file mode 100644 index 0000000..9fa2112 Binary files /dev/null and b/docs/site/assets/fonts/specimen/MaterialIcons-Regular.woff2 differ diff --git a/docs/site/assets/images/favicon.png b/docs/site/assets/images/favicon.png new file mode 100644 index 0000000..76d17f5 Binary files /dev/null and b/docs/site/assets/images/favicon.png differ diff --git a/docs/site/assets/images/icons/bitbucket.1b09e088.svg b/docs/site/assets/images/icons/bitbucket.1b09e088.svg new file mode 100644 index 0000000..cf58c14 --- /dev/null +++ b/docs/site/assets/images/icons/bitbucket.1b09e088.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/docs/site/assets/images/icons/github.f0b8504a.svg b/docs/site/assets/images/icons/github.f0b8504a.svg new file mode 100644 index 0000000..3d13b19 --- /dev/null +++ b/docs/site/assets/images/icons/github.f0b8504a.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/docs/site/assets/images/icons/gitlab.6dd19c00.svg b/docs/site/assets/images/icons/gitlab.6dd19c00.svg new file mode 100644 index 0000000..1d9fffa --- /dev/null +++ b/docs/site/assets/images/icons/gitlab.6dd19c00.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/docs/site/assets/javascripts/application.c648116f.js b/docs/site/assets/javascripts/application.c648116f.js new file mode 100644 index 0000000..1619f6e --- /dev/null +++ 
b/docs/site/assets/javascripts/application.c648116f.js @@ -0,0 +1,6 @@ +!function(e,t){for(var n in t)e[n]=t[n]}(window,function(n){var r={};function i(e){if(r[e])return r[e].exports;var t=r[e]={i:e,l:!1,exports:{}};return n[e].call(t.exports,t,t.exports,i),t.l=!0,t.exports}return i.m=n,i.c=r,i.d=function(e,t,n){i.o(e,t)||Object.defineProperty(e,t,{enumerable:!0,get:n})},i.r=function(e){"undefined"!=typeof Symbol&&Symbol.toStringTag&&Object.defineProperty(e,Symbol.toStringTag,{value:"Module"}),Object.defineProperty(e,"__esModule",{value:!0})},i.t=function(t,e){if(1&e&&(t=i(t)),8&e)return t;if(4&e&&"object"==typeof t&&t&&t.__esModule)return t;var n=Object.create(null);if(i.r(n),Object.defineProperty(n,"default",{enumerable:!0,value:t}),2&e&&"string"!=typeof t)for(var r in t)i.d(n,r,function(e){return t[e]}.bind(null,r));return n},i.n=function(e){var t=e&&e.__esModule?function(){return e.default}:function(){return e};return i.d(t,"a",t),t},i.o=function(e,t){return Object.prototype.hasOwnProperty.call(e,t)},i.p="",i(i.s=13)}([function(e,t,n){"use strict";var r={Listener:function(){function e(e,t,n){var r=this;this.els_=Array.prototype.slice.call("string"==typeof e?document.querySelectorAll(e):[].concat(e)),this.handler_="function"==typeof n?{update:n}:n,this.events_=[].concat(t),this.update_=function(e){return r.handler_.update(e)}}var t=e.prototype;return t.listen=function(){var n=this;this.els_.forEach(function(t){n.events_.forEach(function(e){t.addEventListener(e,n.update_,!1)})}),"function"==typeof this.handler_.setup&&this.handler_.setup()},t.unlisten=function(){var n=this;this.els_.forEach(function(t){n.events_.forEach(function(e){t.removeEventListener(e,n.update_)})}),"function"==typeof this.handler_.reset&&this.handler_.reset()},e}(),MatchMedia:function(e,t){this.handler_=function(e){e.matches?t.listen():t.unlisten()};var n=window.matchMedia(e);n.addListener(this.handler_),this.handler_(n)}},i={Shadow:function(){function e(e,t){var n="string"==typeof e?document.querySelector(e):e;if(!(n instanceof HTMLElement&&n.parentNode instanceof HTMLElement))throw new ReferenceError;if(this.el_=n.parentNode,!((n="string"==typeof t?document.querySelector(t):t)instanceof HTMLElement))throw new ReferenceError;this.header_=n,this.height_=0,this.active_=!1}var t=e.prototype;return t.setup=function(){for(var e=this.el_;e=e.previousElementSibling;){if(!(e instanceof HTMLElement))throw new ReferenceError;this.height_+=e.offsetHeight}this.update()},t.update=function(e){if(!e||"resize"!==e.type&&"orientationchange"!==e.type){var t=window.pageYOffset>=this.height_;t!==this.active_&&(this.header_.dataset.mdState=(this.active_=t)?"shadow":"")}else this.height_=0,this.setup()},t.reset=function(){this.header_.dataset.mdState="",this.height_=0,this.active_=!1},e}(),Title:function(){function e(e,t){var n="string"==typeof e?document.querySelector(e):e;if(!(n instanceof HTMLElement))throw new ReferenceError;if(this.el_=n,!((n="string"==typeof t?document.querySelector(t):t)instanceof HTMLHeadingElement))throw new ReferenceError;this.header_=n,this.active_=!1}var t=e.prototype;return t.setup=function(){var t=this;Array.prototype.forEach.call(this.el_.children,function(e){e.style.width=t.el_.offsetWidth-20+"px"})},t.update=function(e){var 
t=this,n=window.pageYOffset>=this.header_.offsetTop;n!==this.active_&&(this.el_.dataset.mdState=(this.active_=n)?"active":""),"resize"!==e.type&&"orientationchange"!==e.type||Array.prototype.forEach.call(this.el_.children,function(e){e.style.width=t.el_.offsetWidth-20+"px"})},t.reset=function(){this.el_.dataset.mdState="",this.el_.style.width="",this.active_=!1},e}()},o={Blur:function(){function e(e){this.els_="string"==typeof e?document.querySelectorAll(e):e,this.index_=0,this.offset_=window.pageYOffset,this.dir_=!1,this.anchors_=[].reduce.call(this.els_,function(e,t){var n=decodeURIComponent(t.hash);return e.concat(document.getElementById(n.substring(1))||[])},[])}var t=e.prototype;return t.setup=function(){this.update()},t.update=function(){var e=window.pageYOffset,t=this.offset_-e<0;if(this.dir_!==t&&(this.index_=this.index_=t?0:this.els_.length-1),0!==this.anchors_.length){if(this.offset_<=e)for(var n=this.index_+1;ne)){this.index_=r;break}0=this.offset_?"lock"!==this.el_.dataset.mdState&&(this.el_.dataset.mdState="lock"):"lock"===this.el_.dataset.mdState&&(this.el_.dataset.mdState="")},t.reset=function(){this.el_.dataset.mdState="",this.el_.style.height="",this.height_=0},e}()},c=n(6),l=n.n(c);var u={Adapter:{GitHub:function(o){var e,t;function n(e){var t;t=o.call(this,e)||this;var n=/^.+github\.com\/([^/]+)\/?([^/]+)?.*$/.exec(t.base_);if(n&&3===n.length){var r=n[1],i=n[2];t.base_="https://api.github.com/users/"+r+"/repos",t.name_=i}return t}return t=o,(e=n).prototype=Object.create(t.prototype),(e.prototype.constructor=e).__proto__=t,n.prototype.fetch_=function(){var i=this;return function n(r){return void 0===r&&(r=0),fetch(i.base_+"?per_page=30&page="+r).then(function(e){return e.json()}).then(function(e){if(!(e instanceof Array))throw new TypeError;if(i.name_){var t=e.find(function(e){return e.name===i.name_});return t||30!==e.length?t?[i.format_(t.stargazers_count)+" Stars",i.format_(t.forks_count)+" Forks"]:[]:n(r+1)}return[e.length+" Repositories"]})}()},n}(function(){function e(e){var t="string"==typeof e?document.querySelector(e):e;if(!(t instanceof HTMLAnchorElement))throw new ReferenceError;this.el_=t,this.base_=this.el_.href,this.salt_=this.hash_(this.base_)}var t=e.prototype;return t.fetch=function(){var n=this;return new Promise(function(t){var e=l.a.getJSON(n.salt_+".cache-source");void 0!==e?t(e):n.fetch_().then(function(e){l.a.set(n.salt_+".cache-source",e,{expires:1/96}),t(e)})})},t.fetch_=function(){throw new Error("fetch_(): Not implemented")},t.format_=function(e){return 1e4=this.el_.children[0].offsetTop+(5-this.height_);e!==this.active_&&(this.el_.dataset.mdState=(this.active_=e)?"hidden":"")},t.reset=function(){this.el_.dataset.mdState="",this.active_=!1},e}()};t.a={Event:r,Header:i,Nav:o,Search:a,Sidebar:s,Source:u,Tabs:f}},function(t,e,n){(function(e){t.exports=e.lunr=n(24)}).call(this,n(4))},function(e,d,h){"use strict";(function(t){var e=h(8),n=setTimeout;function c(e){return Boolean(e&&void 0!==e.length)}function r(){}function o(e){if(!(this instanceof o))throw new TypeError("Promises must be constructed via new");if("function"!=typeof e)throw new TypeError("not a function");this._state=0,this._handled=!1,this._value=void 0,this._deferreds=[],f(e,this)}function i(n,r){for(;3===n._state;)n=n._value;0!==n._state?(n._handled=!0,o._immediateFn(function(){var e=1===n._state?r.onFulfilled:r.onRejected;if(null!==e){var t;try{t=e(n._value)}catch(e){return void 
s(r.promise,e)}a(r.promise,t)}else(1===n._state?a:s)(r.promise,n._value)})):n._deferreds.push(r)}function a(t,e){try{if(e===t)throw new TypeError("A promise cannot be resolved with itself.");if(e&&("object"==typeof e||"function"==typeof e)){var n=e.then;if(e instanceof o)return t._state=3,t._value=e,void l(t);if("function"==typeof n)return void f((r=n,i=e,function(){r.apply(i,arguments)}),t)}t._state=1,t._value=e,l(t)}catch(e){s(t,e)}var r,i}function s(e,t){e._state=2,e._value=t,l(e)}function l(e){2===e._state&&0===e._deferreds.length&&o._immediateFn(function(){e._handled||o._unhandledRejectionFn(e._value)});for(var t=0,n=e._deferreds.length;t"+n+""};this.stack_=[],r.forEach(function(e,t){var n,r=a.docs_.get(t),i=f.createElement("li",{class:"md-search-result__item"},f.createElement("a",{href:r.location,title:r.title,class:"md-search-result__link",tabindex:"-1"},f.createElement("article",{class:"md-search-result__article md-search-result__article--document"},f.createElement("h1",{class:"md-search-result__title"},{__html:r.title.replace(s,c)}),r.text.length?f.createElement("p",{class:"md-search-result__teaser"},{__html:r.text.replace(s,c)}):{}))),o=e.map(function(t){return function(){var e=a.docs_.get(t.ref);i.appendChild(f.createElement("a",{href:e.location,title:e.title,class:"md-search-result__link","data-md-rel":"anchor",tabindex:"-1"},f.createElement("article",{class:"md-search-result__article"},f.createElement("h1",{class:"md-search-result__title"},{__html:e.title.replace(s,c)}),e.text.length?f.createElement("p",{class:"md-search-result__teaser"},{__html:function(e,t){var n=t;if(e.length>n){for(;" "!==e[n]&&0<--n;);return e.substring(0,n)+"..."}return e}(e.text.replace(s,c),400)}):{})))}});(n=a.stack_).push.apply(n,[function(){return a.list_.appendChild(i)}].concat(o))});var o=this.el_.parentNode;if(!(o instanceof HTMLElement))throw new ReferenceError;for(;this.stack_.length&&o.offsetHeight>=o.scrollHeight-16;)this.stack_.shift()();var l=this.list_.querySelectorAll("[data-md-rel=anchor]");switch(Array.prototype.forEach.call(l,function(r){["click","keydown"].forEach(function(n){r.addEventListener(n,function(e){if("keydown"!==n||13===e.keyCode){var t=document.querySelector("[data-md-toggle=search]");if(!(t instanceof HTMLInputElement))throw new ReferenceError;t.checked&&(t.checked=!1,t.dispatchEvent(new CustomEvent("change"))),e.preventDefault(),setTimeout(function(){document.location.href=r.href},100)}})})}),r.size){case 0:this.meta_.textContent=this.message_.none;break;case 1:this.meta_.textContent=this.message_.one;break;default:this.meta_.textContent=this.message_.other.replace("#",r.size)}}}else{var u=function(e){a.docs_=e.reduce(function(e,t){var n,r,i,o=t.location.split("#"),a=o[0],s=o[1];return t.text=(n=t.text,r=document.createTextNode(n),(i=document.createElement("p")).appendChild(r),i.innerHTML),s&&(t.parent=e.get(a),t.parent&&!t.parent.done&&(t.parent.title=t.title,t.parent.text=t.text,t.parent.done=!0)),t.text=t.text.replace(/\n/g," ").replace(/\s+/g," ").replace(/\s+([,.:;!?])/g,function(e,t){return t}),t.parent&&t.parent.title===t.title||e.set(t.location,t),e},new Map);var i=a.docs_,o=a.lang_;a.stack_=[],a.index_=d()(function(){var e,t=this,n={"search.pipeline.trimmer":d.a.trimmer,"search.pipeline.stopwords":d.a.stopWordFilter},r=Object.keys(n).reduce(function(e,t){return 
h(t).match(/^false$/i)||e.push(n[t]),e},[]);this.pipeline.reset(),r&&(e=this.pipeline).add.apply(e,r),1===o.length&&"en"!==o[0]&&d.a[o[0]]?this.use(d.a[o[0]]):1=t.scrollHeight-16;)a.stack_.splice(0,10).forEach(function(e){return e()})})};setTimeout(function(){return"function"==typeof a.data_?a.data_().then(u):u(a.data_)},250)}},e}()}).call(this,r(3))},function(e,n,r){"use strict";(function(t){r.d(n,"a",function(){return e});var e=function(){function e(e){var t="string"==typeof e?document.querySelector(e):e;if(!(t instanceof HTMLElement))throw new ReferenceError;this.el_=t}return e.prototype.initialize=function(e){e.length&&this.el_.children.length&&this.el_.children[this.el_.children.length-1].appendChild(t.createElement("ul",{class:"md-source__facts"},e.map(function(e){return t.createElement("li",{class:"md-source__fact"},e)}))),this.el_.dataset.mdState="done"},e}()}).call(this,r(3))},,,function(e,n,c){"use strict";c.r(n),function(o){c.d(n,"app",function(){return t});c(14),c(15),c(16),c(17),c(18),c(19),c(20);var r=c(2),e=c(5),a=c.n(e),i=c(0);window.Promise=window.Promise||r.a;var s=function(e){var t=document.getElementsByName("lang:"+e)[0];if(!(t instanceof HTMLMetaElement))throw new ReferenceError;return t.content};var t={initialize:function(t){new i.a.Event.Listener(document,"DOMContentLoaded",function(){if(!(document.body instanceof HTMLElement))throw new ReferenceError;Modernizr.addTest("ios",function(){return!!navigator.userAgent.match(/(iPad|iPhone|iPod)/g)});var e=document.querySelectorAll("table:not([class])");if(Array.prototype.forEach.call(e,function(e){var t=o.createElement("div",{class:"md-typeset__scrollwrap"},o.createElement("div",{class:"md-typeset__table"}));e.nextSibling?e.parentNode.insertBefore(t,e.nextSibling):e.parentNode.appendChild(t),t.children[0].appendChild(e)}),a.a.isSupported()){var t=document.querySelectorAll(".codehilite > pre, pre > code");Array.prototype.forEach.call(t,function(e,t){var n="__code_"+t,r=o.createElement("button",{class:"md-clipboard",title:s("clipboard.copy"),"data-clipboard-target":"#"+n+" pre, #"+n+" code"},o.createElement("span",{class:"md-clipboard__message"})),i=e.parentNode;i.id=n,i.insertBefore(r,e)}),new a.a(".md-clipboard").on("success",function(e){var t=e.trigger.querySelector(".md-clipboard__message");if(!(t instanceof HTMLElement))throw new ReferenceError;e.clearSelection(),t.dataset.mdTimer&&clearTimeout(parseInt(t.dataset.mdTimer,10)),t.classList.add("md-clipboard__message--active"),t.innerHTML=s("clipboard.copied"),t.dataset.mdTimer=setTimeout(function(){t.classList.remove("md-clipboard__message--active"),t.dataset.mdTimer=""},2e3).toString()})}if(!Modernizr.details){var n=document.querySelectorAll("details > summary");Array.prototype.forEach.call(n,function(e){e.addEventListener("click",function(e){var t=e.target.parentNode;t.hasAttribute("open")?t.removeAttribute("open"):t.setAttribute("open","")})})}var r=function(){if(document.location.hash){var e=document.getElementById(document.location.hash.substring(1));if(!e)return;for(var t=e.parentNode;t&&!(t instanceof HTMLDetailsElement);)t=t.parentNode;if(t&&!t.open){t.open=!0;var n=location.hash;location.hash=" ",location.hash=n}}};if(window.addEventListener("hashchange",r),r(),Modernizr.ios){var i=document.querySelectorAll("[data-md-scrollfix]");Array.prototype.forEach.call(i,function(t){t.addEventListener("touchstart",function(){var e=t.scrollTop;0===e?t.scrollTop=1:e+t.offsetHeight===t.scrollHeight&&(t.scrollTop=e-1)})})}}).listen(),new 
[... remainder of the single-line minified mkdocs-material application bundle, continued from the previous hunk: event listeners for the header shadow and title, hero and tabs toggles, sidebar and table-of-contents positioning, collapsible navigation, the search overlay (lock, result rendering fetched from search_index.json, reset, focus, and keyboard shortcuts), tab-focus handling, a GitHub source-repository adapter, and a print handler that expands <details> elements; followed by webpack module exports for the Bitbucket/GitHub/GitLab SVG icons and the application.30686662.css and application-palette.a8b3c06d.css paths, a CustomEvent polyfill, a window.fetch polyfill, and setTimeout/setImmediate shims. Generated asset; body elided. Truncated fragments of the bundled lunr query lexer and of the Danish (da) trimmer, Snowball stemmer, and stop-word filter followed here; their diff headers were lost in extraction. ...]
diff --git a/docs/site/assets/javascripts/lunr/lunr.de.js b/docs/site/assets/javascripts/lunr/lunr.de.js
new file mode 100644
index 0000000..1529892
--- /dev/null
+++ b/docs/site/assets/javascripts/lunr/lunr.de.js
@@ -0,0 +1 @@
+[single minified line: German (de) language support for the lunr search index (trimmer, Snowball stemmer, stop-word filter); generated asset, body elided]
\ No newline at end of file
diff --git a/docs/site/assets/javascripts/lunr/lunr.du.js b/docs/site/assets/javascripts/lunr/lunr.du.js
new file mode 100644
index 0000000..5263200
--- /dev/null
+++ b/docs/site/assets/javascripts/lunr/lunr.du.js
@@ -0,0 +1 @@
+[single minified line: Dutch (du) language support (trimmer, Snowball stemmer, stop-word filter); logs a deprecation warning that the standard code is "nl" and that "du" will be removed in the next major versions; generated asset, body elided]
\ No newline at end of file
diff --git a/docs/site/assets/javascripts/lunr/lunr.es.js b/docs/site/assets/javascripts/lunr/lunr.es.js
new file mode 100644
index 0000000..9de6c09
--- /dev/null
+++ b/docs/site/assets/javascripts/lunr/lunr.es.js
@@ -0,0 +1 @@
+[single minified line: Spanish (es) language support (trimmer, Snowball stemmer, stop-word filter); generated asset, body elided]
\ No newline at end of file
diff --git a/docs/site/assets/javascripts/lunr/lunr.fi.js b/docs/site/assets/javascripts/lunr/lunr.fi.js
new file mode 100644
index 0000000..2f9bf5a
--- /dev/null
+++ b/docs/site/assets/javascripts/lunr/lunr.fi.js
@@ -0,0 +1 @@
+[single minified line: Finnish (fi) language support (trimmer, Snowball stemmer, stop-word filter); generated asset, body elided]
\ No newline at end of file
diff --git a/docs/site/assets/javascripts/lunr/lunr.fr.js b/docs/site/assets/javascripts/lunr/lunr.fr.js
new file mode 100644
index 0000000..078d0ca
--- /dev/null
+++ b/docs/site/assets/javascripts/lunr/lunr.fr.js
@@ -0,0 +1 @@
+[single minified line: French (fr) language support (trimmer, Snowball stemmer, stop-word filter); generated asset, body elided]
\ No newline at end of file
diff --git a/docs/site/assets/javascripts/lunr/lunr.hu.js b/docs/site/assets/javascripts/lunr/lunr.hu.js
new file mode 100644
index 0000000..56a4b0d
--- /dev/null
+++ b/docs/site/assets/javascripts/lunr/lunr.hu.js
@@ -0,0 +1 @@
+[single minified line: Hungarian (hu) language support (trimmer, Snowball stemmer, stop-word filter); generated asset, body elided]
\ No newline at end of file
diff --git a/docs/site/assets/javascripts/lunr/lunr.it.js b/docs/site/assets/javascripts/lunr/lunr.it.js
new file mode 100644
index 0000000..50dddaa
--- /dev/null
+++ b/docs/site/assets/javascripts/lunr/lunr.it.js
@@ -0,0 +1 @@
+[single minified line: Italian (it) language support (trimmer, Snowball stemmer, stop-word filter); generated asset, body elided]
\ No newline at end of file
diff --git a/docs/site/assets/javascripts/lunr/lunr.ja.js b/docs/site/assets/javascripts/lunr/lunr.ja.js
new file mode 100644
index 0000000..69f6202
--- /dev/null
+++ b/docs/site/assets/javascripts/lunr/lunr.ja.js
@@ -0,0 +1 @@
+!function(e,r){"function"==typeof define&&define.amd?define(r):"object"==typeof exports?module.exports=r():r()(e.lunr)}(this,function(){return function(m){if(void 0===m)throw new Error("Lunr is not
+/* minified lunr-languages asset: Japanese pack for Lunr search — trimmer, stop-word filter, stemmer, and a TinySegmenter-based tokenizer; minified body elided */
[…extraction gap: the tail of lunr.ja.js and at least one following hunk header are missing; the text resumes inside the minified Dutch pack, lunr.nl.js…]
+/* minified lunr-languages asset: Dutch pack for Lunr search — registers a Snowball stemmer (stemmer-nl) and stopWordFilter-nl; hunk header lost, minified body elided */
\ No newline at end of file
diff --git a/docs/site/assets/javascripts/lunr/lunr.no.js b/docs/site/assets/javascripts/lunr/lunr.no.js
new file mode 100644
index 0000000..3d156b9
--- /dev/null
+++ b/docs/site/assets/javascripts/lunr/lunr.no.js
@@ -0,0 +1 @@
+/* minified lunr-languages asset: Norwegian pack for Lunr search — registers trimmer-no, a Snowball stemmer (stemmer-no), and stopWordFilter-no; minified body elided */
\ No newline at end of file
diff --git a/docs/site/assets/javascripts/lunr/lunr.pt.js b/docs/site/assets/javascripts/lunr/lunr.pt.js
new file mode 100644
index 0000000..f50fc9f
--- /dev/null
+++ b/docs/site/assets/javascripts/lunr/lunr.pt.js
@@ -0,0 +1 @@
j("idas",47,1),new j("ias",47,1),new j("arias",50,1),new j("erias",50,1),new j("irias",50,1),new j("aras",47,1),new j("eras",47,1),new j("iras",47,1),new j("avas",47,1),new j("es",-1,1),new j("ardes",58,1),new j("erdes",58,1),new j("irdes",58,1),new j("ares",58,1),new j("eres",58,1),new j("ires",58,1),new j("asses",58,1),new j("esses",58,1),new j("isses",58,1),new j("astes",58,1),new j("estes",58,1),new j("istes",58,1),new j("is",-1,1),new j("ais",71,1),new j("eis",71,1),new j("areis",73,1),new j("ereis",73,1),new j("ireis",73,1),new j("áreis",73,1),new j("éreis",73,1),new j("íreis",73,1),new j("ásseis",73,1),new j("ésseis",73,1),new j("ísseis",73,1),new j("áveis",73,1),new j("íeis",73,1),new j("aríeis",84,1),new j("eríeis",84,1),new j("iríeis",84,1),new j("ados",-1,1),new j("idos",-1,1),new j("amos",-1,1),new j("áramos",90,1),new j("éramos",90,1),new j("íramos",90,1),new j("ávamos",90,1),new j("íamos",90,1),new j("aríamos",95,1),new j("eríamos",95,1),new j("iríamos",95,1),new j("emos",-1,1),new j("aremos",99,1),new j("eremos",99,1),new j("iremos",99,1),new j("ássemos",99,1),new j("êssemos",99,1),new j("íssemos",99,1),new j("imos",-1,1),new j("armos",-1,1),new j("ermos",-1,1),new j("irmos",-1,1),new j("ámos",-1,1),new j("arás",-1,1),new j("erás",-1,1),new j("irás",-1,1),new j("eu",-1,1),new j("iu",-1,1),new j("ou",-1,1),new j("ará",-1,1),new j("erá",-1,1),new j("irá",-1,1)],c=[new j("a",-1,1),new j("i",-1,1),new j("o",-1,1),new j("os",-1,1),new j("á",-1,1),new j("í",-1,1),new j("ó",-1,1)],l=[new j("e",-1,1),new j("ç",-1,2),new j("é",-1,1),new j("ê",-1,1)],f=[17,65,16,0,0,0,0,0,0,0,0,0,0,0,0,0,3,19,12,2],d=new C;function v(){if(d.out_grouping(f,97,250)){for(;!d.in_grouping(f,97,250);){if(d.cursor>=d.limit)return!0;d.cursor++}return!1}return!0}function p(){var e,r,s=d.cursor;if(d.in_grouping(f,97,250))if(e=d.cursor,v()){if(d.cursor=e,function(){if(d.in_grouping(f,97,250))for(;!d.out_grouping(f,97,250);){if(d.cursor>=d.limit)return!1;d.cursor++}return i=d.cursor,!0}())return}else i=d.cursor;if(d.cursor=s,d.out_grouping(f,97,250)){if(r=d.cursor,v()){if(d.cursor=r,!d.in_grouping(f,97,250)||d.cursor>=d.limit)return;d.cursor++}i=d.cursor}}function _(){for(;!d.in_grouping(f,97,250);){if(d.cursor>=d.limit)return!1;d.cursor++}for(;!d.out_grouping(f,97,250);){if(d.cursor>=d.limit)return!1;d.cursor++}return!0}function h(){return i<=d.cursor}function b(){return s<=d.cursor}function g(){var e;if(d.ket=d.cursor,!(e=d.find_among_b(w,45)))return!1;switch(d.bra=d.cursor,e){case 1:if(!b())return!1;d.slice_del();break;case 2:if(!b())return!1;d.slice_from("log");break;case 3:if(!b())return!1;d.slice_from("u");break;case 4:if(!b())return!1;d.slice_from("ente");break;case 5:if(!(n<=d.cursor))return!1;d.slice_del(),d.ket=d.cursor,(e=d.find_among_b(r,4))&&(d.bra=d.cursor,b()&&(d.slice_del(),1==e&&(d.ket=d.cursor,d.eq_s_b(2,"at")&&(d.bra=d.cursor,b()&&d.slice_del()))));break;case 6:if(!b())return!1;d.slice_del(),d.ket=d.cursor,(e=d.find_among_b(t,3))&&(d.bra=d.cursor,1==e&&b()&&d.slice_del());break;case 7:if(!b())return!1;d.slice_del(),d.ket=d.cursor,(e=d.find_among_b(u,3))&&(d.bra=d.cursor,1==e&&b()&&d.slice_del());break;case 8:if(!b())return!1;d.slice_del(),d.ket=d.cursor,d.eq_s_b(2,"at")&&(d.bra=d.cursor,b()&&d.slice_del());break;case 9:if(!h()||!d.eq_s_b(1,"e"))return!1;d.slice_from("ir")}return!0}function k(e,r){if(d.eq_s_b(1,e)){d.bra=d.cursor;var s=d.limit-d.cursor;if(d.eq_s_b(1,r))return d.cursor=d.limit-s,h()&&d.slice_del(),!1}return!0}function q(){if(!g()&&(d.cursor=d.limit,!function(){var 
e,r;if(d.cursor>=i){if(r=d.limit_backward,d.limit_backward=i,d.ket=d.cursor,e=d.find_among_b(m,120))return d.bra=d.cursor,1==e&&d.slice_del(),d.limit_backward=r,!0;d.limit_backward=r}return!1}()))return d.cursor=d.limit,d.ket=d.cursor,void((e=d.find_among_b(c,7))&&(d.bra=d.cursor,1==e&&h()&&d.slice_del()));var e;d.cursor=d.limit,d.ket=d.cursor,d.eq_s_b(1,"i")&&(d.bra=d.cursor,d.eq_s_b(1,"c")&&(d.cursor=d.limit,h()&&d.slice_del()))}this.setCurrent=function(e){d.setCurrent(e)},this.getCurrent=function(){return d.getCurrent()},this.stem=function(){var e,r=d.cursor;return function(){for(var e;;){if(d.bra=d.cursor,e=d.find_among(o,3))switch(d.ket=d.cursor,e){case 1:d.slice_from("a~");continue;case 2:d.slice_from("o~");continue;case 3:if(d.cursor>=d.limit)break;d.cursor++;continue}break}}(),d.cursor=r,e=d.cursor,i=d.limit,s=n=i,p(),d.cursor=e,_()&&(n=d.cursor,_()&&(s=d.cursor)),d.limit_backward=r,d.cursor=d.limit,q(),d.cursor=d.limit,function(){var e;if(d.ket=d.cursor,e=d.find_among_b(l,4))switch(d.bra=d.cursor,e){case 1:h()&&(d.slice_del(),d.ket=d.cursor,d.limit,d.cursor,k("u","g")&&k("i","c"));break;case 2:d.slice_from("c")}}(),d.cursor=d.limit_backward,function(){for(var e;;){if(d.bra=d.cursor,e=d.find_among(a,3))switch(d.ket=d.cursor,e){case 1:d.slice_from("ã");continue;case 2:d.slice_from("õ");continue;case 3:if(d.cursor>=d.limit)break;d.cursor++;continue}break}}(),!0}},function(e){return"function"==typeof e.update?e.update(function(e){return r.setCurrent(e),r.stem(),r.getCurrent()}):(r.setCurrent(e),r.stem(),r.getCurrent())}),e.Pipeline.registerFunction(e.pt.stemmer,"stemmer-pt"),e.pt.stopWordFilter=e.generateStopWordFilter("a ao aos aquela aquelas aquele aqueles aquilo as até com como da das de dela delas dele deles depois do dos e ela elas ele eles em entre era eram essa essas esse esses esta estamos estas estava estavam este esteja estejam estejamos estes esteve estive estivemos estiver estivera estiveram estiverem estivermos estivesse estivessem estivéramos estivéssemos estou está estávamos estão eu foi fomos for fora foram forem formos fosse fossem fui fôramos fôssemos haja hajam hajamos havemos hei houve houvemos houver houvera houveram houverei houverem houveremos houveria houveriam houvermos houverá houverão houveríamos houvesse houvessem houvéramos houvéssemos há hão isso isto já lhe lhes mais mas me mesmo meu meus minha minhas muito na nas nem no nos nossa nossas nosso nossos num numa não nós o os ou para pela pelas pelo pelos por qual quando que quem se seja sejam sejamos sem serei seremos seria seriam será serão seríamos seu seus somos sou sua suas são só também te tem temos tenha tenham tenhamos tenho terei teremos teria teriam terá terão teríamos teu teus teve tinha tinham tive tivemos tiver tivera tiveram tiverem tivermos tivesse tivessem tivéramos tivéssemos tu tua tuas tém tínhamos um uma você vocês vos à às éramos".split(" ")),e.Pipeline.registerFunction(e.pt.stopWordFilter,"stopWordFilter-pt")}}); \ No newline at end of file diff --git a/docs/site/assets/javascripts/lunr/lunr.ro.js b/docs/site/assets/javascripts/lunr/lunr.ro.js new file mode 100644 index 0000000..b19627e --- /dev/null +++ b/docs/site/assets/javascripts/lunr/lunr.ro.js @@ -0,0 +1 @@ +!function(e,i){"function"==typeof define&&define.amd?define(i):"object"==typeof exports?module.exports=i():i()(e.lunr)}(this,function(){return function(e){if(void 0===e)throw new Error("Lunr is not present. 
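Each pack above builds its stop-word filter the same way, via a call that survives verbatim in the minified source: `e.generateStopWordFilter("a ao aos …".split(" "))`. lunr core's implementation is not part of this diff, so the following is only an illustrative sketch of what that helper amounts to:

```javascript
// Sketch of lunr.generateStopWordFilter as used by the language packs above;
// lunr core's exact source is not in this diff, so treat this as illustrative.
function generateStopWordFilter(stopWords) {
  // Index the word list for O(1) membership tests.
  var words = stopWords.reduce(function (memo, stopWord) {
    memo[stopWord] = stopWord;
    return memo;
  }, {});

  // Pipeline functions receive each token; returning undefined drops it.
  return function (token) {
    if (token && words[token.toString()] !== token.toString()) {
      return token;
    }
  };
}

// e.g. lunr.pt.stopWordFilter = generateStopWordFilter("a ao aos aquela …".split(" "));
```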
diff --git a/docs/site/assets/javascripts/lunr/lunr.ro.js b/docs/site/assets/javascripts/lunr/lunr.ro.js
new file mode 100644
index 0000000..b19627e
--- /dev/null
+++ b/docs/site/assets/javascripts/lunr/lunr.ro.js
@@ -0,0 +1 @@
+/* minified lunr-languages asset: Romanian pack for Lunr search — registers trimmer-ro, a Snowball stemmer (stemmer-ro), and stopWordFilter-ro; minified body elided */
\ No newline at end of file
diff --git a/docs/site/assets/javascripts/lunr/lunr.ru.js b/docs/site/assets/javascripts/lunr/lunr.ru.js
new file mode 100644
index 0000000..ac99248
--- /dev/null
+++ b/docs/site/assets/javascripts/lunr/lunr.ru.js
@@ -0,0 +1 @@
+/* minified lunr-languages asset: Russian pack for Lunr search — registers trimmer-ru, a Snowball stemmer, and a stop-word filter; minified body elided */
[…extraction gap: the tail of lunr.ru.js and the next hunk header are missing; the text resumes inside what is evidently lunr.stemmer.support.js — the shared Snowball runtime (SnowballProgram string primitives such as in_grouping, find_among, slice_from) plus trimmerSupport.generateTrimmer, which every language pack in this diff depends on…]
+/* minified lunr-languages asset: Snowball stemmer support and trimmer generator; hunk header lost, minified body elided */
\ No newline at end of file
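The `trimmerSupport.generateTrimmer` helper survives legibly in the fragment above, so it can be de-minified directly: given a language's `wordCharacters` class (defined near the top of every pack in this diff), it returns a pipeline function that strips leading and trailing non-word characters from each token.

```javascript
// De-minified from the fragment above (lunr.stemmer.support.js); behavior unchanged.
var trimmerSupport = {
  generateTrimmer: function (wordCharacters) {
    var leading = new RegExp("^[^" + wordCharacters + "]+");
    var trailing = new RegExp("[^" + wordCharacters + "]+$");
    return function (token) {
      // lunr 2.x tokens expose update(); older versions pass plain strings.
      if (typeof token.update === "function") {
        return token.update(function (str) {
          return str.replace(leading, "").replace(trailing, "");
        });
      }
      return token.replace(leading, "").replace(trailing, "");
    };
  }
};

// e.g. lunr.ru.trimmer = trimmerSupport.generateTrimmer(lunr.ru.wordCharacters);
```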
diff --git a/docs/site/assets/javascripts/lunr/lunr.sv.js b/docs/site/assets/javascripts/lunr/lunr.sv.js
new file mode 100644
index 0000000..6daf5f9
--- /dev/null
+++ b/docs/site/assets/javascripts/lunr/lunr.sv.js
@@ -0,0 +1 @@
+/* minified lunr-languages asset: Swedish pack for Lunr search — registers trimmer-sv, a Snowball stemmer (stemmer-sv), and stopWordFilter-sv; minified body elided */
\ No newline at end of file
diff --git a/docs/site/assets/javascripts/lunr/lunr.th.js b/docs/site/assets/javascripts/lunr/lunr.th.js
new file mode 100644
index 0000000..ee8ef37
--- /dev/null
+++ b/docs/site/assets/javascripts/lunr/lunr.th.js
@@ -0,0 +1 @@
+/* minified lunr-languages asset: Thai pack for Lunr search — registers trimmer-th and a wordcut-based tokenizer (no stemmer is added to the Thai pipeline); minified body elided */
\ No newline at end of file
diff --git a/docs/site/assets/javascripts/lunr/lunr.tr.js b/docs/site/assets/javascripts/lunr/lunr.tr.js
new file mode 100644
index 0000000..e8fb5a7
--- /dev/null
+++ b/docs/site/assets/javascripts/lunr/lunr.tr.js
@@ -0,0 +1 @@
+/* minified lunr-languages asset: Turkish pack for Lunr search — registers trimmer-tr, a Snowball stemmer, and a stop-word filter; minified body elided */
[…extraction gap: the tail of lunr.tr.js and the next hunk header are missing; the excerpt resumes inside a browserified Node bundle (events, glob, minimatch, inflight, inherits) — consistent with wordcut, the Thai word segmenter required by lunr.th.js — and is itself cut off mid-token at the end of this section…]
u?(c=r.nocase?l.toLowerCase()===u.toLowerCase():l===u,this.debug("string match",u,l,c)):(c=l.match(u),this.debug("pattern match",u,l,c)),!c)return!1}if(i===s&&o===a)return!0;if(i===s)return e;if(o===a)return i===s-1&&""===n[i];throw new Error("wtf?")}},{"brace-expansion":11,path:22}],21:[function(n,t,e){var r=n("wrappy");function i(n){var t=function(){return t.called?t.value:(t.called=!0,t.value=n.apply(this,arguments))};return t.called=!1,t}function o(n){var t=function(){if(t.called)throw new Error(t.onceError);return t.called=!0,t.value=n.apply(this,arguments)},e=n.name||"Function wrapped with `once`";return t.onceError=e+" shouldn't be called more than once",t.called=!1,t}t.exports=r(i),t.exports.strict=r(o),i.proto=i(function(){Object.defineProperty(Function.prototype,"once",{value:function(){return i(this)},configurable:!0}),Object.defineProperty(Function.prototype,"onceStrict",{value:function(){return o(this)},configurable:!0})})},{wrappy:29}],22:[function(n,t,u){(function(i){function o(n,t){for(var e=0,r=n.length-1;0<=r;r--){var i=n[r];"."===i?n.splice(r,1):".."===i?(n.splice(r,1),e++):e&&(n.splice(r,1),e--)}if(t)for(;e--;e)n.unshift("..");return n}var t=/^(\/?|)([\s\S]*?)((?:\.{1,2}|[^\/]+?|)(\.[^.\/]*|))(?:[\/]*)$/,s=function(n){return t.exec(n).slice(1)};function a(n,t){if(n.filter)return n.filter(t);for(var e=[],r=0;r":">",'"':""","'":"'","`":"`"},D=d.invert(N),F=function(t){var e=function(n){return t[n]},n="(?:"+d.keys(t).join("|")+")",r=RegExp(n),i=RegExp(n,"g");return function(n){return n=null==n?"":""+n,r.test(n)?n.replace(i,e):n}};d.escape=F(N),d.unescape=F(D),d.result=function(n,t,e){var r=null==n?void 0:n[t];return void 0===r&&(r=e),d.isFunction(r)?r.call(n):r};var M=0;d.uniqueId=function(n){var t=++M+"";return n?n+t:t},d.templateSettings={evaluate:/<%([\s\S]+?)%>/g,interpolate:/<%=([\s\S]+?)%>/g,escape:/<%-([\s\S]+?)%>/g};var C=/(.)^/,P={"'":"'","\\":"\\","\r":"r","\n":"n","\u2028":"u2028","\u2029":"u2029"},z=/\\|'|\r|\n|\u2028|\u2029/g,B=function(n){return"\\"+P[n]};d.template=function(o,n,t){!n&&t&&(n=t),n=d.defaults({},n,d.templateSettings);var e=RegExp([(n.escape||C).source,(n.interpolate||C).source,(n.evaluate||C).source].join("|")+"|$","g"),s=0,a="__p+='";o.replace(e,function(n,t,e,r,i){return a+=o.slice(s,i).replace(z,B),s=i+n.length,t?a+="'+\n((__t=("+t+"))==null?'':_.escape(__t))+\n'":e?a+="'+\n((__t=("+e+"))==null?'':__t)+\n'":r&&(a+="';\n"+r+"\n__p+='"),n}),a+="';\n",n.variable||(a="with(obj||{}){\n"+a+"}\n"),a="var __t,__p='',__j=Array.prototype.join,print=function(){__p+=__j.call(arguments,'');};\n"+a+"return __p;\n";try{var r=new Function(n.variable||"obj","_",a)}catch(n){throw n.source=a,n}var i=function(n){return r.call(this,n,d)},c=n.variable||"obj";return i.source="function("+c+"){\n"+a+"}",i},d.chain=function(n){var t=d(n);return t._chain=!0,t};var U=function(n,t){return n._chain?d(t).chain():t};d.mixin=function(e){d.each(d.functions(e),function(n){var t=d[n]=e[n];d.prototype[n]=function(){var n=[this._wrapped];return i.apply(n,arguments),U(this,t.apply(d,n))}})},d.mixin(d),d.each(["pop","push","reverse","shift","sort","splice","unshift"],function(t){var e=r[t];d.prototype[t]=function(){var n=this._wrapped;return e.apply(n,arguments),"shift"!==t&&"splice"!==t||0!==n.length||delete n[0],U(this,n)}}),d.each(["concat","join","slice"],function(n){var t=r[n];d.prototype[n]=function(){return U(this,t.apply(this._wrapped,arguments))}}),d.prototype.value=function(){return 
this._wrapped},d.prototype.valueOf=d.prototype.toJSON=d.prototype.value,d.prototype.toString=function(){return""+this._wrapped}}).call(this)},{}],26:[function(n,t,e){arguments[4][19][0].apply(e,arguments)},{dup:19}],27:[function(n,t,e){t.exports=function(n){return n&&"object"==typeof n&&"function"==typeof n.copy&&"function"==typeof n.fill&&"function"==typeof n.readUInt8}},{}],28:[function(h,n,k){(function(r,i){var a=/%[sdj%]/g;k.format=function(n){if(!_(n)){for(var t=[],e=0;e.md-nav__link{color:inherit}button[data-md-color-primary=pink]{background-color:#e91e63}[data-md-color-primary=pink] .md-typeset a{color:#e91e63}[data-md-color-primary=pink] .md-header,[data-md-color-primary=pink] .md-hero{background-color:#e91e63}[data-md-color-primary=pink] .md-nav__link--active,[data-md-color-primary=pink] .md-nav__link:active{color:#e91e63}[data-md-color-primary=pink] .md-nav__item--nested>.md-nav__link{color:inherit}button[data-md-color-primary=purple]{background-color:#ab47bc}[data-md-color-primary=purple] .md-typeset a{color:#ab47bc}[data-md-color-primary=purple] .md-header,[data-md-color-primary=purple] .md-hero{background-color:#ab47bc}[data-md-color-primary=purple] .md-nav__link--active,[data-md-color-primary=purple] .md-nav__link:active{color:#ab47bc}[data-md-color-primary=purple] .md-nav__item--nested>.md-nav__link{color:inherit}button[data-md-color-primary=deep-purple]{background-color:#7e57c2}[data-md-color-primary=deep-purple] .md-typeset a{color:#7e57c2}[data-md-color-primary=deep-purple] .md-header,[data-md-color-primary=deep-purple] .md-hero{background-color:#7e57c2}[data-md-color-primary=deep-purple] .md-nav__link--active,[data-md-color-primary=deep-purple] .md-nav__link:active{color:#7e57c2}[data-md-color-primary=deep-purple] .md-nav__item--nested>.md-nav__link{color:inherit}button[data-md-color-primary=indigo]{background-color:#3f51b5}[data-md-color-primary=indigo] .md-typeset a{color:#3f51b5}[data-md-color-primary=indigo] .md-header,[data-md-color-primary=indigo] .md-hero{background-color:#3f51b5}[data-md-color-primary=indigo] .md-nav__link--active,[data-md-color-primary=indigo] .md-nav__link:active{color:#3f51b5}[data-md-color-primary=indigo] .md-nav__item--nested>.md-nav__link{color:inherit}button[data-md-color-primary=blue]{background-color:#2196f3}[data-md-color-primary=blue] .md-typeset a{color:#2196f3}[data-md-color-primary=blue] .md-header,[data-md-color-primary=blue] .md-hero{background-color:#2196f3}[data-md-color-primary=blue] .md-nav__link--active,[data-md-color-primary=blue] .md-nav__link:active{color:#2196f3}[data-md-color-primary=blue] .md-nav__item--nested>.md-nav__link{color:inherit}button[data-md-color-primary=light-blue]{background-color:#03a9f4}[data-md-color-primary=light-blue] .md-typeset a{color:#03a9f4}[data-md-color-primary=light-blue] .md-header,[data-md-color-primary=light-blue] .md-hero{background-color:#03a9f4}[data-md-color-primary=light-blue] .md-nav__link--active,[data-md-color-primary=light-blue] .md-nav__link:active{color:#03a9f4}[data-md-color-primary=light-blue] .md-nav__item--nested>.md-nav__link{color:inherit}button[data-md-color-primary=cyan]{background-color:#00bcd4}[data-md-color-primary=cyan] .md-typeset a{color:#00bcd4}[data-md-color-primary=cyan] .md-header,[data-md-color-primary=cyan] .md-hero{background-color:#00bcd4}[data-md-color-primary=cyan] .md-nav__link--active,[data-md-color-primary=cyan] .md-nav__link:active{color:#00bcd4}[data-md-color-primary=cyan] 
.md-nav__item--nested>.md-nav__link{color:inherit}button[data-md-color-primary=teal]{background-color:#009688}[data-md-color-primary=teal] .md-typeset a{color:#009688}[data-md-color-primary=teal] .md-header,[data-md-color-primary=teal] .md-hero{background-color:#009688}[data-md-color-primary=teal] .md-nav__link--active,[data-md-color-primary=teal] .md-nav__link:active{color:#009688}[data-md-color-primary=teal] .md-nav__item--nested>.md-nav__link{color:inherit}button[data-md-color-primary=green]{background-color:#4caf50}[data-md-color-primary=green] .md-typeset a{color:#4caf50}[data-md-color-primary=green] .md-header,[data-md-color-primary=green] .md-hero{background-color:#4caf50}[data-md-color-primary=green] .md-nav__link--active,[data-md-color-primary=green] .md-nav__link:active{color:#4caf50}[data-md-color-primary=green] .md-nav__item--nested>.md-nav__link{color:inherit}button[data-md-color-primary=light-green]{background-color:#7cb342}[data-md-color-primary=light-green] .md-typeset a{color:#7cb342}[data-md-color-primary=light-green] .md-header,[data-md-color-primary=light-green] .md-hero{background-color:#7cb342}[data-md-color-primary=light-green] .md-nav__link--active,[data-md-color-primary=light-green] .md-nav__link:active{color:#7cb342}[data-md-color-primary=light-green] .md-nav__item--nested>.md-nav__link{color:inherit}button[data-md-color-primary=lime]{background-color:#c0ca33}[data-md-color-primary=lime] .md-typeset a{color:#c0ca33}[data-md-color-primary=lime] .md-header,[data-md-color-primary=lime] .md-hero{background-color:#c0ca33}[data-md-color-primary=lime] .md-nav__link--active,[data-md-color-primary=lime] .md-nav__link:active{color:#c0ca33}[data-md-color-primary=lime] .md-nav__item--nested>.md-nav__link{color:inherit}button[data-md-color-primary=yellow]{background-color:#f9a825}[data-md-color-primary=yellow] .md-typeset a{color:#f9a825}[data-md-color-primary=yellow] .md-header,[data-md-color-primary=yellow] .md-hero{background-color:#f9a825}[data-md-color-primary=yellow] .md-nav__link--active,[data-md-color-primary=yellow] .md-nav__link:active{color:#f9a825}[data-md-color-primary=yellow] .md-nav__item--nested>.md-nav__link{color:inherit}button[data-md-color-primary=amber]{background-color:#ffa000}[data-md-color-primary=amber] .md-typeset a{color:#ffa000}[data-md-color-primary=amber] .md-header,[data-md-color-primary=amber] .md-hero{background-color:#ffa000}[data-md-color-primary=amber] .md-nav__link--active,[data-md-color-primary=amber] .md-nav__link:active{color:#ffa000}[data-md-color-primary=amber] .md-nav__item--nested>.md-nav__link{color:inherit}button[data-md-color-primary=orange]{background-color:#fb8c00}[data-md-color-primary=orange] .md-typeset a{color:#fb8c00}[data-md-color-primary=orange] .md-header,[data-md-color-primary=orange] .md-hero{background-color:#fb8c00}[data-md-color-primary=orange] .md-nav__link--active,[data-md-color-primary=orange] .md-nav__link:active{color:#fb8c00}[data-md-color-primary=orange] .md-nav__item--nested>.md-nav__link{color:inherit}button[data-md-color-primary=deep-orange]{background-color:#ff7043}[data-md-color-primary=deep-orange] .md-typeset a{color:#ff7043}[data-md-color-primary=deep-orange] .md-header,[data-md-color-primary=deep-orange] .md-hero{background-color:#ff7043}[data-md-color-primary=deep-orange] .md-nav__link--active,[data-md-color-primary=deep-orange] .md-nav__link:active{color:#ff7043}[data-md-color-primary=deep-orange] 
.md-nav__item--nested>.md-nav__link{color:inherit}button[data-md-color-primary=brown]{background-color:#795548}[data-md-color-primary=brown] .md-typeset a{color:#795548}[data-md-color-primary=brown] .md-header,[data-md-color-primary=brown] .md-hero{background-color:#795548}[data-md-color-primary=brown] .md-nav__link--active,[data-md-color-primary=brown] .md-nav__link:active{color:#795548}[data-md-color-primary=brown] .md-nav__item--nested>.md-nav__link{color:inherit}button[data-md-color-primary=grey]{background-color:#757575}[data-md-color-primary=grey] .md-typeset a{color:#757575}[data-md-color-primary=grey] .md-header,[data-md-color-primary=grey] .md-hero{background-color:#757575}[data-md-color-primary=grey] .md-nav__link--active,[data-md-color-primary=grey] .md-nav__link:active{color:#757575}[data-md-color-primary=grey] .md-nav__item--nested>.md-nav__link{color:inherit}button[data-md-color-primary=blue-grey]{background-color:#546e7a}[data-md-color-primary=blue-grey] .md-typeset a{color:#546e7a}[data-md-color-primary=blue-grey] .md-header,[data-md-color-primary=blue-grey] .md-hero{background-color:#546e7a}[data-md-color-primary=blue-grey] .md-nav__link--active,[data-md-color-primary=blue-grey] .md-nav__link:active{color:#546e7a}[data-md-color-primary=blue-grey] .md-nav__item--nested>.md-nav__link{color:inherit}button[data-md-color-primary=white]{box-shadow:inset 0 0 .05rem rgba(0,0,0,.54)}[data-md-color-primary=white] .md-header,[data-md-color-primary=white] .md-hero,button[data-md-color-primary=white]{background-color:#fff;color:rgba(0,0,0,.87)}[data-md-color-primary=white] .md-hero--expand{border-bottom:.05rem solid rgba(0,0,0,.07)}[data-md-color-primary=black] .md-header,[data-md-color-primary=black] .md-hero,button[data-md-color-primary=black]{background-color:#000}button[data-md-color-accent=red]{background-color:#ff1744}[data-md-color-accent=red] .md-typeset a:active,[data-md-color-accent=red] .md-typeset a:hover{color:#ff1744}[data-md-color-accent=red] .md-typeset .codehilite pre::-webkit-scrollbar-thumb:hover,[data-md-color-accent=red] .md-typeset pre code::-webkit-scrollbar-thumb:hover{background-color:#ff1744}[data-md-color-accent=red] .md-nav__link:focus,[data-md-color-accent=red] .md-nav__link:hover,[data-md-color-accent=red] .md-typeset .footnote li:hover .footnote-backref:hover,[data-md-color-accent=red] .md-typeset .footnote li:target .footnote-backref,[data-md-color-accent=red] .md-typeset .md-clipboard:active:before,[data-md-color-accent=red] .md-typeset .md-clipboard:hover:before,[data-md-color-accent=red] .md-typeset [id] .headerlink:focus,[data-md-color-accent=red] .md-typeset [id]:hover .headerlink:hover,[data-md-color-accent=red] .md-typeset [id]:target .headerlink{color:#ff1744}[data-md-color-accent=red] .md-search__scrollwrap::-webkit-scrollbar-thumb:hover{background-color:#ff1744}[data-md-color-accent=red] .md-search-result__link:hover,[data-md-color-accent=red] .md-search-result__link[data-md-state=active]{background-color:rgba(255,23,68,.1)}[data-md-color-accent=red] .md-sidebar__scrollwrap::-webkit-scrollbar-thumb:hover{background-color:#ff1744}[data-md-color-accent=red] .md-source-file:hover:before{background-color:#ff1744}button[data-md-color-accent=pink]{background-color:#f50057}[data-md-color-accent=pink] .md-typeset a:active,[data-md-color-accent=pink] .md-typeset a:hover{color:#f50057}[data-md-color-accent=pink] .md-typeset .codehilite pre::-webkit-scrollbar-thumb:hover,[data-md-color-accent=pink] .md-typeset pre 
code::-webkit-scrollbar-thumb:hover{background-color:#f50057}[data-md-color-accent=pink] .md-nav__link:focus,[data-md-color-accent=pink] .md-nav__link:hover,[data-md-color-accent=pink] .md-typeset .footnote li:hover .footnote-backref:hover,[data-md-color-accent=pink] .md-typeset .footnote li:target .footnote-backref,[data-md-color-accent=pink] .md-typeset .md-clipboard:active:before,[data-md-color-accent=pink] .md-typeset .md-clipboard:hover:before,[data-md-color-accent=pink] .md-typeset [id] .headerlink:focus,[data-md-color-accent=pink] .md-typeset [id]:hover .headerlink:hover,[data-md-color-accent=pink] .md-typeset [id]:target .headerlink{color:#f50057}[data-md-color-accent=pink] .md-search__scrollwrap::-webkit-scrollbar-thumb:hover{background-color:#f50057}[data-md-color-accent=pink] .md-search-result__link:hover,[data-md-color-accent=pink] .md-search-result__link[data-md-state=active]{background-color:rgba(245,0,87,.1)}[data-md-color-accent=pink] .md-sidebar__scrollwrap::-webkit-scrollbar-thumb:hover{background-color:#f50057}[data-md-color-accent=pink] .md-source-file:hover:before{background-color:#f50057}button[data-md-color-accent=purple]{background-color:#e040fb}[data-md-color-accent=purple] .md-typeset a:active,[data-md-color-accent=purple] .md-typeset a:hover{color:#e040fb}[data-md-color-accent=purple] .md-typeset .codehilite pre::-webkit-scrollbar-thumb:hover,[data-md-color-accent=purple] .md-typeset pre code::-webkit-scrollbar-thumb:hover{background-color:#e040fb}[data-md-color-accent=purple] .md-nav__link:focus,[data-md-color-accent=purple] .md-nav__link:hover,[data-md-color-accent=purple] .md-typeset .footnote li:hover .footnote-backref:hover,[data-md-color-accent=purple] .md-typeset .footnote li:target .footnote-backref,[data-md-color-accent=purple] .md-typeset .md-clipboard:active:before,[data-md-color-accent=purple] .md-typeset .md-clipboard:hover:before,[data-md-color-accent=purple] .md-typeset [id] .headerlink:focus,[data-md-color-accent=purple] .md-typeset [id]:hover .headerlink:hover,[data-md-color-accent=purple] .md-typeset [id]:target .headerlink{color:#e040fb}[data-md-color-accent=purple] .md-search__scrollwrap::-webkit-scrollbar-thumb:hover{background-color:#e040fb}[data-md-color-accent=purple] .md-search-result__link:hover,[data-md-color-accent=purple] .md-search-result__link[data-md-state=active]{background-color:rgba(224,64,251,.1)}[data-md-color-accent=purple] .md-sidebar__scrollwrap::-webkit-scrollbar-thumb:hover{background-color:#e040fb}[data-md-color-accent=purple] .md-source-file:hover:before{background-color:#e040fb}button[data-md-color-accent=deep-purple]{background-color:#7c4dff}[data-md-color-accent=deep-purple] .md-typeset a:active,[data-md-color-accent=deep-purple] .md-typeset a:hover{color:#7c4dff}[data-md-color-accent=deep-purple] .md-typeset .codehilite pre::-webkit-scrollbar-thumb:hover,[data-md-color-accent=deep-purple] .md-typeset pre code::-webkit-scrollbar-thumb:hover{background-color:#7c4dff}[data-md-color-accent=deep-purple] .md-nav__link:focus,[data-md-color-accent=deep-purple] .md-nav__link:hover,[data-md-color-accent=deep-purple] .md-typeset .footnote li:hover .footnote-backref:hover,[data-md-color-accent=deep-purple] .md-typeset .footnote li:target .footnote-backref,[data-md-color-accent=deep-purple] .md-typeset .md-clipboard:active:before,[data-md-color-accent=deep-purple] .md-typeset .md-clipboard:hover:before,[data-md-color-accent=deep-purple] .md-typeset [id] .headerlink:focus,[data-md-color-accent=deep-purple] .md-typeset [id]:hover 
.headerlink:hover,[data-md-color-accent=deep-purple] .md-typeset [id]:target .headerlink{color:#7c4dff}[data-md-color-accent=deep-purple] .md-search__scrollwrap::-webkit-scrollbar-thumb:hover{background-color:#7c4dff}[data-md-color-accent=deep-purple] .md-search-result__link:hover,[data-md-color-accent=deep-purple] .md-search-result__link[data-md-state=active]{background-color:rgba(124,77,255,.1)}[data-md-color-accent=deep-purple] .md-sidebar__scrollwrap::-webkit-scrollbar-thumb:hover{background-color:#7c4dff}[data-md-color-accent=deep-purple] .md-source-file:hover:before{background-color:#7c4dff}button[data-md-color-accent=indigo]{background-color:#536dfe}[data-md-color-accent=indigo] .md-typeset a:active,[data-md-color-accent=indigo] .md-typeset a:hover{color:#536dfe}[data-md-color-accent=indigo] .md-typeset .codehilite pre::-webkit-scrollbar-thumb:hover,[data-md-color-accent=indigo] .md-typeset pre code::-webkit-scrollbar-thumb:hover{background-color:#536dfe}[data-md-color-accent=indigo] .md-nav__link:focus,[data-md-color-accent=indigo] .md-nav__link:hover,[data-md-color-accent=indigo] .md-typeset .footnote li:hover .footnote-backref:hover,[data-md-color-accent=indigo] .md-typeset .footnote li:target .footnote-backref,[data-md-color-accent=indigo] .md-typeset .md-clipboard:active:before,[data-md-color-accent=indigo] .md-typeset .md-clipboard:hover:before,[data-md-color-accent=indigo] .md-typeset [id] .headerlink:focus,[data-md-color-accent=indigo] .md-typeset [id]:hover .headerlink:hover,[data-md-color-accent=indigo] .md-typeset [id]:target .headerlink{color:#536dfe}[data-md-color-accent=indigo] .md-search__scrollwrap::-webkit-scrollbar-thumb:hover{background-color:#536dfe}[data-md-color-accent=indigo] .md-search-result__link:hover,[data-md-color-accent=indigo] .md-search-result__link[data-md-state=active]{background-color:rgba(83,109,254,.1)}[data-md-color-accent=indigo] .md-sidebar__scrollwrap::-webkit-scrollbar-thumb:hover{background-color:#536dfe}[data-md-color-accent=indigo] .md-source-file:hover:before{background-color:#536dfe}button[data-md-color-accent=blue]{background-color:#448aff}[data-md-color-accent=blue] .md-typeset a:active,[data-md-color-accent=blue] .md-typeset a:hover{color:#448aff}[data-md-color-accent=blue] .md-typeset .codehilite pre::-webkit-scrollbar-thumb:hover,[data-md-color-accent=blue] .md-typeset pre code::-webkit-scrollbar-thumb:hover{background-color:#448aff}[data-md-color-accent=blue] .md-nav__link:focus,[data-md-color-accent=blue] .md-nav__link:hover,[data-md-color-accent=blue] .md-typeset .footnote li:hover .footnote-backref:hover,[data-md-color-accent=blue] .md-typeset .footnote li:target .footnote-backref,[data-md-color-accent=blue] .md-typeset .md-clipboard:active:before,[data-md-color-accent=blue] .md-typeset .md-clipboard:hover:before,[data-md-color-accent=blue] .md-typeset [id] .headerlink:focus,[data-md-color-accent=blue] .md-typeset [id]:hover .headerlink:hover,[data-md-color-accent=blue] .md-typeset [id]:target .headerlink{color:#448aff}[data-md-color-accent=blue] .md-search__scrollwrap::-webkit-scrollbar-thumb:hover{background-color:#448aff}[data-md-color-accent=blue] .md-search-result__link:hover,[data-md-color-accent=blue] .md-search-result__link[data-md-state=active]{background-color:rgba(68,138,255,.1)}[data-md-color-accent=blue] .md-sidebar__scrollwrap::-webkit-scrollbar-thumb:hover{background-color:#448aff}[data-md-color-accent=blue] 
.md-source-file:hover:before{background-color:#448aff}button[data-md-color-accent=light-blue]{background-color:#0091ea}[data-md-color-accent=light-blue] .md-typeset a:active,[data-md-color-accent=light-blue] .md-typeset a:hover{color:#0091ea}[data-md-color-accent=light-blue] .md-typeset .codehilite pre::-webkit-scrollbar-thumb:hover,[data-md-color-accent=light-blue] .md-typeset pre code::-webkit-scrollbar-thumb:hover{background-color:#0091ea}[data-md-color-accent=light-blue] .md-nav__link:focus,[data-md-color-accent=light-blue] .md-nav__link:hover,[data-md-color-accent=light-blue] .md-typeset .footnote li:hover .footnote-backref:hover,[data-md-color-accent=light-blue] .md-typeset .footnote li:target .footnote-backref,[data-md-color-accent=light-blue] .md-typeset .md-clipboard:active:before,[data-md-color-accent=light-blue] .md-typeset .md-clipboard:hover:before,[data-md-color-accent=light-blue] .md-typeset [id] .headerlink:focus,[data-md-color-accent=light-blue] .md-typeset [id]:hover .headerlink:hover,[data-md-color-accent=light-blue] .md-typeset [id]:target .headerlink{color:#0091ea}[data-md-color-accent=light-blue] .md-search__scrollwrap::-webkit-scrollbar-thumb:hover{background-color:#0091ea}[data-md-color-accent=light-blue] .md-search-result__link:hover,[data-md-color-accent=light-blue] .md-search-result__link[data-md-state=active]{background-color:rgba(0,145,234,.1)}[data-md-color-accent=light-blue] .md-sidebar__scrollwrap::-webkit-scrollbar-thumb:hover{background-color:#0091ea}[data-md-color-accent=light-blue] .md-source-file:hover:before{background-color:#0091ea}button[data-md-color-accent=cyan]{background-color:#00b8d4}[data-md-color-accent=cyan] .md-typeset a:active,[data-md-color-accent=cyan] .md-typeset a:hover{color:#00b8d4}[data-md-color-accent=cyan] .md-typeset .codehilite pre::-webkit-scrollbar-thumb:hover,[data-md-color-accent=cyan] .md-typeset pre code::-webkit-scrollbar-thumb:hover{background-color:#00b8d4}[data-md-color-accent=cyan] .md-nav__link:focus,[data-md-color-accent=cyan] .md-nav__link:hover,[data-md-color-accent=cyan] .md-typeset .footnote li:hover .footnote-backref:hover,[data-md-color-accent=cyan] .md-typeset .footnote li:target .footnote-backref,[data-md-color-accent=cyan] .md-typeset .md-clipboard:active:before,[data-md-color-accent=cyan] .md-typeset .md-clipboard:hover:before,[data-md-color-accent=cyan] .md-typeset [id] .headerlink:focus,[data-md-color-accent=cyan] .md-typeset [id]:hover .headerlink:hover,[data-md-color-accent=cyan] .md-typeset [id]:target .headerlink{color:#00b8d4}[data-md-color-accent=cyan] .md-search__scrollwrap::-webkit-scrollbar-thumb:hover{background-color:#00b8d4}[data-md-color-accent=cyan] .md-search-result__link:hover,[data-md-color-accent=cyan] .md-search-result__link[data-md-state=active]{background-color:rgba(0,184,212,.1)}[data-md-color-accent=cyan] .md-sidebar__scrollwrap::-webkit-scrollbar-thumb:hover{background-color:#00b8d4}[data-md-color-accent=cyan] .md-source-file:hover:before{background-color:#00b8d4}button[data-md-color-accent=teal]{background-color:#00bfa5}[data-md-color-accent=teal] .md-typeset a:active,[data-md-color-accent=teal] .md-typeset a:hover{color:#00bfa5}[data-md-color-accent=teal] .md-typeset .codehilite pre::-webkit-scrollbar-thumb:hover,[data-md-color-accent=teal] .md-typeset pre code::-webkit-scrollbar-thumb:hover{background-color:#00bfa5}[data-md-color-accent=teal] .md-nav__link:focus,[data-md-color-accent=teal] .md-nav__link:hover,[data-md-color-accent=teal] .md-typeset .footnote li:hover 
.footnote-backref:hover,[data-md-color-accent=teal] .md-typeset .footnote li:target .footnote-backref,[data-md-color-accent=teal] .md-typeset .md-clipboard:active:before,[data-md-color-accent=teal] .md-typeset .md-clipboard:hover:before,[data-md-color-accent=teal] .md-typeset [id] .headerlink:focus,[data-md-color-accent=teal] .md-typeset [id]:hover .headerlink:hover,[data-md-color-accent=teal] .md-typeset [id]:target .headerlink{color:#00bfa5}[data-md-color-accent=teal] .md-search__scrollwrap::-webkit-scrollbar-thumb:hover{background-color:#00bfa5}[data-md-color-accent=teal] .md-search-result__link:hover,[data-md-color-accent=teal] .md-search-result__link[data-md-state=active]{background-color:rgba(0,191,165,.1)}[data-md-color-accent=teal] .md-sidebar__scrollwrap::-webkit-scrollbar-thumb:hover{background-color:#00bfa5}[data-md-color-accent=teal] .md-source-file:hover:before{background-color:#00bfa5}button[data-md-color-accent=green]{background-color:#00c853}[data-md-color-accent=green] .md-typeset a:active,[data-md-color-accent=green] .md-typeset a:hover{color:#00c853}[data-md-color-accent=green] .md-typeset .codehilite pre::-webkit-scrollbar-thumb:hover,[data-md-color-accent=green] .md-typeset pre code::-webkit-scrollbar-thumb:hover{background-color:#00c853}[data-md-color-accent=green] .md-nav__link:focus,[data-md-color-accent=green] .md-nav__link:hover,[data-md-color-accent=green] .md-typeset .footnote li:hover .footnote-backref:hover,[data-md-color-accent=green] .md-typeset .footnote li:target .footnote-backref,[data-md-color-accent=green] .md-typeset .md-clipboard:active:before,[data-md-color-accent=green] .md-typeset .md-clipboard:hover:before,[data-md-color-accent=green] .md-typeset [id] .headerlink:focus,[data-md-color-accent=green] .md-typeset [id]:hover .headerlink:hover,[data-md-color-accent=green] .md-typeset [id]:target .headerlink{color:#00c853}[data-md-color-accent=green] .md-search__scrollwrap::-webkit-scrollbar-thumb:hover{background-color:#00c853}[data-md-color-accent=green] .md-search-result__link:hover,[data-md-color-accent=green] .md-search-result__link[data-md-state=active]{background-color:rgba(0,200,83,.1)}[data-md-color-accent=green] .md-sidebar__scrollwrap::-webkit-scrollbar-thumb:hover{background-color:#00c853}[data-md-color-accent=green] .md-source-file:hover:before{background-color:#00c853}button[data-md-color-accent=light-green]{background-color:#64dd17}[data-md-color-accent=light-green] .md-typeset a:active,[data-md-color-accent=light-green] .md-typeset a:hover{color:#64dd17}[data-md-color-accent=light-green] .md-typeset .codehilite pre::-webkit-scrollbar-thumb:hover,[data-md-color-accent=light-green] .md-typeset pre code::-webkit-scrollbar-thumb:hover{background-color:#64dd17}[data-md-color-accent=light-green] .md-nav__link:focus,[data-md-color-accent=light-green] .md-nav__link:hover,[data-md-color-accent=light-green] .md-typeset .footnote li:hover .footnote-backref:hover,[data-md-color-accent=light-green] .md-typeset .footnote li:target .footnote-backref,[data-md-color-accent=light-green] .md-typeset .md-clipboard:active:before,[data-md-color-accent=light-green] .md-typeset .md-clipboard:hover:before,[data-md-color-accent=light-green] .md-typeset [id] .headerlink:focus,[data-md-color-accent=light-green] .md-typeset [id]:hover .headerlink:hover,[data-md-color-accent=light-green] .md-typeset [id]:target .headerlink{color:#64dd17}[data-md-color-accent=light-green] 
.md-search__scrollwrap::-webkit-scrollbar-thumb:hover{background-color:#64dd17}[data-md-color-accent=light-green] .md-search-result__link:hover,[data-md-color-accent=light-green] .md-search-result__link[data-md-state=active]{background-color:rgba(100,221,23,.1)}[data-md-color-accent=light-green] .md-sidebar__scrollwrap::-webkit-scrollbar-thumb:hover{background-color:#64dd17}[data-md-color-accent=light-green] .md-source-file:hover:before{background-color:#64dd17}button[data-md-color-accent=lime]{background-color:#aeea00}[data-md-color-accent=lime] .md-typeset a:active,[data-md-color-accent=lime] .md-typeset a:hover{color:#aeea00}[data-md-color-accent=lime] .md-typeset .codehilite pre::-webkit-scrollbar-thumb:hover,[data-md-color-accent=lime] .md-typeset pre code::-webkit-scrollbar-thumb:hover{background-color:#aeea00}[data-md-color-accent=lime] .md-nav__link:focus,[data-md-color-accent=lime] .md-nav__link:hover,[data-md-color-accent=lime] .md-typeset .footnote li:hover .footnote-backref:hover,[data-md-color-accent=lime] .md-typeset .footnote li:target .footnote-backref,[data-md-color-accent=lime] .md-typeset .md-clipboard:active:before,[data-md-color-accent=lime] .md-typeset .md-clipboard:hover:before,[data-md-color-accent=lime] .md-typeset [id] .headerlink:focus,[data-md-color-accent=lime] .md-typeset [id]:hover .headerlink:hover,[data-md-color-accent=lime] .md-typeset [id]:target .headerlink{color:#aeea00}[data-md-color-accent=lime] .md-search__scrollwrap::-webkit-scrollbar-thumb:hover{background-color:#aeea00}[data-md-color-accent=lime] .md-search-result__link:hover,[data-md-color-accent=lime] .md-search-result__link[data-md-state=active]{background-color:rgba(174,234,0,.1)}[data-md-color-accent=lime] .md-sidebar__scrollwrap::-webkit-scrollbar-thumb:hover{background-color:#aeea00}[data-md-color-accent=lime] .md-source-file:hover:before{background-color:#aeea00}button[data-md-color-accent=yellow]{background-color:#ffd600}[data-md-color-accent=yellow] .md-typeset a:active,[data-md-color-accent=yellow] .md-typeset a:hover{color:#ffd600}[data-md-color-accent=yellow] .md-typeset .codehilite pre::-webkit-scrollbar-thumb:hover,[data-md-color-accent=yellow] .md-typeset pre code::-webkit-scrollbar-thumb:hover{background-color:#ffd600}[data-md-color-accent=yellow] .md-nav__link:focus,[data-md-color-accent=yellow] .md-nav__link:hover,[data-md-color-accent=yellow] .md-typeset .footnote li:hover .footnote-backref:hover,[data-md-color-accent=yellow] .md-typeset .footnote li:target .footnote-backref,[data-md-color-accent=yellow] .md-typeset .md-clipboard:active:before,[data-md-color-accent=yellow] .md-typeset .md-clipboard:hover:before,[data-md-color-accent=yellow] .md-typeset [id] .headerlink:focus,[data-md-color-accent=yellow] .md-typeset [id]:hover .headerlink:hover,[data-md-color-accent=yellow] .md-typeset [id]:target .headerlink{color:#ffd600}[data-md-color-accent=yellow] .md-search__scrollwrap::-webkit-scrollbar-thumb:hover{background-color:#ffd600}[data-md-color-accent=yellow] .md-search-result__link:hover,[data-md-color-accent=yellow] .md-search-result__link[data-md-state=active]{background-color:rgba(255,214,0,.1)}[data-md-color-accent=yellow] .md-sidebar__scrollwrap::-webkit-scrollbar-thumb:hover{background-color:#ffd600}[data-md-color-accent=yellow] .md-source-file:hover:before{background-color:#ffd600}button[data-md-color-accent=amber]{background-color:#ffab00}[data-md-color-accent=amber] .md-typeset a:active,[data-md-color-accent=amber] .md-typeset 
a:hover{color:#ffab00}[data-md-color-accent=amber] .md-typeset .codehilite pre::-webkit-scrollbar-thumb:hover,[data-md-color-accent=amber] .md-typeset pre code::-webkit-scrollbar-thumb:hover{background-color:#ffab00}[data-md-color-accent=amber] .md-nav__link:focus,[data-md-color-accent=amber] .md-nav__link:hover,[data-md-color-accent=amber] .md-typeset .footnote li:hover .footnote-backref:hover,[data-md-color-accent=amber] .md-typeset .footnote li:target .footnote-backref,[data-md-color-accent=amber] .md-typeset .md-clipboard:active:before,[data-md-color-accent=amber] .md-typeset .md-clipboard:hover:before,[data-md-color-accent=amber] .md-typeset [id] .headerlink:focus,[data-md-color-accent=amber] .md-typeset [id]:hover .headerlink:hover,[data-md-color-accent=amber] .md-typeset [id]:target .headerlink{color:#ffab00}[data-md-color-accent=amber] .md-search__scrollwrap::-webkit-scrollbar-thumb:hover{background-color:#ffab00}[data-md-color-accent=amber] .md-search-result__link:hover,[data-md-color-accent=amber] .md-search-result__link[data-md-state=active]{background-color:rgba(255,171,0,.1)}[data-md-color-accent=amber] .md-sidebar__scrollwrap::-webkit-scrollbar-thumb:hover{background-color:#ffab00}[data-md-color-accent=amber] .md-source-file:hover:before{background-color:#ffab00}button[data-md-color-accent=orange]{background-color:#ff9100}[data-md-color-accent=orange] .md-typeset a:active,[data-md-color-accent=orange] .md-typeset a:hover{color:#ff9100}[data-md-color-accent=orange] .md-typeset .codehilite pre::-webkit-scrollbar-thumb:hover,[data-md-color-accent=orange] .md-typeset pre code::-webkit-scrollbar-thumb:hover{background-color:#ff9100}[data-md-color-accent=orange] .md-nav__link:focus,[data-md-color-accent=orange] .md-nav__link:hover,[data-md-color-accent=orange] .md-typeset .footnote li:hover .footnote-backref:hover,[data-md-color-accent=orange] .md-typeset .footnote li:target .footnote-backref,[data-md-color-accent=orange] .md-typeset .md-clipboard:active:before,[data-md-color-accent=orange] .md-typeset .md-clipboard:hover:before,[data-md-color-accent=orange] .md-typeset [id] .headerlink:focus,[data-md-color-accent=orange] .md-typeset [id]:hover .headerlink:hover,[data-md-color-accent=orange] .md-typeset [id]:target .headerlink{color:#ff9100}[data-md-color-accent=orange] .md-search__scrollwrap::-webkit-scrollbar-thumb:hover{background-color:#ff9100}[data-md-color-accent=orange] .md-search-result__link:hover,[data-md-color-accent=orange] .md-search-result__link[data-md-state=active]{background-color:rgba(255,145,0,.1)}[data-md-color-accent=orange] .md-sidebar__scrollwrap::-webkit-scrollbar-thumb:hover{background-color:#ff9100}[data-md-color-accent=orange] .md-source-file:hover:before{background-color:#ff9100}button[data-md-color-accent=deep-orange]{background-color:#ff6e40}[data-md-color-accent=deep-orange] .md-typeset a:active,[data-md-color-accent=deep-orange] .md-typeset a:hover{color:#ff6e40}[data-md-color-accent=deep-orange] .md-typeset .codehilite pre::-webkit-scrollbar-thumb:hover,[data-md-color-accent=deep-orange] .md-typeset pre code::-webkit-scrollbar-thumb:hover{background-color:#ff6e40}[data-md-color-accent=deep-orange] .md-nav__link:focus,[data-md-color-accent=deep-orange] .md-nav__link:hover,[data-md-color-accent=deep-orange] .md-typeset .footnote li:hover .footnote-backref:hover,[data-md-color-accent=deep-orange] .md-typeset .footnote li:target .footnote-backref,[data-md-color-accent=deep-orange] .md-typeset 
.md-clipboard:active:before,[data-md-color-accent=deep-orange] .md-typeset .md-clipboard:hover:before,[data-md-color-accent=deep-orange] .md-typeset [id] .headerlink:focus,[data-md-color-accent=deep-orange] .md-typeset [id]:hover .headerlink:hover,[data-md-color-accent=deep-orange] .md-typeset [id]:target .headerlink{color:#ff6e40}[data-md-color-accent=deep-orange] .md-search__scrollwrap::-webkit-scrollbar-thumb:hover{background-color:#ff6e40}[data-md-color-accent=deep-orange] .md-search-result__link:hover,[data-md-color-accent=deep-orange] .md-search-result__link[data-md-state=active]{background-color:rgba(255,110,64,.1)}[data-md-color-accent=deep-orange] .md-sidebar__scrollwrap::-webkit-scrollbar-thumb:hover{background-color:#ff6e40}[data-md-color-accent=deep-orange] .md-source-file:hover:before{background-color:#ff6e40}@media only screen and (max-width:59.9375em){[data-md-color-primary=red] .md-nav__source{background-color:rgba(190,66,64,.9675)}[data-md-color-primary=pink] .md-nav__source{background-color:rgba(185,24,79,.9675)}[data-md-color-primary=purple] .md-nav__source{background-color:rgba(136,57,150,.9675)}[data-md-color-primary=deep-purple] .md-nav__source{background-color:rgba(100,69,154,.9675)}[data-md-color-primary=indigo] .md-nav__source{background-color:rgba(50,64,144,.9675)}[data-md-color-primary=blue] .md-nav__source{background-color:rgba(26,119,193,.9675)}[data-md-color-primary=light-blue] .md-nav__source{background-color:rgba(2,134,194,.9675)}[data-md-color-primary=cyan] .md-nav__source{background-color:rgba(0,150,169,.9675)}[data-md-color-primary=teal] .md-nav__source{background-color:rgba(0,119,108,.9675)}[data-md-color-primary=green] .md-nav__source{background-color:rgba(60,139,64,.9675)}[data-md-color-primary=light-green] .md-nav__source{background-color:rgba(99,142,53,.9675)}[data-md-color-primary=lime] .md-nav__source{background-color:rgba(153,161,41,.9675)}[data-md-color-primary=yellow] .md-nav__source{background-color:rgba(198,134,29,.9675)}[data-md-color-primary=amber] .md-nav__source{background-color:rgba(203,127,0,.9675)}[data-md-color-primary=orange] .md-nav__source{background-color:rgba(200,111,0,.9675)}[data-md-color-primary=deep-orange] .md-nav__source{background-color:rgba(203,89,53,.9675)}[data-md-color-primary=brown] .md-nav__source{background-color:rgba(96,68,57,.9675)}[data-md-color-primary=grey] .md-nav__source{background-color:rgba(93,93,93,.9675)}[data-md-color-primary=blue-grey] .md-nav__source{background-color:rgba(67,88,97,.9675)}[data-md-color-primary=white] .md-nav__source{background-color:rgba(0,0,0,.07);color:rgba(0,0,0,.87)}[data-md-color-primary=black] .md-nav__source{background-color:#404040}}@media only screen and (max-width:76.1875em){html [data-md-color-primary=red] .md-nav--primary .md-nav__title--site{background-color:#ef5350}html [data-md-color-primary=pink] .md-nav--primary .md-nav__title--site{background-color:#e91e63}html [data-md-color-primary=purple] .md-nav--primary .md-nav__title--site{background-color:#ab47bc}html [data-md-color-primary=deep-purple] .md-nav--primary .md-nav__title--site{background-color:#7e57c2}html [data-md-color-primary=indigo] .md-nav--primary .md-nav__title--site{background-color:#3f51b5}html [data-md-color-primary=blue] .md-nav--primary .md-nav__title--site{background-color:#2196f3}html [data-md-color-primary=light-blue] .md-nav--primary .md-nav__title--site{background-color:#03a9f4}html [data-md-color-primary=cyan] .md-nav--primary .md-nav__title--site{background-color:#00bcd4}html 
[data-md-color-primary=teal] .md-nav--primary .md-nav__title--site{background-color:#009688}html [data-md-color-primary=green] .md-nav--primary .md-nav__title--site{background-color:#4caf50}html [data-md-color-primary=light-green] .md-nav--primary .md-nav__title--site{background-color:#7cb342}html [data-md-color-primary=lime] .md-nav--primary .md-nav__title--site{background-color:#c0ca33}html [data-md-color-primary=yellow] .md-nav--primary .md-nav__title--site{background-color:#f9a825}html [data-md-color-primary=amber] .md-nav--primary .md-nav__title--site{background-color:#ffa000}html [data-md-color-primary=orange] .md-nav--primary .md-nav__title--site{background-color:#fb8c00}html [data-md-color-primary=deep-orange] .md-nav--primary .md-nav__title--site{background-color:#ff7043}html [data-md-color-primary=brown] .md-nav--primary .md-nav__title--site{background-color:#795548}html [data-md-color-primary=grey] .md-nav--primary .md-nav__title--site{background-color:#757575}html [data-md-color-primary=blue-grey] .md-nav--primary .md-nav__title--site{background-color:#546e7a}html [data-md-color-primary=white] .md-nav--primary .md-nav__title--site{background-color:#fff;color:rgba(0,0,0,.87)}[data-md-color-primary=white] .md-hero{border-bottom:.05rem solid rgba(0,0,0,.07)}html [data-md-color-primary=black] .md-nav--primary .md-nav__title--site{background-color:#000}}@media only screen and (min-width:76.25em){[data-md-color-primary=red] .md-tabs{background-color:#ef5350}[data-md-color-primary=pink] .md-tabs{background-color:#e91e63}[data-md-color-primary=purple] .md-tabs{background-color:#ab47bc}[data-md-color-primary=deep-purple] .md-tabs{background-color:#7e57c2}[data-md-color-primary=indigo] .md-tabs{background-color:#3f51b5}[data-md-color-primary=blue] .md-tabs{background-color:#2196f3}[data-md-color-primary=light-blue] .md-tabs{background-color:#03a9f4}[data-md-color-primary=cyan] .md-tabs{background-color:#00bcd4}[data-md-color-primary=teal] .md-tabs{background-color:#009688}[data-md-color-primary=green] .md-tabs{background-color:#4caf50}[data-md-color-primary=light-green] .md-tabs{background-color:#7cb342}[data-md-color-primary=lime] .md-tabs{background-color:#c0ca33}[data-md-color-primary=yellow] .md-tabs{background-color:#f9a825}[data-md-color-primary=amber] .md-tabs{background-color:#ffa000}[data-md-color-primary=orange] .md-tabs{background-color:#fb8c00}[data-md-color-primary=deep-orange] .md-tabs{background-color:#ff7043}[data-md-color-primary=brown] .md-tabs{background-color:#795548}[data-md-color-primary=grey] .md-tabs{background-color:#757575}[data-md-color-primary=blue-grey] .md-tabs{background-color:#546e7a}[data-md-color-primary=white] .md-tabs{border-bottom:.05rem solid rgba(0,0,0,.07);background-color:#fff;color:rgba(0,0,0,.87)}[data-md-color-primary=black] .md-tabs{background-color:#000}}@media only screen and (min-width:60em){[data-md-color-primary=white] .md-search__input{background-color:rgba(0,0,0,.07)}[data-md-color-primary=white] .md-search__input::-webkit-input-placeholder{color:rgba(0,0,0,.54)}[data-md-color-primary=white] .md-search__input::-moz-placeholder{color:rgba(0,0,0,.54)}[data-md-color-primary=white] .md-search__input:-ms-input-placeholder{color:rgba(0,0,0,.54)}[data-md-color-primary=white] .md-search__input::-ms-input-placeholder{color:rgba(0,0,0,.54)}[data-md-color-primary=white] .md-search__input::placeholder{color:rgba(0,0,0,.54)}[data-md-color-primary=black] .md-search__input{background-color:hsla(0,0%,100%,.3)}} \ No newline at end of file diff --git 
new file mode 100644
index 0000000..77e8a7e
--- /dev/null
+++ b/docs/site/assets/stylesheets/application.30686662.css
@@ -0,0 +1 @@
+[single-line minified application stylesheet omitted: base typography, code block, table, header, hero, clipboard, navigation, and footer styles for the Material docs theme]
.6rem}.md-footer-social__link{display:inline-block;width:1.6rem;height:1.6rem;font-size:.8rem;text-align:center}.md-footer-social__link:before{line-height:1.9}.md-nav{font-size:.7rem;line-height:1.3}.md-nav__title{display:block;padding:0 .6rem;font-weight:700;text-overflow:ellipsis;overflow:hidden}.md-nav__title:before{display:none;content:"\E5C4"}[dir=rtl] .md-nav__title:before{content:"\E5C8"}.md-nav__title .md-nav__button{display:none}.md-nav__list{margin:0;padding:0;list-style:none}.md-nav__item{padding:0 .6rem}.md-nav__item:last-child{padding-bottom:.6rem}.md-nav__item .md-nav__item{padding-right:0}[dir=rtl] .md-nav__item .md-nav__item{padding-right:.6rem;padding-left:0}.md-nav__item .md-nav__item:last-child{padding-bottom:0}.md-nav__button img{width:100%;height:auto}.md-nav__link{display:block;margin-top:.625em;-webkit-transition:color .125s;transition:color .125s;text-overflow:ellipsis;cursor:pointer;overflow:hidden}.md-nav__item--nested>.md-nav__link:after{content:"\E313"}html .md-nav__link[for=__toc],html .md-nav__link[for=__toc]+.md-nav__link:after,html .md-nav__link[for=__toc]~.md-nav{display:none}.md-nav__link[data-md-state=blur]{color:rgba(0,0,0,.54)}.md-nav__link--active,.md-nav__link:active{color:#3f51b5}.md-nav__item--nested>.md-nav__link{color:inherit}.md-nav__link:focus,.md-nav__link:hover{color:#536dfe}.md-nav__source,.no-js .md-search{display:none}.md-search__overlay{opacity:0;z-index:1}.md-search__form{position:relative}.md-search__input{position:relative;padding:0 2.2rem 0 3.6rem;text-overflow:ellipsis;z-index:2}[dir=rtl] .md-search__input{padding:0 3.6rem 0 2.2rem}.md-search__input::-webkit-input-placeholder{-webkit-transition:color .25s cubic-bezier(.1,.7,.1,1);transition:color .25s cubic-bezier(.1,.7,.1,1)}.md-search__input::-moz-placeholder{-webkit-transition:color .25s cubic-bezier(.1,.7,.1,1);transition:color .25s cubic-bezier(.1,.7,.1,1)}.md-search__input:-ms-input-placeholder{-webkit-transition:color .25s cubic-bezier(.1,.7,.1,1);transition:color .25s cubic-bezier(.1,.7,.1,1)}.md-search__input::-ms-input-placeholder{-webkit-transition:color .25s cubic-bezier(.1,.7,.1,1);transition:color .25s cubic-bezier(.1,.7,.1,1)}.md-search__input::placeholder{-webkit-transition:color .25s cubic-bezier(.1,.7,.1,1);transition:color .25s cubic-bezier(.1,.7,.1,1)}.md-search__input::-webkit-input-placeholder,.md-search__input~.md-search__icon{color:rgba(0,0,0,.54)}.md-search__input::-moz-placeholder,.md-search__input~.md-search__icon{color:rgba(0,0,0,.54)}.md-search__input:-ms-input-placeholder,.md-search__input~.md-search__icon{color:rgba(0,0,0,.54)}.md-search__input::-ms-input-placeholder,.md-search__input~.md-search__icon{color:rgba(0,0,0,.54)}.md-search__input::placeholder,.md-search__input~.md-search__icon{color:rgba(0,0,0,.54)}.md-search__input::-ms-clear{display:none}.md-search__icon{position:absolute;-webkit-transition:color .25s cubic-bezier(.1,.7,.1,1),opacity .25s;transition:color .25s cubic-bezier(.1,.7,.1,1),opacity .25s;font-size:1.2rem;cursor:pointer;z-index:2}.md-search__icon:hover{opacity:.7}.md-search__icon[for=__search]{top:.3rem;left:.5rem}[dir=rtl] .md-search__icon[for=__search]{right:.5rem;left:auto}.md-search__icon[for=__search]:before{content:"\E8B6"}.md-search__icon[type=reset]{top:.3rem;right:.5rem;-webkit-transform:scale(.125);transform:scale(.125);-webkit-transition:opacity .15s,-webkit-transform .15s cubic-bezier(.1,.7,.1,1);transition:opacity .15s,-webkit-transform .15s cubic-bezier(.1,.7,.1,1);transition:transform .15s 
cubic-bezier(.1,.7,.1,1),opacity .15s;transition:transform .15s cubic-bezier(.1,.7,.1,1),opacity .15s,-webkit-transform .15s cubic-bezier(.1,.7,.1,1);opacity:0}[dir=rtl] .md-search__icon[type=reset]{right:auto;left:.5rem}[data-md-toggle=search]:checked~.md-header .md-search__input:valid~.md-search__icon[type=reset]{-webkit-transform:scale(1);transform:scale(1);opacity:1}[data-md-toggle=search]:checked~.md-header .md-search__input:valid~.md-search__icon[type=reset]:hover{opacity:.7}.md-search__output{position:absolute;width:100%;border-radius:0 0 .1rem .1rem;overflow:hidden;z-index:1}.md-search__scrollwrap{height:100%;background-color:#fff;box-shadow:inset 0 .05rem 0 rgba(0,0,0,.07);overflow-y:auto;-webkit-overflow-scrolling:touch}.md-search-result{color:rgba(0,0,0,.87);word-break:break-word}.md-search-result__meta{padding:0 .8rem;background-color:rgba(0,0,0,.07);color:rgba(0,0,0,.54);font-size:.64rem;line-height:1.8rem}.md-search-result__list{margin:0;padding:0;border-top:.05rem solid rgba(0,0,0,.07);list-style:none}.md-search-result__item{box-shadow:0 -.05rem 0 rgba(0,0,0,.07)}.md-search-result__link{display:block;-webkit-transition:background .25s;transition:background .25s;outline:0;overflow:hidden}.md-search-result__link:hover,.md-search-result__link[data-md-state=active]{background-color:rgba(83,109,254,.1)}.md-search-result__link:hover .md-search-result__article:before,.md-search-result__link[data-md-state=active] .md-search-result__article:before{opacity:.7}.md-search-result__link:last-child .md-search-result__teaser{margin-bottom:.6rem}.md-search-result__article{position:relative;padding:0 .8rem;overflow:auto}.md-search-result__article--document:before{position:absolute;left:0;margin:.1rem;-webkit-transition:opacity .25s;transition:opacity .25s;color:rgba(0,0,0,.54);content:"\E880"}[dir=rtl] .md-search-result__article--document:before{right:0;left:auto}.md-search-result__article--document .md-search-result__title{margin:.55rem 0;font-size:.8rem;font-weight:400;line-height:1.4}.md-search-result__title{margin:.5em 0;font-size:.64rem;font-weight:700;line-height:1.4}.md-search-result__teaser{display:-webkit-box;max-height:1.65rem;margin:.5em 0;color:rgba(0,0,0,.54);font-size:.64rem;line-height:1.4;text-overflow:ellipsis;overflow:hidden;-webkit-box-orient:vertical;-webkit-line-clamp:2}.md-search-result em{font-style:normal;font-weight:700;text-decoration:underline}.md-sidebar{position:absolute;width:12.1rem;padding:1.2rem 0;overflow:hidden}.md-sidebar[data-md-state=lock]{position:fixed;top:2.4rem}.md-sidebar--secondary{display:none}.md-sidebar__scrollwrap{max-height:100%;margin:0 .2rem;overflow-y:auto;-webkit-backface-visibility:hidden;backface-visibility:hidden}.md-sidebar__scrollwrap::-webkit-scrollbar{width:.2rem;height:.2rem}.md-sidebar__scrollwrap::-webkit-scrollbar-thumb{background-color:rgba(0,0,0,.26)}.md-sidebar__scrollwrap::-webkit-scrollbar-thumb:hover{background-color:#536dfe}@-webkit-keyframes md-source__facts--done{0%{height:0}to{height:.65rem}}@keyframes md-source__facts--done{0%{height:0}to{height:.65rem}}@-webkit-keyframes md-source__fact--done{0%{-webkit-transform:translateY(100%);transform:translateY(100%);opacity:0}50%{opacity:0}to{-webkit-transform:translateY(0);transform:translateY(0);opacity:1}}@keyframes md-source__fact--done{0%{-webkit-transform:translateY(100%);transform:translateY(100%);opacity:0}50%{opacity:0}to{-webkit-transform:translateY(0);transform:translateY(0);opacity:1}}.md-source{display:block;padding-right:.6rem;-webkit-transition:opacity 
.25s;transition:opacity .25s;font-size:.65rem;line-height:1.2;white-space:nowrap}[dir=rtl] .md-source{padding-right:0;padding-left:.6rem}.md-source:hover{opacity:.7}.md-source:after,.md-source__icon{display:inline-block;height:2.4rem;content:"";vertical-align:middle}.md-source__icon{width:2.4rem}.md-source__icon svg{width:1.2rem;height:1.2rem;margin-top:.6rem;margin-left:.6rem}[dir=rtl] .md-source__icon svg{margin-right:.6rem;margin-left:0}.md-source__icon+.md-source__repository{margin-left:-2rem;padding-left:2rem}[dir=rtl] .md-source__icon+.md-source__repository{margin-right:-2rem;margin-left:0;padding-right:2rem;padding-left:0}.md-source__repository{display:inline-block;max-width:100%;margin-left:.6rem;font-weight:700;text-overflow:ellipsis;overflow:hidden;vertical-align:middle}.md-source__facts{margin:0;padding:0;font-size:.55rem;font-weight:700;list-style-type:none;opacity:.75;overflow:hidden}[data-md-state=done] .md-source__facts{-webkit-animation:md-source__facts--done .25s ease-in;animation:md-source__facts--done .25s ease-in}.md-source__fact{float:left}[dir=rtl] .md-source__fact{float:right}[data-md-state=done] .md-source__fact{-webkit-animation:md-source__fact--done .4s ease-out;animation:md-source__fact--done .4s ease-out}.md-source__fact:before{margin:0 .1rem;content:"\00B7"}.md-source__fact:first-child:before{display:none}.md-source-file{display:inline-block;margin:1em .5em 1em 0;padding-right:.25rem;border-radius:.1rem;background-color:rgba(0,0,0,.07);font-size:.64rem;list-style-type:none;cursor:pointer;overflow:hidden}.md-source-file:before{display:inline-block;margin-right:.25rem;padding:.25rem;background-color:rgba(0,0,0,.26);color:#fff;font-size:.8rem;content:"\E86F";vertical-align:middle}html .md-source-file{-webkit-transition:background .4s,color .4s,box-shadow .4s cubic-bezier(.4,0,.2,1);transition:background .4s,color .4s,box-shadow .4s cubic-bezier(.4,0,.2,1)}html .md-source-file:before{-webkit-transition:inherit;transition:inherit}html body .md-typeset .md-source-file{color:rgba(0,0,0,.54)}.md-source-file:hover{box-shadow:0 0 8px rgba(0,0,0,.18),0 8px 16px rgba(0,0,0,.36)}.md-source-file:hover:before{background-color:#536dfe}.md-tabs{width:100%;-webkit-transition:background .25s;transition:background .25s;background-color:#3f51b5;color:#fff;overflow:auto}.md-tabs__list{margin:0 0 0 .2rem;padding:0;list-style:none;white-space:nowrap}.md-tabs__item{display:inline-block;height:2.4rem;padding-right:.6rem;padding-left:.6rem}.md-tabs__link{display:block;margin-top:.8rem;-webkit-transition:opacity .25s,-webkit-transform .4s cubic-bezier(.1,.7,.1,1);transition:opacity .25s,-webkit-transform .4s cubic-bezier(.1,.7,.1,1);transition:transform .4s cubic-bezier(.1,.7,.1,1),opacity .25s;transition:transform .4s cubic-bezier(.1,.7,.1,1),opacity .25s,-webkit-transform .4s cubic-bezier(.1,.7,.1,1);font-size:.7rem;opacity:.7}.md-tabs__link--active,.md-tabs__link:hover{color:inherit;opacity:1}.md-tabs__item:nth-child(2) .md-tabs__link{-webkit-transition-delay:.02s;transition-delay:.02s}.md-tabs__item:nth-child(3) .md-tabs__link{-webkit-transition-delay:.04s;transition-delay:.04s}.md-tabs__item:nth-child(4) .md-tabs__link{-webkit-transition-delay:.06s;transition-delay:.06s}.md-tabs__item:nth-child(5) .md-tabs__link{-webkit-transition-delay:.08s;transition-delay:.08s}.md-tabs__item:nth-child(6) .md-tabs__link{-webkit-transition-delay:.1s;transition-delay:.1s}.md-tabs__item:nth-child(7) .md-tabs__link{-webkit-transition-delay:.12s;transition-delay:.12s}.md-tabs__item:nth-child(8) 
.md-tabs__link{-webkit-transition-delay:.14s;transition-delay:.14s}.md-tabs__item:nth-child(9) .md-tabs__link{-webkit-transition-delay:.16s;transition-delay:.16s}.md-tabs__item:nth-child(10) .md-tabs__link{-webkit-transition-delay:.18s;transition-delay:.18s}.md-tabs__item:nth-child(11) .md-tabs__link{-webkit-transition-delay:.2s;transition-delay:.2s}.md-tabs__item:nth-child(12) .md-tabs__link{-webkit-transition-delay:.22s;transition-delay:.22s}.md-tabs__item:nth-child(13) .md-tabs__link{-webkit-transition-delay:.24s;transition-delay:.24s}.md-tabs__item:nth-child(14) .md-tabs__link{-webkit-transition-delay:.26s;transition-delay:.26s}.md-tabs__item:nth-child(15) .md-tabs__link{-webkit-transition-delay:.28s;transition-delay:.28s}.md-tabs__item:nth-child(16) .md-tabs__link{-webkit-transition-delay:.3s;transition-delay:.3s}.md-tabs[data-md-state=hidden]{pointer-events:none}.md-tabs[data-md-state=hidden] .md-tabs__link{-webkit-transform:translateY(50%);transform:translateY(50%);-webkit-transition:color .25s,opacity .1s,-webkit-transform 0s .4s;transition:color .25s,opacity .1s,-webkit-transform 0s .4s;transition:color .25s,transform 0s .4s,opacity .1s;transition:color .25s,transform 0s .4s,opacity .1s,-webkit-transform 0s .4s;opacity:0}.md-typeset .admonition,.md-typeset details{box-shadow:0 2px 2px 0 rgba(0,0,0,.14),0 1px 5px 0 rgba(0,0,0,.12),0 3px 1px -2px rgba(0,0,0,.2);position:relative;margin:1.5625em 0;padding:0 .6rem;border-left:.2rem solid #448aff;border-radius:.1rem;font-size:.64rem;overflow:auto}[dir=rtl] .md-typeset .admonition,[dir=rtl] .md-typeset details{border-right:.2rem solid #448aff;border-left:none}html .md-typeset .admonition>:last-child,html .md-typeset details>:last-child{margin-bottom:.6rem}.md-typeset .admonition .admonition,.md-typeset .admonition details,.md-typeset details .admonition,.md-typeset details details{margin:1em 0}.md-typeset .admonition>.admonition-title,.md-typeset .admonition>summary,.md-typeset details>.admonition-title,.md-typeset details>summary{margin:0 -.6rem;padding:.4rem .6rem .4rem 2rem;border-bottom:.05rem solid rgba(68,138,255,.1);background-color:rgba(68,138,255,.1);font-weight:700}[dir=rtl] .md-typeset .admonition>.admonition-title,[dir=rtl] .md-typeset .admonition>summary,[dir=rtl] .md-typeset details>.admonition-title,[dir=rtl] .md-typeset details>summary{padding:.4rem 2rem .4rem .6rem}.md-typeset .admonition>.admonition-title:last-child,.md-typeset .admonition>summary:last-child,.md-typeset details>.admonition-title:last-child,.md-typeset details>summary:last-child{margin-bottom:0}.md-typeset .admonition>.admonition-title:before,.md-typeset .admonition>summary:before,.md-typeset details>.admonition-title:before,.md-typeset details>summary:before{position:absolute;left:.6rem;color:#448aff;font-size:1rem;content:"\E3C9"}[dir=rtl] .md-typeset .admonition>.admonition-title:before,[dir=rtl] .md-typeset .admonition>summary:before,[dir=rtl] .md-typeset details>.admonition-title:before,[dir=rtl] .md-typeset details>summary:before{right:.6rem;left:auto}.md-typeset .admonition.abstract,.md-typeset .admonition.summary,.md-typeset .admonition.tldr,.md-typeset details.abstract,.md-typeset details.summary,.md-typeset details.tldr{border-left-color:#00b0ff}[dir=rtl] .md-typeset .admonition.abstract,[dir=rtl] .md-typeset .admonition.summary,[dir=rtl] .md-typeset .admonition.tldr,[dir=rtl] .md-typeset details.abstract,[dir=rtl] .md-typeset details.summary,[dir=rtl] .md-typeset details.tldr{border-right-color:#00b0ff}.md-typeset 
.admonition.abstract>.admonition-title,.md-typeset .admonition.abstract>summary,.md-typeset .admonition.summary>.admonition-title,.md-typeset .admonition.summary>summary,.md-typeset .admonition.tldr>.admonition-title,.md-typeset .admonition.tldr>summary,.md-typeset details.abstract>.admonition-title,.md-typeset details.abstract>summary,.md-typeset details.summary>.admonition-title,.md-typeset details.summary>summary,.md-typeset details.tldr>.admonition-title,.md-typeset details.tldr>summary{border-bottom-color:rgba(0,176,255,.1);background-color:rgba(0,176,255,.1)}.md-typeset .admonition.abstract>.admonition-title:before,.md-typeset .admonition.abstract>summary:before,.md-typeset .admonition.summary>.admonition-title:before,.md-typeset .admonition.summary>summary:before,.md-typeset .admonition.tldr>.admonition-title:before,.md-typeset .admonition.tldr>summary:before,.md-typeset details.abstract>.admonition-title:before,.md-typeset details.abstract>summary:before,.md-typeset details.summary>.admonition-title:before,.md-typeset details.summary>summary:before,.md-typeset details.tldr>.admonition-title:before,.md-typeset details.tldr>summary:before{color:#00b0ff;content:""}.md-typeset .admonition.info,.md-typeset .admonition.todo,.md-typeset details.info,.md-typeset details.todo{border-left-color:#00b8d4}[dir=rtl] .md-typeset .admonition.info,[dir=rtl] .md-typeset .admonition.todo,[dir=rtl] .md-typeset details.info,[dir=rtl] .md-typeset details.todo{border-right-color:#00b8d4}.md-typeset .admonition.info>.admonition-title,.md-typeset .admonition.info>summary,.md-typeset .admonition.todo>.admonition-title,.md-typeset .admonition.todo>summary,.md-typeset details.info>.admonition-title,.md-typeset details.info>summary,.md-typeset details.todo>.admonition-title,.md-typeset details.todo>summary{border-bottom-color:rgba(0,184,212,.1);background-color:rgba(0,184,212,.1)}.md-typeset .admonition.info>.admonition-title:before,.md-typeset .admonition.info>summary:before,.md-typeset .admonition.todo>.admonition-title:before,.md-typeset .admonition.todo>summary:before,.md-typeset details.info>.admonition-title:before,.md-typeset details.info>summary:before,.md-typeset details.todo>.admonition-title:before,.md-typeset details.todo>summary:before{color:#00b8d4;content:""}.md-typeset .admonition.hint,.md-typeset .admonition.important,.md-typeset .admonition.tip,.md-typeset details.hint,.md-typeset details.important,.md-typeset details.tip{border-left-color:#00bfa5}[dir=rtl] .md-typeset .admonition.hint,[dir=rtl] .md-typeset .admonition.important,[dir=rtl] .md-typeset .admonition.tip,[dir=rtl] .md-typeset details.hint,[dir=rtl] .md-typeset details.important,[dir=rtl] .md-typeset details.tip{border-right-color:#00bfa5}.md-typeset .admonition.hint>.admonition-title,.md-typeset .admonition.hint>summary,.md-typeset .admonition.important>.admonition-title,.md-typeset .admonition.important>summary,.md-typeset .admonition.tip>.admonition-title,.md-typeset .admonition.tip>summary,.md-typeset details.hint>.admonition-title,.md-typeset details.hint>summary,.md-typeset details.important>.admonition-title,.md-typeset details.important>summary,.md-typeset details.tip>.admonition-title,.md-typeset details.tip>summary{border-bottom-color:rgba(0,191,165,.1);background-color:rgba(0,191,165,.1)}.md-typeset .admonition.hint>.admonition-title:before,.md-typeset .admonition.hint>summary:before,.md-typeset .admonition.important>.admonition-title:before,.md-typeset .admonition.important>summary:before,.md-typeset 
.admonition.tip>.admonition-title:before,.md-typeset .admonition.tip>summary:before,.md-typeset details.hint>.admonition-title:before,.md-typeset details.hint>summary:before,.md-typeset details.important>.admonition-title:before,.md-typeset details.important>summary:before,.md-typeset details.tip>.admonition-title:before,.md-typeset details.tip>summary:before{color:#00bfa5;content:""}.md-typeset .admonition.check,.md-typeset .admonition.done,.md-typeset .admonition.success,.md-typeset details.check,.md-typeset details.done,.md-typeset details.success{border-left-color:#00c853}[dir=rtl] .md-typeset .admonition.check,[dir=rtl] .md-typeset .admonition.done,[dir=rtl] .md-typeset .admonition.success,[dir=rtl] .md-typeset details.check,[dir=rtl] .md-typeset details.done,[dir=rtl] .md-typeset details.success{border-right-color:#00c853}.md-typeset .admonition.check>.admonition-title,.md-typeset .admonition.check>summary,.md-typeset .admonition.done>.admonition-title,.md-typeset .admonition.done>summary,.md-typeset .admonition.success>.admonition-title,.md-typeset .admonition.success>summary,.md-typeset details.check>.admonition-title,.md-typeset details.check>summary,.md-typeset details.done>.admonition-title,.md-typeset details.done>summary,.md-typeset details.success>.admonition-title,.md-typeset details.success>summary{border-bottom-color:rgba(0,200,83,.1);background-color:rgba(0,200,83,.1)}.md-typeset .admonition.check>.admonition-title:before,.md-typeset .admonition.check>summary:before,.md-typeset .admonition.done>.admonition-title:before,.md-typeset .admonition.done>summary:before,.md-typeset .admonition.success>.admonition-title:before,.md-typeset .admonition.success>summary:before,.md-typeset details.check>.admonition-title:before,.md-typeset details.check>summary:before,.md-typeset details.done>.admonition-title:before,.md-typeset details.done>summary:before,.md-typeset details.success>.admonition-title:before,.md-typeset details.success>summary:before{color:#00c853;content:""}.md-typeset .admonition.faq,.md-typeset .admonition.help,.md-typeset .admonition.question,.md-typeset details.faq,.md-typeset details.help,.md-typeset details.question{border-left-color:#64dd17}[dir=rtl] .md-typeset .admonition.faq,[dir=rtl] .md-typeset .admonition.help,[dir=rtl] .md-typeset .admonition.question,[dir=rtl] .md-typeset details.faq,[dir=rtl] .md-typeset details.help,[dir=rtl] .md-typeset details.question{border-right-color:#64dd17}.md-typeset .admonition.faq>.admonition-title,.md-typeset .admonition.faq>summary,.md-typeset .admonition.help>.admonition-title,.md-typeset .admonition.help>summary,.md-typeset .admonition.question>.admonition-title,.md-typeset .admonition.question>summary,.md-typeset details.faq>.admonition-title,.md-typeset details.faq>summary,.md-typeset details.help>.admonition-title,.md-typeset details.help>summary,.md-typeset details.question>.admonition-title,.md-typeset details.question>summary{border-bottom-color:rgba(100,221,23,.1);background-color:rgba(100,221,23,.1)}.md-typeset .admonition.faq>.admonition-title:before,.md-typeset .admonition.faq>summary:before,.md-typeset .admonition.help>.admonition-title:before,.md-typeset .admonition.help>summary:before,.md-typeset .admonition.question>.admonition-title:before,.md-typeset .admonition.question>summary:before,.md-typeset details.faq>.admonition-title:before,.md-typeset details.faq>summary:before,.md-typeset details.help>.admonition-title:before,.md-typeset details.help>summary:before,.md-typeset 
details.question>.admonition-title:before,.md-typeset details.question>summary:before{color:#64dd17;content:""}.md-typeset .admonition.attention,.md-typeset .admonition.caution,.md-typeset .admonition.warning,.md-typeset details.attention,.md-typeset details.caution,.md-typeset details.warning{border-left-color:#ff9100}[dir=rtl] .md-typeset .admonition.attention,[dir=rtl] .md-typeset .admonition.caution,[dir=rtl] .md-typeset .admonition.warning,[dir=rtl] .md-typeset details.attention,[dir=rtl] .md-typeset details.caution,[dir=rtl] .md-typeset details.warning{border-right-color:#ff9100}.md-typeset .admonition.attention>.admonition-title,.md-typeset .admonition.attention>summary,.md-typeset .admonition.caution>.admonition-title,.md-typeset .admonition.caution>summary,.md-typeset .admonition.warning>.admonition-title,.md-typeset .admonition.warning>summary,.md-typeset details.attention>.admonition-title,.md-typeset details.attention>summary,.md-typeset details.caution>.admonition-title,.md-typeset details.caution>summary,.md-typeset details.warning>.admonition-title,.md-typeset details.warning>summary{border-bottom-color:rgba(255,145,0,.1);background-color:rgba(255,145,0,.1)}.md-typeset .admonition.attention>.admonition-title:before,.md-typeset .admonition.attention>summary:before,.md-typeset .admonition.caution>.admonition-title:before,.md-typeset .admonition.caution>summary:before,.md-typeset .admonition.warning>.admonition-title:before,.md-typeset .admonition.warning>summary:before,.md-typeset details.attention>.admonition-title:before,.md-typeset details.attention>summary:before,.md-typeset details.caution>.admonition-title:before,.md-typeset details.caution>summary:before,.md-typeset details.warning>.admonition-title:before,.md-typeset details.warning>summary:before{color:#ff9100;content:""}.md-typeset .admonition.fail,.md-typeset .admonition.failure,.md-typeset .admonition.missing,.md-typeset details.fail,.md-typeset details.failure,.md-typeset details.missing{border-left-color:#ff5252}[dir=rtl] .md-typeset .admonition.fail,[dir=rtl] .md-typeset .admonition.failure,[dir=rtl] .md-typeset .admonition.missing,[dir=rtl] .md-typeset details.fail,[dir=rtl] .md-typeset details.failure,[dir=rtl] .md-typeset details.missing{border-right-color:#ff5252}.md-typeset .admonition.fail>.admonition-title,.md-typeset .admonition.fail>summary,.md-typeset .admonition.failure>.admonition-title,.md-typeset .admonition.failure>summary,.md-typeset .admonition.missing>.admonition-title,.md-typeset .admonition.missing>summary,.md-typeset details.fail>.admonition-title,.md-typeset details.fail>summary,.md-typeset details.failure>.admonition-title,.md-typeset details.failure>summary,.md-typeset details.missing>.admonition-title,.md-typeset details.missing>summary{border-bottom-color:rgba(255,82,82,.1);background-color:rgba(255,82,82,.1)}.md-typeset .admonition.fail>.admonition-title:before,.md-typeset .admonition.fail>summary:before,.md-typeset .admonition.failure>.admonition-title:before,.md-typeset .admonition.failure>summary:before,.md-typeset .admonition.missing>.admonition-title:before,.md-typeset .admonition.missing>summary:before,.md-typeset details.fail>.admonition-title:before,.md-typeset details.fail>summary:before,.md-typeset details.failure>.admonition-title:before,.md-typeset details.failure>summary:before,.md-typeset details.missing>.admonition-title:before,.md-typeset details.missing>summary:before{color:#ff5252;content:""}.md-typeset .admonition.danger,.md-typeset .admonition.error,.md-typeset 
details.danger,.md-typeset details.error{border-left-color:#ff1744}[dir=rtl] .md-typeset .admonition.danger,[dir=rtl] .md-typeset .admonition.error,[dir=rtl] .md-typeset details.danger,[dir=rtl] .md-typeset details.error{border-right-color:#ff1744}.md-typeset .admonition.danger>.admonition-title,.md-typeset .admonition.danger>summary,.md-typeset .admonition.error>.admonition-title,.md-typeset .admonition.error>summary,.md-typeset details.danger>.admonition-title,.md-typeset details.danger>summary,.md-typeset details.error>.admonition-title,.md-typeset details.error>summary{border-bottom-color:rgba(255,23,68,.1);background-color:rgba(255,23,68,.1)}.md-typeset .admonition.danger>.admonition-title:before,.md-typeset .admonition.danger>summary:before,.md-typeset .admonition.error>.admonition-title:before,.md-typeset .admonition.error>summary:before,.md-typeset details.danger>.admonition-title:before,.md-typeset details.danger>summary:before,.md-typeset details.error>.admonition-title:before,.md-typeset details.error>summary:before{color:#ff1744;content:""}.md-typeset .admonition.bug,.md-typeset details.bug{border-left-color:#f50057}[dir=rtl] .md-typeset .admonition.bug,[dir=rtl] .md-typeset details.bug{border-right-color:#f50057}.md-typeset .admonition.bug>.admonition-title,.md-typeset .admonition.bug>summary,.md-typeset details.bug>.admonition-title,.md-typeset details.bug>summary{border-bottom-color:rgba(245,0,87,.1);background-color:rgba(245,0,87,.1)}.md-typeset .admonition.bug>.admonition-title:before,.md-typeset .admonition.bug>summary:before,.md-typeset details.bug>.admonition-title:before,.md-typeset details.bug>summary:before{color:#f50057;content:""}.md-typeset .admonition.example,.md-typeset details.example{border-left-color:#651fff}[dir=rtl] .md-typeset .admonition.example,[dir=rtl] .md-typeset details.example{border-right-color:#651fff}.md-typeset .admonition.example>.admonition-title,.md-typeset .admonition.example>summary,.md-typeset details.example>.admonition-title,.md-typeset details.example>summary{border-bottom-color:rgba(101,31,255,.1);background-color:rgba(101,31,255,.1)}.md-typeset .admonition.example>.admonition-title:before,.md-typeset .admonition.example>summary:before,.md-typeset details.example>.admonition-title:before,.md-typeset details.example>summary:before{color:#651fff;content:""}.md-typeset .admonition.cite,.md-typeset .admonition.quote,.md-typeset details.cite,.md-typeset details.quote{border-left-color:#9e9e9e}[dir=rtl] .md-typeset .admonition.cite,[dir=rtl] .md-typeset .admonition.quote,[dir=rtl] .md-typeset details.cite,[dir=rtl] .md-typeset details.quote{border-right-color:#9e9e9e}.md-typeset .admonition.cite>.admonition-title,.md-typeset .admonition.cite>summary,.md-typeset .admonition.quote>.admonition-title,.md-typeset .admonition.quote>summary,.md-typeset details.cite>.admonition-title,.md-typeset details.cite>summary,.md-typeset details.quote>.admonition-title,.md-typeset details.quote>summary{border-bottom-color:hsla(0,0%,62%,.1);background-color:hsla(0,0%,62%,.1)}.md-typeset .admonition.cite>.admonition-title:before,.md-typeset .admonition.cite>summary:before,.md-typeset .admonition.quote>.admonition-title:before,.md-typeset .admonition.quote>summary:before,.md-typeset details.cite>.admonition-title:before,.md-typeset details.cite>summary:before,.md-typeset details.quote>.admonition-title:before,.md-typeset details.quote>summary:before{color:#9e9e9e;content:""}.codehilite .o,.codehilite .ow,.md-typeset .highlight .o,.md-typeset .highlight 
.ow{color:inherit}.codehilite .ge,.md-typeset .highlight .ge{color:#000}.codehilite .gr,.md-typeset .highlight .gr{color:#a00}.codehilite .gh,.md-typeset .highlight .gh{color:#999}.codehilite .go,.md-typeset .highlight .go{color:#888}.codehilite .gp,.md-typeset .highlight .gp{color:#555}.codehilite .gs,.md-typeset .highlight .gs{color:inherit}.codehilite .gu,.md-typeset .highlight .gu{color:#aaa}.codehilite .gt,.md-typeset .highlight .gt{color:#a00}.codehilite .gd,.md-typeset .highlight .gd{background-color:#fdd}.codehilite .gi,.md-typeset .highlight .gi{background-color:#dfd}.codehilite .k,.md-typeset .highlight .k{color:#3b78e7}.codehilite .kc,.md-typeset .highlight .kc{color:#a71d5d}.codehilite .kd,.codehilite .kn,.md-typeset .highlight .kd,.md-typeset .highlight .kn{color:#3b78e7}.codehilite .kp,.md-typeset .highlight .kp{color:#a71d5d}.codehilite .kr,.codehilite .kt,.md-typeset .highlight .kr,.md-typeset .highlight .kt{color:#3e61a2}.codehilite .c,.codehilite .cm,.md-typeset .highlight .c,.md-typeset .highlight .cm{color:#999}.codehilite .cp,.md-typeset .highlight .cp{color:#666}.codehilite .c1,.codehilite .ch,.codehilite .cs,.md-typeset .highlight .c1,.md-typeset .highlight .ch,.md-typeset .highlight .cs{color:#999}.codehilite .na,.codehilite .nb,.md-typeset .highlight .na,.md-typeset .highlight .nb{color:#c2185b}.codehilite .bp,.md-typeset .highlight .bp{color:#3e61a2}.codehilite .nc,.md-typeset .highlight .nc{color:#c2185b}.codehilite .no,.md-typeset .highlight .no{color:#3e61a2}.codehilite .nd,.codehilite .ni,.md-typeset .highlight .nd,.md-typeset .highlight .ni{color:#666}.codehilite .ne,.codehilite .nf,.md-typeset .highlight .ne,.md-typeset .highlight .nf{color:#c2185b}.codehilite .nl,.md-typeset .highlight .nl{color:#3b5179}.codehilite .nn,.md-typeset .highlight .nn{color:#ec407a}.codehilite .nt,.md-typeset .highlight .nt{color:#3b78e7}.codehilite .nv,.codehilite .vc,.codehilite .vg,.codehilite .vi,.md-typeset .highlight .nv,.md-typeset .highlight .vc,.md-typeset .highlight .vg,.md-typeset .highlight .vi{color:#3e61a2}.codehilite .nx,.md-typeset .highlight .nx{color:#ec407a}.codehilite .il,.codehilite .m,.codehilite .mf,.codehilite .mh,.codehilite .mi,.codehilite .mo,.md-typeset .highlight .il,.md-typeset .highlight .m,.md-typeset .highlight .mf,.md-typeset .highlight .mh,.md-typeset .highlight .mi,.md-typeset .highlight .mo{color:#e74c3c}.codehilite .s,.codehilite .sb,.codehilite .sc,.md-typeset .highlight .s,.md-typeset .highlight .sb,.md-typeset .highlight .sc{color:#0d904f}.codehilite .sd,.md-typeset .highlight .sd{color:#999}.codehilite .s2,.md-typeset .highlight .s2{color:#0d904f}.codehilite .se,.codehilite .sh,.codehilite .si,.codehilite .sx,.md-typeset .highlight .se,.md-typeset .highlight .sh,.md-typeset .highlight .si,.md-typeset .highlight .sx{color:#183691}.codehilite .sr,.md-typeset .highlight .sr{color:#009926}.codehilite .s1,.codehilite .ss,.md-typeset .highlight .s1,.md-typeset .highlight .ss{color:#0d904f}.codehilite .err,.md-typeset .highlight .err{color:#a61717}.codehilite .w,.md-typeset .highlight .w{color:transparent}.codehilite .hll,.md-typeset .highlight .hll{display:block;margin:0 -.6rem;padding:0 .6rem;background-color:rgba(255,235,59,.5)}.md-typeset .codehilite,.md-typeset .highlight{position:relative;margin:1em 0;padding:0;border-radius:.1rem;background-color:hsla(0,0%,92.5%,.5);color:#37474f;line-height:1.4;-webkit-overflow-scrolling:touch}.md-typeset .codehilite code,.md-typeset .codehilite pre,.md-typeset .highlight code,.md-typeset .highlight 
pre{display:block;margin:0;padding:.525rem .6rem;background-color:transparent;overflow:auto;vertical-align:top}.md-typeset .codehilite code::-webkit-scrollbar,.md-typeset .codehilite pre::-webkit-scrollbar,.md-typeset .highlight code::-webkit-scrollbar,.md-typeset .highlight pre::-webkit-scrollbar{width:.2rem;height:.2rem}.md-typeset .codehilite code::-webkit-scrollbar-thumb,.md-typeset .codehilite pre::-webkit-scrollbar-thumb,.md-typeset .highlight code::-webkit-scrollbar-thumb,.md-typeset .highlight pre::-webkit-scrollbar-thumb{background-color:rgba(0,0,0,.26)}.md-typeset .codehilite code::-webkit-scrollbar-thumb:hover,.md-typeset .codehilite pre::-webkit-scrollbar-thumb:hover,.md-typeset .highlight code::-webkit-scrollbar-thumb:hover,.md-typeset .highlight pre::-webkit-scrollbar-thumb:hover{background-color:#536dfe}.md-typeset pre.codehilite,.md-typeset pre.highlight{overflow:visible}.md-typeset pre.codehilite code,.md-typeset pre.highlight code{display:block;padding:.525rem .6rem;overflow:auto}.md-typeset .codehilitetable,.md-typeset .highlighttable{display:block;margin:1em 0;border-radius:.2em;font-size:.8rem;overflow:hidden}.md-typeset .codehilitetable tbody,.md-typeset .codehilitetable td,.md-typeset .highlighttable tbody,.md-typeset .highlighttable td{display:block;padding:0}.md-typeset .codehilitetable tr,.md-typeset .highlighttable tr{display:-webkit-box;display:flex}.md-typeset .codehilitetable .codehilite,.md-typeset .codehilitetable .highlight,.md-typeset .codehilitetable .linenodiv,.md-typeset .highlighttable .codehilite,.md-typeset .highlighttable .highlight,.md-typeset .highlighttable .linenodiv{margin:0;border-radius:0}.md-typeset .codehilitetable .linenodiv,.md-typeset .highlighttable .linenodiv{padding:.525rem .6rem}.md-typeset .codehilitetable .linenos,.md-typeset .highlighttable .linenos{background-color:rgba(0,0,0,.07);color:rgba(0,0,0,.26);-webkit-user-select:none;-moz-user-select:none;-ms-user-select:none;user-select:none}.md-typeset .codehilitetable .linenos pre,.md-typeset .highlighttable .linenos pre{margin:0;padding:0;background-color:transparent;color:inherit;text-align:right}.md-typeset .codehilitetable .code,.md-typeset .highlighttable .code{-webkit-box-flex:1;flex:1;overflow:hidden}.md-typeset>.codehilitetable,.md-typeset>.highlighttable{box-shadow:none}.md-typeset [id^="fnref:"]{display:inline-block}.md-typeset [id^="fnref:"]:target{margin-top:-3.8rem;padding-top:3.8rem;pointer-events:none}.md-typeset [id^="fn:"]:before{display:none;height:0;content:""}.md-typeset [id^="fn:"]:target:before{display:block;margin-top:-3.5rem;padding-top:3.5rem;pointer-events:none}.md-typeset .footnote{color:rgba(0,0,0,.54);font-size:.64rem}.md-typeset .footnote ol{margin-left:0}.md-typeset .footnote li{-webkit-transition:color .25s;transition:color .25s}.md-typeset .footnote li:target{color:rgba(0,0,0,.87)}.md-typeset .footnote li :first-child{margin-top:0}.md-typeset .footnote li:hover .footnote-backref,.md-typeset .footnote li:target .footnote-backref{-webkit-transform:translateX(0);transform:translateX(0);opacity:1}.md-typeset .footnote li:hover .footnote-backref:hover,.md-typeset .footnote li:target .footnote-backref{color:#536dfe}.md-typeset .footnote-ref{display:inline-block;pointer-events:auto}.md-typeset .footnote-ref:before{display:inline;margin:0 .2em;border-left:.05rem solid rgba(0,0,0,.26);font-size:1.25em;content:"";vertical-align:-.25rem}.md-typeset 
.footnote-backref{display:inline-block;-webkit-transform:translateX(.25rem);transform:translateX(.25rem);-webkit-transition:color .25s,opacity .125s .125s,-webkit-transform .25s .125s;transition:color .25s,opacity .125s .125s,-webkit-transform .25s .125s;transition:transform .25s .125s,color .25s,opacity .125s .125s;transition:transform .25s .125s,color .25s,opacity .125s .125s,-webkit-transform .25s .125s;color:rgba(0,0,0,.26);font-size:0;opacity:0;vertical-align:text-bottom}[dir=rtl] .md-typeset .footnote-backref{-webkit-transform:translateX(-.25rem);transform:translateX(-.25rem)}.md-typeset .footnote-backref:before{display:inline-block;font-size:.8rem;content:"\E31B"}[dir=rtl] .md-typeset .footnote-backref:before{-webkit-transform:scaleX(-1);transform:scaleX(-1)}.md-typeset .headerlink{display:inline-block;margin-left:.5rem;-webkit-transform:translateY(.25rem);transform:translateY(.25rem);-webkit-transition:color .25s,opacity .125s .25s,-webkit-transform .25s .25s;transition:color .25s,opacity .125s .25s,-webkit-transform .25s .25s;transition:transform .25s .25s,color .25s,opacity .125s .25s;transition:transform .25s .25s,color .25s,opacity .125s .25s,-webkit-transform .25s .25s;opacity:0}[dir=rtl] .md-typeset .headerlink{margin-right:.5rem;margin-left:0}html body .md-typeset .headerlink{color:rgba(0,0,0,.26)}.md-typeset h1[id]:before{display:block;margin-top:-9px;padding-top:9px;content:""}.md-typeset h1[id]:target:before{margin-top:-3.45rem;padding-top:3.45rem}.md-typeset h1[id] .headerlink:focus,.md-typeset h1[id]:hover .headerlink,.md-typeset h1[id]:target .headerlink{-webkit-transform:translate(0);transform:translate(0);opacity:1}.md-typeset h1[id] .headerlink:focus,.md-typeset h1[id]:hover .headerlink:hover,.md-typeset h1[id]:target .headerlink{color:#536dfe}.md-typeset h2[id]:before{display:block;margin-top:-8px;padding-top:8px;content:""}.md-typeset h2[id]:target:before{margin-top:-3.4rem;padding-top:3.4rem}.md-typeset h2[id] .headerlink:focus,.md-typeset h2[id]:hover .headerlink,.md-typeset h2[id]:target .headerlink{-webkit-transform:translate(0);transform:translate(0);opacity:1}.md-typeset h2[id] .headerlink:focus,.md-typeset h2[id]:hover .headerlink:hover,.md-typeset h2[id]:target .headerlink{color:#536dfe}.md-typeset h3[id]:before{display:block;margin-top:-9px;padding-top:9px;content:""}.md-typeset h3[id]:target:before{margin-top:-3.45rem;padding-top:3.45rem}.md-typeset h3[id] .headerlink:focus,.md-typeset h3[id]:hover .headerlink,.md-typeset h3[id]:target .headerlink{-webkit-transform:translate(0);transform:translate(0);opacity:1}.md-typeset h3[id] .headerlink:focus,.md-typeset h3[id]:hover .headerlink:hover,.md-typeset h3[id]:target .headerlink{color:#536dfe}.md-typeset h4[id]:before{display:block;margin-top:-9px;padding-top:9px;content:""}.md-typeset h4[id]:target:before{margin-top:-3.45rem;padding-top:3.45rem}.md-typeset h4[id] .headerlink:focus,.md-typeset h4[id]:hover .headerlink,.md-typeset h4[id]:target .headerlink{-webkit-transform:translate(0);transform:translate(0);opacity:1}.md-typeset h4[id] .headerlink:focus,.md-typeset h4[id]:hover .headerlink:hover,.md-typeset h4[id]:target .headerlink{color:#536dfe}.md-typeset h5[id]:before{display:block;margin-top:-11px;padding-top:11px;content:""}.md-typeset h5[id]:target:before{margin-top:-3.55rem;padding-top:3.55rem}.md-typeset h5[id] .headerlink:focus,.md-typeset h5[id]:hover .headerlink,.md-typeset h5[id]:target .headerlink{-webkit-transform:translate(0);transform:translate(0);opacity:1}.md-typeset h5[id] 
.headerlink:focus,.md-typeset h5[id]:hover .headerlink:hover,.md-typeset h5[id]:target .headerlink{color:#536dfe}.md-typeset h6[id]:before{display:block;margin-top:-11px;padding-top:11px;content:""}.md-typeset h6[id]:target:before{margin-top:-3.55rem;padding-top:3.55rem}.md-typeset h6[id] .headerlink:focus,.md-typeset h6[id]:hover .headerlink,.md-typeset h6[id]:target .headerlink{-webkit-transform:translate(0);transform:translate(0);opacity:1}.md-typeset h6[id] .headerlink:focus,.md-typeset h6[id]:hover .headerlink:hover,.md-typeset h6[id]:target .headerlink{color:#536dfe}.md-typeset .MJXc-display{margin:.75em 0;padding:.75em 0;overflow:auto;-webkit-overflow-scrolling:touch}.md-typeset .MathJax_CHTML{outline:0}.md-typeset .critic.comment,.md-typeset del.critic,.md-typeset ins.critic{margin:0 .25em;padding:.0625em 0;border-radius:.1rem;-webkit-box-decoration-break:clone;box-decoration-break:clone}.md-typeset del.critic{background-color:#fdd;box-shadow:.25em 0 0 #fdd,-.25em 0 0 #fdd}.md-typeset ins.critic{background-color:#dfd;box-shadow:.25em 0 0 #dfd,-.25em 0 0 #dfd}.md-typeset .critic.comment{background-color:hsla(0,0%,92.5%,.5);color:#37474f;box-shadow:.25em 0 0 hsla(0,0%,92.5%,.5),-.25em 0 0 hsla(0,0%,92.5%,.5)}.md-typeset .critic.comment:before{padding-right:.125em;color:rgba(0,0,0,.26);content:"\E0B7";vertical-align:-.125em}.md-typeset .critic.block{display:block;margin:1em 0;padding-right:.8rem;padding-left:.8rem;box-shadow:none}.md-typeset .critic.block :first-child{margin-top:.5em}.md-typeset .critic.block :last-child{margin-bottom:.5em}.md-typeset details{display:block;padding-top:0}.md-typeset details[open]>summary:after{-webkit-transform:rotate(180deg);transform:rotate(180deg)}.md-typeset details:not([open]){padding-bottom:0}.md-typeset details:not([open])>summary{border-bottom:none}.md-typeset details summary{padding-right:2rem}[dir=rtl] .md-typeset details summary{padding-left:2rem}.no-details .md-typeset details:not([open])>*{display:none}.no-details .md-typeset details:not([open]) summary{display:block}.md-typeset summary{display:block;outline:none;cursor:pointer}.md-typeset summary::-webkit-details-marker{display:none}.md-typeset summary:after{position:absolute;top:.4rem;right:.6rem;color:rgba(0,0,0,.26);font-size:1rem;content:"\E313"}[dir=rtl] .md-typeset summary:after{right:auto;left:.6rem}.md-typeset .emojione{width:1rem;vertical-align:text-top}.md-typeset code.codehilite,.md-typeset code.highlight{margin:0 .29412em;padding:.07353em 0}.md-typeset .superfences-content{display:none;-webkit-box-ordinal-group:100;order:99;width:100%;background-color:#fff}.md-typeset .superfences-content>*{margin:0;border-radius:0}.md-typeset .superfences-tabs{display:-webkit-box;display:flex;position:relative;flex-wrap:wrap;margin:1em 0;border:.05rem solid rgba(0,0,0,.07);border-radius:.2em}.md-typeset .superfences-tabs>input{display:none}.md-typeset .superfences-tabs>input:checked+label{font-weight:700}.md-typeset .superfences-tabs>input:checked+label+.superfences-content{display:block}.md-typeset .superfences-tabs>label{width:auto;padding:.6rem;-webkit-transition:color .125s;transition:color .125s;font-size:.64rem;cursor:pointer}html .md-typeset .superfences-tabs>label:hover{color:#536dfe}.md-typeset .task-list-item{position:relative;list-style-type:none}.md-typeset .task-list-item [type=checkbox]{position:absolute;top:.45em;left:-2em}[dir=rtl] .md-typeset .task-list-item [type=checkbox]{right:-2em;left:auto}.md-typeset .task-list-control 
.task-list-indicator:before{position:absolute;top:.15em;left:-1.25em;color:rgba(0,0,0,.26);font-size:1.25em;content:"\E835";vertical-align:-.25em}[dir=rtl] .md-typeset .task-list-control .task-list-indicator:before{right:-1.25em;left:auto}.md-typeset .task-list-control [type=checkbox]:checked+.task-list-indicator:before{content:"\E834"}.md-typeset .task-list-control [type=checkbox]{opacity:0;z-index:-1}@media print{.md-typeset a:after{color:rgba(0,0,0,.54);content:" [" attr(href) "]"}.md-typeset code,.md-typeset pre{white-space:pre-wrap}.md-typeset code{box-shadow:none;-webkit-box-decoration-break:initial;box-decoration-break:slice}.md-clipboard,.md-content__icon,.md-footer,.md-header,.md-sidebar,.md-tabs,.md-typeset .headerlink{display:none}}@media only screen and (max-width:44.9375em){.md-typeset pre{margin:1em -.8rem;border-radius:0}.md-typeset pre>code{padding:.525rem .8rem}.md-footer-nav__link--prev .md-footer-nav__title{display:none}.md-search-result__teaser{max-height:2.5rem;-webkit-line-clamp:3}.codehilite .hll,.md-typeset .highlight .hll{margin:0 -.8rem;padding:0 .8rem}.md-typeset>.codehilite,.md-typeset>.highlight{margin:1em -.8rem;border-radius:0}.md-typeset>.codehilite code,.md-typeset>.codehilite pre,.md-typeset>.highlight code,.md-typeset>.highlight pre{padding:.525rem .8rem}.md-typeset>.codehilitetable,.md-typeset>.highlighttable{margin:1em -.8rem;border-radius:0}.md-typeset>.codehilitetable .codehilite>code,.md-typeset>.codehilitetable .codehilite>pre,.md-typeset>.codehilitetable .highlight>code,.md-typeset>.codehilitetable .highlight>pre,.md-typeset>.codehilitetable .linenodiv,.md-typeset>.highlighttable .codehilite>code,.md-typeset>.highlighttable .codehilite>pre,.md-typeset>.highlighttable .highlight>code,.md-typeset>.highlighttable .highlight>pre,.md-typeset>.highlighttable .linenodiv{padding:.5rem .8rem}.md-typeset>p>.MJXc-display{margin:.75em -.8rem;padding:.25em .8rem}.md-typeset>.superfences-tabs{margin:1em -.8rem;border:0;border-top:.05rem solid rgba(0,0,0,.07);border-radius:0}.md-typeset>.superfences-tabs code,.md-typeset>.superfences-tabs pre{padding:.525rem .8rem}}@media only screen and (min-width:100em){html{font-size:137.5%}}@media only screen and (min-width:125em){html{font-size:150%}}@media only screen and (max-width:59.9375em){body[data-md-state=lock]{overflow:hidden}.ios body[data-md-state=lock] .md-container{display:none}html .md-nav__link[for=__toc]{display:block;padding-right:2.4rem}html .md-nav__link[for=__toc]:after{color:inherit;content:"\E8DE"}html .md-nav__link[for=__toc]+.md-nav__link{display:none}html .md-nav__link[for=__toc]~.md-nav{display:-webkit-box;display:flex}html [dir=rtl] .md-nav__link{padding-right:.8rem;padding-left:2.4rem}.md-nav__source{display:block;padding:0 .2rem;background-color:rgba(50,64,144,.9675);color:#fff}.md-search__overlay{position:absolute;top:.2rem;left:.2rem;width:1.8rem;height:1.8rem;-webkit-transform-origin:center;transform-origin:center;-webkit-transition:opacity .2s .2s,-webkit-transform .3s .1s;transition:opacity .2s .2s,-webkit-transform .3s .1s;transition:transform .3s .1s,opacity .2s .2s;transition:transform .3s .1s,opacity .2s .2s,-webkit-transform .3s .1s;border-radius:1rem;background-color:#fff;overflow:hidden;pointer-events:none}[dir=rtl] .md-search__overlay{right:.2rem;left:auto}[data-md-toggle=search]:checked~.md-header .md-search__overlay{-webkit-transition:opacity .1s,-webkit-transform .4s;transition:opacity .1s,-webkit-transform .4s;transition:transform .4s,opacity .1s;transition:transform .4s,opacity 
[minified Material theme CSS omitted]
\ No newline at end of file
diff --git a/docs/site/coco/index.html b/docs/site/coco/index.html
new file mode 100644
index 0000000..f2f0764
--- /dev/null
+++ b/docs/site/coco/index.html
@@ -0,0 +1,948 @@
# COCO

![COCO Dataset Examples](../img/coco.jpg)

You can view the COCO minival leaderboard [here](https://www.sotabench.com/benchmark/coco-minival).
> **Warning**
>
> Object detection APIs in PyTorch are not very standardised across repositories, meaning that
> it may require a lot of glue to get them working with this evaluation procedure (which is based on torchvision).
>
> For easier COCO integration with sotabench it is recommended to use the more general API sotabencheval.
## Getting Started

You'll need the following in the root of your repository:

- `sotabench.py` file - contains the benchmarking logic; the server will run this on each commit
- `requirements.txt` file - Python dependencies to be installed before running `sotabench.py`
- `sotabench_setup.sh` (optional) - any advanced dependencies or setup, e.g. compilation

Once you connect your repository to [sotabench.com](https://www.sotabench.com), the platform
will run your `sotabench.py` file whenever you commit to master.

We now show how to write the `sotabench.py` file to evaluate a PyTorch object detection model with
the torchbench library, and how to have your results recorded and reported for the community.
## The COCO Evaluation Class

You can import the evaluation class from the following module:

```python
from torchbench.object_detection import COCO
```

The `COCO` class contains several components used in the evaluation, such as the dataset:

```python
COCO.dataset
# torchbench.datasets.coco.CocoDetection
```

And some default arguments used for evaluation (which can be overridden):

```python
COCO.transforms
# <torchbench.object_detection.transforms.Compose at 0x7f60e9ffd0b8>

COCO.send_data_to_device
# <function torchbench.object_detection.coco.coco_data_to_device>

COCO.collate_fn
# <function torchbench.object_detection.coco.coco_collate_fn>

COCO.model_output_transform
# <function torchbench.object_detection.coco.coco_output_transform>
```
We will explain these different options shortly, and how you can manipulate them to get the
evaluation logic to play nicely with your model.

An evaluation call - which performs evaluation and, if run on the sotabench.com server, saves the
results - is made through the `benchmark()` method:

```python
import torchvision
model = torchvision.models.detection.__dict__['maskrcnn_resnet50_fpn'](num_classes=91, pretrained=True)

COCO.benchmark(
    model=model,
    paper_model_name='Mask R-CNN (ResNet-50-FPN)',
    paper_arxiv_id='1703.06870'
)
```

These are the key arguments: the `model` is usually an `nn.Module` type object but, more generally,
is any object with a `forward` method that takes in input data and outputs predictions.
`paper_model_name` refers to the name of the model and `paper_arxiv_id` (optionally) refers to
the paper from which the model originated. If these two arguments match a recorded paper result,
then sotabench.com will match your model with the paper and compare your code's results with the
reported results in the paper.
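For instance, if your model needs custom post-processing, one option is to wrap it in an
`nn.Module` whose `forward` performs both steps. This is a minimal sketch, not from the torchbench
docs; `DetectorWithPostprocess` is a hypothetical name:

```python
import torch.nn as nn
import torchvision

class DetectorWithPostprocess(nn.Module):
    """Hypothetical wrapper: forward runs the detector plus any extra steps."""
    def __init__(self):
        super().__init__()
        self.detector = torchvision.models.detection.maskrcnn_resnet50_fpn(
            num_classes=91, pretrained=True
        )

    def forward(self, images):
        outputs = self.detector(images)
        # any custom post-processing of `outputs` would go here
        return outputs
```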
## A full sotabench.py example

Below is an example for the [torchvision](https://github.com/pytorch/vision) repository
benchmarking a Mask R-CNN model:

```python
from torchbench.object_detection import COCO
from torchbench.object_detection.transforms import Compose, ConvertCocoPolysToMask, ToTensor
import torchvision

def coco_data_to_device(input, target, device: str = "cuda", non_blocking: bool = True):
    input = list(inp.to(device=device, non_blocking=non_blocking) for inp in input)
    target = [{k: v.to(device=device, non_blocking=non_blocking) for k, v in t.items()} for t in target]
    return input, target

def coco_collate_fn(batch):
    return tuple(zip(*batch))

def coco_output_transform(output, target):
    output = [{k: v.to("cpu") for k, v in t.items()} for t in output]
    return output, target

transforms = Compose([ConvertCocoPolysToMask(), ToTensor()])

model = torchvision.models.detection.__dict__['maskrcnn_resnet50_fpn'](num_classes=91, pretrained=True)

# Run the benchmark
COCO.benchmark(
    model=model,
    paper_model_name='Mask R-CNN (ResNet-50-FPN)',
    paper_arxiv_id='1703.06870',
    transforms=transforms,
    model_output_transform=coco_output_transform,
    send_data_to_device=coco_data_to_device,
    collate_fn=coco_collate_fn,
    batch_size=8,
    num_gpu=1
)
```
## COCO.benchmark() Arguments

The source code for the COCO evaluation method can be found here. We now explain each argument.

### model

A PyTorch module (e.g. an `nn.Module` object) that takes in COCO data and outputs detections.

For example, from the torchvision repository:

```python
import torchvision
model = torchvision.models.detection.__dict__['maskrcnn_resnet50_fpn'](num_classes=91, pretrained=True)
```
### model_description

(str, optional): Optional model description.

For example:

```python
model_description = 'Using ported TensorFlow weights'
```
### input_transform

Composes the transforms used to transform the input data (the images), e.g.
resizing (e.g. `transforms.Resize`), center cropping, to-tensor transformations and normalization.

For example:

```python
import PIL
import torchvision.transforms as transforms

input_transform = transforms.Compose([
    transforms.Resize(512, PIL.Image.BICUBIC),
    transforms.ToTensor(),
])
```
### target_transform

Composes the transforms used to transform the target data (the labels).
### transforms

Composes the transforms used to transform the input data (the images) and the target data (the
labels) in a dual fashion - for example, resizing the pair of data jointly.

Below is an example; note that `__call__` takes in two arguments and returns two arguments
(ordinary torchvision transforms return one result).

```python
from torchvision.transforms import functional as F

class Compose(object):
    def __init__(self, transforms):
        self.transforms = transforms

    def __call__(self, image, target):
        for t in self.transforms:
            image, target = t(image, target)
        return image, target

class ToTensor(object):
    def __call__(self, image, target):
        image = F.to_tensor(image)
        return image, target

class ImageResize(object):
    def __init__(self, resize_shape):
        self.resize_shape = resize_shape

    def __call__(self, image, target):
        image = F.resize(image, self.resize_shape)
        return image, target

transforms = Compose([ImageResize((512, 512)), ToTensor()])
```

Note that the default transforms are:

```python
from torchbench.object_detection.utils import Compose, ConvertCocoPolysToMask, ToTensor
transforms = Compose([ConvertCocoPolysToMask(), ToTensor()])
```

`ConvertCocoPolysToMask` is from the torchvision reference implementation, and transforms
the inputs into the right format to be entered into the model. You can pass whatever transforms
you need to make the dataset work with your model.
### model_output_transform

(callable, optional): An optional function that takes in model output (after being passed through
your model's forward pass) and transforms it. Afterwards, the output will be passed into an
evaluation function.

The model output transform is a function that you can pass in to transform the model output
after the data has been passed into the model. This is useful if you have to do further
processing steps after inference to get the predictions in the right format for evaluation.

The model evaluation loop for each batch, from utils.py, is as follows:

```python
with torch.no_grad():
    for i, (input, target) in enumerate(iterator):
        input, target = send_data_to_device(input, target, device=device)
        original_output = model(input)
        output, target = model_output_transform(original_output, target)
        result = {
            tar["image_id"].item(): out for tar, out in zip(target, output)
        }
        coco_evaluator.update(result)
```

We can see `model_output_transform` in use, and that its output is then transformed into a
dictionary with image ids as keys and outputs as values.

The expected output of `model_output_transform` is a list of dictionaries (length = batch_size),
where each dictionary contains keys for 'boxes', 'labels', 'scores' and 'masks', and each value is
of the `torch.Tensor` type.

`result` is then a dictionary with the image ids as keys, and the predictions as values
(dictionaries with boxes, labels, scores, ... as keys).
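As a concrete illustration of that structure, here is a hand-written sketch of a conforming
transformed output for a batch of one image (the tensor values and the mask shape are made up
purely for illustration):

```python
import torch

output = [
    {
        "boxes": torch.tensor([[10.0, 20.0, 110.0, 220.0]]),  # (N, 4) box coordinates
        "labels": torch.tensor([3]),                          # (N,) class indices
        "scores": torch.tensor([0.98]),                       # (N,) confidences
        "masks": torch.zeros(1, 1, 480, 640),                 # (N, 1, H, W) masks
    }
]
```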
### collate_fn

How the dataset is collated - an optional callable passed into the DataLoader.

As an example, the default collate function is:

```python
def coco_collate_fn(batch):
    return tuple(zip(*batch))
```
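Concretely, `tuple(zip(*batch))` turns a list of `(image, target)` pairs into an
`(images, targets)` pair of tuples; a quick illustration with placeholder strings:

```python
batch = [("img0", "tgt0"), ("img1", "tgt1")]
images, targets = tuple(zip(*batch))
# images == ("img0", "img1"), targets == ("tgt0", "tgt1")
```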
### send_data_to_device

An optional function specifying how the data is sent to a device.

As an example, the COCO default is:

```python
def coco_data_to_device(input, target, device: str = "cuda", non_blocking: bool = True):
    input = list(inp.to(device=device, non_blocking=non_blocking) for inp in input)
    target = [{k: v.to(device=device, non_blocking=non_blocking) for k, v in t.items()} for t in target]
    return input, target
```
### data_root

data_root (str): The location of the COCO dataset - change this parameter when evaluating locally
if your COCO data is located in a different folder (or alternatively if you want to download it to
an alternative location).

Note that this parameter will be overridden when the evaluation is performed on the server,
so it is solely for your local use.
### num_workers

num_workers (int): The number of workers to use for the DataLoader.

### batch_size

batch_size (int): The batch size to use for evaluation; if you get memory errors,
then reduce this (halving each time) until your model fits onto the GPU.
### paper_model_name

paper_model_name (str, optional): The name of the model from the paper - use this if you want to
link your build to a machine learning paper. See the COCO benchmark page for model names,
https://www.sotabench.com/benchmark/coco-minival, e.g. on the paper leaderboard tab.

### paper_arxiv_id

paper_arxiv_id (str, optional): Optional link to arXiv if you want to link to papers on the
leaderboard; put in the corresponding paper's arXiv ID, e.g. '1611.05431'.
### paper_pwc_id

paper_pwc_id (str, optional): Optional link to Papers With Code; put in the corresponding
Papers With Code URL slug, e.g. 'u-gat-it-unsupervised-generative-attentional'.

### paper_results

paper_results (dict, optional): If the paper you are reproducing does not have model results on
sotabench.com, you can specify the paper results yourself through this argument, where keys are
metric names and values are metric values. E.g.:

```python
{'box AP': 0.349, 'AP50': 0.592, ...}
```

Ensure that the metric names match those on the sotabench leaderboard - for COCO they should be
'box AP', 'AP50', 'AP75', 'APS', 'APM', 'APL'.
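A call supplying your own paper results might look like the following sketch (the metric values
shown are placeholders for illustration, not real results from the Mask R-CNN paper):

```python
import torchvision
from torchbench.object_detection import COCO

model = torchvision.models.detection.__dict__['maskrcnn_resnet50_fpn'](num_classes=91, pretrained=True)

COCO.benchmark(
    model=model,
    paper_model_name='Mask R-CNN (ResNet-50-FPN)',
    paper_results={'box AP': 0.38, 'AP50': 0.59, 'AP75': 0.41},  # placeholder values
)
```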
### pytorch_hub_url

pytorch_hub_url (str, optional): Optional link to the PyTorch Hub URL if your model is listed
there, e.g. 'nvidia_deeplearningexamples_waveglow'.

## Need More Help?

Head on over to the Computer Vision section of the sotabench forums if you have any questions
or difficulties.
\ No newline at end of file
diff --git a/docs/site/imagenet/index.html b/docs/site/imagenet/index.html
new file mode 100644
index 0000000..4c205ab
--- /dev/null
+++ b/docs/site/imagenet/index.html
@@ -0,0 +1,865 @@
# ImageNet

![ImageNet Dataset Examples](../img/imagenet.jpeg)

You can view the ImageNet leaderboard [here](https://sotabench.com/benchmarks/image-classification-on-imagenet).
## Getting Started

You'll need the following in the root of your repository:

- `sotabench.py` file - contains the benchmarking logic; the server will run this on each commit
- `requirements.txt` file - Python dependencies to be installed before running `sotabench.py`
- `sotabench_setup.sh` (optional) - any advanced dependencies or setup, e.g. compilation

Once you connect your repository to [sotabench.com](https://www.sotabench.com), the platform
will run your `sotabench.py` file whenever you commit to master.

We now show how to write the `sotabench.py` file to evaluate a PyTorch image classification model
with the torchbench library, and how to have your results recorded and reported for the community.
## The ImageNet Evaluation Class

You can import the evaluation class from the following module:

```python
from torchbench.image_classification import ImageNet
```

The `ImageNet` class contains several components used in the evaluation, such as the dataset:

```python
ImageNet.dataset
# torchvision.datasets.ImageNet
```

And some default arguments used for evaluation (which can be overridden):

```python
ImageNet.normalize
# Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])

ImageNet.input_transform
# Compose(
#   Resize(size=256, interpolation=PIL.Image.BILINEAR)
#   CenterCrop(size=(224, 224))
#   ToTensor()
#   Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
# )

ImageNet.send_data_to_device
# <function torchbench.utils.default_data_to_device>
```
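Since these are plain attributes, you can reuse them when building your own pipeline. As a sketch
under that assumption, the default normalization could be dropped into a custom transform like so:

```python
import PIL
import torchvision.transforms as transforms
from torchbench.image_classification import ImageNet

# Reuse the library's default Normalize instance in a custom pipeline
my_transform = transforms.Compose([
    transforms.Resize(256, PIL.Image.BICUBIC),
    transforms.CenterCrop(224),
    transforms.ToTensor(),
    ImageNet.normalize,
])
```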
We will explain these different options shortly, and how you can manipulate them to get the
evaluation logic to play nicely with your model.

An evaluation call - which performs evaluation and, if run on the sotabench.com server, saves the
results - is made through the `benchmark()` method:

```python
from torchvision.models.resnet import resnext101_32x8d

ImageNet.benchmark(
    model=resnext101_32x8d(pretrained=True),
    paper_model_name='ResNeXt-101-32x8d',
    paper_arxiv_id='1611.05431'
)
```

These are the key arguments: the `model` is usually an `nn.Module` type object but, more generally,
is any object with a `forward` method that takes in input data and outputs predictions.
`paper_model_name` refers to the name of the model and `paper_arxiv_id` (optionally) refers to
the paper from which the model originated. If these two arguments match a recorded paper result,
then sotabench.com will match your model with the paper and compare your code's results with the
reported results in the paper.
## A full sotabench.py example

Below is an example for the [torchvision](https://github.com/pytorch/vision) repository
benchmarking a ResNeXt-101-32x8d model:

```python
from torchbench.image_classification import ImageNet
from torchvision.models.resnet import resnext101_32x8d
import torchvision.transforms as transforms
import PIL

# Define the transforms needed to convert ImageNet data to the expected
# model input
normalize = transforms.Normalize(
    mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]
)
input_transform = transforms.Compose([
    transforms.Resize(256, PIL.Image.BICUBIC),
    transforms.CenterCrop(224),
    transforms.ToTensor(),
    normalize,
])

# Run the benchmark
ImageNet.benchmark(
    model=resnext101_32x8d(pretrained=True),
    paper_model_name='ResNeXt-101-32x8d',
    paper_arxiv_id='1611.05431',
    input_transform=input_transform,
    batch_size=256,
    num_gpu=1
)
```
## ImageNet.benchmark() Arguments

The source code for the ImageNet evaluation method can be found here. We now explain each argument.

### model

A PyTorch module (e.g. an `nn.Module` object) that takes in ImageNet data and outputs class
predictions.

For example, from the torchvision repository:

```python
from torchvision.models.resnet import resnext101_32x8d
model = resnext101_32x8d(pretrained=True)
```
### model_description

(str, optional): Optional model description.

For example:

```python
model_description = 'Using ported TensorFlow weights'
```
### input_transform

Composes the transforms used to transform the input data (the images), e.g.
resizing (e.g. `transforms.Resize`), center cropping, to-tensor transformations and normalization.

For example:

```python
import PIL
import torchvision.transforms as transforms

normalize = transforms.Normalize(
    mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]
)
input_transform = transforms.Compose([
    transforms.Resize(256, PIL.Image.BICUBIC),
    transforms.CenterCrop(224),
    transforms.ToTensor(),
    normalize,
])
```
### target_transform

Composes the transforms used to transform the target data (the labels).
### model_output_transform

(callable, optional): An optional function that takes in model output (after being passed through
your model's forward pass) and transforms it. Afterwards, the output will be passed into an
evaluation function.

The model output transform is a function that you can pass in to transform the model output
after the data has been passed into the model. This is useful if you have to do further
processing steps after inference to get the predictions in the right format for evaluation.

Most PyTorch models for image classification on ImageNet don't need to use this argument.

The model evaluation loop for each batch, from utils.py, is as follows:

```python
with torch.no_grad():
    for i, (input, target) in enumerate(iterator):

        input, target = send_data_to_device(input, target, device=device)
        output = model(input)

        if model_output_transform is not None:
            output = model_output_transform(output, target, model=model)

        check_metric_inputs(output, target, test_loader.dataset, i)
        prec1, prec5 = accuracy(output, target, topk=(1, 5))
```

The model output (following `model.forward()` and optionally `model_output_transform`) should be a
2D `torch.Tensor`: the first dimension is the output for each example (length batch_size), and the
second dimension is the output for each class in ImageNet (length 1000).
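For instance, if your network's forward returns a `(logits, aux)` tuple rather than a bare tensor,
a transform along these lines would extract the logits for evaluation. This is a hypothetical
sketch; `tuple_output_transform` is not part of torchbench:

```python
def tuple_output_transform(output, target, model=None):
    """Hypothetical example: keep only the logits from a (logits, aux) output."""
    logits, _ = output
    return logits  # 2D tensor of shape (batch_size, 1000)
```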
### send_data_to_device

An optional function specifying how the data is sent to a device.

As an example, the default is:

```python
def default_data_to_device(input, target=None, device: str = "cuda", non_blocking: bool = True):
    """Sends data output from a PyTorch Dataloader to the device."""

    input = input.to(device=device, non_blocking=non_blocking)

    if target is not None:
        target = target.to(device=device, non_blocking=non_blocking)

    return input, target
```
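As an illustration of when you might override this, the sketch below (hypothetical, and assuming
your PyTorch version and model support channels-last tensors) moves inputs to the GPU in
channels-last memory format:

```python
import torch

def channels_last_data_to_device(input, target=None, device: str = "cuda", non_blocking: bool = True):
    """Hypothetical variant: also converts image batches to channels-last layout."""
    input = input.to(device=device, non_blocking=non_blocking)
    input = input.contiguous(memory_format=torch.channels_last)

    if target is not None:
        target = target.to(device=device, non_blocking=non_blocking)

    return input, target
```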
### data_root

data_root (str): The location of the ImageNet dataset - change this parameter when evaluating
locally if your ImageNet data is located in a different folder (or alternatively if you want to
download it to an alternative location).

Note that this parameter will be overridden when the evaluation is performed on the server,
so it is solely for your local use.
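For example, to point a local run at an existing copy of the dataset (the path below is just a
placeholder):

```python
from torchbench.image_classification import ImageNet
from torchvision.models.resnet import resnext101_32x8d

ImageNet.benchmark(
    model=resnext101_32x8d(pretrained=True),
    paper_model_name='ResNeXt-101-32x8d',
    data_root='/mnt/data/imagenet',  # placeholder local path
)
```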
### num_workers

num_workers (int): The number of workers to use for the DataLoader.

### batch_size

batch_size (int): The batch size to use for evaluation; if you get memory errors,
then reduce this (halving each time) until your model fits onto the GPU.
### paper_model_name

paper_model_name (str, optional): The name of the model from the paper - use this if you want to
link your build to a machine learning paper. See the ImageNet benchmark page for model names,
https://sotabench.com/benchmarks/image-classification-on-imagenet, e.g. on the paper
leaderboard tab.

### paper_arxiv_id

paper_arxiv_id (str, optional): Optional link to arXiv if you want to link to papers on the
leaderboard; put in the corresponding paper's arXiv ID, e.g. '1611.05431'.
### paper_pwc_id

paper_pwc_id (str, optional): Optional link to Papers With Code; put in the corresponding
Papers With Code URL slug, e.g. 'u-gat-it-unsupervised-generative-attentional'.

### paper_results

paper_results (dict, optional): If the paper you are reproducing does not have model results on
sotabench.com, you can specify the paper results yourself through this argument, where keys are
metric names and values are metric values. E.g.:

```python
{'Top 1 Accuracy': 0.543, 'Top 5 Accuracy': 0.654}
```

Ensure that the metric names match those on the sotabench leaderboard - for ImageNet they should be
'Top 1 Accuracy' and 'Top 5 Accuracy'.
### pytorch_hub_url

pytorch_hub_url (str, optional): Optional link to the PyTorch Hub URL if your model is listed
there, e.g. 'nvidia_deeplearningexamples_waveglow'.

## Need More Help?

Head on over to the Computer Vision section of the sotabench forums if you have any questions
or difficulties.
\ No newline at end of file
diff --git a/docs/site/img/banner.png b/docs/site/img/banner.png
new file mode 100644
index 0000000..d1ad1de
Binary files /dev/null and b/docs/site/img/banner.png differ
diff --git a/docs/site/img/coco.jpg b/docs/site/img/coco.jpg
new file mode 100644
index 0000000..c1939b5
Binary files /dev/null and b/docs/site/img/coco.jpg differ
diff --git a/docs/site/img/connect.png b/docs/site/img/connect.png
new file mode 100644
index 0000000..a7430a3
Binary files /dev/null and b/docs/site/img/connect.png differ
diff --git a/docs/site/img/connect2.png b/docs/site/img/connect2.png
new file mode 100644
index 0000000..b25daa7
Binary files /dev/null and b/docs/site/img/connect2.png differ
diff --git a/docs/site/img/imagenet.jpeg b/docs/site/img/imagenet.jpeg
new file mode 100644
index 0000000..3002ce1
Binary files /dev/null and b/docs/site/img/imagenet.jpeg differ
diff --git a/docs/site/img/pascalvoc2012.png b/docs/site/img/pascalvoc2012.png
new file mode 100644
index 0000000..68c6832
Binary files /dev/null and b/docs/site/img/pascalvoc2012.png differ
diff --git a/docs/site/img/torchbench.png b/docs/site/img/torchbench.png
new file mode 100644
index 0000000..52389aa
Binary files /dev/null and b/docs/site/img/torchbench.png differ
diff --git a/docs/site/index.html b/docs/site/index.html
new file mode 100644
index 0000000..d75e2bd
--- /dev/null
+++ b/docs/site/index.html
@@ -0,0 +1,465 @@
# Welcome to torchbench!

You have reached the docs for the torchbench library. This library contains a collection of deep
learning benchmarks you can use to benchmark your models, optimized for the PyTorch framework. It
can be used in conjunction with the [sotabench.com](https://www.sotabench.com) website to record
results for models, so the community can compare model performance on different tasks, as well as
a continuous integration style service for your repository to benchmark your models on each commit.

torchbench is a framework-optimized library, meaning it is designed to take advantage of
PyTorch-based features and standardisation. If this is too constraining, you can use alternative
libraries that are framework-independent, e.g. sotabencheval.
## Getting Started: Benchmarking on ImageNet

### Step One: Create a sotabench.py file in the root of your repository

This contains a call to your model, metadata about your model, and options for evaluation such as
dataset processing logic and data loader logic such as the batch size. Below is an example for the
torchvision repository:

```python
from torchbench.image_classification import ImageNet
from torchvision.models.resnet import resnext101_32x8d
import torchvision.transforms as transforms
import PIL

# Define the transforms needed to convert ImageNet data to the expected model input
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                 std=[0.229, 0.224, 0.225])
input_transform = transforms.Compose([
    transforms.Resize(256, PIL.Image.BICUBIC),
    transforms.CenterCrop(224),
    transforms.ToTensor(),
    normalize,
])

# Run the benchmark
ImageNet.benchmark(
    model=resnext101_32x8d(pretrained=True),
    paper_model_name='ResNeXt-101-32x8d',
    paper_arxiv_id='1611.05431',
    input_transform=input_transform,
    batch_size=256,
    num_gpu=1
)
```
### Step Two: Run locally to verify that it works

```bash
python sotabench.py
```

You can also run the logic in a Jupyter Notebook if that is your preferred workflow.

To verify your benchmark will run and all parameters are correct, you can use the included CLI
checking tool:

```bash
$ sb check
```
### Step Three: Login and connect your repository to sotabench

Create an account on [sotabench](https://www.sotabench.com), then head to your user page. Click
the *Connect a GitHub repository* button:

![Connect a GitHub repository](img/connect.png)

Then follow the steps to connect the repositories that you wish to benchmark:

![Connect repositories](img/connect2.png)

After you connect your repository, the sotabench servers will re-evaluate your model on every
commit, to ensure the model is working and results are up-to-date - including if you add
additional models to the benchmark file.
## Installation

The library requires Python 3.6+. You can install via pip:

```bash
pip install torchbench
```
## Support

If you get stuck, you can head to our Discourse forum, where you can ask questions on how to use
the project. You can also find ideas for contributions and work with others on exciting projects.
\ No newline at end of file
diff --git a/docs/site/pascalvoc/index.html b/docs/site/pascalvoc/index.html
new file mode 100644
index 0000000..b9e2790
--- /dev/null
+++ b/docs/site/pascalvoc/index.html
@@ -0,0 +1,940 @@
# PASCAL VOC 2012

![VOC Dataset Examples](../img/pascalvoc2012.png)

You can view the PASCAL VOC 2012 leaderboard [here](https://sotabench.com/benchmarks/semantic-segmentation-on-pascal-voc-2012).
> **Warning**
>
> Semantic segmentation APIs in PyTorch are not very standardised across repositories, meaning that
> it may require a lot of glue to get them working with this evaluation procedure (which is based on torchvision).
>
> For easier VOC integration with sotabench it is recommended to use the more general API sotabencheval.
## Getting Started

You'll need the following in the root of your repository:

- `sotabench.py` file - contains the benchmarking logic; the server will run this on each commit
- `requirements.txt` file - Python dependencies to be installed before running `sotabench.py`
- `sotabench_setup.sh` (optional) - any advanced dependencies or setup, e.g. compilation

Once you connect your repository to [sotabench.com](https://www.sotabench.com), the platform
will run your `sotabench.py` file whenever you commit to master.

We now show how to write the `sotabench.py` file to evaluate a PyTorch semantic segmentation model
with the torchbench library, and how to have your results recorded and reported for the community.
## The VOC Evaluation Class

You can import the evaluation class from the following module:

```python
from torchbench.semantic_segmentation import PASCALVOC
```

The `PASCALVOC` class contains several components used in the evaluation, such as the dataset:

```python
PASCALVOC.dataset
# torchvision.datasets.voc.VOCSegmentation
```

And some default arguments used for evaluation (which can be overridden):

```python
PASCALVOC.normalize
# <torchbench.semantic_segmentation.transforms.Normalize at 0x7f9d645d2160>

PASCALVOC.transforms
# <torchbench.semantic_segmentation.transforms.Compose at 0x7f9d645d2278>

PASCALVOC.send_data_to_device
# <function torchbench.utils.default_data_to_device>

PASCALVOC.collate_fn
# <function torchbench.semantic_segmentation.utils.default_seg_collate_fn>

PASCALVOC.model_output_transform
# <function torchbench.semantic_segmentation.utils.default_seg_output_transform>
```
We will explain these different options shortly, and how you can manipulate them to get the
evaluation logic to play nicely with your model.

An evaluation call - which performs evaluation and, if run on the sotabench.com server, saves the
results - is made through the `benchmark()` method:

```python
from torchvision.models.segmentation import fcn_resnet101
model = fcn_resnet101(num_classes=21, pretrained=True)

PASCALVOC.benchmark(model=model,
    paper_model_name='FCN ResNet-101',
    paper_arxiv_id='1605.06211')
```

These are the key arguments: the `model` is usually an `nn.Module` type object but, more generally,
is any object with a `forward` method that takes in input data and outputs predictions.
`paper_model_name` refers to the name of the model and `paper_arxiv_id` (optionally) refers to
the paper from which the model originated. If these two arguments match a recorded paper result,
then sotabench.com will match your model with the paper and compare your code's results with the
reported results in the paper.
## A full sotabench.py example

Below is an example for the [torchvision](https://github.com/pytorch/vision) repository
benchmarking an FCN ResNet-101 model:

```python
from torchbench.semantic_segmentation import PASCALVOC
from torchbench.semantic_segmentation.transforms import (
    Normalize,
    Resize,
    ToTensor,
    Compose,
)
# cat_list pads tensors to a common size; it is assumed importable from the
# same module that defines the default collate function
from torchbench.semantic_segmentation.utils import cat_list
from torchvision.models.segmentation import fcn_resnet101

def model_output_function(output, target):
    return output['out'].argmax(1).flatten(), target.flatten()

def seg_collate_fn(batch):
    images, targets = list(zip(*batch))
    batched_imgs = cat_list(images, fill_value=0)
    batched_targets = cat_list(targets, fill_value=255)
    return batched_imgs, batched_targets

model = fcn_resnet101(num_classes=21, pretrained=True)

normalize = Normalize(
    mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]
)
my_transforms = Compose([Resize((520, 480)), ToTensor(), normalize])

PASCALVOC.benchmark(batch_size=32,
    model=model,
    transforms=my_transforms,
    model_output_transform=model_output_function,
    collate_fn=seg_collate_fn,
    paper_model_name='FCN ResNet-101',
    paper_arxiv_id='1605.06211')
```
## PASCALVOC.benchmark() Arguments

The source code for the PASCALVOC evaluation method can be found here. We now explain each argument.

### model

A PyTorch module (e.g. an `nn.Module` object) that takes in VOC data and outputs segmentation
predictions.

For example, from the torchvision repository:

```python
from torchvision.models.segmentation import fcn_resnet101
model = fcn_resnet101(num_classes=21, pretrained=True)
```
### model_description

(str, optional): Optional model description.

For example:

```python
model_description = 'Using ported TensorFlow weights'
```
### input_transform

Composes the transforms used to transform the input data (the images), e.g.
resizing (e.g. `transforms.Resize`), center cropping, to-tensor transformations and normalization.

For example:

```python
import PIL
import torchvision.transforms as transforms

input_transform = transforms.Compose([
    transforms.Resize(512, PIL.Image.BICUBIC),
    transforms.ToTensor(),
])
```
### target_transform

Composes the transforms used to transform the target data (the labels).
### transforms

Composes the transforms used to transform the input data (the images) and the target data (the
labels) in a dual fashion - for example, resizing the pair of data jointly.

Below is an example; note that `__call__` takes in two arguments and returns two arguments
(ordinary torchvision transforms return one result).

```python
from torchvision.transforms import functional as F

class Compose(object):
    def __init__(self, transforms):
        self.transforms = transforms

    def __call__(self, image, target):
        for t in self.transforms:
            image, target = t(image, target)
        return image, target

class ToTensor(object):
    def __call__(self, image, target):
        image = F.to_tensor(image)
        return image, target

class ImageResize(object):
    def __init__(self, resize_shape):
        self.resize_shape = resize_shape

    def __call__(self, image, target):
        image = F.resize(image, self.resize_shape)
        return image, target

transforms = Compose([ImageResize((512, 512)), ToTensor()])
```

Note that the default transforms are:

```python
from torchbench.semantic_segmentation.transforms import (Normalize, Resize, ToTensor, Compose)
normalize = Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
transforms = Compose([Resize((520, 480)), ToTensor(), normalize])
```
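If you need an extra joint step - say, normalization written in the same dual style - it would
look like the sketch below. This is illustrative only: torchbench already ships its own `Normalize`
in `torchbench.semantic_segmentation.transforms`, so you would normally use that instead.

```python
from torchvision.transforms import functional as F

class Normalize(object):
    """Dual-style normalize: transforms the image, passes the target through."""
    def __init__(self, mean, std):
        self.mean = mean
        self.std = std

    def __call__(self, image, target):
        image = F.normalize(image, mean=self.mean, std=self.std)
        return image, target
```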
### model_output_transform

(callable, optional): An optional function that takes in model output (after being passed through
your model's forward pass) and transforms it. Afterwards, the output will be passed into an
evaluation function.

The model output transform is a function that you can pass in to transform the model output
after the data has been passed into the model. This is useful if you have to do further
processing steps after inference to get the predictions in the right format for evaluation.

The model evaluation loop for each batch, from utils.py, is as follows:

```python
with torch.no_grad():
    for i, (input, target) in enumerate(iterator):
        input, target = send_data_to_device(input, target, device=device)
        output = model(input)
        output, target = model_output_transform(output, target)
        confmat.update(target, output)
```

The default model_output_transform is:

```python
def default_seg_output_transform(output, target):
    return output["out"].argmax(1).flatten(), target.flatten()
```

We can see that the output and target are flattened to 1D tensors and, in the case of the output,
we take the maximum predicted class for each pixel. Each element in each tensor represents a pixel
and contains a class, e.g. class 6, and we compare the model predictions pixel-by-pixel against the
ground truth labels to calculate the accuracy.
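As a toy illustration of this flattened, pixel-wise comparison (hand-made numbers, not from the
docs):

```python
import torch

pred   = torch.tensor([6, 6, 0, 15, 15, 0])   # flattened predicted classes
target = torch.tensor([6, 0, 0, 15, 15, 0])   # flattened ground-truth classes
pixel_accuracy = (pred == target).float().mean().item()  # 5/6 ≈ 0.833
```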
### collate_fn

How the dataset is collated - an optional callable passed into the DataLoader.

As an example, the default collate function is:

```python
def default_seg_collate_fn(batch):
    images, targets = list(zip(*batch))
    batched_imgs = cat_list(images, fill_value=0)
    batched_targets = cat_list(targets, fill_value=255)
    return batched_imgs, batched_targets
```
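Here `cat_list` pads a list of differently-sized tensors to a common size before stacking them.
torchbench defines this helper internally; a sketch along the lines of the torchvision segmentation
reference utilities would look like this:

```python
def cat_list(images, fill_value=0):
    """Pad each tensor to the largest size in the batch, then stack."""
    max_size = tuple(max(s) for s in zip(*[img.shape for img in images]))
    batch_shape = (len(images),) + max_size
    batched = images[0].new_full(batch_shape, fill_value)
    for img, pad_img in zip(images, batched):
        # copy each tensor into the top-left corner of its padded slot
        pad_img[..., :img.shape[-2], :img.shape[-1]].copy_(img)
    return batched
```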
### send_data_to_device

An optional function specifying how the data is sent to a device.

As an example, the PASCAL VOC default is:

```python
def default_data_to_device(input, target=None, device: str = "cuda", non_blocking: bool = True):
    """Sends data output from a PyTorch Dataloader to the device."""

    input = input.to(device=device, non_blocking=non_blocking)

    if target is not None:
        target = target.to(device=device, non_blocking=non_blocking)

    return input, target
```
### data_root

data_root (str): The location of the VOC dataset - change this parameter when evaluating locally
if your VOC data is located in a different folder (or alternatively if you want to download it to
an alternative location).

Note that this parameter will be overridden when the evaluation is performed on the server,
so it is solely for your local use.
### num_workers

num_workers (int): The number of workers to use for the DataLoader.

### batch_size

batch_size (int): The batch size to use for evaluation; if you get memory errors,
then reduce this (halving each time) until your model fits onto the GPU.
### paper_model_name

paper_model_name (str, optional): The name of the model from the paper - use this if you want to
link your build to a machine learning paper. See the VOC benchmark page for model names,
https://sotabench.com/benchmarks/semantic-segmentation-on-pascal-voc-2012, e.g. on the paper
leaderboard tab.

### paper_arxiv_id

paper_arxiv_id (str, optional): Optional link to arXiv if you want to link to papers on the
leaderboard; put in the corresponding paper's arXiv ID, e.g. '1611.05431'.
### paper_pwc_id

paper_pwc_id (str, optional): Optional link to Papers With Code; put in the corresponding
Papers With Code URL slug, e.g. 'u-gat-it-unsupervised-generative-attentional'.

### paper_results

paper_results (dict, optional): If the paper you are reproducing does not have model results on
sotabench.com, you can specify the paper results yourself through this argument, where keys are
metric names and values are metric values. E.g.:

```python
{'Accuracy': 0.745, 'Mean IOU': 0.592}
```

Ensure that the metric names match those on the sotabench leaderboard - for VOC they should be
'Accuracy' and 'Mean IOU'.
### pytorch_hub_url

pytorch_hub_url (str, optional): Optional link to the PyTorch Hub URL if your model is listed
there, e.g. 'nvidia_deeplearningexamples_waveglow'.

## Need More Help?

Head on over to the Computer Vision section of the sotabench forums if you have any questions
or difficulties.
\ No newline at end of file
diff --git a/docs/site/search/search_index.json b/docs/site/search/search_index.json
new file mode 100644
index 0000000..b08cf50
--- /dev/null
+++ b/docs/site/search/search_index.json
@@ -0,0 +1 @@
[generated MkDocs search_index.json omitted - its text duplicates the documentation pages above]
It can be used in conjunction with the sotabench.com website to record results for models, so the community can compare model performance on different tasks, as well as a continuous integration style service for your repository to benchmark your models on each commit. torchbench is a framework-optimized library, meaning it is designed to take advantage of PyTorch based features and standardisation. If this is too constraining, you can use alternative libraries that are framework-independent, e.g. sotabencheval .","title":"Welcome to torchbench!"},{"location":"#getting-started-benchmarking-on-imagenet","text":"Step One : Create a sotabench.py file in the root of your repository This contains a call to your model, metadata about your model, and options for evaluation such as dataset processing logic and data loader logic such as the batch size. Below is an example for the torchvision repository: from torchbench.image_classification import ImageNet from torchvision.models.resnet import resnext101_32x8d import torchvision.transforms as transforms import PIL # Define the transforms need to convert ImageNet data to expected model input normalize = transforms . Normalize ( mean = [ 0.485 , 0.456 , 0.406 ], std = [ 0.229 , 0.224 , 0.225 ]) input_transform = transforms . Compose ([ transforms . Resize ( 256 , PIL . Image . BICUBIC ), transforms . CenterCrop ( 224 ), transforms . ToTensor (), normalize , ]) # Run the benchmark ImageNet . benchmark ( model = resnext101_32x8d ( pretrained = True ), paper_model_name = 'ResNeXt-101-32x8d' , paper_arxiv_id = '1611.05431' , input_transform = input_transform , batch_size = 256 , num_gpu = 1 ) Step Two : Run locally to verify that it works python sotabench . py You can also run the logic in a Jupyter Notebook if that is your preferred workflow. To verify your benchmark will run and all parameters are correct you can use the included CLI checking tool: $ sb check Step Three : Login and connect your repository to sotabench Create an account on sotabench , then head to your user page. Click the Connect a GitHub repository button: Then follow the steps to connect the repositories that you wish to benchmark: After you connect your repository, the sotabench servers will re-evaluate your model on every commit, to ensure the model is working and results are up-to-date - including if you add additional models to the benchmark file.","title":"Getting Started : Benchmarking on ImageNet"},{"location":"#installation","text":"The library requires Python 3.6+. You can install via pip: pip install torchbench","title":"Installation"},{"location":"#support","text":"If you get stuck you can head to our Discourse forum where you ask questions on how to use the project. You can also find ideas for contributions, and work with others on exciting projects.","title":"Support"},{"location":"coco/","text":"COCO You can view the COCO minival leaderboard here . Warning Object detection APIs in PyTorch are not very standardised across repositories, meaning that it may require a lot of glue to get them working with this evaluation procedure (which is based on torchvision). For easier COCO integration with sotabench it is recommended to use the more general API sotabencheval . 
Getting Started You'll need the following in the root of your repository: sotabench.py file - contains benchmarking logic; the server will run this on each commit requirements.txt file - Python dependencies to be installed before running sotabench.py sotabench_setup.sh (optional) - any advanced dependencies or setup, e.g. compilation Once you connect your repository to sotabench.com , the platform will run your sotabench.py file whenever you commit to master. We now show how to write the sotabench.py file to evaluate a PyTorch object model with the torchbench library, and to allow your results to be recorded and reported for the community. The COCO Evaluation Class You can import the evaluation class from the following module: from torchbench.object_detection import COCO The COCO class contains several components used in the evaluation, such as the dataset : COCO . dataset # torchbench.datasets.coco.CocoDetection And some default arguments used for evaluation (which can be overridden): COCO . transforms # COCO . send_data_to_device # COCO . collate_fn # COCO . model_output_transform # We will explain these different options shortly and how you can manipulate them to get the evaluation logic to play nicely with your model. An evaluation call - which performs evaluation, and if on the sotabench.com server, saves the results - looks like the following through the benchmark() method: import torchvision model = torchvision . models . detection . __dict__ [ 'maskrcnn_resnet50_fpn' ]( num_classes = 91 , pretrained = True ) COCO . benchmark ( model = model , paper_model_name = 'Mask R-CNN (ResNet-50-FPN)' , paper_arxiv_id = '1703.06870' ) These are the key arguments: the model which is a usually a nn.Module type object, but more generally, is any method with a forward method that takes in input data and outputs predictions. paper_model_name refers to the name of the model and paper_arxiv_id (optionally) refers to the paper from which the model originated. If these two arguments match a record paper result, then sotabench.com will match your model with the paper and compare your code's results with the reported results in the paper. A full sotabench.py example Below shows an example for the torchvision repository benchmarking a Mask R-CNN model: from torchbench.object_detection import COCO from torchbench.utils import send_model_to_device from torchbench.object_detection.transforms import Compose , ConvertCocoPolysToMask , ToTensor import torchvision import PIL def coco_data_to_device ( input , target , device : str = \"cuda\" , non_blocking : bool = True ): input = list ( inp . to ( device = device , non_blocking = non_blocking ) for inp in input ) target = [{ k : v . to ( device = device , non_blocking = non_blocking ) for k , v in t . items ()} for t in target ] return input , target def coco_collate_fn ( batch ): return tuple ( zip ( * batch )) def coco_output_transform ( output , target ): output = [{ k : v . to ( \"cpu\" ) for k , v in t . items ()} for t in output ] return output , target transforms = Compose ([ ConvertCocoPolysToMask (), ToTensor ()]) model = torchvision . models . detection . __dict__ [ 'maskrcnn_resnet50_fpn' ]( num_classes = 91 , pretrained = True ) # Run the benchmark COCO . 
benchmark ( model = model , paper_model_name = 'Mask R-CNN (ResNet-50-FPN)' , paper_arxiv_id = '1703.06870' , transforms = transforms , model_output_transform = coco_output_transform , send_data_to_device = coco_data_to_device , collate_fn = coco_collate_fn , batch_size = 8 , num_gpu = 1 ) COCO.benchmark() Arguments The source code for the COCO evaluation method can be found here . We now explain each argument. model a PyTorch module, (e.g. a nn.Module object), that takes in COCO data and outputs detections. For example, from the torchvision repository: import torchvision model = torchvision . models . detection . __dict__ [ 'maskrcnn_resnet50_fpn' ]( num_classes = 91 , pretrained = True ) model_description (str, optional): Optional model description. For example: model_description = 'Using ported TensorFlow weights' input_transform Composing the transforms used to transform the input data (the images), e.g. resizing (e.g transforms.Resize ), center cropping, to tensor transformations and normalization. For example: import torchvision.transforms as transforms input_transform = transforms . Compose ([ transforms . Resize ( 512 , PIL . Image . BICUBIC ), transforms . ToTensor (), ]) target_transform Composing the transforms used to transform the target data transforms Composing the transforms used to transform the input data (the images) and the target data (the labels) in a dual fashion - for example resizing the pair of data jointly. Below shows an example; note the fact that the __call__ takes in two arguments and returns two arguments (ordinary torchvision transforms return one result). from torchvision.transforms import functional as F class Compose ( object ): def __init__ ( self , transforms ): self . transforms = transforms def __call__ ( self , image , target ): for t in self . transforms : image , target = t ( image , target ) return image , target class ToTensor ( object ): def __call__ ( self , image , target ): image = F . to_tensor ( image ) return image , target class ImageResize ( object ): def __init__ ( self , resize_shape ): self . resize_shape = resize_shape def __call__ ( self , image , target ): image = F . resize ( image , self . resize_shape ) return image , target transforms = Compose ([ ImageResize (( 512 , 512 )), ToTensor ()]) Note that the default transforms are: from torchbench.object_detection.utils import Compose , ConvertCocoPolysToMask , ToTensor transforms = Compose ([ ConvertCocoPolysToMask (), ToTensor ()]) Where ConvertCocoPolysToMask is from the torchvision reference implementation to transform the inputs to the right format to be entered into the model. You can pass whatever transforms you need to make the dataset work with your model. model_output_transform (callable, optional): An optional function that takes in model output (after being passed through your model forward pass) and transforms it. Afterwards, the output will be passed into an evaluation function. The model output transform is a function that you can pass in to transform the model output after the data has been passed into the model. This is useful if you have to do further processing steps after inference to get the predictions in the right format for evaluation. The model evaluation for each batch is as follows from utils.py are: with torch . 
### model_output_transform

(callable, optional): An optional function that takes in model output (after being passed through your model's forward pass) and transforms it; the transformed output is then passed into the evaluation function. This is useful if you have to do further processing steps after inference to get the predictions into the right format for evaluation. The per-batch evaluation logic, from utils.py, is:

```python
with torch.no_grad():
    for i, (input, target) in enumerate(iterator):
        input, target = send_data_to_device(input, target, device=device)
        original_output = model(input)
        output, target = model_output_transform(original_output, target)
        result = {tar["image_id"].item(): out
                  for tar, out in zip(target, output)}
        coco_evaluator.update(result)
```

We can see `model_output_transform` in use, after which the output is packed into a dictionary with image ids as keys and per-image outputs as values. The expected output of `model_output_transform` is a list of dictionaries (length = batch_size), where each dictionary contains keys for 'boxes', 'labels', 'scores' and 'masks', and each value is a `torch.Tensor`. In other words, `result` maps each image id to a dictionary of predictions (boxes, labels, scores, ...).
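For instance, if your model returned plain tuples rather than dictionaries, the transform can repackage them. A sketch under that assumption (the tuple layout is hypothetical):

```python
def tuple_output_transform(output, target):
    # Hypothetical adapter: assumes the model returns a list of
    # (boxes, labels, scores, masks) tuples, one per image.
    transformed = []
    for boxes, labels, scores, masks in output:
        transformed.append({
            "boxes": boxes.to("cpu"),
            "labels": labels.to("cpu"),
            "scores": scores.to("cpu"),
            "masks": masks.to("cpu"),
        })
    return transformed, target
```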
### collate_fn

How the dataset is collated - an optional callable passed into the DataLoader. As an example, the default collate function is:

```python
def coco_collate_fn(batch):
    return tuple(zip(*batch))
```

### send_data_to_device

An optional function specifying how the data is sent to a device. As an example, the COCO default is:

```python
def coco_data_to_device(input, target, device: str = "cuda",
                        non_blocking: bool = True):
    input = list(inp.to(device=device, non_blocking=non_blocking)
                 for inp in input)
    target = [{k: v.to(device=device, non_blocking=non_blocking)
               for k, v in t.items()} for t in target]
    return input, target
```

### data_root

data_root (str): The location of the COCO dataset - change this parameter when evaluating locally if your COCO data is located in a different folder (or, alternatively, if you want to download to an alternative location). Note that this parameter is overridden when the evaluation is performed on the server, so it is solely for your local use.

### num_workers

num_workers (int): The number of workers to use for the DataLoader.

### batch_size

batch_size (int): The batch size to use for evaluation; if you get memory errors, reduce this (halving it each time) until your model fits onto the GPU.

### paper_model_name

paper_model_name (str, optional): The name of the model from the paper - if you want to link your build to a machine learning paper. See the COCO benchmark page for model names, https://www.sotabench.com/benchmark/coco-minival, e.g. on the paper leaderboard tab.

### paper_arxiv_id

paper_arxiv_id (str, optional): Optional link to arXiv if you want to connect to papers on the leaderboard; pass in the corresponding paper's arXiv ID, e.g. '1611.05431'.

### paper_pwc_id

paper_pwc_id (str, optional): Optional link to Papers With Code; pass in the corresponding Papers With Code URL slug, e.g. 'u-gat-it-unsupervised-generative-attentional'.

### paper_results

paper_results (dict, optional): If the paper you are reproducing does not have model results on sotabench.com, you can specify the paper results yourself through this argument, where keys are metric names and values are metric values, e.g. `{'box AP': 0.349, 'AP50': 0.592, ...}`. Ensure that the metric names match those on the sotabench leaderboard - for COCO they should be 'box AP', 'AP50', 'AP75', 'APS', 'APM', 'APL'.
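As a sketch, a call supplying paper results manually might look like the following; the two metric values are the illustrative numbers from above, not real paper results:

```python
COCO.benchmark(
    model=model,
    paper_model_name='Mask R-CNN (ResNet-50-FPN)',
    paper_results={
        'box AP': 0.349,  # illustrative value
        'AP50': 0.592,    # illustrative value
    },
)
```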
### pytorch_hub_url

pytorch_hub_url (str, optional): Optional link to a PyTorch Hub URL if your model is listed there, e.g. 'nvidia_deeplearningexamples_waveglow'.

## Need More Help?

Head on over to the Computer Vision section of the sotabench forums if you have any questions or difficulties.
# ImageNet

You can view the ImageNet leaderboard here.

## Getting Started

You'll need the following in the root of your repository:

- `sotabench.py` file - contains benchmarking logic; the server will run this on each commit
- `requirements.txt` file - Python dependencies to be installed before running `sotabench.py`
- `sotabench_setup.sh` (optional) - any advanced dependencies or setup, e.g. compilation

Once you connect your repository to sotabench.com, the platform will run your `sotabench.py` file whenever you commit to master.

We now show how to write the `sotabench.py` file to evaluate a PyTorch image classification model with the torchbench library, so that your results can be recorded and reported to the community.

## The ImageNet Evaluation Class

You can import the evaluation class from the following module:

```python
from torchbench.image_classification import ImageNet
```

The ImageNet class contains several components used in the evaluation, such as the dataset:

```python
ImageNet.dataset  # torchvision.datasets.ImageNet
```

And some default arguments used for evaluation (which can be overridden):

```python
ImageNet.normalize
# Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])

ImageNet.input_transform
# Compose(
#     Resize(size=256, interpolation=PIL.Image.BILINEAR)
#     CenterCrop(size=(224, 224))
#     ToTensor()
#     Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
# )

ImageNet.send_data_to_device
```

We will explain these options shortly, and how you can manipulate them so the evaluation logic plays nicely with your model.

An evaluation call - which performs evaluation and, if run on the sotabench.com server, saves the results - goes through the `benchmark()` method:

```python
from torchvision.models.resnet import resnext101_32x8d

ImageNet.benchmark(
    model=resnext101_32x8d(pretrained=True),
    paper_model_name='ResNeXt-101-32x8d',
    paper_arxiv_id='1611.05431'
)
```

These are the key arguments: the `model` is usually an `nn.Module` type object but, more generally, is any object with a `forward` method that takes in input data and outputs predictions. `paper_model_name` refers to the name of the model, and `paper_arxiv_id` (optional) refers to the paper from which the model originated. If these two arguments match a recorded paper result, sotabench.com will link your model with the paper and compare your code's results with those reported in the paper.

## A full sotabench.py example

Below is an example for the torchvision repository benchmarking a ResNeXt-101-32x8d model:

```python
import PIL
import torchvision.transforms as transforms

from torchbench.image_classification import ImageNet
from torchvision.models.resnet import resnext101_32x8d

# Define the transforms needed to convert ImageNet data to the
# expected model input
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                 std=[0.229, 0.224, 0.225])
input_transform = transforms.Compose([
    transforms.Resize(256, PIL.Image.BICUBIC),
    transforms.CenterCrop(224),
    transforms.ToTensor(),
    normalize,
])

# Run the benchmark
ImageNet.benchmark(
    model=resnext101_32x8d(pretrained=True),
    paper_model_name='ResNeXt-101-32x8d',
    paper_arxiv_id='1611.05431',
    input_transform=input_transform,
    batch_size=256,
    num_gpu=1
)
```
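The same call can also carry the optional metadata arguments described below; a sketch with the optional description attached (the string itself is just the illustrative example from the `model_description` section):

```python
ImageNet.benchmark(
    model=resnext101_32x8d(pretrained=True),
    paper_model_name='ResNeXt-101-32x8d',
    paper_arxiv_id='1611.05431',
    model_description='Using ported TensorFlow weights',  # illustrative
    input_transform=input_transform,
    batch_size=256,
    num_gpu=1
)
```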
## ImageNet.benchmark() Arguments

The source code for the ImageNet evaluation method can be found here. We now explain each argument.

### model

A PyTorch module (e.g. an `nn.Module` object) that takes in ImageNet data and outputs class predictions. For example, from the torchvision repository:

```python
from torchvision.models.resnet import resnext101_32x8d
model = resnext101_32x8d(pretrained=True)
```

### model_description

(str, optional): Optional model description. For example:

```python
model_description = 'Using ported TensorFlow weights'
```

### input_transform

Composes the transforms used to transform the input data (the images), e.g. resizing (such as `transforms.Resize`), center cropping, to-tensor conversion and normalization. For example:

```python
import PIL
import torchvision.transforms as transforms

normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                 std=[0.229, 0.224, 0.225])
input_transform = transforms.Compose([
    transforms.Resize(256, PIL.Image.BICUBIC),
    transforms.CenterCrop(224),
    transforms.ToTensor(),
    normalize,
])
```

### target_transform

Composes the transforms used to transform the target data (the labels).

### model_output_transform

(callable, optional): An optional function that takes in model output (after being passed through your model's forward pass) and transforms it; the transformed output is then passed into the evaluation function. This is useful if you have to do further processing steps after inference to get the predictions into the right format for evaluation. Most PyTorch models for image classification on ImageNet don't need to use this argument. The per-batch evaluation logic, from utils.py, is:

```python
with torch.no_grad():
    for i, (input, target) in enumerate(iterator):
        input, target = send_data_to_device(input, target, device=device)
        output = model(input)

        if model_output_transform is not None:
            output = model_output_transform(output, target, model=model)

        check_metric_inputs(output, target, test_loader.dataset, i)
        prec1, prec5 = accuracy(output, target, topk=(1, 5))
```

Model output (following `model.forward()` and optionally `model_output_transform`) should be a 2D `torch.Tensor`: the first dimension indexes the examples in the batch (length batch_size) and the second indexes the ImageNet classes (length 1000).
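If your network's forward pass returns more than the logits, a small adapter can strip the extras. A sketch, assuming a hypothetical model that returns a `(logits, aux_features)` tuple:

```python
def take_logits(output, target, model=None):
    # Hypothetical adapter: keep only the (batch_size, 1000) logits
    # tensor that the accuracy computation expects.
    logits, _aux = output
    return logits
```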
### send_data_to_device

An optional function specifying how the data is sent to a device. As an example, the default is:

```python
def default_data_to_device(input, target=None, device: str = "cuda",
                           non_blocking: bool = True):
    """Sends data output from a PyTorch Dataloader to the device."""
    input = input.to(device=device, non_blocking=non_blocking)

    if target is not None:
        target = target.to(device=device, non_blocking=non_blocking)

    return input, target
```

### data_root

data_root (str): The location of the ImageNet dataset - change this parameter when evaluating locally if your ImageNet data is located in a different folder (or, alternatively, if you want to download to an alternative location). Note that this parameter is overridden when the evaluation is performed on the server, so it is solely for your local use.

### num_workers

num_workers (int): The number of workers to use for the DataLoader.

### batch_size

batch_size (int): The batch size to use for evaluation; if you get memory errors, reduce this (halving it each time) until your model fits onto the GPU.
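For local testing, that halving strategy can even be automated; a sketch (purely illustrative - in the committed file you would fix a batch size that is known to work):

```python
# Hypothetical local loop: halve the batch size until evaluation fits
# on the GPU (CUDA out-of-memory errors surface as RuntimeError).
batch_size = 256
while batch_size >= 1:
    try:
        ImageNet.benchmark(
            model=resnext101_32x8d(pretrained=True),
            paper_model_name='ResNeXt-101-32x8d',
            paper_arxiv_id='1611.05431',
            input_transform=input_transform,
            batch_size=batch_size,
            num_gpu=1
        )
        break
    except RuntimeError:
        batch_size //= 2
```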
### paper_model_name

paper_model_name (str, optional): The name of the model from the paper - if you want to link your build to a machine learning paper. See the ImageNet benchmark page for model names, https://sotabench.com/benchmarks/image-classification-on-imagenet, e.g. on the paper leaderboard tab.

### paper_arxiv_id

paper_arxiv_id (str, optional): Optional link to arXiv if you want to connect to papers on the leaderboard; pass in the corresponding paper's arXiv ID, e.g. '1611.05431'.

### paper_pwc_id

paper_pwc_id (str, optional): Optional link to Papers With Code; pass in the corresponding Papers With Code URL slug, e.g. 'u-gat-it-unsupervised-generative-attentional'.

### paper_results

paper_results (dict, optional): If the paper you are reproducing does not have model results on sotabench.com, you can specify the paper results yourself through this argument, where keys are metric names and values are metric values, e.g. `{'Top 1 Accuracy': 0.543, 'Top 5 Accuracy': 0.654}`. Ensure that the metric names match those on the sotabench leaderboard - for ImageNet they should be 'Top 1 Accuracy' and 'Top 5 Accuracy'.

### pytorch_hub_url

pytorch_hub_url (str, optional): Optional link to a PyTorch Hub URL if your model is listed there, e.g. 'nvidia_deeplearningexamples_waveglow'.

## Need More Help?

Head on over to the Computer Vision section of the sotabench forums if you have any questions or difficulties.
# PASCAL VOC 2012

You can view the PASCAL VOC 2012 leaderboard here.

**Warning:** Semantic segmentation APIs in PyTorch are not very standardised across repositories, meaning that it may require a lot of glue to get them working with this evaluation procedure (which is based on torchvision). For easier VOC integration with sotabench, it is recommended to use the more general sotabencheval API.

## Getting Started

You'll need the following in the root of your repository:

- `sotabench.py` file - contains benchmarking logic; the server will run this on each commit
- `requirements.txt` file - Python dependencies to be installed before running `sotabench.py`
- `sotabench_setup.sh` (optional) - any advanced dependencies or setup, e.g. compilation

Once you connect your repository to sotabench.com, the platform will run your `sotabench.py` file whenever you commit to master.

We now show how to write the `sotabench.py` file to evaluate a PyTorch semantic segmentation model with the torchbench library, so that your results can be recorded and reported to the community.
## The VOC Evaluation Class

You can import the evaluation class from the following module:

```python
from torchbench.semantic_segmentation import PASCALVOC
```

The PASCALVOC class contains several components used in the evaluation, such as the dataset:

```python
PASCALVOC.dataset  # torchvision.datasets.voc.VOCSegmentation
```

And some default arguments used for evaluation (which can be overridden):

```python
PASCALVOC.normalize
PASCALVOC.transforms
PASCALVOC.send_data_to_device
PASCALVOC.collate_fn
PASCALVOC.model_output_transform
```

We will explain these options shortly, and how you can manipulate them so the evaluation logic plays nicely with your model.

An evaluation call - which performs evaluation and, if run on the sotabench.com server, saves the results - goes through the `benchmark()` method:

```python
from torchvision.models.segmentation import fcn_resnet101

model = fcn_resnet101(num_classes=21, pretrained=True)

PASCALVOC.benchmark(
    model=model,
    paper_model_name='FCN ResNet-101',
    paper_arxiv_id='1605.06211'
)
```

These are the key arguments: the `model` is usually an `nn.Module` type object but, more generally, is any object with a `forward` method that takes in input data and outputs predictions. `paper_model_name` refers to the name of the model, and `paper_arxiv_id` (optional) refers to the paper from which the model originated. If these two arguments match a recorded paper result, sotabench.com will link your model with the paper and compare your code's results with those reported in the paper.

## A full sotabench.py example

Below is an example for the torchvision repository benchmarking an FCN ResNet-101 model:

```python
from torchbench.semantic_segmentation import PASCALVOC
from torchbench.semantic_segmentation.transforms import (
    Normalize,
    Resize,
    ToTensor,
    Compose,
)
from torchvision.models.segmentation import fcn_resnet101


def model_output_function(output, target):
    return output['out'].argmax(1).flatten(), target.flatten()


def seg_collate_fn(batch):
    images, targets = list(zip(*batch))
    batched_imgs = cat_list(images, fill_value=0)
    batched_targets = cat_list(targets, fill_value=255)
    return batched_imgs, batched_targets


model = fcn_resnet101(num_classes=21, pretrained=True)

normalize = Normalize(mean=[0.485, 0.456, 0.406],
                      std=[0.229, 0.224, 0.225])
my_transforms = Compose([Resize((520, 480)), ToTensor(), normalize])

PASCALVOC.benchmark(
    batch_size=32,
    model=model,
    transforms=my_transforms,
    model_output_transform=model_output_function,
    collate_fn=seg_collate_fn,
    paper_model_name='FCN ResNet-101',
    paper_arxiv_id='1605.06211'
)
```
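The `seg_collate_fn` above relies on a `cat_list` helper that pads each image (and mask) to a common size before stacking, which the example does not define. A sketch along the lines of the torchvision segmentation reference implementation, if you need to supply your own:

```python
def cat_list(tensors, fill_value=0):
    # Pad a list of (C, H, W) image tensors - or (H, W) mask tensors -
    # to the largest spatial size in the batch, filling with fill_value,
    # then stack them into a single batch tensor.
    max_size = tuple(max(s) for s in zip(*[t.shape for t in tensors]))
    batch_shape = (len(tensors),) + max_size
    batched = tensors[0].new_full(batch_shape, fill_value)
    for t, padded in zip(tensors, batched):
        padded[..., :t.shape[-2], :t.shape[-1]].copy_(t)
    return batched
```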
## PASCALVOC.benchmark() Arguments

The source code for the PASCALVOC evaluation method can be found here. We now explain each argument.

### model

A PyTorch module (e.g. an `nn.Module` object) that takes in VOC data and outputs segmentations. For example, from the torchvision repository:

```python
from torchvision.models.segmentation import fcn_resnet101
model = fcn_resnet101(num_classes=21, pretrained=True)
```

### model_description

(str, optional): Optional model description. For example:

```python
model_description = 'Using ported TensorFlow weights'
```

### input_transform

Composes the transforms used to transform the input data (the images), e.g. resizing (such as `transforms.Resize`), center cropping, to-tensor conversion and normalization. For example:

```python
import PIL
import torchvision.transforms as transforms

input_transform = transforms.Compose([
    transforms.Resize(512, PIL.Image.BICUBIC),
    transforms.ToTensor(),
])
```

### target_transform

Composes the transforms used to transform the target data (the labels).

### transforms

Composes the transforms used to transform the input data (the images) and the target data (the labels) jointly - for example, resizing an image and its segmentation mask as a pair. Below is an example; note that `__call__` takes in two arguments and returns two arguments, whereas ordinary torchvision transforms take and return a single result.

```python
from torchvision.transforms import functional as F


class Compose(object):
    def __init__(self, transforms):
        self.transforms = transforms

    def __call__(self, image, target):
        for t in self.transforms:
            image, target = t(image, target)
        return image, target


class ToTensor(object):
    def __call__(self, image, target):
        image = F.to_tensor(image)
        return image, target


class ImageResize(object):
    def __init__(self, resize_shape):
        self.resize_shape = resize_shape

    def __call__(self, image, target):
        image = F.resize(image, self.resize_shape)
        return image, target


transforms = Compose([ImageResize((512, 512)), ToTensor()])
```

Note that the default transforms are:

```python
from torchbench.semantic_segmentation.transforms import (
    Normalize,
    Resize,
    ToTensor,
    Compose,
)

normalize = Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
transforms = Compose([Resize((520, 480)), ToTensor(), normalize])
```

### model_output_transform

(callable, optional): An optional function that takes in model output (after being passed through your model's forward pass) and transforms it; the transformed output is then passed into the evaluation function. This is useful if you have to do further processing steps after inference to get the predictions into the right format for evaluation. The per-batch evaluation logic, from utils.py, is:

```python
with torch.no_grad():
    for i, (input, target) in enumerate(iterator):
        input, target = send_data_to_device(input, target, device=device)
        output = model(input)
        output, target = model_output_transform(output, target)
        confmat.update(target, output)
```

The default model_output_transform is:

```python
def default_seg_output_transform(output, target):
    return output["out"].argmax(1).flatten(), target.flatten()
```

The output and target are flattened to 1D tensors; for the output we take the arg-max predicted class per pixel. Each element of each tensor then represents one pixel and holds a class id (e.g. class 6), and the model predictions are compared pixel-by-pixel against the ground-truth labels to calculate the accuracy.
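If your segmentation model returns the raw `(N, C, H, W)` logits tensor directly, rather than a dictionary with an 'out' key, a one-line adapter is enough. A sketch under that assumption:

```python
def plain_tensor_output_transform(output, target):
    # Hypothetical adapter for a model whose forward pass returns the
    # logits tensor itself rather than {'out': logits}.
    return output.argmax(1).flatten(), target.flatten()
```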
### collate_fn

How the dataset is collated - an optional callable passed into the DataLoader. As an example, the default collate function is:

```python
def default_seg_collate_fn(batch):
    images, targets = list(zip(*batch))
    batched_imgs = cat_list(images, fill_value=0)
    batched_targets = cat_list(targets, fill_value=255)
    return batched_imgs, batched_targets
```

### send_data_to_device

An optional function specifying how the data is sent to a device. As an example, the PASCAL VOC default is:

```python
def default_data_to_device(input, target=None, device: str = "cuda",
                           non_blocking: bool = True):
    """Sends data output from a PyTorch Dataloader to the device."""
    input = input.to(device=device, non_blocking=non_blocking)

    if target is not None:
        target = target.to(device=device, non_blocking=non_blocking)

    return input, target
```

### data_root

data_root (str): The location of the VOC dataset - change this parameter when evaluating locally if your VOC data is located in a different folder (or, alternatively, if you want to download to an alternative location). Note that this parameter is overridden when the evaluation is performed on the server, so it is solely for your local use.

### num_workers

num_workers (int): The number of workers to use for the DataLoader.

### batch_size

batch_size (int): The batch size to use for evaluation; if you get memory errors, reduce this (halving it each time) until your model fits onto the GPU.

### paper_model_name

paper_model_name (str, optional): The name of the model from the paper - if you want to link your build to a machine learning paper. See the VOC benchmark page for model names, https://sotabench.com/benchmarks/semantic-segmentation-on-pascal-voc-2012, e.g. on the paper leaderboard tab.

### paper_arxiv_id

paper_arxiv_id (str, optional): Optional link to arXiv if you want to connect to papers on the leaderboard; pass in the corresponding paper's arXiv ID, e.g. '1611.05431'.

### paper_pwc_id

paper_pwc_id (str, optional): Optional link to Papers With Code; pass in the corresponding Papers With Code URL slug, e.g. 'u-gat-it-unsupervised-generative-attentional'.

### paper_results

paper_results (dict, optional): If the paper you are reproducing does not have model results on sotabench.com, you can specify the paper results yourself through this argument, where keys are metric names and values are metric values, e.g. `{'Accuracy': 0.745, 'Mean IOU': 0.592}`. Ensure that the metric names match those on the sotabench leaderboard - for VOC they should be 'Accuracy' and 'Mean IOU'.

### pytorch_hub_url

pytorch_hub_url (str, optional): Optional link to a PyTorch Hub URL if your model is listed there, e.g. 'nvidia_deeplearningexamples_waveglow'.

## Need More Help?

Head on over to the Computer Vision section of the sotabench forums if you have any questions or difficulties.
diff --git a/docs/site/sitemap.xml b/docs/site/sitemap.xml
new file mode 100644
index 0000000..b2477c1
--- /dev/null
+++ b/docs/site/sitemap.xml
@@ -0,0 +1,23 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">
+    <url>
+     <loc>None</loc>
+     <lastmod>2019-09-27</lastmod>
+     <changefreq>daily</changefreq>
+    </url>
+    <url>
+     <loc>None</loc>
+     <lastmod>2019-09-27</lastmod>
+     <changefreq>daily</changefreq>
+    </url>
+    <url>
+     <loc>None</loc>
+     <lastmod>2019-09-27</lastmod>
+     <changefreq>daily</changefreq>
+    </url>
+    <url>
+     <loc>None</loc>
+     <lastmod>2019-09-27</lastmod>
+     <changefreq>daily</changefreq>
+    </url>
+</urlset>
\ No newline at end of file
diff --git a/docs/site/sitemap.xml.gz b/docs/site/sitemap.xml.gz
new file mode 100644
index 0000000..b568d67
Binary files /dev/null and b/docs/site/sitemap.xml.gz differ
diff --git a/torchbench/image_classification/cifar10.py b/torchbench/image_classification/cifar10.py
index c6b4d02..867263c 100644
--- a/torchbench/image_classification/cifar10.py
+++ b/torchbench/image_classification/cifar10.py
@@ -64,7 +64,7 @@ def benchmark(
         num_workers=num_workers,
         pin_memory=True,
     )
-    test_results, run_hash = evaluate_classification(
+    test_results, speed_mem_metrics, run_hash = evaluate_classification(
         model=model,
         test_loader=test_loader,
         model_output_transform=model_output_transform,
@@ -84,6 +84,7 @@ def benchmark(
         config=config,
         dataset=cls.dataset.__name__,
         results=test_results,
+        speed_mem_metrics=speed_mem_metrics,
         pytorch_hub_id=pytorch_hub_url,
         model=paper_model_name,
         model_description=model_description,
diff --git a/torchbench/image_classification/cifar100.py b/torchbench/image_classification/cifar100.py
index bb0cb8c..1f24341 100644
--- a/torchbench/image_classification/cifar100.py
+++ b/torchbench/image_classification/cifar100.py
@@ -64,7 +64,7 @@ def benchmark(
         num_workers=num_workers,
         pin_memory=True,
     )
-    test_results, run_hash = evaluate_classification(
+    test_results, speed_mem_metrics, run_hash = evaluate_classification(
         model=model,
         test_loader=test_loader,
         model_output_transform=model_output_transform,
@@ -84,6 +84,7 @@ def benchmark(
         config=config,
         dataset=cls.dataset.__name__,
         results=test_results,
+        speed_mem_metrics=speed_mem_metrics,
         pytorch_hub_id=pytorch_hub_url,
         model=paper_model_name,
         model_description=model_description,
diff --git a/torchbench/image_classification/imagenet.py b/torchbench/image_classification/imagenet.py
index 26db708..47630e7 100644
--- a/torchbench/image_classification/imagenet.py
+++ b/torchbench/image_classification/imagenet.py
@@ -200,7 +200,7 @@ def benchmark(
         num_workers=num_workers,
         pin_memory=pin_memory,
     )
-    test_results, run_hash = evaluate_classification(
+    test_results, speed_mem_metrics, run_hash = evaluate_classification(
         model=model,
         test_loader=test_loader,
         model_output_transform=model_output_transform,
@@ -220,6 +220,7 @@ def benchmark(
         config=config,
diff --git a/torchbench/image_classification/imagenet.py b/torchbench/image_classification/imagenet.py
index 26db708..47630e7 100644
--- a/torchbench/image_classification/imagenet.py
+++ b/torchbench/image_classification/imagenet.py
@@ -200,7 +200,7 @@ def benchmark(
         num_workers=num_workers,
         pin_memory=pin_memory,
     )
-    test_results, run_hash = evaluate_classification(
+    test_results, speed_mem_metrics, run_hash = evaluate_classification(
         model=model,
         test_loader=test_loader,
         model_output_transform=model_output_transform,
@@ -220,6 +220,7 @@
         config=config,
         dataset=cls.dataset.__name__,
         results=test_results,
+        speed_mem_metrics=speed_mem_metrics,
         pytorch_hub_id=pytorch_hub_url,
         model=paper_model_name,
         model_description=model_description,
diff --git a/torchbench/image_classification/mnist.py b/torchbench/image_classification/mnist.py
index 0451485..7b8ff83 100644
--- a/torchbench/image_classification/mnist.py
+++ b/torchbench/image_classification/mnist.py
@@ -63,7 +63,7 @@ def benchmark(
         num_workers=num_workers,
         pin_memory=True,
     )
-    test_results, run_hash = evaluate_classification(
+    test_results, speed_mem_metrics, run_hash = evaluate_classification(
         model=model,
         test_loader=test_loader,
         model_output_transform=model_output_transform,
@@ -83,6 +83,7 @@
         config=config,
         dataset=cls.dataset.__name__,
         results=test_results,
+        speed_mem_metrics=speed_mem_metrics,
         pytorch_hub_id=pytorch_hub_url,
         model=paper_model_name,
         model_description=model_description,
diff --git a/torchbench/image_classification/stl10.py b/torchbench/image_classification/stl10.py
index 2140fd5..01304c3 100644
--- a/torchbench/image_classification/stl10.py
+++ b/torchbench/image_classification/stl10.py
@@ -63,7 +63,7 @@ def benchmark(
         num_workers=num_workers,
         pin_memory=True,
     )
-    test_results, run_hash = evaluate_classification(
+    test_results, speed_mem_metrics, run_hash = evaluate_classification(
         model=model,
         test_loader=test_loader,
         model_output_transform=model_output_transform,
@@ -83,6 +83,7 @@
         config=config,
         dataset=cls.dataset.__name__,
         results=test_results,
+        speed_mem_metrics=speed_mem_metrics,
         pytorch_hub_id=pytorch_hub_url,
         model=paper_model_name,
         model_description=model_description,
diff --git a/torchbench/image_classification/svhn.py b/torchbench/image_classification/svhn.py
index f61e2eb..14e7add 100644
--- a/torchbench/image_classification/svhn.py
+++ b/torchbench/image_classification/svhn.py
@@ -66,7 +66,7 @@ def benchmark(
         num_workers=num_workers,
         pin_memory=True,
     )
-    test_results, run_hash = evaluate_classification(
+    test_results, speed_mem_metrics, run_hash = evaluate_classification(
         model=model,
         test_loader=test_loader,
         model_output_transform=model_output_transform,
@@ -86,6 +86,7 @@
         config=config,
         dataset=cls.dataset.__name__,
         results=test_results,
+        speed_mem_metrics=speed_mem_metrics,
         pytorch_hub_id=pytorch_hub_url,
         model=paper_model_name,
         model_description=model_description,
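The `utils.py` change below replaces the per-batch `AverageMeter` timing with a single wall-clock measurement around the whole loop, plus a read of the CUDA peak-memory counter. Reduced to its essentials, the pattern is this (a sketch with a hypothetical `run_eval` callable, not the torchbench code verbatim):

```python
import time
import torch

def measure_speed_and_memory(run_eval, num_tasks, device="cuda"):
    # Time the evaluation loop as a whole rather than per batch.
    init_time = time.time()
    run_eval()  # runs the full evaluation loop
    exec_time = time.time() - init_time

    # Read the peak CUDA allocation since the last reset, then reset the
    # counter so a subsequent benchmark in the same process starts clean.
    memory_allocated = torch.cuda.max_memory_allocated(device=device)
    torch.cuda.reset_max_memory_allocated(device=device)

    return {
        "Tasks / Evaluation Time": num_tasks / exec_time,
        "Tasks": num_tasks,
        "Evaluation Time": exec_time,
        "Max Memory Allocated (Total)": memory_allocated,
    }
```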
diff --git a/torchbench/image_classification/utils.py b/torchbench/image_classification/utils.py
index 0bd1ce5..179a114 100644
--- a/torchbench/image_classification/utils.py
+++ b/torchbench/image_classification/utils.py
@@ -1,5 +1,6 @@
 import time
 
+import numpy as np
 import tqdm
 import torch
 import torchvision
@@ -16,14 +17,12 @@ def evaluate_classification(
     send_data_to_device,
     device="cuda",
 ):
-    batch_time = AverageMeter()
     top1 = AverageMeter()
     top5 = AverageMeter()
-
-    end = time.time()
-
     iterator = tqdm.tqdm(test_loader, desc="Evaluation", mininterval=5)
 
+    init_time = time.time()
+
     with torch.no_grad():
         for i, (input, target) in enumerate(iterator):
@@ -37,8 +36,6 @@ def evaluate_classification(
             prec1, prec5 = accuracy(output, target, topk=(1, 5))
             top1.update(prec1.item(), input.size(0))
             top5.update(prec5.item(), input.size(0))
-            batch_time.update(time.time() - end)
-            end = time.time()
 
             if i == 0:
                 # for sotabench.com caching of evaluation
                 run_hash = calculate_run_hash([prec1, prec5], output)
@@ -57,10 +54,31 @@ def evaluate_classification(
                     "No model change detected (using the first batch run "
                     "hash). Returning cached results."
                 )
-                return cached_res, run_hash
+
+                speed_mem_metrics = {
+                    'Tasks / Evaluation Time': None,
+                    'Evaluation Time': None,
+                    'Tasks': None,
+                    'Max Memory Allocated (Total)': None,
+                }
+
+                return cached_res, speed_mem_metrics, run_hash
+
+    exec_time = time.time() - init_time
+
+    memory_allocated = torch.cuda.max_memory_allocated(device=device)
+    torch.cuda.reset_max_memory_allocated(device=device)
+
+    speed_mem_metrics = {
+        'Tasks / Evaluation Time': len(test_loader.dataset) / exec_time,
+        'Tasks': len(test_loader.dataset),
+        'Evaluation Time': exec_time,
+        'Max Memory Allocated (Total)': memory_allocated,
+    }
 
     return (
-        {"Top 1 Accuracy": top1.avg / 100, "Top 5 Accuracy": top5.avg / 100},
+        {"Top 1 Accuracy": top1.avg / 100,
+         "Top 5 Accuracy": top5.avg / 100},
+        speed_mem_metrics,
         run_hash,
     )
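One subtlety this change preserves: when the first-batch run hash matches a cached server-side run, the function returns early with `cached_res`, so nothing is measured and the cached path deliberately reports a `speed_mem_metrics` dict of `None` values. Schematically (with `fetch_cached_results` as a hypothetical stand-in for the sotabenchapi client lookup):

```python
if i == 0:
    # Hash the first batch's outputs; an unchanged model produces an
    # unchanged hash, so the server's cached metrics can be reused.
    run_hash = calculate_run_hash([prec1, prec5], output)
    cached_res = fetch_cached_results(run_hash)  # hypothetical helper
    if cached_res:
        # Nothing was measured on this run, so every speed/memory
        # metric is reported as None alongside the cached results.
        speed_mem_metrics = dict.fromkeys([
            "Tasks / Evaluation Time", "Evaluation Time",
            "Tasks", "Max Memory Allocated (Total)",
        ])
        return cached_res, speed_mem_metrics, run_hash
```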
diff --git a/torchbench/object_detection/coco.py b/torchbench/object_detection/coco.py
index 9fcc00c..772f890 100644
--- a/torchbench/object_detection/coco.py
+++ b/torchbench/object_detection/coco.py
@@ -119,15 +119,14 @@ def benchmark(
     Args:
         model: a PyTorch module, (e.g. a ``nn.Module`` object), that takes
-            in ImageNet inputs and outputs ImageNet predictions.
+            in COCO inputs and outputs COCO predictions.
         model_description (str, optional): Optional model description.
         input_transform (transforms.Compose, optional): Composing the
             transforms used to transform the dataset, e.g. applying
             resizing (e.g ``transforms.Resize``), center cropping, to
             tensor transformations and normalization.
         target_transform (torchvision.transforms.Compose, optional):
-            Composing any transforms used to transform the target. This is
-            usually not used for ImageNet.
+            Composing any transforms used to transform the target.
         transforms (torchbench.object_detection.transforms.Compose, optional):
             Does a joint transform on the input and the target - please see
             the torchbench.object_detection.transforms file for more
             information.
@@ -145,8 +144,8 @@ def benchmark(
             default (2017) creates the 'minival' validation set.
         device (str): Default is 'cuda' - this is the device that the model
             is sent to in the default treatment.
-        data_root (str): The location of the ImageNet dataset - change this
-            parameter when evaluating locally if your ImageNet data is
+        data_root (str): The location of the COCO dataset - change this
+            parameter when evaluating locally if your COCO data is
             located in a different folder (or alternatively if you want
             to download to an alternative location).
         num_workers (int): The number of workers to use for the DataLoader.
@@ -157,8 +156,8 @@ def benchmark(
             only support 1 GPU for now.
         paper_model_name (str, optional): The name of the model from the
             paper - if you want to link your build to a machine learning
-            paper. See the ImageNet benchmark page for model names,
-            https://www.sotabench.com/benchmark/imagenet, e.g. on the paper
+            paper. See the COCO benchmark page for model names,
+            https://www.sotabench.com/benchmark/coco-minival, e.g. on the paper
             leaderboard tab.
         paper_arxiv_id (str, optional): Optional linking to ArXiv if you
             want to link to papers on the leaderboard; put in the
@@ -171,11 +170,11 @@ def benchmark(
             the paper results yourself through this argument, where keys
             are metric names, values are metric values. e.g::
 
-                {'Top 1 Accuracy': 0.543, 'Top 5 Accuracy': 0.654}.
+                {'box AP': 0.349, 'AP50': 0.592, ...}.
 
             Ensure that the metric names match those on the sotabench
-            leaderboard - for ImageNet it should be 'Top 1 Accuracy' and
-            'Top 5 Accuracy'.
+            leaderboard - for COCO it should be 'box AP', 'AP50',
+            'AP75', 'APS', 'APM', 'APL'.
         pytorch_hub_url (str, optional): Optional linking to PyTorch Hub
             url if your model is linked there; e.g:
             'nvidia_deeplearningexamples_waveglow'.
@@ -218,7 +217,7 @@ def benchmark(
         collate_fn=collate_fn,
     )
     test_loader.no_classes = 91  # Number of classes for COCO Detection
-    test_results, run_hash = evaluate_detection_coco(
+    test_results, speed_mem_metrics, run_hash = evaluate_detection_coco(
         model=model,
         test_loader=test_loader,
         model_output_transform=model_output_transform,
@@ -233,6 +232,7 @@
         config=config,
        dataset='COCO minival',
         results=test_results,
+        speed_mem_metrics=speed_mem_metrics,
         pytorch_hub_id=pytorch_hub_url,
         model=paper_model_name,
         model_description=model_description,
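With the docstring now pointing at the right benchmark, a COCO run looks much like the ImageNet example in the README. A minimal sketch (the import path follows the library's `image_classification` convention, and the model choice and paper metadata values are illustrative):

```python
from torchbench.object_detection import COCO
import torchvision

# Any detection model following the torchvision interface should work here.
model = torchvision.models.detection.maskrcnn_resnet50_fpn(pretrained=True)

COCO.benchmark(
    model=model,
    paper_model_name='Mask R-CNN (ResNet-50-FPN)',
    paper_arxiv_id='1703.06870',
    batch_size=8,
    num_gpu=1,
)
```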
diff --git a/torchbench/object_detection/utils.py b/torchbench/object_detection/utils.py
index 9e173fd..7e56ad6 100644
--- a/torchbench/object_detection/utils.py
+++ b/torchbench/object_detection/utils.py
@@ -6,11 +6,12 @@
 import torchvision
 from sotabenchapi.check import in_check_mode
 from sotabenchapi.client import Client
+import time
 
 from pycocotools import mask as coco_mask
 from pycocotools.coco import COCO
 
-from torchbench.utils import calculate_run_hash
+from torchbench.utils import calculate_run_hash, AverageMeter
 from torchbench.datasets import CocoDetection
 
 from .coco_eval import CocoEvaluator
@@ -200,17 +201,19 @@ def evaluate_detection_coco(
     iterator = tqdm.tqdm(test_loader, desc="Evaluation", mininterval=5)
 
+    init_time = time.time()
+
     with torch.no_grad():
         for i, (input, target) in enumerate(iterator):
             input, target = send_data_to_device(input, target, device=device)
             original_output = model(input)
             output, target = model_output_transform(original_output, target)
+
             result = {
                 tar["image_id"].item(): out
                 for tar, out in zip(target, output)
             }
             coco_evaluator.update(result)
-
             if i == 0:
                 # for sotabench.com caching of evaluation
                 run_hash = calculate_run_hash([], original_output)
                 # if we are in check mode we don't need to go beyond the first
@@ -228,13 +231,33 @@
                     "No model change detected (using the first batch run "
                     "hash). Returning cached results."
                 )
-                return cached_res, run_hash
+
+                speed_mem_metrics = {
+                    'Tasks / Evaluation Time': None,
+                    'Evaluation Time': None,
+                    'Tasks': None,
+                    'Max Memory Allocated (Total)': None,
+                }
+
+                return cached_res, speed_mem_metrics, run_hash
+
+    exec_time = time.time() - init_time
 
     coco_evaluator.synchronize_between_processes()
     coco_evaluator.accumulate()
     coco_evaluator.summarize()
 
-    return (get_coco_metrics(coco_evaluator), run_hash)
+    memory_allocated = torch.cuda.max_memory_allocated(device=device)
+    torch.cuda.reset_max_memory_allocated(device=device)
+
+    speed_mem_metrics = {
+        'Tasks / Evaluation Time': len(test_loader.dataset) / exec_time,
+        'Tasks': len(test_loader.dataset),
+        'Evaluation Time': exec_time,
+        'Max Memory Allocated (Total)': memory_allocated,
+    }
+
+    return (get_coco_metrics(coco_evaluator), speed_mem_metrics, run_hash)
 
 
 def evaluate_detection_voc(
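For the loop above to work, `model_output_transform` must yield one prediction dict per image, which is then keyed by the matching target's `image_id` and handed to `CocoEvaluator`. The field names below follow the torchvision detection convention; treat the exact shapes as an assumption about what the evaluator expects rather than a documented contract:

```python
import torch

# One element of `output`, paired with a target dict carrying "image_id".
prediction_for_one_image = {
    "boxes": torch.tensor([[10.0, 20.0, 110.0, 220.0]]),  # [N, 4] in (x1, y1, x2, y2)
    "labels": torch.tensor([1]),                          # [N] COCO category ids
    "scores": torch.tensor([0.98]),                       # [N] confidence scores
}
```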
diff --git a/torchbench/semantic_segmentation/ade20k.py b/torchbench/semantic_segmentation/ade20k.py
index da12434..7b8a2ec 100644
--- a/torchbench/semantic_segmentation/ade20k.py
+++ b/torchbench/semantic_segmentation/ade20k.py
@@ -85,7 +85,7 @@ def benchmark(
         collate_fn=collate_fn,
     )
     test_loader.no_classes = 150  # Number of classes for ADE20K
-    test_results = evaluate_segmentation(
+    test_results, speed_mem_metrics, run_hash = evaluate_segmentation(
         model=model,
         test_loader=test_loader,
         model_output_transform=model_output_transform,
@@ -98,8 +98,9 @@
     return BenchmarkResult(
         task=cls.task,
         config=config,
-        dataset=cls.dataset.__name__,
+        dataset=cls.dataset.__name__ + " val",
         results=test_results,
+        speed_mem_metrics=speed_mem_metrics,
         pytorch_hub_id=pytorch_hub_url,
         model=paper_model_name,
         model_description=model_description,
diff --git a/torchbench/semantic_segmentation/camvid.py b/torchbench/semantic_segmentation/camvid.py
index b71d403..7fd71f2 100644
--- a/torchbench/semantic_segmentation/camvid.py
+++ b/torchbench/semantic_segmentation/camvid.py
@@ -85,7 +85,7 @@ def benchmark(
         collate_fn=collate_fn,
     )
     test_loader.no_classes = 12  # Number of classes for CamVid
-    test_results = evaluate_segmentation(
+    test_results, speed_mem_metrics, run_hash = evaluate_segmentation(
         model=model,
         test_loader=test_loader,
         model_output_transform=model_output_transform,
@@ -100,6 +100,7 @@
         config=config,
         dataset=cls.dataset.__name__,
         results=test_results,
+        speed_mem_metrics=speed_mem_metrics,
         pytorch_hub_id=pytorch_hub_url,
         model=paper_model_name,
         model_description=model_description,
diff --git a/torchbench/semantic_segmentation/cityscapes.py b/torchbench/semantic_segmentation/cityscapes.py
index 764148c..9050a49 100644
--- a/torchbench/semantic_segmentation/cityscapes.py
+++ b/torchbench/semantic_segmentation/cityscapes.py
@@ -88,7 +88,7 @@ def benchmark(
         collate_fn=collate_fn,
     )
     test_loader.no_classes = 19  # Number of classes for Cityscapes
-    test_results = evaluate_segmentation(
+    test_results, speed_mem_metrics, run_hash = evaluate_segmentation(
         model=model,
         test_loader=test_loader,
         model_output_transform=model_output_transform,
@@ -103,6 +103,7 @@
         config=config,
         dataset=cls.dataset.__name__,
         results=test_results,
+        speed_mem_metrics=speed_mem_metrics,
         pytorch_hub_id=pytorch_hub_url,
         model=paper_model_name,
         model_description=model_description,
diff --git a/torchbench/semantic_segmentation/pascalcontext.py b/torchbench/semantic_segmentation/pascalcontext.py
index eb7c492..c73fab8 100644
--- a/torchbench/semantic_segmentation/pascalcontext.py
+++ b/torchbench/semantic_segmentation/pascalcontext.py
@@ -85,7 +85,7 @@ def benchmark(
         collate_fn=collate_fn,
    )
     test_loader.no_classes = 59  # Number of classes for PASCALContext
-    test_results = evaluate_segmentation(
+    test_results, speed_mem_metrics, run_hash = evaluate_segmentation(
         model=model,
         test_loader=test_loader,
         model_output_transform=model_output_transform,
@@ -100,6 +100,7 @@
         config=config,
         dataset=cls.dataset.__name__,
         results=test_results,
+        speed_mem_metrics=speed_mem_metrics,
         pytorch_hub_id=pytorch_hub_url,
         model=paper_model_name,
         model_description=model_description,
diff --git a/torchbench/semantic_segmentation/pascalvoc.py b/torchbench/semantic_segmentation/pascalvoc.py
index 559dbaa..a9b165a 100644
--- a/torchbench/semantic_segmentation/pascalvoc.py
+++ b/torchbench/semantic_segmentation/pascalvoc.py
@@ -130,7 +130,7 @@ def benchmark(
         collate_fn=collate_fn,
     )
     test_loader.no_classes = 21  # Number of classes for PASCALVOC
-    test_results, run_hash = evaluate_segmentation(
+    test_results, speed_mem_metrics, run_hash = evaluate_segmentation(
         model=model,
         test_loader=test_loader,
         model_output_transform=model_output_transform,
@@ -142,8 +142,9 @@
     return BenchmarkResult(
         task=cls.task,
         config=config,
-        dataset='PASCAL VOC %s' % dataset_year,
+        dataset='PASCAL VOC %s val' % dataset_year,
         results=test_results,
+        speed_mem_metrics=speed_mem_metrics,
         pytorch_hub_id=pytorch_hub_url,
         model=paper_model_name,
         model_description=model_description,
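Tying the segmentation changes together, a VOC run using the parameters documented in the search index above might look like this (a sketch: the model choice and paper metadata are assumptions, and `paper_results` uses the 0-1 scale that the `utils.py` change below switches the metrics to):

```python
from torchbench.semantic_segmentation import PASCALVOC
from torchvision.models.segmentation import fcn_resnet101

PASCALVOC.benchmark(
    model=fcn_resnet101(pretrained=True),
    paper_model_name='FCN (ResNet-101)',
    paper_arxiv_id='1605.06211',
    # Fractions, matching the 'Accuracy' / 'Mean IOU' leaderboard metrics.
    paper_results={'Accuracy': 0.745, 'Mean IOU': 0.592},
    batch_size=32,
)
```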
diff --git a/torchbench/semantic_segmentation/utils.py b/torchbench/semantic_segmentation/utils.py
index 6942c80..e6197d2 100644
--- a/torchbench/semantic_segmentation/utils.py
+++ b/torchbench/semantic_segmentation/utils.py
@@ -1,13 +1,14 @@
 import cv2
 import numpy as np
 import tqdm
+import time
 import torch
 from albumentations.core.transforms_interface import DualTransform
 from PIL import Image
 from sotabenchapi.check import in_check_mode
 from sotabenchapi.client import Client
 
-from torchbench.utils import calculate_run_hash
+from torchbench.utils import calculate_run_hash, AverageMeter
 
 
 def minmax_normalize(img, norm_range=(0, 1), orig_range=(0, 255)):
@@ -143,6 +144,8 @@ def evaluate_segmentation(
     iterator = tqdm.tqdm(test_loader, desc="Evaluation", mininterval=5)
 
+    init_time = time.time()
+
     with torch.no_grad():
         for i, (input, target) in enumerate(iterator):
             input, target = send_data_to_device(input, target, device=device)
@@ -167,11 +170,31 @@
                     "No model change detected (using the first batch run "
                     "hash). Returning cached results."
                 )
-                return cached_res, run_hash
+
+                speed_mem_metrics = {
+                    'Tasks / Evaluation Time': None,
+                    'Evaluation Time': None,
+                    'Tasks': None,
+                    'Max Memory Allocated (Total)': None,
+                }
+
+                return cached_res, speed_mem_metrics, run_hash
+
+    exec_time = time.time() - init_time
 
     acc_global, acc, iu = confmat.compute()
 
+    memory_allocated = torch.cuda.max_memory_allocated(device=device)
+    torch.cuda.reset_max_memory_allocated(device=device)
+
+    speed_mem_metrics = {
+        'Tasks / Evaluation Time': len(test_loader.dataset) / exec_time,
+        'Tasks': len(test_loader.dataset),
+        'Evaluation Time': exec_time,
+        'Max Memory Allocated (Total)': memory_allocated,
+    }
+
     return {
-        "Accuracy": acc_global.item() * 100,
-        "Mean IOU": iu.mean().item() * 100,
-    }, run_hash
+        "Accuracy": acc_global.item(),
+        "Mean IOU": iu.mean().item(),
+    }, speed_mem_metrics, run_hash
diff --git a/torchbench/version.py b/torchbench/version.py
index 7d8a877..d2de67e 100644
--- a/torchbench/version.py
+++ b/torchbench/version.py
@@ -16,5 +16,5 @@ def __repr__(self):
     )
 
 
-version = Version(0, 0, 22)
+version = Version(0, 0, 31)
 __version__ = str(version)