From c0d564a465372bbb8f036ae1e282d9d0374427a6 Mon Sep 17 00:00:00 2001 From: Maciej Smyk Date: Mon, 8 Jan 2024 12:40:54 +0100 Subject: [PATCH] [DOCS] Hyperlink 23.3 update for master (#21908) * link update * link update * link update * Update dev_api_references.rst * Update dev_api_references.rst * Update apidoc.py * notebooks links * notebook link update * filename update * link update * link update api --- README.md | 24 +++--- .../openvino_plugin_library.rst | 4 +- .../dev_api_references.rst | 8 +- .../deployment_migration.rst | 2 +- .../supported_model_formats.rst | 2 +- .../Convert_RetinaNet_From_Tensorflow.rst | 2 +- .../installing-openvino-pip.rst | 8 +- ...sample_hello_nv12_input_classification.rst | 2 +- .../openvino_samples/get_started_demos.rst | 2 +- ...on_sample_automatic_speech_recognition.rst | 12 +-- .../python_sample_hello_classification.rst | 28 +++---- .../python_sample_hello_query_device.rst | 8 +- .../python_sample_hello_reshape_ssd.rst | 8 +- ...thon_sample_image_classification_async.rst | 10 +-- .../python_sample_model_creation.rst | 24 +++--- .../dldt_deployment_optimization_common.rst | 2 +- .../preprocessing_usecase_save.rst | 6 +- .../integrate_with_your_application.rst | 3 +- .../ov_dynamic_shapes.rst | 4 +- .../cmake_options_for_custom_compilation.md | 4 +- docs/dev/debug_capabilities.md | 2 +- docs/dev/pypi_publish/pypi-openvino-dev.md | 4 +- docs/dev/pypi_publish/pypi-openvino-rt.md | 10 +-- docs/home.rst | 8 +- .../notebooks/001-hello-world-with-output.rst | 2 +- .../002-openvino-api-with-output.rst | 4 +- .../003-hello-segmentation-with-output.rst | 2 +- .../004-hello-detection-with-output.rst | 2 +- ...classification-to-openvino-with-output.rst | 10 +-- ...2-pytorch-onnx-to-openvino-with-output.rst | 8 +- ...to-openvino-classification-with-output.rst | 6 +- .../notebooks/104-model-tools-with-output.rst | 4 +- ...105-language-quantize-bert-with-output.rst | 2 +- .../notebooks/106-auto-device-with-output.rst | 14 ++-- ...tion-quantization-data2vec-with-output.rst | 2 +- docs/notebooks/108-gpu-device-with-output.rst | 38 +++++----- .../109-latency-tricks-with-output.rst | 6 +- .../109-throughput-tricks-with-output.rst | 8 +- ...110-ct-scan-live-inference-with-output.rst | 4 +- ...segmentation-quantize-nncf-with-output.rst | 4 +- ...ov5-quantization-migration-with-output.rst | 16 ++-- ...training-quantization-nncf-with-output.rst | 6 +- ...lassification-quantization-with-output.rst | 6 +- docs/notebooks/115-async-api-with-output.rst | 2 +- .../116-sparsity-optimization-with-output.rst | 6 +- .../117-model-server-with-output.rst | 4 +- ...118-optimize-preprocessing-with-output.rst | 16 ++-- .../119-tflite-to-openvino-with-output.rst | 6 +- ...e-segmentation-to-openvino-with-output.rst | 6 +- ...ject-detection-to-openvino-with-output.rst | 8 +- .../121-convert-to-openvino-with-output.rst | 76 +++++++++---------- ...tion-quantization-wav2vec2-with-output.rst | 2 +- ...tion-with-accuracy-control-with-output.rst | 2 +- .../124-hugging-face-hub-with-output.rst | 2 +- .../125-lraspp-segmentation-with-output.rst | 2 +- .../201-vision-monodepth-with-output.rst | 2 +- ...sion-superresolution-image-with-output.rst | 2 +- ...sion-superresolution-video-with-output.rst | 2 +- .../203-meter-reader-with-output.rst | 2 +- ...nter-semantic-segmentation-with-output.rst | 2 +- ...-vision-background-removal-with-output.rst | 2 +- ...206-vision-paddlegan-anime-with-output.rst | 4 +- ...-paddlegan-superresolution-with-output.rst | 2 +- ...ical-character-recognition-with-output.rst | 
8 +- .../209-handwritten-ocr-with-output.rst | 4 +- .../215-image-inpainting-with-output.rst | 4 +- .../216-attention-center-with-output.rst | 2 +- .../217-vision-deblur-with-output.rst | 4 +- ...219-knowledge-graphs-conve-with-output.rst | 4 +- ...ss-lingual-books-alignment-with-output.rst | 18 ++--- ...-vision-image-colorization-with-output.rst | 4 +- .../223-text-prediction-with-output.rst | 2 +- ...-segmentation-point-clouds-with-output.rst | 2 +- .../226-yolov7-optimization-with-output.rst | 4 +- ...228-clip-zero-shot-convert-with-output.rst | 2 +- ...rt-sequence-classification-with-output.rst | 6 +- ...lov8-instance-segmentation-with-output.rst | 2 +- ...-yolov8-keypoint-detection-with-output.rst | 2 +- ...30-yolov8-object-detection-with-output.rst | 4 +- ...ruct-pix2pix-image-editing-with-output.rst | 2 +- .../237-segment-anything-with-output.rst | 2 +- ...238-deep-floyd-if-optimize-with-output.rst | 2 +- .../239-image-bind-convert-with-output.rst | 2 +- ...42-freevc-voice-conversion-with-output.rst | 2 +- ...tflite-selfie-segmentation-with-output.rst | 4 +- ...7-llava-multimodal-chatbot-with-output.rst | 4 +- .../260-pix2struct-docvqa-with-output.rst | 2 +- ...sound-generation-audioldm2-with-output.rst | 2 +- ...low-training-openvino-nncf-with-output.rst | 10 +-- ...uantization-aware-training-with-output.rst | 2 +- ...uantization-aware-training-with-output.rst | 4 +- .../401-object-detection-with-output.rst | 2 +- .../406-3D-pose-estimation-with-output.rst | 2 +- .../407-person-tracking-with-output.rst | 8 +- docs/scripts/apidoc.py | 3 +- samples/c/hello_classification/README.md | 10 +-- .../hello_nv12_input_classification/README.md | 10 +-- .../cpp/benchmark/sync_benchmark/README.md | 8 +- .../benchmark/throughput_benchmark/README.md | 10 +-- samples/cpp/benchmark_app/README.md | 10 +-- .../cpp/classification_sample_async/README.md | 10 +-- samples/cpp/hello_classification/README.md | 12 +-- .../hello_nv12_input_classification/README.md | 10 +-- samples/cpp/hello_query_device/README.md | 10 +-- samples/cpp/hello_reshape_ssd/README.md | 10 +-- samples/cpp/model_creation_sample/README.md | 10 +-- .../python/benchmark/bert_benchmark/README.md | 4 +- .../python/benchmark/sync_benchmark/README.md | 16 ++-- .../benchmark/throughput_benchmark/README.md | 18 ++--- .../classification_sample_async/README.md | 20 ++--- samples/python/hello_classification/README.md | 40 +++++----- samples/python/hello_query_device/README.md | 16 ++-- samples/python/hello_reshape_ssd/README.md | 18 ++--- .../python/model_creation_sample/README.md | 34 ++++----- src/README.md | 2 +- src/bindings/c/README.md | 8 +- .../how_to_wrap_openvino_interfaces_with_c.md | 2 +- .../how_to_wrap_openvino_objects_with_c.md | 2 +- src/bindings/c/docs/how_to_write_unit_test.md | 2 +- src/bindings/python/README.md | 6 +- .../python/src/openvino/preprocess/README.md | 6 +- src/core/README.md | 4 +- src/core/docs/api_details.md | 2 +- src/core/docs/debug_capabilities.md | 2 +- src/frontends/ir/README.md | 2 +- src/frontends/paddle/README.md | 2 +- src/frontends/pytorch/README.md | 2 +- src/frontends/tensorflow/README.md | 6 +- src/inference/docs/api_details.md | 4 +- src/plugins/auto/README.md | 2 +- src/plugins/auto/docs/architecture.md | 4 +- src/plugins/auto/docs/integration.md | 2 +- src/plugins/intel_cpu/docs/fake_quantize.md | 2 +- .../docs/internal_cpu_plugin_optimization.md | 2 +- .../docs/gpu_plugin_driver_troubleshooting.md | 4 +- .../intel_gpu/docs/source_code_structure.md | 2 +- src/plugins/proxy/README.md | 2 +- 
src/plugins/template/README.md | 4 +- tools/benchmark_tool/README.md | 8 +- 139 files changed, 482 insertions(+), 484 deletions(-) diff --git a/README.md b/README.md index 04f649bfe68577..b6e586e32857e9 100644 --- a/README.md +++ b/README.md @@ -67,18 +67,18 @@ The OpenVINO™ Runtime can infer models on different hardware devices. This sec CPU - Intel CPU + Intel CPU openvino_intel_cpu_plugin Intel Xeon with Intel® Advanced Vector Extensions 2 (Intel® AVX2), Intel® Advanced Vector Extensions 512 (Intel® AVX-512), and AVX512_BF16, Intel Core Processors with Intel AVX2, Intel Atom Processors with Intel® Streaming SIMD Extensions (Intel® SSE), Intel® Advanced Matrix Extensions (Intel® AMX) - ARM CPU + ARM CPU openvino_arm_cpu_plugin Raspberry Pi™ 4 Model B, Apple® Mac mini with Apple silicon GPU - Intel GPU + Intel GPU openvino_intel_gpu_plugin Intel Processor Graphics, including Intel HD Graphics and Intel Iris Graphics @@ -96,22 +96,22 @@ OpenVINO™ Toolkit also contains several plugins which simplify loading models - Auto + Auto openvino_auto_plugin Auto plugin enables selecting Intel device for inference automatically - Auto Batch + Auto Batch openvino_auto_batch_plugin Auto batch plugin performs on-the-fly automatic batching (i.e. grouping inference requests together) to improve device utilization, with no programming effort from the user - Hetero + Hetero openvino_hetero_plugin Heterogeneous execution enables automatic inference splitting between several devices - Multi + Multi openvino_auto_plugin Multi plugin enables simultaneous inference of the same model on several devices in parallel @@ -158,9 +158,9 @@ The list of OpenVINO tutorials: ## System requirements The system requirements vary depending on platform and are available on dedicated pages: -- [Linux](https://docs.openvino.ai/2023.2/openvino_docs_install_guides_installing_openvino_linux_header.html) -- [Windows](https://docs.openvino.ai/2023.2/openvino_docs_install_guides_installing_openvino_windows_header.html) -- [macOS](https://docs.openvino.ai/2023.2/openvino_docs_install_guides_installing_openvino_macos_header.html) +- [Linux](https://docs.openvino.ai/2023.3/openvino_docs_install_guides_installing_openvino_linux_header.html) +- [Windows](https://docs.openvino.ai/2023.3/openvino_docs_install_guides_installing_openvino_windows_header.html) +- [macOS](https://docs.openvino.ai/2023.3/openvino_docs_install_guides_installing_openvino_macos_header.html) ## How to build @@ -203,6 +203,6 @@ Report questions, issues and suggestions, using: \* Other names and brands may be claimed as the property of others. 
[Open Model Zoo]:https://github.com/openvinotoolkit/open_model_zoo -[OpenVINO™ Runtime]:https://docs.openvino.ai/2023.2/openvino_docs_OV_UG_OV_Runtime_User_Guide.html -[OpenVINO Model Converter (OVC)]:https://docs.openvino.ai/2023.2/openvino_docs_model_processing_introduction.html#convert-a-model-in-cli-ovc +[OpenVINO™ Runtime]:https://docs.openvino.ai/2023.3/openvino_docs_OV_UG_OV_Runtime_User_Guide.html +[OpenVINO Model Converter (OVC)]:https://docs.openvino.ai/2023.3/openvino_docs_model_processing_introduction.html#convert-a-model-in-cli-ovc [Samples]:https://github.com/openvinotoolkit/openvino/tree/master/samples diff --git a/docs/articles_en/documentation/openvino_extensibility/openvino_plugin_library.rst b/docs/articles_en/documentation/openvino_extensibility/openvino_plugin_library.rst index 6625fab17d656b..6597cbcf196097 100644 --- a/docs/articles_en/documentation/openvino_extensibility/openvino_plugin_library.rst +++ b/docs/articles_en/documentation/openvino_extensibility/openvino_plugin_library.rst @@ -96,6 +96,6 @@ Detailed Guides API References ############## -* `OpenVINO Plugin API `__ -* `OpenVINO Transformation API `__ +* `OpenVINO Plugin API `__ +* `OpenVINO Transformation API `__ diff --git a/docs/articles_en/documentation/openvino_extensibility/openvino_plugin_library/dev_api_references.rst b/docs/articles_en/documentation/openvino_extensibility/openvino_plugin_library/dev_api_references.rst index 8b56adcd0bd6b4..c9eecf01bbd186 100644 --- a/docs/articles_en/documentation/openvino_extensibility/openvino_plugin_library/dev_api_references.rst +++ b/docs/articles_en/documentation/openvino_extensibility/openvino_plugin_library/dev_api_references.rst @@ -12,11 +12,11 @@ Plugin API Reference :maxdepth: 1 :hidden: - ../groupov_dev_api - ../groupie_transformation_api + ../api/c_cpp_api/group__ov__dev__api + ../api/c_cpp_api/group__ie__transformation__api The guides below provides extra API references needed for OpenVINO plugin development: -* `OpenVINO Plugin API `__ -* `OpenVINO Transformation API `__ +* `OpenVINO Plugin API `__ +* `OpenVINO Transformation API `__ diff --git a/docs/articles_en/documentation/openvino_legacy_features/api_2_0_transition_guide/deployment_migration.rst b/docs/articles_en/documentation/openvino_legacy_features/api_2_0_transition_guide/deployment_migration.rst index af71f1d3aa5ca3..8801091f9a4b3b 100644 --- a/docs/articles_en/documentation/openvino_legacy_features/api_2_0_transition_guide/deployment_migration.rst +++ b/docs/articles_en/documentation/openvino_legacy_features/api_2_0_transition_guide/deployment_migration.rst @@ -82,7 +82,7 @@ Then, the tools can be used by commands like: Installation of any other dependencies is not required. For more details on the installation steps, see the -`Install OpenVINO Development Tools `__ prior to OpenVINO 2023.1. +`Install OpenVINO Development Tools `__ prior to OpenVINO 2023.1. 
Interface Changes for Building C/C++ Applications ################################################# diff --git a/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/supported_model_formats.rst b/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/supported_model_formats.rst index 182456dfae06ee..c4fb928e807e01 100644 --- a/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/supported_model_formats.rst +++ b/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/supported_model_formats.rst @@ -556,7 +556,7 @@ converting them to ONNX for use with OpenVINO should be considered the default p OpenVINO versions of 2023 are mostly compatible with the old instructions, through a deprecated MO tool, installed with the deprecated OpenVINO Developer Tools package. - `OpenVINO 2023.0 `__ is the last + `OpenVINO 2023.0 `__ is the last release officially supporting the MO conversion process for the legacy formats. diff --git a/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/supported_model_formats/convert_model_tutorials/Convert_RetinaNet_From_Tensorflow.rst b/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/supported_model_formats/convert_model_tutorials/Convert_RetinaNet_From_Tensorflow.rst index 554b3fff9dd181..153cd444347436 100644 --- a/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/supported_model_formats/convert_model_tutorials/Convert_RetinaNet_From_Tensorflow.rst +++ b/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/legacy_conversion_api/supported_model_formats/convert_model_tutorials/Convert_RetinaNet_From_Tensorflow.rst @@ -18,7 +18,7 @@ Converting a TensorFlow RetinaNet Model This tutorial explains how to convert a RetinaNet model to the Intermediate Representation (IR). `Public RetinaNet model `__ does not contain pretrained TensorFlow weights. -To convert this model to the TensorFlow format, follow the `Reproduce Keras to TensorFlow Conversion tutorial `__. +To convert this model to the TensorFlow format, follow the `Reproduce Keras to TensorFlow Conversion tutorial `__. After converting the model to TensorFlow format, run the following command: diff --git a/docs/articles_en/get_started/installing-openvino-overview/installing-openvino-shared/installing-openvino-pip.rst b/docs/articles_en/get_started/installing-openvino-overview/installing-openvino-shared/installing-openvino-pip.rst index ba513383a030b3..5185fc192d8e9b 100644 --- a/docs/articles_en/get_started/installing-openvino-overview/installing-openvino-shared/installing-openvino-pip.rst +++ b/docs/articles_en/get_started/installing-openvino-overview/installing-openvino-shared/installing-openvino-pip.rst @@ -136,16 +136,16 @@ Now that you've installed OpenVINO Runtime, you're ready to run your own machine .. image:: https://user-images.githubusercontent.com/15709723/127752390-f6aa371f-31b5-4846-84b9-18dd4f662406.gif :width: 400 -Try the `Python Quick Start Example `__ to estimate depth in a scene using an OpenVINO monodepth model in a Jupyter Notebook inside your web browser. +Try the `Python Quick Start Example `__ to estimate depth in a scene using an OpenVINO monodepth model in a Jupyter Notebook inside your web browser. 
Get started with Python +++++++++++++++++++++++ Visit the :doc:`Tutorials ` page for more Jupyter Notebooks to get you started with OpenVINO, such as: -* `OpenVINO Python API Tutorial `__ -* `Basic image classification program with Hello Image Classification `__ -* `Convert a PyTorch model and use it for image background removal `__ +* `OpenVINO Python API Tutorial `__ +* `Basic image classification program with Hello Image Classification `__ +* `Convert a PyTorch model and use it for image background removal `__ diff --git a/docs/articles_en/learn_openvino/openvino_samples/c_sample_hello_nv12_input_classification.rst b/docs/articles_en/learn_openvino/openvino_samples/c_sample_hello_nv12_input_classification.rst index f3564d3bf6dfbb..36010fcb4fdf7c 100644 --- a/docs/articles_en/learn_openvino/openvino_samples/c_sample_hello_nv12_input_classification.rst +++ b/docs/articles_en/learn_openvino/openvino_samples/c_sample_hello_nv12_input_classification.rst @@ -142,6 +142,6 @@ See Also - :doc:`Using OpenVINO™ Samples ` - :doc:`Model Downloader ` - :doc:`Convert a Model ` -- `C API Reference `__ +- `C API Reference `__ diff --git a/docs/articles_en/learn_openvino/openvino_samples/get_started_demos.rst b/docs/articles_en/learn_openvino/openvino_samples/get_started_demos.rst index bdcb8714cdc1b7..50674bc6a14963 100644 --- a/docs/articles_en/learn_openvino/openvino_samples/get_started_demos.rst +++ b/docs/articles_en/learn_openvino/openvino_samples/get_started_demos.rst @@ -233,7 +233,7 @@ You need a model that is specific for your inference task. You can get it from o Convert the Model -------------------- -If Your model requires conversion, check the `article `__ for information how to do it. +If Your model requires conversion, check the `article `__ for information how to do it. .. 
_download-media: diff --git a/docs/articles_en/learn_openvino/openvino_samples/python_sample_automatic_speech_recognition.rst b/docs/articles_en/learn_openvino/openvino_samples/python_sample_automatic_speech_recognition.rst index 61d380bd0fcad7..546a95af34dc56 100644 --- a/docs/articles_en/learn_openvino/openvino_samples/python_sample_automatic_speech_recognition.rst +++ b/docs/articles_en/learn_openvino/openvino_samples/python_sample_automatic_speech_recognition.rst @@ -48,17 +48,17 @@ The sample works with Kaldi ARK or Numpy* uncompressed NPZ files, so it does not +-------------------------------------------------------------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+-----------------------------------------------------------------------+ | Feature | API | Description | +===================================================================+================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================+=======================================================================+ - | Import/Export Model | `openvino.runtime.Core.import_model `__ , `openvino.runtime.CompiledModel.export_model `__ | The GNA plugin supports loading and saving of the GNA-optimized model | + | Import/Export Model | `openvino.runtime.Core.import_model `__ , `openvino.runtime.CompiledModel.export_model `__ | The GNA plugin supports loading and saving of the GNA-optimized model | 
+-------------------------------------------------------------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+-----------------------------------------------------------------------+ - | Model Operations | `openvino.runtime.Model.add_outputs `__ , `openvino.runtime.set_batch `__ , `openvino.runtime.CompiledModel.inputs `__ , `openvino.runtime.CompiledModel.outputs `__ , `openvino.runtime.ConstOutput.any_name `__ | Managing of model: configure batch_size, input and output tensors | + | Model Operations | `openvino.runtime.Model.add_outputs `__ , `openvino.runtime.set_batch `__ , `openvino.runtime.CompiledModel.inputs `__ , `openvino.runtime.CompiledModel.outputs `__ , `openvino.runtime.ConstOutput.any_name `__ | Managing of model: configure batch_size, input and output tensors | +-------------------------------------------------------------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+-----------------------------------------------------------------------+ - | Synchronous Infer | `openvino.runtime.CompiledModel.create_infer_request `__ , `openvino.runtime.InferRequest.infer `__ | Do synchronous inference | + | Synchronous Infer | `openvino.runtime.CompiledModel.create_infer_request `__ , `openvino.runtime.InferRequest.infer `__ | Do synchronous inference | 
+-------------------------------------------------------------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+-----------------------------------------------------------------------+ - | InferRequest Operations | `openvino.runtime.InferRequest.get_input_tensor `__ , `openvino.runtime.InferRequest.model_outputs `__ , `openvino.runtime.InferRequest.model_inputs `__ , | Get info about model using infer request API | + | InferRequest Operations | `openvino.runtime.InferRequest.get_input_tensor `__ , `openvino.runtime.InferRequest.model_outputs `__ , `openvino.runtime.InferRequest.model_inputs `__ , | Get info about model using infer request API | +-------------------------------------------------------------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+-----------------------------------------------------------------------+ - | InferRequest Operations | `openvino.runtime.InferRequest.query_state `__ , `openvino.runtime.VariableState.reset `__ | Gets and resets CompiledModel state control | + | InferRequest Operations | `openvino.runtime.InferRequest.query_state `__ , `openvino.runtime.VariableState.reset `__ | Gets and resets CompiledModel state control | 
+-------------------------------------------------------------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+-----------------------------------------------------------------------+ - | Profiling | `openvino.runtime.InferRequest.profiling_info `__ , `openvino.runtime.ProfilingInfo.real_time `__ | Get infer request profiling info | + | Profiling | `openvino.runtime.InferRequest.profiling_info `__ , `openvino.runtime.ProfilingInfo.real_time `__ | Get infer request profiling info | +-------------------------------------------------------------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+-----------------------------------------------------------------------+ Basic OpenVINO™ Runtime API is covered by :doc:`Hello Classification Python* Sample `. diff --git a/docs/articles_en/learn_openvino/openvino_samples/python_sample_hello_classification.rst b/docs/articles_en/learn_openvino/openvino_samples/python_sample_hello_classification.rst index 266438a5981e99..91b894f9b43286 100644 --- a/docs/articles_en/learn_openvino/openvino_samples/python_sample_hello_classification.rst +++ b/docs/articles_en/learn_openvino/openvino_samples/python_sample_hello_classification.rst @@ -36,23 +36,23 @@ Models with only 1 input and output are supported. 
+-----------------------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ | Feature | API | Description | +=============================+===========================================================================================================================================================================================================================================+============================================================================================================================================================================================+ - | Basic Infer Flow | `openvino.runtime.Core `__ , | | - | | `openvino.runtime.Core.read_model `__ , | | - | | `openvino.runtime.Core.compile_model `__ | Common API to do inference | + | Basic Infer Flow | `openvino.runtime.Core `__ , | | + | | `openvino.runtime.Core.read_model `__ , | | + | | `openvino.runtime.Core.compile_model `__ | Common API to do inference | +-----------------------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ - | Synchronous Infer | `openvino.runtime.CompiledModel.infer_new_request `__ | Do synchronous inference | + | Synchronous Infer | `openvino.runtime.CompiledModel.infer_new_request `__ | Do synchronous inference | +-----------------------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ - | Model Operations | `openvino.runtime.Model.inputs `__ , | Managing of model | - | | `openvino.runtime.Model.outputs `__ | | + | Model Operations | `openvino.runtime.Model.inputs `__ , | Managing of model | + | | `openvino.runtime.Model.outputs `__ | | +-----------------------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ - | Preprocessing | `openvino.preprocess.PrePostProcessor `__ , | Set image of the original size as input for a model with other input size. 
Resize and layout conversions will be performed automatically by the corresponding plugin just before inference | - | | `openvino.preprocess.InputTensorInfo.set_element_type `__ , | | - | | `openvino.preprocess.InputTensorInfo.set_layout `__ , | | - | | `openvino.preprocess.InputTensorInfo.set_spatial_static_shape `__ , | | - | | `openvino.preprocess.PreProcessSteps.resize `__ , | | - | | `openvino.preprocess.InputModelInfo.set_layout `__ , | | - | | `openvino.preprocess.OutputTensorInfo.set_element_type `__ , | | - | | `openvino.preprocess.PrePostProcessor.build `__ | | + | Preprocessing | `openvino.preprocess.PrePostProcessor `__ , | Set image of the original size as input for a model with other input size. Resize and layout conversions will be performed automatically by the corresponding plugin just before inference | + | | `openvino.preprocess.InputTensorInfo.set_element_type `__ , | | + | | `openvino.preprocess.InputTensorInfo.set_layout `__ , | | + | | `openvino.preprocess.InputTensorInfo.set_spatial_static_shape `__ , | | + | | `openvino.preprocess.PreProcessSteps.resize `__ , | | + | | `openvino.preprocess.InputModelInfo.set_layout `__ , | | + | | `openvino.preprocess.OutputTensorInfo.set_element_type `__ , | | + | | `openvino.preprocess.PrePostProcessor.build `__ | | +-----------------------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ .. tab-item:: Sample Code diff --git a/docs/articles_en/learn_openvino/openvino_samples/python_sample_hello_query_device.rst b/docs/articles_en/learn_openvino/openvino_samples/python_sample_hello_query_device.rst index af395356efc44d..1adfd6bb24af48 100644 --- a/docs/articles_en/learn_openvino/openvino_samples/python_sample_hello_query_device.rst +++ b/docs/articles_en/learn_openvino/openvino_samples/python_sample_hello_query_device.rst @@ -31,11 +31,11 @@ This sample demonstrates how to show OpenVINO™ Runtime devices and prints thei +---------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+----------------------------------------+ | Feature | API | Description | +=======================================+============================================================================================================================================================================================+========================================+ - | Basic | `openvino.runtime.Core `__ | Common API | + | Basic | `openvino.runtime.Core `__ | Common API | +---------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+----------------------------------------+ - | Query Device | `openvino.runtime.Core.available_devices `__ , | Get device properties | - | | `openvino.runtime.Core.get_metric `__ , | | - | | `openvino.runtime.Core.get_config `__ | | + | Query Device | `openvino.runtime.Core.available_devices `__ , | Get device properties | + | | 
`openvino.runtime.Core.get_metric `__ , | | + | | `openvino.runtime.Core.get_config `__ | | +---------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+----------------------------------------+ .. tab-item:: Sample Code diff --git a/docs/articles_en/learn_openvino/openvino_samples/python_sample_hello_reshape_ssd.rst b/docs/articles_en/learn_openvino/openvino_samples/python_sample_hello_reshape_ssd.rst index b90b7950f5901d..e17233f675cea4 100644 --- a/docs/articles_en/learn_openvino/openvino_samples/python_sample_hello_reshape_ssd.rst +++ b/docs/articles_en/learn_openvino/openvino_samples/python_sample_hello_reshape_ssd.rst @@ -39,10 +39,10 @@ Models with only 1 input and output are supported. +------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+--------------------------------------+ | Feature | API | Description | +====================================+================================================================================================================================================================================+======================================+ - | Model Operations | `openvino.runtime.Model.reshape `__ , | Managing of model | - | | `openvino.runtime.Model.input `__ , | | - | | `openvino.runtime.Output.get_any_name `__ , | | - | | `openvino.runtime.PartialShape `__ | | + | Model Operations | `openvino.runtime.Model.reshape `__ , | Managing of model | + | | `openvino.runtime.Model.input `__ , | | + | | `openvino.runtime.Output.get_any_name `__ , | | + | | `openvino.runtime.PartialShape `__ | | +------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+--------------------------------------+ Basic OpenVINO™ Runtime API is covered by :doc:`Hello Classification Python* Sample `. diff --git a/docs/articles_en/learn_openvino/openvino_samples/python_sample_image_classification_async.rst b/docs/articles_en/learn_openvino/openvino_samples/python_sample_image_classification_async.rst index 3a6ae2276ed1e4..f586689d891de5 100644 --- a/docs/articles_en/learn_openvino/openvino_samples/python_sample_image_classification_async.rst +++ b/docs/articles_en/learn_openvino/openvino_samples/python_sample_image_classification_async.rst @@ -36,11 +36,11 @@ Models with only 1 input and output are supported. 
+--------------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+---------------------------+ | Feature | API | Description | +====================+===========================================================================================================================================================================================================+===========================+ - | Asynchronous Infer | `openvino.runtime.AsyncInferQueue `__ , | Do asynchronous inference | - | | `openvino.runtime.AsyncInferQueue.set_callback `__ , | | - | | `openvino.runtime.AsyncInferQueue.start_async `__ , | | - | | `openvino.runtime.AsyncInferQueue.wait_all `__ , | | - | | `openvino.runtime.InferRequest.results `__ | | + | Asynchronous Infer | `openvino.runtime.AsyncInferQueue `__ , | Do asynchronous inference | + | | `openvino.runtime.AsyncInferQueue.set_callback `__ , | | + | | `openvino.runtime.AsyncInferQueue.start_async `__ , | | + | | `openvino.runtime.AsyncInferQueue.wait_all `__ , | | + | | `openvino.runtime.InferRequest.results `__ | | +--------------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+---------------------------+ Basic OpenVINO™ Runtime API is covered by :doc:`Hello Classification Python Sample `. diff --git a/docs/articles_en/learn_openvino/openvino_samples/python_sample_model_creation.rst b/docs/articles_en/learn_openvino/openvino_samples/python_sample_model_creation.rst index 163795e812709b..936e62760a151d 100644 --- a/docs/articles_en/learn_openvino/openvino_samples/python_sample_model_creation.rst +++ b/docs/articles_en/learn_openvino/openvino_samples/python_sample_model_creation.rst @@ -35,19 +35,19 @@ This sample demonstrates how to run inference using a :doc:`model `__ , | Managing of model | - | | `openvino.runtime.set_batch `__ , | | - | | `openvino.runtime.Model.input `__ | | + | Model Operations | `openvino.runtime.Model `__ , | Managing of model | + | | `openvino.runtime.set_batch `__ , | | + | | `openvino.runtime.Model.input `__ | | +------------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------+------------------------------------------------------------------------------------+ - | Opset operations | `openvino.runtime.op.Parameter `__ , | Description of a model topology using OpenVINO Python API | - | | `openvino.runtime.op.Constant `__ , | | - | | `openvino.runtime.opset8.convolution `__ , | | - | | `openvino.runtime.opset8.add `__ , | | - | | `openvino.runtime.opset1.max_pool `__ , | | - | | `openvino.runtime.opset8.reshape `__ , | | - | | `openvino.runtime.opset8.matmul `__ , | | - | | `openvino.runtime.opset8.relu `__ , | | - | | `openvino.runtime.opset8.softmax `__ | | + | Opset operations | `openvino.runtime.op.Parameter `__ , | Description of a model topology using OpenVINO Python API | + | | `openvino.runtime.op.Constant `__ , | | + | | `openvino.runtime.opset8.convolution `__ , | | + | | `openvino.runtime.opset8.add `__ , | | + | | `openvino.runtime.opset1.max_pool `__ , | | + | | `openvino.runtime.opset8.reshape `__ , | | + | | `openvino.runtime.opset8.matmul `__ , | | + | | `openvino.runtime.opset8.relu 
`__ , | | + | | `openvino.runtime.opset8.softmax `__ | | +------------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------+------------------------------------------------------------------------------------+ Basic OpenVINO™ Runtime API is covered by :doc:`Hello Classification Python* Sample `. diff --git a/docs/articles_en/openvino_workflow/running_inference_with_openvino/dldt_deployment_optimization_guide/dldt_deployment_optimization_common.rst b/docs/articles_en/openvino_workflow/running_inference_with_openvino/dldt_deployment_optimization_guide/dldt_deployment_optimization_common.rst index b7db8252a9e95b..f31cbff195cf2e 100644 --- a/docs/articles_en/openvino_workflow/running_inference_with_openvino/dldt_deployment_optimization_guide/dldt_deployment_optimization_common.rst +++ b/docs/articles_en/openvino_workflow/running_inference_with_openvino/dldt_deployment_optimization_guide/dldt_deployment_optimization_common.rst @@ -62,7 +62,7 @@ Below are example-codes for the regular and async-based approaches to compare: The technique can be generalized to any available parallel slack. For example, you can do inference and simultaneously encode the resulting or previous frames or run further inference, like emotion detection on top of the face detection results. -Refer to the `Object Detection C++ Demo `__ , `Object Detection Python Demo `__ (latency-oriented Async API showcase) and :doc:`Benchmark App Sample ` for complete examples of the Async API in action. +Refer to the `Object Detection C++ Demo `__ , `Object Detection Python Demo `__ (latency-oriented Async API showcase) and :doc:`Benchmark App Sample ` for complete examples of the Async API in action. .. 
note:: diff --git a/docs/articles_en/openvino_workflow/running_inference_with_openvino/dldt_deployment_optimization_guide/preprocessing_overview/preprocessing_usecase_save.rst b/docs/articles_en/openvino_workflow/running_inference_with_openvino/dldt_deployment_optimization_guide/preprocessing_overview/preprocessing_usecase_save.rst index 0d721076b565c9..ef9fa71106401d 100644 --- a/docs/articles_en/openvino_workflow/running_inference_with_openvino/dldt_deployment_optimization_guide/preprocessing_overview/preprocessing_usecase_save.rst +++ b/docs/articles_en/openvino_workflow/running_inference_with_openvino/dldt_deployment_optimization_guide/preprocessing_overview/preprocessing_usecase_save.rst @@ -112,7 +112,7 @@ Additional Resources * :doc:`Layout API overview ` * :doc:`Model Optimizer - Optimize Preprocessing Computation ` * :doc:`Model Caching Overview ` -* The `ov::preprocess::PrePostProcessor `__ C++ class documentation -* The `ov::pass::Serialize `__ - pass to serialize model to XML/BIN -* The `ov::set_batch `__ - update batch dimension for a given model +* The `ov::preprocess::PrePostProcessor `__ C++ class documentation +* The `ov::pass::Serialize `__ - pass to serialize model to XML/BIN +* The `ov::set_batch `__ - update batch dimension for a given model diff --git a/docs/articles_en/openvino_workflow/running_inference_with_openvino/integrate_with_your_application.rst b/docs/articles_en/openvino_workflow/running_inference_with_openvino/integrate_with_your_application.rst index 9d10e1dd893801..0b167d932d767a 100644 --- a/docs/articles_en/openvino_workflow/running_inference_with_openvino/integrate_with_your_application.rst +++ b/docs/articles_en/openvino_workflow/running_inference_with_openvino/integrate_with_your_application.rst @@ -438,8 +438,7 @@ To build your project using CMake with the default build tools currently availab Additional Resources #################### -* See the :doc:`OpenVINO Samples ` page or the `Open Model Zoo Demos `__ page for specific examples of how OpenVINO pipelines are implemented for applications like image classification, text prediction, and many others. +* See the :doc:`OpenVINO Samples ` page or the `Open Model Zoo Demos `__ page for specific examples of how OpenVINO pipelines are implemented for applications like image classification, text prediction, and many others. * :doc:`OpenVINO™ Runtime Preprocessing ` * :doc:`Using Encrypted Models with OpenVINO ` -* `Open Model Zoo Demos `__ diff --git a/docs/articles_en/openvino_workflow/running_inference_with_openvino/ov_dynamic_shapes.rst b/docs/articles_en/openvino_workflow/running_inference_with_openvino/ov_dynamic_shapes.rst index d0d4beb9b05a4c..26b8a369a9b548 100644 --- a/docs/articles_en/openvino_workflow/running_inference_with_openvino/ov_dynamic_shapes.rst +++ b/docs/articles_en/openvino_workflow/running_inference_with_openvino/ov_dynamic_shapes.rst @@ -64,7 +64,7 @@ Model input dimensions can be specified as dynamic using the model.reshape metho Some models may already have dynamic shapes out of the box and do not require additional configuration. This can either be because it was generated with dynamic shapes from the source framework, or because it was converted with Model Conversion API to use dynamic shapes. For more information, see the Dynamic Dimensions “Out of the Box” section. -The examples below show how to set dynamic dimensions with a model that has a static ``[1, 3, 224, 224]`` input shape (such as `mobilenet-v2 `__). 
The first example shows how to change the first dimension (batch size) to be dynamic. In the second example, the third and fourth dimensions (height and width) are set as dynamic. +The examples below show how to set dynamic dimensions with a model that has a static ``[1, 3, 224, 224]`` input shape (such as `mobilenet-v2 `__). The first example shows how to change the first dimension (batch size) to be dynamic. In the second example, the third and fourth dimensions (height and width) are set as dynamic. .. tab-set:: @@ -177,7 +177,7 @@ The lower and/or upper bounds of a dynamic dimension can also be specified. They .. tab-item:: C :sync: c - The dimension bounds can be coded as arguments for `ov_dimension `__, as shown in these examples: + The dimension bounds can be coded as arguments for `ov_dimension `__, as shown in these examples: .. doxygensnippet:: docs/snippets/ov_dynamic_shapes.c :language: cpp diff --git a/docs/dev/cmake_options_for_custom_compilation.md b/docs/dev/cmake_options_for_custom_compilation.md index 725dfa940d04f7..6df1ecddf3cd42 100644 --- a/docs/dev/cmake_options_for_custom_compilation.md +++ b/docs/dev/cmake_options_for_custom_compilation.md @@ -183,8 +183,8 @@ In this case OpenVINO CMake scripts take `TBBROOT` environment variable into acc [pugixml]:https://pugixml.org/ [ONNX]:https://onnx.ai/ [protobuf]:https://github.com/protocolbuffers/protobuf -[deployment manager]:https://docs.openvino.ai/2023.2/openvino_docs_install_guides_deployment_manager_tool.html -[OpenVINO Runtime Introduction]:https://docs.openvino.ai/2023.2/openvino_docs_OV_UG_Integrate_OV_with_your_application.html +[deployment manager]:https://docs.openvino.ai/2023.3/openvino_docs_install_guides_deployment_manager_tool.html +[OpenVINO Runtime Introduction]:https://docs.openvino.ai/2023.3/openvino_docs_OV_UG_Integrate_OV_with_your_application.html [PDPD]:https://github.com/PaddlePaddle/Paddle [TensorFlow]:https://www.tensorflow.org/ [TensorFlow Lite]:https://www.tensorflow.org/lite diff --git a/docs/dev/debug_capabilities.md b/docs/dev/debug_capabilities.md index acee03fde92ee7..78d9c381bfed26 100644 --- a/docs/dev/debug_capabilities.md +++ b/docs/dev/debug_capabilities.md @@ -2,7 +2,7 @@ OpenVINO components provides different debug capabilities, to get more information please read: -* [OpenVINO Model Debug Capabilities](https://docs.openvino.ai/2023.2/openvino_docs_OV_UG_Model_Representation.html#model-debug-capabilities) +* [OpenVINO Model Debug Capabilities](https://docs.openvino.ai/2023.3/openvino_docs_OV_UG_Model_Representation.html#model-debug-capabilities) * [OpenVINO Pass Manager Debug Capabilities](#todo) ## See also diff --git a/docs/dev/pypi_publish/pypi-openvino-dev.md b/docs/dev/pypi_publish/pypi-openvino-dev.md index c9077cc78f272f..d20167c5ee33d0 100644 --- a/docs/dev/pypi_publish/pypi-openvino-dev.md +++ b/docs/dev/pypi_publish/pypi-openvino-dev.md @@ -3,7 +3,7 @@ > **NOTE**: This version is pre-release software and has not undergone full release validation or qualification. No support is offered on pre-release software and APIs/behavior are subject to change. It should NOT be incorporated into any production software/solution and instead should be used only for early testing and integration while awaiting a final release version of this software. -> **NOTE**: OpenVINO™ Development Tools package has been deprecated and will be discontinued with 2025.0 release. 
To learn more, refer to the [OpenVINO Legacy Features and Components page](https://docs.openvino.ai/2023.2/openvino_legacy_features.html). +> **NOTE**: OpenVINO™ Development Tools package has been deprecated and will be discontinued with 2025.0 release. To learn more, refer to the [OpenVINO Legacy Features and Components page](https://docs.openvino.ai/2023.3/openvino_legacy_features.html). Intel® Distribution of OpenVINO™ toolkit is an open-source toolkit for optimizing and deploying AI inference. It can be used to develop applications and solutions based on deep learning tasks, such as: emulation of human vision, automatic speech recognition, natural language processing, recommendation systems, etc. It provides high-performance and rich deployment options, from edge to cloud. @@ -126,7 +126,7 @@ For example, to install and configure the components for working with TensorFlow ## Troubleshooting -For general troubleshooting steps and issues, see [Troubleshooting Guide for OpenVINO Installation](https://docs.openvino.ai/2023.2/openvino_docs_get_started_guide_troubleshooting.html). The following sections also provide explanations to several error messages. +For general troubleshooting steps and issues, see [Troubleshooting Guide for OpenVINO Installation](https://docs.openvino.ai/2023.3/openvino_docs_get_started_guide_troubleshooting.html). The following sections also provide explanations to several error messages. ### Errors with Installing via PIP for Users in China diff --git a/docs/dev/pypi_publish/pypi-openvino-rt.md b/docs/dev/pypi_publish/pypi-openvino-rt.md index 494c3db97362d3..a5d37d2b05c3ee 100644 --- a/docs/dev/pypi_publish/pypi-openvino-rt.md +++ b/docs/dev/pypi_publish/pypi-openvino-rt.md @@ -5,7 +5,7 @@ Intel® Distribution of OpenVINO™ toolkit is an open-source toolkit for optimizing and deploying AI inference. It can be used to develop applications and solutions based on deep learning tasks, such as: emulation of human vision, automatic speech recognition, natural language processing, recommendation systems, etc. It provides high-performance and rich deployment options, from edge to cloud. -If you have already finished developing your models and converting them to the OpenVINO model format, you can install OpenVINO Runtime to deploy your applications on various devices. The [OpenVINO™](https://docs.openvino.ai/2023.2/openvino_docs_OV_UG_OV_Runtime_User_Guide.html) Python package includes a set of libraries for an easy inference integration with your products. +If you have already finished developing your models and converting them to the OpenVINO model format, you can install OpenVINO Runtime to deploy your applications on various devices. The [OpenVINO™](https://docs.openvino.ai/2023.3/openvino_docs_OV_UG_OV_Runtime_User_Guide.html) Python package includes a set of libraries for an easy inference integration with your products. ## System Requirements @@ -75,13 +75,13 @@ If installation was successful, you will see the list of available devices. 
| Component | Content | Description | |------------------|---------------------------------------------------------------------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| [OpenVINO Runtime](https://docs.openvino.ai/2023.2/openvino_docs_OV_UG_OV_Runtime_User_Guide.html) | `openvino package` |**OpenVINO Runtime** is a set of C++ libraries with C and Python bindings providing a common API to deliver inference solutions on the platform of your choice. Use the OpenVINO Runtime API to read PyTorch\*, TensorFlow\*, TensorFlow Lite\*, ONNX\*, and PaddlePaddle\* models and execute them on preferred devices. OpenVINO Runtime uses a plugin architecture and includes the following plugins: [CPU](https://docs.openvino.ai/2023.2/openvino_docs_OV_UG_supported_plugins_CPU.html), [GPU](https://docs.openvino.ai/2023.2/openvino_docs_OV_UG_supported_plugins_GPU.html), [Auto Batch](https://docs.openvino.ai/2023.2/openvino_docs_OV_UG_Automatic_Batching.html), [Auto](https://docs.openvino.ai/2023.2/openvino_docs_OV_UG_supported_plugins_AUTO.html), [Hetero](https://docs.openvino.ai/2023.2/openvino_docs_OV_UG_Hetero_execution.html). -| [OpenVINO Model Converter (OVC)](https://docs.openvino.ai/2023.2/openvino_docs_model_processing_introduction.html#convert-a-model-in-cli-ovc) | `ovc` |**OpenVINO Model Converter** converts models that were trained in popular frameworks to a format usable by OpenVINO components.
Supported frameworks include ONNX\*, TensorFlow\*, TensorFlow Lite\*, and PaddlePaddle\*. | -| [Benchmark Tool](https://docs.openvino.ai/2023.2/openvino_inference_engine_tools_benchmark_tool_README.html)| `benchmark_app` | **Benchmark Application** allows you to estimate deep learning inference performance on supported devices for synchronous and asynchronous modes. | +| [OpenVINO Runtime](https://docs.openvino.ai/2023.3/openvino_docs_OV_UG_OV_Runtime_User_Guide.html) | `openvino package` |**OpenVINO Runtime** is a set of C++ libraries with C and Python bindings providing a common API to deliver inference solutions on the platform of your choice. Use the OpenVINO Runtime API to read PyTorch\*, TensorFlow\*, TensorFlow Lite\*, ONNX\*, and PaddlePaddle\* models and execute them on preferred devices. OpenVINO Runtime uses a plugin architecture and includes the following plugins: [CPU](https://docs.openvino.ai/2023.3/openvino_docs_OV_UG_supported_plugins_CPU.html), [GPU](https://docs.openvino.ai/2023.3/openvino_docs_OV_UG_supported_plugins_GPU.html), [Auto Batch](https://docs.openvino.ai/2023.3/openvino_docs_OV_UG_Automatic_Batching.html), [Auto](https://docs.openvino.ai/2023.3/openvino_docs_OV_UG_supported_plugins_AUTO.html), [Hetero](https://docs.openvino.ai/2023.3/openvino_docs_OV_UG_Hetero_execution.html). +| [OpenVINO Model Converter (OVC)](https://docs.openvino.ai/2023.3/openvino_docs_model_processing_introduction.html#convert-a-model-in-cli-ovc) | `ovc` |**OpenVINO Model Converter** converts models that were trained in popular frameworks to a format usable by OpenVINO components.
Supported frameworks include ONNX\*, TensorFlow\*, TensorFlow Lite\*, and PaddlePaddle\*. | +| [Benchmark Tool](https://docs.openvino.ai/2023.3/openvino_inference_engine_tools_benchmark_tool_README.html)| `benchmark_app` | **Benchmark Application** allows you to estimate deep learning inference performance on supported devices for synchronous and asynchronous modes. | ## Troubleshooting -For general troubleshooting steps and issues, see [Troubleshooting Guide for OpenVINO Installation](https://docs.openvino.ai/2023.2/openvino_docs_get_started_guide_troubleshooting.html). The following sections also provide explanations to several error messages. +For general troubleshooting steps and issues, see [Troubleshooting Guide for OpenVINO Installation](https://docs.openvino.ai/2023.3/openvino_docs_get_started_guide_troubleshooting.html). The following sections also provide explanations to several error messages. ### Errors with Installing via PIP for Users in China diff --git a/docs/home.rst b/docs/home.rst index bbde83b3e3fe9b..4f9f7034cb4676 100644 --- a/docs/home.rst +++ b/docs/home.rst @@ -14,7 +14,7 @@ OpenVINO 2023.2 .. container:: :name: ov-homepage-banner - OpenVINO 2023.2 + OpenVINO 2023.3 .. raw:: html @@ -25,13 +25,13 @@ OpenVINO 2023.2
  • An open-source toolkit for optimizing and deploying deep learning models.
    Boost your AI deep-learning inference performance!
  • Use PyTorch models directly, without converting them first.
    - Learn more... + Learn more...
  • OpenVINO via PyTorch 2.0 torch.compile()
    Use OpenVINO directly in PyTorch-native applications!
    - Learn more... + Learn more...
  • Do you like Generative AI? You will love how it performs with OpenVINO!
    - Check out our new notebooks... + Check out our new notebooks... diff --git a/docs/notebooks/001-hello-world-with-output.rst b/docs/notebooks/001-hello-world-with-output.rst index 685b163de8868b..e52ea3af6429a8 100644 --- a/docs/notebooks/001-hello-world-with-output.rst +++ b/docs/notebooks/001-hello-world-with-output.rst @@ -5,7 +5,7 @@ This basic introduction to OpenVINO™ shows how to do inference with an image classification model. A pre-trained `MobileNetV3 -model `__ +model `__ from `Open Model Zoo `__ is used in this tutorial. For more information about how OpenVINO IR models are diff --git a/docs/notebooks/002-openvino-api-with-output.rst b/docs/notebooks/002-openvino-api-with-output.rst index 466d51c361018b..18bdc59b5ca2f0 100644 --- a/docs/notebooks/002-openvino-api-with-output.rst +++ b/docs/notebooks/002-openvino-api-with-output.rst @@ -137,7 +137,7 @@ After initializing OpenVINO Runtime, first read the model file with ``compile_model()`` method. `OpenVINO™ supports several model -formats `__ +formats `__ and enables developers to convert them to its own OpenVINO IR format using a tool dedicated to this task. @@ -158,7 +158,7 @@ file has a different filename, it can be specified using the ``weights`` parameter in ``read_model()``. The OpenVINO `Model Conversion -API `__ +API `__ tool is used to convert models to OpenVINO IR format. Model conversion API reads the original model and creates an OpenVINO IR model (``.xml`` and ``.bin`` files) so inference can be performed without delays due to diff --git a/docs/notebooks/003-hello-segmentation-with-output.rst b/docs/notebooks/003-hello-segmentation-with-output.rst index dc80c1733045ea..9a49528a7d07b0 100644 --- a/docs/notebooks/003-hello-segmentation-with-output.rst +++ b/docs/notebooks/003-hello-segmentation-with-output.rst @@ -4,7 +4,7 @@ Hello Image Segmentation A very basic introduction to using segmentation models with OpenVINO™. In this tutorial, a pre-trained -`road-segmentation-adas-0001 `__ +`road-segmentation-adas-0001 `__ model from the `Open Model Zoo `__ is used. ADAS stands for Advanced Driver Assistance Services. The model diff --git a/docs/notebooks/004-hello-detection-with-output.rst b/docs/notebooks/004-hello-detection-with-output.rst index b9daf8d04adcaa..f1ce601889d5bc 100644 --- a/docs/notebooks/004-hello-detection-with-output.rst +++ b/docs/notebooks/004-hello-detection-with-output.rst @@ -5,7 +5,7 @@ A very basic introduction to using object detection models with OpenVINO™. The -`horizontal-text-detection-0001 `__ +`horizontal-text-detection-0001 `__ model from `Open Model Zoo `__ is used. It detects horizontal text in images and returns a blob of data in the diff --git a/docs/notebooks/101-tensorflow-classification-to-openvino-with-output.rst b/docs/notebooks/101-tensorflow-classification-to-openvino-with-output.rst index a3585b7972aaad..377f79990ed3d9 100644 --- a/docs/notebooks/101-tensorflow-classification-to-openvino-with-output.rst +++ b/docs/notebooks/101-tensorflow-classification-to-openvino-with-output.rst @@ -2,11 +2,11 @@ Convert a TensorFlow Model to OpenVINO™ ======================================= This short tutorial shows how to convert a TensorFlow -`MobileNetV3 `__ +`MobileNetV3 `__ image classification model to OpenVINO `Intermediate -Representation `__ +Representation `__ (OpenVINO IR) format, using `Model Conversion -API `__. +API `__. After creating the OpenVINO IR, load the model in `OpenVINO Runtime `__ and do inference with a sample image. 
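In code, the conversion-and-inference flow described in these tutorials usually reduces to a few calls. The snippet below is only a sketch: the model path and the randomly generated input are placeholders, not values taken from the notebook.

.. code:: ipython3

    import numpy as np
    import openvino as ov

    # Sketch: convert a TensorFlow saved model, keep the IR on disk, then run inference.
    ov_model = ov.convert_model("model/v3-small_224_1.0_float")      # placeholder path
    ov.save_model(ov_model, "model/v3-small_224_1.0_float.xml")

    core = ov.Core()
    compiled_model = core.compile_model(ov_model, "CPU")

    input_image = np.random.rand(1, 224, 224, 3).astype(np.float32)  # placeholder input
    result = compiled_model(input_image)[compiled_model.output(0)]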
@@ -165,7 +165,7 @@ model directory and returns OpenVINO Model class instance which represents this model. Obtained model is ready to use and to be loaded on a device using ``ov.compile_model`` or can be saved on a disk using the ``ov.save_model`` function. See the -`tutorial `__ +`tutorial `__ for more information about using model conversion API with TensorFlow models. @@ -329,7 +329,7 @@ Timing Measure the time it takes to do inference on thousand images. This gives an indication of performance. For more accurate benchmarking, use the `Benchmark -Tool `__ +Tool `__ in OpenVINO. Note that many optimizations are possible to improve the performance. diff --git a/docs/notebooks/102-pytorch-onnx-to-openvino-with-output.rst b/docs/notebooks/102-pytorch-onnx-to-openvino-with-output.rst index 2168de355e96ff..714e6f6a2c3248 100644 --- a/docs/notebooks/102-pytorch-onnx-to-openvino-with-output.rst +++ b/docs/notebooks/102-pytorch-onnx-to-openvino-with-output.rst @@ -240,7 +240,7 @@ Convert ONNX Model to OpenVINO IR Format To convert the ONNX model to OpenVINO IR with ``FP16`` precision, use model conversion API. The models are saved inside the current directory. For more information on how to convert models, see this -`page `__. +`page `__. .. code:: ipython3 @@ -509,7 +509,7 @@ Performance Comparison Measure the time it takes to do inference on twenty images. This gives an indication of performance. For more accurate benchmarking, use the `Benchmark -Tool `__. +Tool `__. Keep in mind that many optimizations are possible to improve the performance. @@ -608,6 +608,6 @@ References - `OpenVINO ONNX support `__ - `Model Conversion API - documentation `__ + documentation `__ - `Converting Pytorch - model `__ + model `__ diff --git a/docs/notebooks/103-paddle-to-openvino-classification-with-output.rst b/docs/notebooks/103-paddle-to-openvino-classification-with-output.rst index 53d9144842e38b..221d1fbf970e14 100644 --- a/docs/notebooks/103-paddle-to-openvino-classification-with-output.rst +++ b/docs/notebooks/103-paddle-to-openvino-classification-with-output.rst @@ -305,7 +305,7 @@ accept path to PaddlePaddle model and returns OpenVINO Model class instance which represents this model. Obtained model is ready to use and loading on device using ``ov.compile_model`` or can be saved on disk using ``ov.save_model`` function. See the `Model Conversion -Guide `__ +Guide `__ for more information about the Model Conversion API. .. code:: ipython3 @@ -406,7 +406,7 @@ Measure the time it takes to do inference on fifty images and compare the result. The timing information gives an indication of performance. For a fair comparison, we include the time it takes to process the image. For more accurate benchmarking, use the `OpenVINO benchmark -tool `__. +tool `__. Note that many optimizations are possible to improve the performance. .. 
code:: ipython3 @@ -538,4 +538,4 @@ References - `PaddleClas `__ - `OpenVINO PaddlePaddle - support `__ + support `__ diff --git a/docs/notebooks/104-model-tools-with-output.rst b/docs/notebooks/104-model-tools-with-output.rst index 3b7b163a6fb07a..29ca485e8e9c9c 100644 --- a/docs/notebooks/104-model-tools-with-output.rst +++ b/docs/notebooks/104-model-tools-with-output.rst @@ -222,9 +222,9 @@ Converting mobilenet-v2-pytorch… Conversion command: /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-534/.workspace/scm/ov-notebook/.venv/bin/python -- /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-534/.workspace/scm/ov-notebook/.venv/bin/mo --framework=onnx --output_dir=model/public/mobilenet-v2-pytorch/FP16 --model_name=mobilenet-v2-pytorch --input=data '--mean_values=data[123.675,116.28,103.53]' '--scale_values=data[58.624,57.12,57.375]' --reverse_input_channels --output=prob --input_model=model/public/mobilenet-v2-pytorch/mobilenet-v2.onnx '--layout=data(NCHW)' '--input_shape=[1, 3, 224, 224]' --compress_to_fp16=True [ INFO ] Generated IR will be compressed to FP16. If you get lower accuracy, please consider disabling compression explicitly by adding argument --compress_to_fp16=False. - Find more information about compression to FP16 at https://docs.openvino.ai/2023.0/openvino_docs_MO_DG_FP16_Compression.html + Find more information about compression to FP16 at https://docs.openvino.ai/2023.3/openvino_docs_MO_DG_FP16_Compression.html [ INFO ] The model was converted to IR v11, the latest model format that corresponds to the source DL framework input/output format. While IR v11 is backwards compatible with OpenVINO Inference Engine API v1.0, please use API v2.0 (as of 2022.1) to take advantage of the latest improvements in IR v11. - Find more information about API v2.0 and IR v11 at https://docs.openvino.ai/2023.0/openvino_2_0_transition_guide.html + Find more information about API v2.0 and IR v11 at https://docs.openvino.ai/2023.3/openvino_2_0_transition_guide.html [ SUCCESS ] Generated IR version 11 model. [ SUCCESS ] XML file: /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-534/.workspace/scm/ov-notebook/notebooks/104-model-tools/model/public/mobilenet-v2-pytorch/FP16/mobilenet-v2-pytorch.xml [ SUCCESS ] BIN file: /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-534/.workspace/scm/ov-notebook/notebooks/104-model-tools/model/public/mobilenet-v2-pytorch/FP16/mobilenet-v2-pytorch.bin diff --git a/docs/notebooks/105-language-quantize-bert-with-output.rst b/docs/notebooks/105-language-quantize-bert-with-output.rst index 5c089756cf15da..2170c3406a1aa1 100644 --- a/docs/notebooks/105-language-quantize-bert-with-output.rst +++ b/docs/notebooks/105-language-quantize-bert-with-output.rst @@ -532,7 +532,7 @@ Frames Per Second (FPS) for images. Finally, measure the inference performance of OpenVINO ``FP32`` and ``INT8`` models. For this purpose, use `Benchmark -Tool `__ +Tool `__ in OpenVINO. 
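As a sketch, such a comparison can be launched straight from a notebook cell; the IR paths below are placeholders for the ``FP32`` and ``INT8`` models produced earlier, and the 15-second run time is an arbitrary choice.

.. code:: ipython3

    # Sketch: compare FP32 and INT8 IR performance on CPU (paths are placeholders).
    ! benchmark_app -m model/bert_fp32.xml -d CPU -api async -t 15
    ! benchmark_app -m model/bert_int8.xml -d CPU -api async -t 15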
**Note**: The ``benchmark_app`` tool is able to measure the diff --git a/docs/notebooks/106-auto-device-with-output.rst b/docs/notebooks/106-auto-device-with-output.rst index 639842d29e0304..ec3dc7014b8905 100644 --- a/docs/notebooks/106-auto-device-with-output.rst +++ b/docs/notebooks/106-auto-device-with-output.rst @@ -2,19 +2,19 @@ Automatic Device Selection with OpenVINO™ ========================================= The `Auto -device `__ +device `__ (or AUTO in short) selects the most suitable device for inference by considering the model precision, power efficiency and processing capability of the available `compute -devices `__. +devices `__. The model precision (such as ``FP32``, ``FP16``, ``INT8``, etc.) is the first consideration to filter out the devices that cannot run the network efficiently. Next, if dedicated accelerators are available, these devices are preferred (for example, integrated and discrete -`GPU `__). -`CPU `__ +`GPU `__). +`CPU `__ is used as the default “fallback device”. Keep in mind that AUTO makes this selection only once, during the loading of a model. @@ -122,7 +122,7 @@ with ``ov.compile_model`` or serialized for next usage with ``ov.save_model``. For more information about model conversion API, see this -`page `__. +`page `__. .. code:: ipython3 @@ -361,9 +361,9 @@ completely portable between devices – meaning AUTO can configure the performance hint on whichever device is being used. For more information, refer to the `Performance -Hints `__ +Hints `__ section of `Automatic Device -Selection `__ +Selection `__ article. Class and callback definition diff --git a/docs/notebooks/107-speech-recognition-quantization-data2vec-with-output.rst b/docs/notebooks/107-speech-recognition-quantization-data2vec-with-output.rst index 77ad8fdb5d173a..3296e830cfa403 100644 --- a/docs/notebooks/107-speech-recognition-quantization-data2vec-with-output.rst +++ b/docs/notebooks/107-speech-recognition-quantization-data2vec-with-output.rst @@ -336,7 +336,7 @@ steps: accurate results, we should keep the operation in the postprocessing subgraph in floating point precision, using the ``ignored_scope`` parameter. For more information see `Tune quantization - parameters `__. + parameters `__. 3. Serialize OpenVINO IR model using ``ov.save_model`` function. .. code:: ipython3 diff --git a/docs/notebooks/108-gpu-device-with-output.rst b/docs/notebooks/108-gpu-device-with-output.rst index f484250f8208c8..05477911d5d8f8 100644 --- a/docs/notebooks/108-gpu-device-with-output.rst +++ b/docs/notebooks/108-gpu-device-with-output.rst @@ -95,10 +95,10 @@ cards `__. To get started, first `install -OpenVINO `__ +OpenVINO `__ on a system equipped with one or more Intel GPUs. Follow the `GPU configuration -instructions `__ +instructions `__ to configure OpenVINO to work with your GPU. Then, read on to learn how to accelerate inference with GPUs in OpenVINO! @@ -162,12 +162,12 @@ the system has a CPU, an integrated and discrete GPU, we should expect to see a list like this: ``['CPU', 'GPU.0', 'GPU.1']``. To simplify its use, the “GPU.0” can also be addressed with just “GPU”. For more details, see the `Device Naming -Convention `__ +Convention `__ section. If the GPUs are installed correctly on the system and still do not appear in the list, follow the steps described -`here `__ +`here `__ to configure your GPU drivers to work with OpenVINO. Once we have the GPUs working with OpenVINO, we can proceed with the next sections. @@ -279,7 +279,7 @@ the key properties are: speed up compilation time. 
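As an illustration, individual properties can be read per device with ``get_property``. The sketch below uses the standard ``FULL_DEVICE_NAME`` property and is not tied to a particular notebook.

.. code:: ipython3

    import openvino as ov

    core = ov.Core()
    for device in core.available_devices:
        # FULL_DEVICE_NAME is a standard read-only property exposed by every device.
        print(device, ":", core.get_property(device, "FULL_DEVICE_NAME"))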
To learn more about devices and properties, see the `Query Device -Properties `__ +Properties `__ page. Compiling a Model on GPU @@ -288,7 +288,7 @@ Compiling a Model on GPU Now, we know how to list the GPUs in the system and check their properties. We can easily use one for compiling and running models with OpenVINO `GPU -plugin `__. +plugin `__. Download and Convert a Model ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -363,7 +363,7 @@ Convert the Model to OpenVINO IR format To convert the model to OpenVINO IR with ``FP16`` precision, use model conversion API. The models are saved to the ``model/ir_model/`` directory. For more details about model conversion, see this -`page `__. +`page `__. .. code:: ipython3 @@ -420,7 +420,7 @@ the ``available_devices`` method are valid device specifiers. You may also use “AUTO”, which will automatically select the best device for inference (which is often the GPU). To learn more about AUTO plugin, visit the `Automatic Device -Selection `__ +Selection `__ page as well as the `AUTO device tutorial `__. @@ -490,7 +490,7 @@ compile times with caching enabled and disabled as follows: The actual time improvements will depend on the environment as well as the model being used but it is definitely something to consider when optimizing an application. To read more about this, see the `Model -Caching `__ +Caching `__ docs. Throughput and Latency Performance Hints @@ -529,7 +529,7 @@ Using Multiple GPUs with Multi-Device and Cumulative Throughput The latency and throughput hints mentioned above are great and can make a difference when used adequately but they usually use just one device, either due to the `AUTO -plugin `__ +plugin `__ or by manual specification of the device name as above. When we have multiple devices, such as an integrated and discrete GPU, we may use both at the same time to improve the utilization of the resources. In @@ -561,7 +561,7 @@ manually specify devices to use. Below is an example showing how to use how to set up an asynchronous pipeline that takes advantage of parallelism to increase throughput.** To learn more, see `Asynchronous - Inferencing `__ + Inferencing `__ in OpenVINO as well as the `Asynchronous Inference notebook `__. @@ -585,7 +585,7 @@ Note that benchmark_app only requires the model path to run but both the device and hint arguments will be useful to us. For more advanced usages, the tool itself has other options that can be checked by running ``benchmark_app -h`` or reading the -`docs `__. +`docs `__. The following example shows how to benchmark a simple model, using a GPU with a latency focus: @@ -1335,18 +1335,18 @@ To read more about any of these topics, feel free to visit their corresponding documentation: - `GPU - Plugin `__ + Plugin `__ - `AUTO - Plugin `__ + Plugin `__ - `Model - Caching `__ + Caching `__ - `MULTI Device Mode `__ - `Query Device - Properties `__ + Properties `__ - `Configurations for GPUs with - OpenVINO `__ + OpenVINO `__ - `Benchmark Python - Tool `__ + Tool `__ - `Asynchronous - Inferencing `__ + Inferencing `__ diff --git a/docs/notebooks/109-latency-tricks-with-output.rst b/docs/notebooks/109-latency-tricks-with-output.rst index 4d93f99b2be828..6d9f242115a46c 100644 --- a/docs/notebooks/109-latency-tricks-with-output.rst +++ b/docs/notebooks/109-latency-tricks-with-output.rst @@ -521,7 +521,7 @@ OpenVINO IR model + more inference threads There is a possibility to add a config for any device (CPU in this case). 
We will increase the number of threads to an equal number of our cores. There are `more -options `__ +options `__ to be changed, so it’s worth playing with them to see what works best in our case. In some cases, this optimization may worsen the performance. If it is the case, don’t use it. @@ -555,7 +555,7 @@ OpenVINO IR model in latency mode OpenVINO offers a virtual device called -`AUTO `__, +`AUTO `__, which can select the best device for us based on a performance hint. There are three different hints: ``LATENCY``, ``THROUGHPUT``, and ``CUMULATIVE_THROUGHPUT``. As this notebook is focused on the latency @@ -678,6 +678,6 @@ object detection model. Even if you experience much better performance after running this notebook, please note this may not be valid for every hardware or every model. For the most accurate results, please use ``benchmark_app`` `command-line -tool `__. +tool `__. Note that ``benchmark_app`` cannot measure the impact of some tricks above, e.g., shared memory. diff --git a/docs/notebooks/109-throughput-tricks-with-output.rst b/docs/notebooks/109-throughput-tricks-with-output.rst index 38c1b8cd3060d1..43e6c9b9867ca5 100644 --- a/docs/notebooks/109-throughput-tricks-with-output.rst +++ b/docs/notebooks/109-throughput-tricks-with-output.rst @@ -558,7 +558,7 @@ configuration of the device. There are three different hints: notebook is focused on the throughput mode, we will use the latter two. The hints can be used with other devices as well. Throughput mode implicitly triggers using the `Automatic -Batching `__ +Batching `__ feature, which sets the batch size to the optimal level. .. code:: ipython3 @@ -612,7 +612,7 @@ OpenVINO IR model in throughput mode on AUTO OpenVINO offers a virtual device called -`AUTO `__, +`AUTO `__, which can select the best device for us based on the aforementioned performance hint. @@ -672,7 +672,7 @@ There are other tricks for performance improvement, such as advanced options, quantization and pre-post-processing or dedicated to latency mode. To get even more from your model, please visit `advanced throughput -options `__, +options `__, `109-latency-tricks <109-latency-tricks.ipynb>`__, `111-detection-quantization <../111-detection-quantization>`__, and `118-optimize-preprocessing <../118-optimize-preprocessing>`__. @@ -725,6 +725,6 @@ object detection model. Even if you experience much better performance after running this notebook, please note this may not be valid for every hardware or every model. For the most accurate results, please use ``benchmark_app`` `command-line -tool `__. +tool `__. Note that ``benchmark_app`` cannot measure the impact of some tricks above. diff --git a/docs/notebooks/110-ct-scan-live-inference-with-output.rst b/docs/notebooks/110-ct-scan-live-inference-with-output.rst index e965f2fdd48be7..343da65cc36238 100644 --- a/docs/notebooks/110-ct-scan-live-inference-with-output.rst +++ b/docs/notebooks/110-ct-scan-live-inference-with-output.rst @@ -127,7 +127,7 @@ Benchmark Model Performance --------------------------------------------------------------------- To measure the inference performance of the IR model, use `Benchmark -Tool `__ +Tool `__ - an inference performance measurement tool in OpenVINO. Benchmark tool is a command-line application that can be run in the notebook with ``! benchmark_app`` or ``%sx benchmark_app`` commands. @@ -311,7 +311,7 @@ Caching, refer to the `OpenVINO API tutorial <002-openvino-api-with-output.html>`__. 
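Enabling model caching itself only requires a cache directory to be set before compilation. The following is a minimal sketch; the directory name and IR path are placeholders.

.. code:: ipython3

    import openvino as ov

    core = ov.Core()
    # Compiled blobs are written to this directory and reused by later compile_model calls.
    core.set_property({"CACHE_DIR": "model_cache"})
    compiled_model = core.compile_model("model/quantized_unet.xml", "CPU")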
We will use -`AsyncInferQueue `__ +`AsyncInferQueue `__ to perform asynchronous inference. It can be instantiated with compiled model and a number of jobs - parallel execution threads. If you don’t pass a number of jobs or pass ``0``, then OpenVINO will pick the optimal diff --git a/docs/notebooks/110-ct-segmentation-quantize-nncf-with-output.rst b/docs/notebooks/110-ct-segmentation-quantize-nncf-with-output.rst index f8762186f0c374..c4a466e2acdd70 100644 --- a/docs/notebooks/110-ct-segmentation-quantize-nncf-with-output.rst +++ b/docs/notebooks/110-ct-segmentation-quantize-nncf-with-output.rst @@ -14,7 +14,7 @@ scratch; the data is from This third tutorial in the series shows how to: - Convert an Original model to OpenVINO IR with `model conversion - API `__ + API `__ - Quantize a PyTorch model with NNCF - Evaluate the F1 score metric of the original model and the quantized model @@ -639,7 +639,7 @@ Compare Performance of the FP32 IR Model and Quantized Models To measure the inference performance of the ``FP32`` and ``INT8`` models, we use `Benchmark -Tool `__ +Tool `__ - OpenVINO’s inference performance measurement tool. Benchmark tool is a command line application, part of OpenVINO development tools, that can be run in the notebook with ``! benchmark_app`` or diff --git a/docs/notebooks/111-yolov5-quantization-migration-with-output.rst b/docs/notebooks/111-yolov5-quantization-migration-with-output.rst index 46a8162d8bcc2f..f3eac6a0fe0697 100644 --- a/docs/notebooks/111-yolov5-quantization-migration-with-output.rst +++ b/docs/notebooks/111-yolov5-quantization-migration-with-output.rst @@ -3,7 +3,7 @@ Migrate quantization from POT API to NNCF API This tutorial demonstrates how to migrate quantization pipeline written using the OpenVINO `Post-Training Optimization Tool -(POT) `__ to +(POT) `__ to `NNCF Post-Training Quantization API `__. This tutorial is based on `Ultralytics @@ -180,18 +180,18 @@ following content: Convert the ONNX model to OpenVINO Intermediate Representation (IR) model generated by `OpenVINO model conversion -API `__. +API `__. We will use the ``ov.convert_model`` function of model conversion Python API to convert ONNX model to OpenVINO Model, then it can be serialized using ``ov.save_model``. As the result, directory with the ``{MODEL_DIR}`` name will be created with the following content: \* ``{MODEL_NAME}_fp32.xml``, ``{MODEL_NAME}_fp32.bin`` - OpenVINO Intermediate Representation (IR) model generated by `OpenVINO Model -Converter `__, +Converter `__, saved with FP32 precision. \* ``{MODEL_NAME}_fp16.xml``, ``{MODEL_NAME}_fp16.bin`` - OpenVINO Intermediate Representation (IR) model generated by `OpenVINO Model -Converter `__, +Converter `__, saved with FP16 precision. .. code:: ipython3 @@ -467,7 +467,7 @@ Quantization parameters ``preset``, ``model_type``, ``subset_size``, ``fast_bias_correction``, ``ignored_scope`` are arguments of function. More details about supported parameters and formats can be found in NNCF Post-Training Quantization -`documentation `__. +`documentation `__. NNCF also expect providing model object in inference framework format, in our case ``ov.Model`` instance created using ``core.read_model`` or ``ov.convert_model``. 
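Put together, the call described above can be sketched as follows; the IR path, ``data_loader``, and ``transform_fn`` are placeholders standing in for the calibration data pipeline, not code from this notebook.

.. code:: ipython3

    import nncf
    import openvino as ov

    core = ov.Core()
    model = core.read_model("model/yolov5m_fp32.xml")        # placeholder path

    # transform_fn must return a sample in the format the model expects at inference time.
    calibration_dataset = nncf.Dataset(data_loader, transform_fn)

    quantized_model = nncf.quantize(
        model,
        calibration_dataset,
        preset=nncf.QuantizationPreset.MIXED,
        subset_size=300,
    )
    ov.save_model(quantized_model, "model/yolov5m_int8.xml")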
@@ -1201,8 +1201,8 @@ References - `Ultralytics YOLOv5 `__ - `OpenVINO Post-training Optimization - Tool `__ + Tool `__ - `NNCF Post-training - quantization `__ + quantization `__ - `Model Conversion - API `__ + API `__ diff --git a/docs/notebooks/112-pytorch-post-training-quantization-nncf-with-output.rst b/docs/notebooks/112-pytorch-post-training-quantization-nncf-with-output.rst index 311973516693b3..3187ce53bd9e03 100644 --- a/docs/notebooks/112-pytorch-post-training-quantization-nncf-with-output.rst +++ b/docs/notebooks/112-pytorch-post-training-quantization-nncf-with-output.rst @@ -495,7 +495,7 @@ layers. The framework is designed so that modifications to your original training code are minor. Quantization is the simplest scenario and requires a few modifications. For more information about NNCF Post Training Quantization (PTQ) API, refer to the `Basic Quantization Flow -Guide `__. +Guide `__. 1. Create a transformation function that accepts a sample from the dataset and returns data suitable for model inference. This enables @@ -618,7 +618,7 @@ Python API. The models will be saved to the ‘OUTPUT’ directory for later benchmarking. For more information about model conversion, refer to this -`page `__. +`page `__. .. code:: ipython3 @@ -738,7 +738,7 @@ IV. Compare performance of INT8 model and FP32 model in OpenVINO Finally, measure the inference performance of the ``FP32`` and ``INT8`` models, using `Benchmark -Tool `__ +Tool `__ - an inference performance measurement tool in OpenVINO. By default, Benchmark Tool runs inference for 60 seconds in asynchronous mode on CPU. It returns inference speed as latency (milliseconds per image) and diff --git a/docs/notebooks/113-image-classification-quantization-with-output.rst b/docs/notebooks/113-image-classification-quantization-with-output.rst index 50eeb5d4eab7e3..9c5291d3e34cbe 100644 --- a/docs/notebooks/113-image-classification-quantization-with-output.rst +++ b/docs/notebooks/113-image-classification-quantization-with-output.rst @@ -113,7 +113,7 @@ static shape. The converted model is ready to be loaded on a device for inference and can be saved on a disk for next usage via the ``save_model`` function. More details about model conversion Python API can be found on this -`page `__. +`page `__. .. code:: ipython3 @@ -221,7 +221,7 @@ dataset for performing basic quantization. Optionally, additional parameters like ``subset_size``, ``preset``, ``ignored_scope`` can be provided to improve quantization result if applicable. More details about supported parameters can be found on this -`page `__ +`page `__ .. code:: ipython3 @@ -382,7 +382,7 @@ Compare Performance of the Original and Quantized Models Finally, measure the inference performance of the ``FP32`` and ``INT8`` models, using `Benchmark -Tool `__ +Tool `__ - an inference performance measurement tool in OpenVINO. **NOTE**: For more accurate performance, it is recommended to run diff --git a/docs/notebooks/115-async-api-with-output.rst b/docs/notebooks/115-async-api-with-output.rst index 3850ebc5ed1e05..a3378d228e5917 100644 --- a/docs/notebooks/115-async-api-with-output.rst +++ b/docs/notebooks/115-async-api-with-output.rst @@ -491,7 +491,7 @@ Compare the performance Asynchronous mode pipelines can be supported with the -`AsyncInferQueue `__ +`AsyncInferQueue `__ wrapper class. This class automatically spawns the pool of ``InferRequest`` objects (also called “jobs”) and provides synchronization mechanisms to control the flow of the pipeline. 
It is a diff --git a/docs/notebooks/116-sparsity-optimization-with-output.rst b/docs/notebooks/116-sparsity-optimization-with-output.rst index b6eb46e54c5252..2a8b1fbc4d0ef2 100644 --- a/docs/notebooks/116-sparsity-optimization-with-output.rst +++ b/docs/notebooks/116-sparsity-optimization-with-output.rst @@ -12,7 +12,7 @@ datasets `__ using `Optimum-Intel `__. It demonstrates the inference performance advantage on 4th Gen Intel® Xeon® Scalable Processors by running it with `Sparse Weight -Decompression `__, +Decompression `__, a runtime option that seizes model sparsity for efficiency. The notebook consists of the following steps: @@ -394,6 +394,6 @@ For more details about asynchronous inference with OpenVINO, refer to the following documentation: - `Deployment Optimization - Guide `__ + Guide `__ - `Inference Request - API `__ + API `__ diff --git a/docs/notebooks/117-model-server-with-output.rst b/docs/notebooks/117-model-server-with-output.rst index 870031ab70f1da..3ffe8005c92701 100644 --- a/docs/notebooks/117-model-server-with-output.rst +++ b/docs/notebooks/117-model-server-with-output.rst @@ -231,7 +231,7 @@ Check whether the OVMS container is running normally: The required Model Server parameters are listed below. For additional configuration options, see the `Model Server Parameters -section `__. +section `__. .. raw:: html @@ -888,6 +888,6 @@ References ---------------------------------------------------- 1. `OpenVINO™ Model Server - documentation `__ + documentation `__ 2. `OpenVINO™ Model Server GitHub repository `__ diff --git a/docs/notebooks/118-optimize-preprocessing-with-output.rst b/docs/notebooks/118-optimize-preprocessing-with-output.rst index 0723e03832d786..bf39ecd4881e70 100644 --- a/docs/notebooks/118-optimize-preprocessing-with-output.rst +++ b/docs/notebooks/118-optimize-preprocessing-with-output.rst @@ -9,9 +9,9 @@ instrument, that enables integration of preprocessing steps into an execution graph and performing it on a selected device, which can improve device utilization. For more information about Preprocessing API, see this -`overview `__ +`overview `__ and -`details `__ +`details `__ This tutorial include following steps: @@ -268,7 +268,7 @@ Graph modifications of a model shall be performed after the model is read from a drive and before it is loaded on the actual device. Pre-processing support following operations (please, see more details -`here `__) +`here `__) - Mean/Scale Normalization - Converting Precision @@ -304,7 +304,7 @@ Create ``PrePostProcessor`` Object The -`PrePostProcessor() `__ +`PrePostProcessor() `__ class enables specifying the preprocessing and postprocessing steps for a model. @@ -329,7 +329,7 @@ about user’s input tensor will be initialized to same data (type/shape/etc) as model’s input parameter. User application can override particular parameters according to application’s data. Refer to the following -`page `__ +`page `__ for more information about parameters for overriding. Below is all the specified input information: @@ -367,7 +367,7 @@ Declaring Model Layout Model input already has information about precision and shape. Preprocessing API is not intended to modify this. The only thing that may be specified is input data -`layout `__. +`layout `__. .. code:: ipython3 @@ -397,7 +397,7 @@ Preprocessing Steps Now, the sequence of preprocessing steps can be defined. For more information about preprocessing steps, see -`here `__. +`here `__. 
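As a preview of where these steps end up, a chained definition might look like the sketch below; ``model`` is assumed to be the model created earlier, and the layouts, target size, mean and scale values are only illustrative.

.. code:: ipython3

    from openvino.preprocess import PrePostProcessor, ResizeAlgorithm
    from openvino.runtime import Layout, Type

    # Sketch: declare the user tensor, the model layout, and the preprocessing chain.
    ppp = PrePostProcessor(model)
    ppp.input().tensor().set_element_type(Type.u8).set_layout(Layout("NHWC"))
    ppp.input().model().set_layout(Layout("NCHW"))
    ppp.input().preprocess() \
        .convert_element_type(Type.f32) \
        .resize(ResizeAlgorithm.RESIZE_LINEAR, 224, 224) \
        .mean([123.675, 116.28, 103.53]) \
        .scale([58.624, 57.12, 57.375])
    model = ppp.build()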
Perform the following: @@ -406,7 +406,7 @@ Perform the following: dynamic size, for example, ``{?, 3, ?, ?}`` resize will not know how to resize the picture. Therefore, in this case, target height/ width should be specified. For more details, see also the - `PreProcessSteps.resize() `__. + `PreProcessSteps.resize() `__. - Subtract mean from each channel. - Divide each pixel data to appropriate scale value. diff --git a/docs/notebooks/119-tflite-to-openvino-with-output.rst b/docs/notebooks/119-tflite-to-openvino-with-output.rst index 299489f15e64e5..2367aff15ae492 100644 --- a/docs/notebooks/119-tflite-to-openvino-with-output.rst +++ b/docs/notebooks/119-tflite-to-openvino-with-output.rst @@ -8,7 +8,7 @@ machine learning models to edge devices. This short tutorial shows how to convert a TensorFlow Lite `EfficientNet-Lite-B0 `__ image classification model to OpenVINO `Intermediate -Representation `__ +Representation `__ (OpenVINO IR) format, using Model Converter. After creating the OpenVINO IR, load the model in `OpenVINO Runtime `__ @@ -120,9 +120,9 @@ using ``ov.save_model`` function, reducing loading time for next running. By default, model weights are compressed to FP16 during serialization by ``ov.save_model``. For more information about model conversion, see this -`page `__. +`page `__. For TensorFlow Lite models support, refer to this -`tutorial `__. +`tutorial `__. .. code:: ipython3 diff --git a/docs/notebooks/120-tensorflow-instance-segmentation-to-openvino-with-output.rst b/docs/notebooks/120-tensorflow-instance-segmentation-to-openvino-with-output.rst index 034af17d0cb6b6..347579959ec3f7 100644 --- a/docs/notebooks/120-tensorflow-instance-segmentation-to-openvino-with-output.rst +++ b/docs/notebooks/120-tensorflow-instance-segmentation-to-openvino-with-output.rst @@ -17,9 +17,9 @@ This tutorial shows how to convert a TensorFlow `Mask R-CNN with Inception ResNet V2 `__ instance segmentation model to OpenVINO `Intermediate -Representation `__ +Representation `__ (OpenVINO IR) format, using `Model -Optimizer `__. +Optimizer `__. After creating the OpenVINO IR, load the model in `OpenVINO Runtime `__ and do inference with a sample image. @@ -689,4 +689,4 @@ utilization. For more information, refer to the `Optimize Preprocessing tutorial <118-optimize-preprocessing-with-output.html>`__ and to the overview of `Preprocessing -API `__. +API `__. diff --git a/docs/notebooks/120-tensorflow-object-detection-to-openvino-with-output.rst b/docs/notebooks/120-tensorflow-object-detection-to-openvino-with-output.rst index 0a69d8cc2ff110..e4f7d5c5fdde62 100644 --- a/docs/notebooks/120-tensorflow-object-detection-to-openvino-with-output.rst +++ b/docs/notebooks/120-tensorflow-object-detection-to-openvino-with-output.rst @@ -17,7 +17,7 @@ This tutorial shows how to convert a TensorFlow `Faster R-CNN with Resnet-50 V1 `__ object detection model to OpenVINO `Intermediate -Representation `__ +Representation `__ (OpenVINO IR) format, using Model Converter. After creating the OpenVINO IR, load the model in `OpenVINO Runtime `__ @@ -189,9 +189,9 @@ or saved on disk using the ``save_model`` function to reduce loading time when the model is run in the future. See the `Model Converter Developer -Guide `__ +Guide `__ for more information about Model Converter and TensorFlow `models -support `__. +support `__. .. code:: ipython3 @@ -709,4 +709,4 @@ utilization. 
For more information, refer to the `Optimize Preprocessing tutorial <118-optimize-preprocessing-with-output.html>`__ and to the overview of `Preprocessing -API `__. +API `__. diff --git a/docs/notebooks/121-convert-to-openvino-with-output.rst b/docs/notebooks/121-convert-to-openvino-with-output.rst index 20b8583863daa0..993df191836796 100644 --- a/docs/notebooks/121-convert-to-openvino-with-output.rst +++ b/docs/notebooks/121-convert-to-openvino-with-output.rst @@ -51,7 +51,7 @@ OpenVINO IR format OpenVINO `Intermediate Representation -(IR) `__ is the +(IR) `__ is the proprietary model format of OpenVINO. It is produced after converting a model with model conversion API. Model conversion API translates the frequently used deep learning operations to their respective similar @@ -71,7 +71,7 @@ tool. You can choose one of them based on whichever is most convenient for you. There should not be any differences in the results of model conversion if the same set of parameters is used. For more details, refer to `Model -Preparation `__ +Preparation `__ documentation. .. code:: ipython3 @@ -1007,9 +1007,9 @@ To convert a model to OpenVINO IR, use the following command: .. parsed-literal:: [ INFO ] Generated IR will be compressed to FP16. If you get lower accuracy, please consider disabling compression explicitly by adding argument --compress_to_fp16=False. - Find more information about compression to FP16 at https://docs.openvino.ai/2023.0/openvino_docs_MO_DG_FP16_Compression.html + Find more information about compression to FP16 at https://docs.openvino.ai/2023.3/openvino_docs_MO_DG_FP16_Compression.html [ INFO ] The model was converted to IR v11, the latest model format that corresponds to the source DL framework input/output format. While IR v11 is backwards compatible with OpenVINO Inference Engine API v1.0, please use API v2.0 (as of 2022.1) to take advantage of the latest improvements in IR v11. - Find more information about API v2.0 and IR v11 at https://docs.openvino.ai/2023.0/openvino_2_0_transition_guide.html + Find more information about API v2.0 and IR v11 at https://docs.openvino.ai/2023.3/openvino_2_0_transition_guide.html [ SUCCESS ] Generated IR version 11 model. [ SUCCESS ] XML file: /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-561/.workspace/scm/ov-notebook/notebooks/121-convert-to-openvino/model/distilbert.xml [ SUCCESS ] BIN file: /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-561/.workspace/scm/ov-notebook/notebooks/121-convert-to-openvino/model/distilbert.bin @@ -1046,20 +1046,20 @@ Both Python conversion API and Model Optimizer command-line tool provide the following capabilities: \* overriding original input shapes for model conversion with ``input`` and ``input_shape`` parameters. `Setting Input Shapes -guide `__. +guide `__. \* cutting off unwanted parts of a model (such as unsupported operations and training sub-graphs) using the ``input`` and ``output`` parameters to define new inputs and outputs of the converted model. `Cutting Off Parts of a Model -guide `__. +guide `__. \* inserting additional input pre-processing sub-graphs into the converted model by using the ``mean_values``, ``scales_values``, ``layout``, and other parameters. `Embedding Preprocessing Computation -article `__. +article `__. \* compressing the model weights (for example, weights for convolutions and matrix multiplications) to FP16 data type using ``compress_to_fp16`` compression parameter. `Compression of a Model to FP16 -guide `__. +guide `__. 
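For instance, several of these parameters can be combined in a single command. The following is a sketch using flags that also appear in the conversion logs in this section; the ONNX file name and the shape, mean and scale values are placeholders.

.. code:: ipython3

    # Sketch: override the input shape, embed mean/scale preprocessing, reverse channels,
    # and compress weights to FP16 in one conversion call (values are illustrative).
    ! mo --input_model model.onnx --input_shape "[1,3,224,224]" --mean_values "[123.675,116.28,103.53]" --scale_values "[58.624,57.12,57.375]" --reverse_input_channels --compress_to_fp16=True --output_dir model/ir_model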
If the out-of-the-box conversion (only the ``input_model`` parameter is specified) is not successful, it may be required to use the parameters @@ -1080,7 +1080,7 @@ up static shapes, model conversion API provides the ``input`` and ``input_shape`` parameters. For more information refer to `Setting Input Shapes -guide `__. +guide `__. .. code:: ipython3 @@ -1103,9 +1103,9 @@ guide `__. +guide `__. .. code:: ipython3 @@ -1286,9 +1286,9 @@ guide `__. +article `__. Specifying Layout ^^^^^^^^^^^^^^^^^ @@ -1356,7 +1356,7 @@ for both inputs and outputs. Some preprocessing requires to set input layouts, for example, setting a batch, applying mean or scales, and reversing input channels (BGR<->RGB). For the layout syntax, check the `Layout API -overview `__. +overview `__. To specify the layout, you can use the layout option followed by the layout value. @@ -1381,9 +1381,9 @@ Resnet50 model that was exported to the ONNX format: .. parsed-literal:: [ INFO ] Generated IR will be compressed to FP16. If you get lower accuracy, please consider disabling compression explicitly by adding argument --compress_to_fp16=False. - Find more information about compression to FP16 at https://docs.openvino.ai/2023.0/openvino_docs_MO_DG_FP16_Compression.html + Find more information about compression to FP16 at https://docs.openvino.ai/2023.3/openvino_docs_MO_DG_FP16_Compression.html [ INFO ] The model was converted to IR v11, the latest model format that corresponds to the source DL framework input/output format. While IR v11 is backwards compatible with OpenVINO Inference Engine API v1.0, please use API v2.0 (as of 2022.1) to take advantage of the latest improvements in IR v11. - Find more information about API v2.0 and IR v11 at https://docs.openvino.ai/2023.0/openvino_2_0_transition_guide.html + Find more information about API v2.0 and IR v11 at https://docs.openvino.ai/2023.3/openvino_2_0_transition_guide.html [ SUCCESS ] Generated IR version 11 model. [ SUCCESS ] XML file: /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-561/.workspace/scm/ov-notebook/notebooks/121-convert-to-openvino/model/resnet.xml [ SUCCESS ] BIN file: /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-561/.workspace/scm/ov-notebook/notebooks/121-convert-to-openvino/model/resnet.bin @@ -1427,9 +1427,9 @@ presented by input data. Use either ``layout`` or ``source_layout`` with .. parsed-literal:: [ INFO ] Generated IR will be compressed to FP16. If you get lower accuracy, please consider disabling compression explicitly by adding argument --compress_to_fp16=False. - Find more information about compression to FP16 at https://docs.openvino.ai/2023.0/openvino_docs_MO_DG_FP16_Compression.html + Find more information about compression to FP16 at https://docs.openvino.ai/2023.3/openvino_docs_MO_DG_FP16_Compression.html [ INFO ] The model was converted to IR v11, the latest model format that corresponds to the source DL framework input/output format. While IR v11 is backwards compatible with OpenVINO Inference Engine API v1.0, please use API v2.0 (as of 2022.1) to take advantage of the latest improvements in IR v11. - Find more information about API v2.0 and IR v11 at https://docs.openvino.ai/2023.0/openvino_2_0_transition_guide.html + Find more information about API v2.0 and IR v11 at https://docs.openvino.ai/2023.3/openvino_2_0_transition_guide.html [ SUCCESS ] Generated IR version 11 model. 
[ SUCCESS ] XML file: /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-561/.workspace/scm/ov-notebook/notebooks/121-convert-to-openvino/model/resnet.xml [ SUCCESS ] BIN file: /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-561/.workspace/scm/ov-notebook/notebooks/121-convert-to-openvino/model/resnet.bin @@ -1446,9 +1446,9 @@ presented by input data. Use either ``layout`` or ``source_layout`` with .. parsed-literal:: [ INFO ] Generated IR will be compressed to FP16. If you get lower accuracy, please consider disabling compression explicitly by adding argument --compress_to_fp16=False. - Find more information about compression to FP16 at https://docs.openvino.ai/2023.0/openvino_docs_MO_DG_FP16_Compression.html + Find more information about compression to FP16 at https://docs.openvino.ai/2023.3/openvino_docs_MO_DG_FP16_Compression.html [ INFO ] The model was converted to IR v11, the latest model format that corresponds to the source DL framework input/output format. While IR v11 is backwards compatible with OpenVINO Inference Engine API v1.0, please use API v2.0 (as of 2022.1) to take advantage of the latest improvements in IR v11. - Find more information about API v2.0 and IR v11 at https://docs.openvino.ai/2023.0/openvino_2_0_transition_guide.html + Find more information about API v2.0 and IR v11 at https://docs.openvino.ai/2023.3/openvino_2_0_transition_guide.html [ SUCCESS ] Generated IR version 11 model. [ SUCCESS ] XML file: /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-561/.workspace/scm/ov-notebook/notebooks/121-convert-to-openvino/model/resnet.xml [ SUCCESS ] BIN file: /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-561/.workspace/scm/ov-notebook/notebooks/121-convert-to-openvino/model/resnet.bin @@ -1498,9 +1498,9 @@ that the preprocessing takes negligible time for inference. .. parsed-literal:: [ INFO ] Generated IR will be compressed to FP16. If you get lower accuracy, please consider disabling compression explicitly by adding argument --compress_to_fp16=False. - Find more information about compression to FP16 at https://docs.openvino.ai/2023.0/openvino_docs_MO_DG_FP16_Compression.html + Find more information about compression to FP16 at https://docs.openvino.ai/2023.3/openvino_docs_MO_DG_FP16_Compression.html [ INFO ] The model was converted to IR v11, the latest model format that corresponds to the source DL framework input/output format. While IR v11 is backwards compatible with OpenVINO Inference Engine API v1.0, please use API v2.0 (as of 2022.1) to take advantage of the latest improvements in IR v11. - Find more information about API v2.0 and IR v11 at https://docs.openvino.ai/2023.0/openvino_2_0_transition_guide.html + Find more information about API v2.0 and IR v11 at https://docs.openvino.ai/2023.3/openvino_2_0_transition_guide.html [ SUCCESS ] Generated IR version 11 model. [ SUCCESS ] XML file: /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-561/.workspace/scm/ov-notebook/notebooks/121-convert-to-openvino/model/resnet.xml [ SUCCESS ] BIN file: /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-561/.workspace/scm/ov-notebook/notebooks/121-convert-to-openvino/model/resnet.bin @@ -1517,9 +1517,9 @@ that the preprocessing takes negligible time for inference. .. parsed-literal:: [ INFO ] Generated IR will be compressed to FP16. If you get lower accuracy, please consider disabling compression explicitly by adding argument --compress_to_fp16=False. 
- Find more information about compression to FP16 at https://docs.openvino.ai/2023.0/openvino_docs_MO_DG_FP16_Compression.html + Find more information about compression to FP16 at https://docs.openvino.ai/2023.3/openvino_docs_MO_DG_FP16_Compression.html [ INFO ] The model was converted to IR v11, the latest model format that corresponds to the source DL framework input/output format. While IR v11 is backwards compatible with OpenVINO Inference Engine API v1.0, please use API v2.0 (as of 2022.1) to take advantage of the latest improvements in IR v11. - Find more information about API v2.0 and IR v11 at https://docs.openvino.ai/2023.0/openvino_2_0_transition_guide.html + Find more information about API v2.0 and IR v11 at https://docs.openvino.ai/2023.3/openvino_2_0_transition_guide.html [ SUCCESS ] Generated IR version 11 model. [ SUCCESS ] XML file: /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-561/.workspace/scm/ov-notebook/notebooks/121-convert-to-openvino/model/resnet.xml [ SUCCESS ] BIN file: /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-561/.workspace/scm/ov-notebook/notebooks/121-convert-to-openvino/model/resnet.bin @@ -1566,9 +1566,9 @@ the color channels before inference. .. parsed-literal:: [ INFO ] Generated IR will be compressed to FP16. If you get lower accuracy, please consider disabling compression explicitly by adding argument --compress_to_fp16=False. - Find more information about compression to FP16 at https://docs.openvino.ai/2023.0/openvino_docs_MO_DG_FP16_Compression.html + Find more information about compression to FP16 at https://docs.openvino.ai/2023.3/openvino_docs_MO_DG_FP16_Compression.html [ INFO ] The model was converted to IR v11, the latest model format that corresponds to the source DL framework input/output format. While IR v11 is backwards compatible with OpenVINO Inference Engine API v1.0, please use API v2.0 (as of 2022.1) to take advantage of the latest improvements in IR v11. - Find more information about API v2.0 and IR v11 at https://docs.openvino.ai/2023.0/openvino_2_0_transition_guide.html + Find more information about API v2.0 and IR v11 at https://docs.openvino.ai/2023.3/openvino_2_0_transition_guide.html [ SUCCESS ] Generated IR version 11 model. [ SUCCESS ] XML file: /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-561/.workspace/scm/ov-notebook/notebooks/121-convert-to-openvino/model/resnet.xml [ SUCCESS ] BIN file: /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-561/.workspace/scm/ov-notebook/notebooks/121-convert-to-openvino/model/resnet.bin @@ -1611,9 +1611,9 @@ models, this decrease is negligible. .. parsed-literal:: [ INFO ] Generated IR will be compressed to FP16. If you get lower accuracy, please consider disabling compression explicitly by adding argument --compress_to_fp16=False. - Find more information about compression to FP16 at https://docs.openvino.ai/2023.0/openvino_docs_MO_DG_FP16_Compression.html + Find more information about compression to FP16 at https://docs.openvino.ai/2023.3/openvino_docs_MO_DG_FP16_Compression.html [ INFO ] The model was converted to IR v11, the latest model format that corresponds to the source DL framework input/output format. While IR v11 is backwards compatible with OpenVINO Inference Engine API v1.0, please use API v2.0 (as of 2022.1) to take advantage of the latest improvements in IR v11. 
- Find more information about API v2.0 and IR v11 at https://docs.openvino.ai/2023.0/openvino_2_0_transition_guide.html + Find more information about API v2.0 and IR v11 at https://docs.openvino.ai/2023.3/openvino_2_0_transition_guide.html [ SUCCESS ] Generated IR version 11 model. [ SUCCESS ] XML file: /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-561/.workspace/scm/ov-notebook/notebooks/121-convert-to-openvino/model/resnet.xml [ SUCCESS ] BIN file: /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-561/.workspace/scm/ov-notebook/notebooks/121-convert-to-openvino/model/resnet.bin diff --git a/docs/notebooks/122-speech-recognition-quantization-wav2vec2-with-output.rst b/docs/notebooks/122-speech-recognition-quantization-wav2vec2-with-output.rst index 50076690c479b9..c6cacb354a2500 100644 --- a/docs/notebooks/122-speech-recognition-quantization-wav2vec2-with-output.rst +++ b/docs/notebooks/122-speech-recognition-quantization-wav2vec2-with-output.rst @@ -24,7 +24,7 @@ The advanced quantization flow allows to apply 8-bit quantization to the model with control of accuracy metric. This is achieved by keeping the most impactful operations within the model in the original precision. The flow is based on the `Basic 8-bit -quantization `__ +quantization `__ and has the following differences: - Besides the calibration dataset, a validation dataset is required to diff --git a/docs/notebooks/122-yolov8-quantization-with-accuracy-control-with-output.rst b/docs/notebooks/122-yolov8-quantization-with-accuracy-control-with-output.rst index a7753269757085..f339f259cadc42 100644 --- a/docs/notebooks/122-yolov8-quantization-with-accuracy-control-with-output.rst +++ b/docs/notebooks/122-yolov8-quantization-with-accuracy-control-with-output.rst @@ -14,7 +14,7 @@ quantization flow allows to apply 8-bit quantization to the model with control of accuracy metric. This is achieved by keeping the most impactful operations within the model in the original precision. The flow is based on the `Basic 8-bit -quantization `__ +quantization `__ and has the following differences: - Besides the calibration dataset, a validation dataset is required to diff --git a/docs/notebooks/124-hugging-face-hub-with-output.rst b/docs/notebooks/124-hugging-face-hub-with-output.rst index 863d6facd0ca71..3c2e3f583e73ae 100644 --- a/docs/notebooks/124-hugging-face-hub-with-output.rst +++ b/docs/notebooks/124-hugging-face-hub-with-output.rst @@ -163,7 +163,7 @@ Converting the Model to OpenVINO IR format We use the OpenVINO `Model conversion -API `__ +API `__ to convert the model (this one is implemented in PyTorch) to OpenVINO Intermediate Representation (IR). diff --git a/docs/notebooks/125-lraspp-segmentation-with-output.rst b/docs/notebooks/125-lraspp-segmentation-with-output.rst index a4caf53a4634fa..eb895e593c51cc 100644 --- a/docs/notebooks/125-lraspp-segmentation-with-output.rst +++ b/docs/notebooks/125-lraspp-segmentation-with-output.rst @@ -178,7 +178,7 @@ Convert the original model to OpenVINO IR Format To convert the original model to OpenVINO IR with ``FP16`` precision, use model conversion API. The models are saved inside the current directory. For more information on how to convert models, see this -`page `__. +`page `__. .. 
code:: ipython3 diff --git a/docs/notebooks/201-vision-monodepth-with-output.rst b/docs/notebooks/201-vision-monodepth-with-output.rst index ca1b1630ed59d3..197e60058aef43 100644 --- a/docs/notebooks/201-vision-monodepth-with-output.rst +++ b/docs/notebooks/201-vision-monodepth-with-output.rst @@ -3,7 +3,7 @@ Monodepth Estimation with OpenVINO This tutorial demonstrates Monocular Depth Estimation with MidasNet in OpenVINO. Model information can be found -`here `__. +`here `__. .. figure:: https://user-images.githubusercontent.com/36741649/127173017-a0bbcf75-db24-4d2c-81b9-616e04ab7cd9.gif :alt: monodepth diff --git a/docs/notebooks/202-vision-superresolution-image-with-output.rst b/docs/notebooks/202-vision-superresolution-image-with-output.rst index ae8261674fb22f..9def0d6d1acbea 100644 --- a/docs/notebooks/202-vision-superresolution-image-with-output.rst +++ b/docs/notebooks/202-vision-superresolution-image-with-output.rst @@ -5,7 +5,7 @@ Super Resolution is the process of enhancing the quality of an image by increasing the pixel count using deep learning. This notebook shows the Single Image Super Resolution (SISR) which takes just one low resolution image. A model called -`single-image-super-resolution-1032 `__, +`single-image-super-resolution-1032 `__, which is available in Open Model Zoo, is used in this tutorial. It is based on the research paper cited below. diff --git a/docs/notebooks/202-vision-superresolution-video-with-output.rst b/docs/notebooks/202-vision-superresolution-video-with-output.rst index bf00da7c4390ff..2312f8ce0e2623 100644 --- a/docs/notebooks/202-vision-superresolution-video-with-output.rst +++ b/docs/notebooks/202-vision-superresolution-video-with-output.rst @@ -5,7 +5,7 @@ Super Resolution is the process of enhancing the quality of an image by increasing the pixel count using deep learning. This notebook applies Single Image Super Resolution (SISR) to frames in a 360p (480×360) video in 360p resolution. A model called -`single-image-super-resolution-1032 `__, +`single-image-super-resolution-1032 `__, which is available in Open Model Zoo, is used in this tutorial. It is based on the research paper cited below. diff --git a/docs/notebooks/203-meter-reader-with-output.rst b/docs/notebooks/203-meter-reader-with-output.rst index 5dbc17fc108e2c..510b21b86fbf3a 100644 --- a/docs/notebooks/203-meter-reader-with-output.rst +++ b/docs/notebooks/203-meter-reader-with-output.rst @@ -580,7 +580,7 @@ select device from dropdown list for running inference using OpenVINO The number of detected meter from detection network can be arbitrary in some scenarios, which means the batch size of segmentation network input is a `dynamic -dimension `__, +dimension `__, and it should be specified as ``-1`` or the ``ov::Dimension()`` instead of a positive number used for static dimensions. In this case, for memory consumption optimization, we can specify the lower and/or upper diff --git a/docs/notebooks/204-segmenter-semantic-segmentation-with-output.rst b/docs/notebooks/204-segmenter-semantic-segmentation-with-output.rst index 02d4ce98e33fef..c6ad363f260a27 100644 --- a/docs/notebooks/204-segmenter-semantic-segmentation-with-output.rst +++ b/docs/notebooks/204-segmenter-semantic-segmentation-with-output.rst @@ -568,7 +568,7 @@ Benchmarking performance of converted model Finally, use the OpenVINO `Benchmark -Tool `__ +Tool `__ to measure the inference performance of the model. 
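A typical invocation from a notebook cell is sketched below; the IR path is a placeholder and the 15-second latency-focused run is an arbitrary choice.

.. code:: ipython3

    # Sketch: measure latency-oriented performance of the converted model on CPU.
    ! benchmark_app -m model/segmenter.xml -d CPU -hint latency -t 15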
NOTE: For more accurate performance, it is recommended to run diff --git a/docs/notebooks/205-vision-background-removal-with-output.rst b/docs/notebooks/205-vision-background-removal-with-output.rst index 46c28a42cd3d2a..ee784cd31c629c 100644 --- a/docs/notebooks/205-vision-background-removal-with-output.rst +++ b/docs/notebooks/205-vision-background-removal-with-output.rst @@ -411,7 +411,7 @@ References - `PIP install openvino-dev `__ - `Model Conversion - API `__ + API `__ - `U^2-Net `__ - U^2-Net research paper: `U^2-Net: Going Deeper with Nested U-Structure for Salient Object diff --git a/docs/notebooks/206-vision-paddlegan-anime-with-output.rst b/docs/notebooks/206-vision-paddlegan-anime-with-output.rst index fe600a809f1f09..55861d59f9f1a2 100644 --- a/docs/notebooks/206-vision-paddlegan-anime-with-output.rst +++ b/docs/notebooks/206-vision-paddlegan-anime-with-output.rst @@ -366,7 +366,7 @@ feeding them to the converted model. Now we use model conversion API and convert the model to OpenVINO IR. **Convert ONNX Model to OpenVINO IR with**\ `Model Conversion Python -API `__ +API `__ .. code:: ipython3 @@ -602,7 +602,7 @@ References - `OpenVINO ONNX support `__ - `Model Conversion - API `__ + API `__ The PaddleGAN code that is shown in this notebook is written by PaddlePaddle Authors and licensed under the Apache 2.0 license. The diff --git a/docs/notebooks/207-vision-paddlegan-superresolution-with-output.rst b/docs/notebooks/207-vision-paddlegan-superresolution-with-output.rst index ee4b4ed06b07cf..c459cd80ca5f69 100644 --- a/docs/notebooks/207-vision-paddlegan-superresolution-with-output.rst +++ b/docs/notebooks/207-vision-paddlegan-superresolution-with-output.rst @@ -275,7 +275,7 @@ Convert PaddlePaddle Model to ONNX 2023-10-30 23:18:47 [INFO] ONNX model saved in model/paddlegan_sr.onnx. -Convert ONNX Model to OpenVINO IR with `Model Conversion Python API `__ +Convert ONNX Model to OpenVINO IR with `Model Conversion Python API `__ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. code:: ipython3 diff --git a/docs/notebooks/208-optical-character-recognition-with-output.rst b/docs/notebooks/208-optical-character-recognition-with-output.rst index 15e9bad1737d39..2f4dc983e89c77 100644 --- a/docs/notebooks/208-optical-character-recognition-with-output.rst +++ b/docs/notebooks/208-optical-character-recognition-with-output.rst @@ -7,9 +7,9 @@ This tutorial demonstrates how to perform optical character recognition tutorial, which shows only text detection. The -`horizontal-text-detection-0001 `__ +`horizontal-text-detection-0001 `__ and -`text-recognition-resnet `__ +`text-recognition-resnet `__ models are used together for text detection and then text recognition. 
In this tutorial, Open Model Zoo tools including Model Downloader, Model @@ -355,9 +355,9 @@ Converting text-recognition-resnet-fc… Conversion command: /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-534/.workspace/scm/ov-notebook/.venv/bin/python -- /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-534/.workspace/scm/ov-notebook/.venv/bin/mo --framework=onnx --output_dir=model/public/text-recognition-resnet-fc/FP16 --model_name=text-recognition-resnet-fc --input=input '--mean_values=input[127.5]' '--scale_values=input[127.5]' --output=output --input_model=model/public/text-recognition-resnet-fc/resnet_fc.onnx '--layout=input(NCHW)' '--input_shape=[1, 1, 32, 100]' --compress_to_fp16=True [ INFO ] Generated IR will be compressed to FP16. If you get lower accuracy, please consider disabling compression explicitly by adding argument --compress_to_fp16=False. - Find more information about compression to FP16 at https://docs.openvino.ai/2023.0/openvino_docs_MO_DG_FP16_Compression.html + Find more information about compression to FP16 at https://docs.openvino.ai/2023.3/openvino_docs_MO_DG_FP16_Compression.html [ INFO ] The model was converted to IR v11, the latest model format that corresponds to the source DL framework input/output format. While IR v11 is backwards compatible with OpenVINO Inference Engine API v1.0, please use API v2.0 (as of 2022.1) to take advantage of the latest improvements in IR v11. - Find more information about API v2.0 and IR v11 at https://docs.openvino.ai/2023.0/openvino_2_0_transition_guide.html + Find more information about API v2.0 and IR v11 at https://docs.openvino.ai/2023.3/openvino_2_0_transition_guide.html [ SUCCESS ] Generated IR version 11 model. [ SUCCESS ] XML file: /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-534/.workspace/scm/ov-notebook/notebooks/208-optical-character-recognition/model/public/text-recognition-resnet-fc/FP16/text-recognition-resnet-fc.xml [ SUCCESS ] BIN file: /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-534/.workspace/scm/ov-notebook/notebooks/208-optical-character-recognition/model/public/text-recognition-resnet-fc/FP16/text-recognition-resnet-fc.bin diff --git a/docs/notebooks/209-handwritten-ocr-with-output.rst b/docs/notebooks/209-handwritten-ocr-with-output.rst index 64e1668efdfb7f..950f6b0743af26 100644 --- a/docs/notebooks/209-handwritten-ocr-with-output.rst +++ b/docs/notebooks/209-handwritten-ocr-with-output.rst @@ -8,9 +8,9 @@ Latin alphabet is available in `notebook This model is capable of processing only one line of symbols at a time. The models used in this notebook are -`handwritten-japanese-recognition-0001 `__ +`handwritten-japanese-recognition-0001 `__ and -`handwritten-simplified-chinese-0001 `__. +`handwritten-simplified-chinese-0001 `__. To decode model outputs as readable text `kondate_nakayosi `__ and diff --git a/docs/notebooks/215-image-inpainting-with-output.rst b/docs/notebooks/215-image-inpainting-with-output.rst index d5d59bd81aeb3c..159caf0e450f19 100644 --- a/docs/notebooks/215-image-inpainting-with-output.rst +++ b/docs/notebooks/215-image-inpainting-with-output.rst @@ -62,7 +62,7 @@ Download ``gmcnn-places2-tf``\ model (this step will be skipped if the model is already downloaded) and then unzip it. Downloaded model stored in TensorFlow frozen graph format. The steps how this frozen graph can be obtained from original model checkpoint can be found in this -`instruction `__ +`instruction `__ .. 
code:: ipython3 @@ -95,7 +95,7 @@ Convert Tensorflow model to OpenVINO IR format The pre-trained model is in TensorFlow format. To use it with OpenVINO, convert it to OpenVINO IR format with model conversion API. For more information about model conversion, see this -`page `__. +`page `__. This step is also skipped if the model is already converted. .. code:: ipython3 diff --git a/docs/notebooks/216-attention-center-with-output.rst b/docs/notebooks/216-attention-center-with-output.rst index 39b3c2bb2e05e6..3b7b31fa3e92bc 100644 --- a/docs/notebooks/216-attention-center-with-output.rst +++ b/docs/notebooks/216-attention-center-with-output.rst @@ -140,7 +140,7 @@ The attention-center model is pre-trained model in TensorFlow Lite format. In this Notebook the model will be converted to OpenVINO IR format with model conversion API. For more information about model conversion, see this -`page `__. +`page `__. This step is also skipped if the model is already converted. Also TFLite models format is supported in OpenVINO by TFLite frontend, diff --git a/docs/notebooks/217-vision-deblur-with-output.rst b/docs/notebooks/217-vision-deblur-with-output.rst index 997f500ee1c802..b8ce289da5d3f4 100644 --- a/docs/notebooks/217-vision-deblur-with-output.rst +++ b/docs/notebooks/217-vision-deblur-with-output.rst @@ -31,7 +31,7 @@ DeblurGAN-v2 in OpenVINO, by first converting the `VITA-Group/DeblurGANv2 `__ model to OpenVINO Intermediate Representation (OpenVINO IR) format. For more information about the model, see the -`documentation `__. +`documentation `__. What is deblurring? ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -223,7 +223,7 @@ an OpenVINO model ready to load on a device and start making predictions. We can save the model on the disk for next usage with ``ov.save_model``. For more information about model conversion Python API, see this -`page `__. +`page `__. Model conversion may take a while. diff --git a/docs/notebooks/219-knowledge-graphs-conve-with-output.rst b/docs/notebooks/219-knowledge-graphs-conve-with-output.rst index 6929be7701ee58..cf62c9253ac99e 100644 --- a/docs/notebooks/219-knowledge-graphs-conve-with-output.rst +++ b/docs/notebooks/219-knowledge-graphs-conve-with-output.rst @@ -514,7 +514,7 @@ Benchmark the converted OpenVINO model using benchmark app The OpenVINO toolkit provides a benchmarking application to gauge the platform specific runtime performance that can be obtained under optimal configuration parameters for a given model. For more details refer to: -https://docs.openvino.ai/2023.0/openvino_inference_engine_tools_benchmark_tool_README.html +https://docs.openvino.ai/2023.3/openvino_inference_engine_tools_benchmark_tool_README.html Here, we use the benchmark application to obtain performance estimates under optimal configuration for the knowledge graph model inference. We @@ -613,7 +613,7 @@ evaluation on the knowledge graph. Then, we determine the platform specific speedup in runtime performance that can be obtained through OpenVINO graph optimizations. 
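A hedged sketch of such a benchmark run is shown below; the model path is a placeholder, and the options use the tool's documented flags (``-m`` model, ``-d`` device, ``-api`` inference mode, ``-t`` duration in seconds).

.. code:: ipython3

    # Measure asynchronous throughput of the converted model for 15 seconds on CPU.
    # Adjust the path and device to your environment.
    !benchmark_app -m models/conve.xml -d CPU -api async -t 15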
To learn more about the OpenVINO performance optimizations, refer to: -https://docs.openvino.ai/2023.0/openvino_docs_optimization_guide_dldt_optimization_guide.html +https://docs.openvino.ai/2023.3/openvino_docs_deployment_optimization_guide_dldt_optimization_guide.html References ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ diff --git a/docs/notebooks/220-cross-lingual-books-alignment-with-output.rst b/docs/notebooks/220-cross-lingual-books-alignment-with-output.rst index 4b7c9d1ac0818f..4f7fc79b33cb01 100644 --- a/docs/notebooks/220-cross-lingual-books-alignment-with-output.rst +++ b/docs/notebooks/220-cross-lingual-books-alignment-with-output.rst @@ -475,7 +475,7 @@ Optimize the Model with OpenVINO The LaBSE model is quite large and can be slow to infer on some hardware, so let’s optimize it with OpenVINO. `Model conversion Python -API `__ +API `__ accepts the PyTorch/Transformers model object and additional information about model inputs. An ``example_input`` is needed to trace the model execution graph, as PyTorch constructs it dynamically during inference. @@ -855,7 +855,7 @@ the pipeline - getting embeddings. You might wonder why, when using OpenVINO, you need to compile the model after reading it. There are two main reasons for this: 1. Compatibility with different devices. The model can be compiled to run on a `specific -device `__, +device `__, like CPU, GPU or GNA. Each device may work with different data types, support different features, and gain performance by changing the neural network for a specific computing model. With OpenVINO, you do not need @@ -864,13 +864,13 @@ hardware. A universal OpenVINO model representation is enough. 1. Optimization for different scenarios. For example, one scenario prioritizes minimizing the *time between starting and finishing model inference* (`latency-oriented -optimization `__). +optimization `__). In our case, it is more important *how many texts per second the model can process* (`throughput-oriented -optimization `__). +optimization `__). To get a throughput-optimized model, pass a `performance -hint `__ +hint `__ as a configuration during compilation. Then OpenVINO selects the optimal parameters for execution on the available hardware. @@ -934,7 +934,7 @@ advance and fill it in as the inference requests are executed. Let’s compare the models and plot the results. **Note**: To get a more accurate benchmark, use the `Benchmark Python - Tool `__ + Tool `__ .. code:: ipython3 @@ -1043,8 +1043,8 @@ boost. 
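As a small illustration of passing such a hint, the sketch below compiles the same model twice, once optimized for latency and once for throughput; the IR path is a placeholder.

.. code:: ipython3

    import openvino as ov

    core = ov.Core()
    model = core.read_model("labse.xml")  # placeholder path to the converted model

    # Latency-oriented compilation: minimize the time of a single inference.
    latency_model = core.compile_model(model, "CPU", {"PERFORMANCE_HINT": "LATENCY"})

    # Throughput-oriented compilation: maximize texts processed per second.
    throughput_model = core.compile_model(model, "CPU", {"PERFORMANCE_HINT": "THROUGHPUT"})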
Here are useful links with information about the techniques used in this notebook: - `OpenVINO performance -hints `__ +hints `__ - `OpenVINO Async -API `__ +API `__ - `Throughput -Optimizations `__ +Optimizations `__ diff --git a/docs/notebooks/222-vision-image-colorization-with-output.rst b/docs/notebooks/222-vision-image-colorization-with-output.rst index ae26763e46459d..19f57cb85cd0c3 100644 --- a/docs/notebooks/222-vision-image-colorization-with-output.rst +++ b/docs/notebooks/222-vision-image-colorization-with-output.rst @@ -233,9 +233,9 @@ respectively Conversion command: /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-534/.workspace/scm/ov-notebook/.venv/bin/python -- /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-534/.workspace/scm/ov-notebook/.venv/bin/mo --framework=onnx --output_dir=models/public/colorization-v2/FP16 --model_name=colorization-v2 --input=data_l --output=color_ab --input_model=models/public/colorization-v2/colorization-v2-eccv16.onnx '--layout=data_l(NCHW)' '--input_shape=[1, 1, 256, 256]' --compress_to_fp16=True [ INFO ] Generated IR will be compressed to FP16. If you get lower accuracy, please consider disabling compression explicitly by adding argument --compress_to_fp16=False. - Find more information about compression to FP16 at https://docs.openvino.ai/2023.0/openvino_docs_MO_DG_FP16_Compression.html + Find more information about compression to FP16 at https://docs.openvino.ai/2023.3/openvino_docs_MO_DG_FP16_Compression.html [ INFO ] The model was converted to IR v11, the latest model format that corresponds to the source DL framework input/output format. While IR v11 is backwards compatible with OpenVINO Inference Engine API v1.0, please use API v2.0 (as of 2022.1) to take advantage of the latest improvements in IR v11. - Find more information about API v2.0 and IR v11 at https://docs.openvino.ai/2023.0/openvino_2_0_transition_guide.html + Find more information about API v2.0 and IR v11 at https://docs.openvino.ai/2023.3/openvino_2_0_transition_guide.html [ SUCCESS ] Generated IR version 11 model. [ SUCCESS ] XML file: /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-534/.workspace/scm/ov-notebook/notebooks/222-vision-image-colorization/models/public/colorization-v2/FP16/colorization-v2.xml [ SUCCESS ] BIN file: /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-534/.workspace/scm/ov-notebook/notebooks/222-vision-image-colorization/models/public/colorization-v2/FP16/colorization-v2.bin diff --git a/docs/notebooks/223-text-prediction-with-output.rst b/docs/notebooks/223-text-prediction-with-output.rst index 53b9e25f31fbf5..b4118126c71b8f 100644 --- a/docs/notebooks/223-text-prediction-with-output.rst +++ b/docs/notebooks/223-text-prediction-with-output.rst @@ -178,7 +178,7 @@ converted to OpenVINO Intermediate Representation (IR) format. HuggingFace provides a GPT-Neo model in PyTorch format, which is supported in OpenVINO via Model Conversion API. The ``ov.convert_model`` Python function of `model conversion -API `__ +API `__ can be used for converting the model. The function returns instance of OpenVINO Model class, which is ready to use in Python interface. 
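A minimal, self-contained sketch of that PyTorch conversion path is shown below; ``TinyNet`` is a stand-in module used only for illustration, not one of the models from these notebooks. The ``example_input`` lets OpenVINO trace the execution graph that PyTorch builds dynamically.

.. code:: ipython3

    import openvino as ov
    import torch

    class TinyNet(torch.nn.Module):  # stand-in for the PyTorch model being converted
        def __init__(self):
            super().__init__()
            self.linear = torch.nn.Linear(16, 4)

        def forward(self, x):
            return self.linear(x)

    # example_input is required to trace the dynamically constructed graph.
    ov_model = ov.convert_model(TinyNet(), example_input=torch.zeros(1, 16))
    ov.save_model(ov_model, "tiny_net.xml")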
The Model can also be save on device in OpenVINO IR format for future diff --git a/docs/notebooks/224-3D-segmentation-point-clouds-with-output.rst b/docs/notebooks/224-3D-segmentation-point-clouds-with-output.rst index d976db28c19a15..51abbfe9672264 100644 --- a/docs/notebooks/224-3D-segmentation-point-clouds-with-output.rst +++ b/docs/notebooks/224-3D-segmentation-point-clouds-with-output.rst @@ -93,7 +93,7 @@ function returns an OpenVINO model ready to load on a device and start making predictions. We can save it on a disk for next usage with ``ov.save_model``. For more information about model conversion Python API, see this -`page `__. +`page `__. .. code:: ipython3 diff --git a/docs/notebooks/226-yolov7-optimization-with-output.rst b/docs/notebooks/226-yolov7-optimization-with-output.rst index 1e45938931cbd1..77bb7c91c825d5 100644 --- a/docs/notebooks/226-yolov7-optimization-with-output.rst +++ b/docs/notebooks/226-yolov7-optimization-with-output.rst @@ -304,7 +304,7 @@ While ONNX models are directly supported by OpenVINO runtime, it can be useful to convert them to IR format to take the advantage of OpenVINO model conversion API features. The ``ov.convert_model`` python function of `model conversion -API `__ +API `__ can be used for converting the model. The function returns instance of OpenVINO Model class, which is ready to use in Python interface. However, it can also be save on device in OpenVINO IR format using @@ -996,7 +996,7 @@ Compare Performance of the Original and Quantized Models Finally, use the OpenVINO `Benchmark -Tool `__ +Tool `__ to measure the inference performance of the ``FP32`` and ``INT8`` models. diff --git a/docs/notebooks/228-clip-zero-shot-convert-with-output.rst b/docs/notebooks/228-clip-zero-shot-convert-with-output.rst index f1f0a51277e204..c4c62ccbed0101 100644 --- a/docs/notebooks/228-clip-zero-shot-convert-with-output.rst +++ b/docs/notebooks/228-clip-zero-shot-convert-with-output.rst @@ -160,7 +160,7 @@ For best results with OpenVINO, it is recommended to convert the model to OpenVINO IR format. OpenVINO supports PyTorch via Model conversion API. To convert the PyTorch model to OpenVINO IR format we will use ``ov.convert_model`` of `model conversion -API `__. +API `__. The ``ov.convert_model`` Python function returns an OpenVINO Model object ready to load on the device and start making predictions. We can save it on disk for the next usage with ``ov.save_model``. diff --git a/docs/notebooks/229-distilbert-sequence-classification-with-output.rst b/docs/notebooks/229-distilbert-sequence-classification-with-output.rst index a6ee3609e825fc..6b0c8b1df69a52 100644 --- a/docs/notebooks/229-distilbert-sequence-classification-with-output.rst +++ b/docs/notebooks/229-distilbert-sequence-classification-with-output.rst @@ -112,7 +112,7 @@ Convert Model to OpenVINO Intermediate Representation format `Model conversion -API `__ +API `__ facilitates the transition between training and deployment environments, performs static model analysis, and adjusts deep learning models for optimal execution on end-point target devices. @@ -144,14 +144,14 @@ optimal execution on end-point target devices. OpenVINO™ Runtime uses the `Infer -Request `__ +Request `__ mechanism which enables running models on different devices in asynchronous or synchronous manners. The model graph is sent as an argument to the OpenVINO API and an inference request is created. The default inference mode is AUTO but it can be changed according to requirements and hardware available. 
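The sketch below shows the same Infer Request mechanism in Python, in both synchronous and asynchronous modes, assuming a single-input model with a static shape; the IR path is a placeholder.

.. code:: ipython3

    import numpy as np
    import openvino as ov

    core = ov.Core()
    model = core.read_model("model.xml")                # placeholder IR path
    compiled_model = core.compile_model(model, "AUTO")  # AUTO selects an available device

    infer_request = compiled_model.create_infer_request()
    data = np.zeros(compiled_model.input(0).shape, dtype=np.float32)

    # Synchronous mode: the call blocks until the result is ready.
    result = infer_request.infer({0: data})

    # Asynchronous mode: start the request and do other work until it completes.
    infer_request.start_async({0: data})
    infer_request.wait()
    output = infer_request.get_output_tensor(0).data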
You can explore the different inference modes and their usage `in -documentation. `__ +documentation. `__ .. code:: ipython3 diff --git a/docs/notebooks/230-yolov8-instance-segmentation-with-output.rst b/docs/notebooks/230-yolov8-instance-segmentation-with-output.rst index 0aabef1688e35a..a920d4c1087bd1 100644 --- a/docs/notebooks/230-yolov8-instance-segmentation-with-output.rst +++ b/docs/notebooks/230-yolov8-instance-segmentation-with-output.rst @@ -987,7 +987,7 @@ Compare performance of the Original and Quantized Models Finally, use the OpenVINO `Benchmark -Tool `__ +Tool `__ to measure the inference performance of the ``FP32`` and ``INT8`` models. diff --git a/docs/notebooks/230-yolov8-keypoint-detection-with-output.rst b/docs/notebooks/230-yolov8-keypoint-detection-with-output.rst index e79775191c5464..b07d5900184bda 100644 --- a/docs/notebooks/230-yolov8-keypoint-detection-with-output.rst +++ b/docs/notebooks/230-yolov8-keypoint-detection-with-output.rst @@ -979,7 +979,7 @@ Compare performance of the Original and Quantized Models Finally, use the OpenVINO `Benchmark -Tool `__ +Tool `__ to measure the inference performance of the ``FP32`` and ``INT8`` models. diff --git a/docs/notebooks/230-yolov8-object-detection-with-output.rst b/docs/notebooks/230-yolov8-object-detection-with-output.rst index d45e71089959df..95a2f00d1fab0e 100644 --- a/docs/notebooks/230-yolov8-object-detection-with-output.rst +++ b/docs/notebooks/230-yolov8-object-detection-with-output.rst @@ -949,7 +949,7 @@ Compare performance object detection models Finally, use the OpenVINO `Benchmark -Tool `__ +Tool `__ to measure the inference performance of the ``FP32`` and ``INT8`` models. @@ -1221,7 +1221,7 @@ CPU as part of an application. This will improve selected device utilization. For more information, refer to the overview of `Preprocessing -API `__. +API `__. For example, we can integrate converting input data layout and normalization defined in ``image_to_tensor`` function. diff --git a/docs/notebooks/231-instruct-pix2pix-image-editing-with-output.rst b/docs/notebooks/231-instruct-pix2pix-image-editing-with-output.rst index bdc9b80be3adb4..913f985aaddd1d 100644 --- a/docs/notebooks/231-instruct-pix2pix-image-editing-with-output.rst +++ b/docs/notebooks/231-instruct-pix2pix-image-editing-with-output.rst @@ -101,7 +101,7 @@ Convert Models to OpenVINO IR OpenVINO supports PyTorch models using `Model Conversion -API `__ +API `__ to convert the model to IR format. ``ov.convert_model`` function accepts PyTorch model object and example input and then converts it to ``ov.Model`` class instance that ready to use for loading on device or diff --git a/docs/notebooks/237-segment-anything-with-output.rst b/docs/notebooks/237-segment-anything-with-output.rst index 08df47cf6f7d49..7c5475f0c56881 100644 --- a/docs/notebooks/237-segment-anything-with-output.rst +++ b/docs/notebooks/237-segment-anything-with-output.rst @@ -1528,7 +1528,7 @@ Compare Performance of the Original and Quantized Models Finally, use the OpenVINO `Benchmark -Tool `__ +Tool `__ to measure the inference performance of the ``FP32`` and ``INT8`` models. 
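A hedged sketch of that Preprocessing API integration is shown below: the application supplies ``NHWC`` ``uint8`` frames and OpenVINO converts the layout and normalizes them as part of the compiled model. The IR path and the 255 scale factor are assumptions for illustration.

.. code:: ipython3

    import openvino as ov
    from openvino.preprocess import PrePostProcessor

    core = ov.Core()
    model = core.read_model("yolov8.xml")  # placeholder IR path

    ppp = PrePostProcessor(model)
    # Describe the tensor the application will actually provide: HWC uint8 frames.
    ppp.input().tensor().set_element_type(ov.Type.u8)
    ppp.input().tensor().set_layout(ov.Layout("NHWC"))
    # Convert to the format the model expects and normalize to the [0, 1] range.
    ppp.input().preprocess().convert_element_type(ov.Type.f32)
    ppp.input().preprocess().convert_layout(ov.Layout("NCHW"))
    ppp.input().preprocess().scale(255.0)
    model = ppp.build()

    compiled_model = core.compile_model(model, "CPU")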
diff --git a/docs/notebooks/238-deep-floyd-if-optimize-with-output.rst b/docs/notebooks/238-deep-floyd-if-optimize-with-output.rst index 6d8f92e26dabd6..929faec71c4f5c 100644 --- a/docs/notebooks/238-deep-floyd-if-optimize-with-output.rst +++ b/docs/notebooks/238-deep-floyd-if-optimize-with-output.rst @@ -736,7 +736,7 @@ Compare performance time of the converted and optimized models To measure the inference performance of OpenVINO FP16 and INT8 models, use `Benchmark -Tool `__. +Tool `__. **NOTE**: For more accurate performance, run ``benchmark_app`` in a terminal/command prompt after closing other applications. Run diff --git a/docs/notebooks/239-image-bind-convert-with-output.rst b/docs/notebooks/239-image-bind-convert-with-output.rst index 2d3ea2a420a527..acf2275cb63ac6 100644 --- a/docs/notebooks/239-image-bind-convert-with-output.rst +++ b/docs/notebooks/239-image-bind-convert-with-output.rst @@ -204,7 +204,7 @@ Convert Model to OpenVINO Intermediate Representation (IR) format OpenVINO supports PyTorch through Model Conversion API. You will use `model conversion Python -API `__ +API `__ to convert model to IR format. The ``ov.convert_model`` function returns OpenVINO Model class instance ready to load on a device or save on a disk for next loading using ``ov.save_model``. diff --git a/docs/notebooks/242-freevc-voice-conversion-with-output.rst b/docs/notebooks/242-freevc-voice-conversion-with-output.rst index 52818de95578ec..2607606dd94d9a 100644 --- a/docs/notebooks/242-freevc-voice-conversion-with-output.rst +++ b/docs/notebooks/242-freevc-voice-conversion-with-output.rst @@ -288,7 +288,7 @@ model. The obtained model is ready to use and to be loaded on a device using ``compile_model`` or can be saved on a disk using the ``ov.save_model`` function. The ``read_model`` method loads a saved model from a disk. For more information about model conversion, see this -`page `__. +`page `__. ### Convert Prior Encoder. First we convert WavLM model, as a part of Convert Prior Encoder, to the ONNX format, then to OpenVINO’s IR format. We keep the original name of diff --git a/docs/notebooks/243-tflite-selfie-segmentation-with-output.rst b/docs/notebooks/243-tflite-selfie-segmentation-with-output.rst index 5bf7ab1f87b2aa..b4d7eeff6ef819 100644 --- a/docs/notebooks/243-tflite-selfie-segmentation-with-output.rst +++ b/docs/notebooks/243-tflite-selfie-segmentation-with-output.rst @@ -133,9 +133,9 @@ instance which represents this model. The obtained model is ready to use and to be loaded on the device using ``compile_model`` or can be saved on a disk using the ``ov.save_model`` function reducing loading time for the next running. For more information about model conversion, see this -`page `__. +`page `__. For TensorFlow Lite, refer to the `models -support `__. +support `__. .. code:: ipython3 diff --git a/docs/notebooks/257-llava-multimodal-chatbot-with-output.rst b/docs/notebooks/257-llava-multimodal-chatbot-with-output.rst index 4db0fb97ec0ea4..ae36bcd50fde1b 100644 --- a/docs/notebooks/257-llava-multimodal-chatbot-with-output.rst +++ b/docs/notebooks/257-llava-multimodal-chatbot-with-output.rst @@ -228,7 +228,7 @@ The code below preparing function for converting LLaVA model to OpenVINO Intermediate Representation format. It splits model on parts described above, prepare example inputs for each part and convert each part using `OpenVINO Model Conversion -API `__. +API `__. 
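For reference, the sketch below reads a TensorFlow Lite model directly, compiles it, and serializes it to OpenVINO IR so the next run can skip conversion and load faster; the file names are placeholders.

.. code:: ipython3

    import openvino as ov

    core = ov.Core()

    # TensorFlow Lite models can be read directly via the TFLite frontend.
    model = core.read_model("selfie_segmentation.tflite")  # placeholder path

    # Compile for immediate use, or serialize to IR to reduce loading time later.
    compiled_model = core.compile_model(model, "CPU")
    ov.save_model(model, "selfie_segmentation.xml")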
``ov.convert_model`` function accepts PyTorch model instance and returns ``ov.Model`` object that represent model in OpenVINO format. It is ready to use for loading on device using ``ov.compile_model`` or can be saved @@ -535,7 +535,7 @@ improves performance even more, but introduces a minor drop in prediction quality. More details about weights compression, can be found in `OpenVINO -documentation `__. +documentation `__. **Note**: There is no speedup for INT4 compressed models on dGPU. diff --git a/docs/notebooks/260-pix2struct-docvqa-with-output.rst b/docs/notebooks/260-pix2struct-docvqa-with-output.rst index 8cb08a85db2341..99082a1ae10e2f 100644 --- a/docs/notebooks/260-pix2struct-docvqa-with-output.rst +++ b/docs/notebooks/260-pix2struct-docvqa-with-output.rst @@ -231,7 +231,7 @@ by ``Pix2StructProcessor.decode`` Let’s see the model in action. For testing the model, we will use a screenshot from `OpenVINO -documentation `__ +documentation `__ .. code:: ipython3 diff --git a/docs/notebooks/270-sound-generation-audioldm2-with-output.rst b/docs/notebooks/270-sound-generation-audioldm2-with-output.rst index 4999c60715a7f3..f089c1b99a0659 100644 --- a/docs/notebooks/270-sound-generation-audioldm2-with-output.rst +++ b/docs/notebooks/270-sound-generation-audioldm2-with-output.rst @@ -136,7 +136,7 @@ Convert models to OpenVINO Intermediate representation (IR) format `Model conversion -API `__ +API `__ enables direct conversion of PyTorch models backing the pipeline. We need to provide a model object, input data for model tracing to ``ov.convert_model`` function to obtain OpenVINO ``ov.Model`` object diff --git a/docs/notebooks/301-tensorflow-training-openvino-nncf-with-output.rst b/docs/notebooks/301-tensorflow-training-openvino-nncf-with-output.rst index 425e0a521a2ec6..2c5c02d1d847fc 100644 --- a/docs/notebooks/301-tensorflow-training-openvino-nncf-with-output.rst +++ b/docs/notebooks/301-tensorflow-training-openvino-nncf-with-output.rst @@ -410,11 +410,11 @@ Download Intermediate Representation (IR) model. ir_model = core.read_model(model_xml) Use `Basic Quantization -Flow `__. +Flow `__. To use the most advanced quantization flow that allows to apply 8-bit quantization to the model with accuracy control see `Quantizing with accuracy -control `__. +control `__. .. code:: ipython3 @@ -630,7 +630,7 @@ Compare Inference Speed Measure inference speed with the `OpenVINO Benchmark -App `__. +App `__. Benchmark App is a command line tool that measures raw inference performance for a specified OpenVINO IR model. Run @@ -640,7 +640,7 @@ the ``-m`` parameter with asynchronous inference on CPU, for one minute. Use the ``-d`` parameter to test performance on a different device, for example an Intel integrated Graphics (iGPU), and ``-t`` to set the number of seconds to run inference. See the -`documentation `__ +`documentation `__ for more information. This tutorial uses a wrapper function from `Notebook @@ -922,7 +922,7 @@ cached to the ``model_cache`` directory. With a recent Intel CPU, the best performance can often be achieved by doing inference on both the CPU and the iGPU, with OpenVINO’s `Multi Device -Plugin `__. +Plugin `__. It takes a bit longer to load a model on GPU than on CPU, so this benchmark will take a bit longer to complete than the CPU benchmark. 
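A minimal sketch of that basic quantization flow with NNCF is shown below. The IR path, the 180×180 input size, and the random calibration tensors are placeholders; a real run should use a few hundred representative images from the validation set instead.

.. code:: ipython3

    import numpy as np
    import nncf
    import openvino as ov

    core = ov.Core()
    model = core.read_model("flower_classifier.xml")  # placeholder IR path

    # Stand-in calibration data; replace with real preprocessed images.
    calibration_data = [
        np.random.rand(1, 180, 180, 3).astype(np.float32) for _ in range(300)
    ]

    # The transform function maps one dataset item to the model input format.
    calibration_dataset = nncf.Dataset(calibration_data, lambda item: item)
    quantized_model = nncf.quantize(model, calibration_dataset)

    ov.save_model(quantized_model, "flower_classifier_int8.xml")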
diff --git a/docs/notebooks/302-pytorch-quantization-aware-training-with-output.rst b/docs/notebooks/302-pytorch-quantization-aware-training-with-output.rst index 4b79ee714072c6..b44100bea8b20b 100644 --- a/docs/notebooks/302-pytorch-quantization-aware-training-with-output.rst +++ b/docs/notebooks/302-pytorch-quantization-aware-training-with-output.rst @@ -718,7 +718,7 @@ Benchmark Model Performance by Computing Inference Time Finally, measure the inference performance of the ``FP32`` and ``INT8`` models, using `Benchmark -Tool `__ +Tool `__ - inference performance measurement tool in OpenVINO. By default, Benchmark Tool runs inference for 60 seconds in asynchronous mode on CPU. It returns inference speed as latency (milliseconds per image) and diff --git a/docs/notebooks/305-tensorflow-quantization-aware-training-with-output.rst b/docs/notebooks/305-tensorflow-quantization-aware-training-with-output.rst index f8e575bdcf3b80..b73b88c965a25f 100644 --- a/docs/notebooks/305-tensorflow-quantization-aware-training-with-output.rst +++ b/docs/notebooks/305-tensorflow-quantization-aware-training-with-output.rst @@ -447,7 +447,7 @@ Export Models to OpenVINO Intermediate Representation (IR) Use model conversion Python API to convert the models to OpenVINO IR. For more information about model conversion, see this -`page `__. +`page `__. Executing this command may take a while. @@ -477,7 +477,7 @@ Benchmark Model Performance by Computing Inference Time Finally, measure the inference performance of the ``FP32`` and ``INT8`` models, using `Benchmark -Tool `__ +Tool `__ - an inference performance measurement tool in OpenVINO. By default, Benchmark Tool runs inference for 60 seconds in asynchronous mode on CPU. It returns inference speed as latency (milliseconds per image) and diff --git a/docs/notebooks/401-object-detection-with-output.rst b/docs/notebooks/401-object-detection-with-output.rst index fa57f7eae4cc92..fbf1ee804dc0cc 100644 --- a/docs/notebooks/401-object-detection-with-output.rst +++ b/docs/notebooks/401-object-detection-with-output.rst @@ -154,7 +154,7 @@ Convert the Model The pre-trained model is in TensorFlow format. To use it with OpenVINO, convert it to OpenVINO IR format, using `model conversion Python -API `__ +API `__ (``mo.convert_model`` function). If the model has been already converted, this step is skipped. diff --git a/docs/notebooks/406-3D-pose-estimation-with-output.rst b/docs/notebooks/406-3D-pose-estimation-with-output.rst index c55447a9e76387..c4555288cd3192 100644 --- a/docs/notebooks/406-3D-pose-estimation-with-output.rst +++ b/docs/notebooks/406-3D-pose-estimation-with-output.rst @@ -242,7 +242,7 @@ IR format. Conversion command: /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-561/.workspace/scm/ov-notebook/.venv/bin/python -- /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-561/.workspace/scm/ov-notebook/.venv/bin/mo --framework=onnx --output_dir=model/public/human-pose-estimation-3d-0001/FP32 --model_name=human-pose-estimation-3d-0001 --input=data '--mean_values=data[128.0,128.0,128.0]' '--scale_values=data[255.0,255.0,255.0]' --output=features,heatmaps,pafs --input_model=model/public/human-pose-estimation-3d-0001/human-pose-estimation-3d-0001.onnx '--layout=data(NCHW)' '--input_shape=[1, 3, 256, 448]' --compress_to_fp16=False [ INFO ] The model was converted to IR v11, the latest model format that corresponds to the source DL framework input/output format. 
While IR v11 is backwards compatible with OpenVINO Inference Engine API v1.0, please use API v2.0 (as of 2022.1) to take advantage of the latest improvements in IR v11. - Find more information about API v2.0 and IR v11 at https://docs.openvino.ai/2023.0/openvino_2_0_transition_guide.html + Find more information about API v2.0 and IR v11 at https://docs.openvino.ai/2023.3/openvino_2_0_transition_guide.html [ SUCCESS ] Generated IR version 11 model. [ SUCCESS ] XML file: /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-561/.workspace/scm/ov-notebook/notebooks/406-3D-pose-estimation-webcam/model/public/human-pose-estimation-3d-0001/FP32/human-pose-estimation-3d-0001.xml [ SUCCESS ] BIN file: /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-561/.workspace/scm/ov-notebook/notebooks/406-3D-pose-estimation-webcam/model/public/human-pose-estimation-3d-0001/FP32/human-pose-estimation-3d-0001.bin diff --git a/docs/notebooks/407-person-tracking-with-output.rst b/docs/notebooks/407-person-tracking-with-output.rst index 067bc2c26bdd9b..6ca163578d0350 100644 --- a/docs/notebooks/407-person-tracking-with-output.rst +++ b/docs/notebooks/407-person-tracking-with-output.rst @@ -185,18 +185,18 @@ Representation (OpenVINO IR). and post-processing. In this case, `person detection -model `__ +model `__ is deployed to detect the person in each frame of the video, and `reidentification -model `__ +model `__ is used to output embedding vector to match a pair of images of a person by the cosine distance. If you want to download another model (``person-detection-xxx`` from `Object Detection Models -list `__, +list `__, ``person-reidentification-retail-xxx`` from `Reidentification Models -list `__), +list `__), replace the name of the model in the code below. .. code:: ipython3 diff --git a/docs/scripts/apidoc.py b/docs/scripts/apidoc.py index 59549509851f01..898eb97cbea484 100644 --- a/docs/scripts/apidoc.py +++ b/docs/scripts/apidoc.py @@ -125,8 +125,7 @@ def get_compound_data(compound, args, hide=True): title = compound.findtext("name") refs = [] for ing in root.iter('innergroup'): - if ' ' not in ing.text: - refs.append((ing.text, ing.get('refid') + '.rst')) + refs.append((ing.text, ing.get('refid') + '.rst')) for inc in root.iter('innerclass'): if ' ' not in inc.text: refs.append((inc.text, inc.get('refid') + '.rst')) diff --git a/samples/c/hello_classification/README.md b/samples/c/hello_classification/README.md index c32717191f9ced..f42cd925270ef2 100644 --- a/samples/c/hello_classification/README.md +++ b/samples/c/hello_classification/README.md @@ -2,18 +2,18 @@ This sample demonstrates how to execute an inference of image classification networks like AlexNet and GoogLeNet using Synchronous Inference Request API and input auto-resize feature. 
-For more detailed information on how this sample works, check the dedicated [article](https://docs.openvino.ai/2023.2/openvino_inference_engine_ie_bridges_c_samples_hello_classification_README.html) +For more detailed information on how this sample works, check the dedicated [article](https://docs.openvino.ai/2023.3/openvino_inference_engine_ie_bridges_c_samples_hello_classification_README.html) ## Requirements | Options | Values | | ---------------------------| ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| Validated Models | [alexnet](https://docs.openvino.ai/2023.2/omz_models_model_alexnet.html), [googlenet-v1](https://docs.openvino.ai/2023.2/omz_models_model_googlenet_v1.html) | +| Validated Models | [alexnet](https://docs.openvino.ai/2023.3/omz_models_model_alexnet.html), [googlenet-v1](https://docs.openvino.ai/2023.3/omz_models_model_googlenet_v1.html) | | Model Format | Inference Engine Intermediate Representation (\*.xml + \*.bin), ONNX (\*.onnx) | | Validated images | The sample uses OpenCV\* to [read input image](https://docs.opencv.org/master/d4/da8/group__imgcodecs.html#ga288b8b3da0892bd651fce07b3bbd3a56) (\*.bmp, \*.png) | -| Supported devices | [All](https://docs.openvino.ai/2023.2/openvino_docs_OV_UG_supported_plugins_Supported_Devices.html) | -| Other language realization | [C++](https://docs.openvino.ai/2023.2/openvino_inference_engine_samples_hello_classification_README.html), | -| | [Python](https://docs.openvino.ai/2023.2/openvino_inference_engine_ie_bridges_python_sample_hello_classification_README.html) | +| Supported devices | [All](https://docs.openvino.ai/2023.3/openvino_docs_OV_UG_supported_plugins_Supported_Devices.html) | +| Other language realization | [C++](https://docs.openvino.ai/2023.3/openvino_inference_engine_samples_hello_classification_README.html), | +| | [Python](https://docs.openvino.ai/2023.3/openvino_inference_engine_ie_bridges_python_sample_hello_classification_README.html) | Hello Classification C sample application demonstrates how to use the C API from OpenVINO in applications. diff --git a/samples/c/hello_nv12_input_classification/README.md b/samples/c/hello_nv12_input_classification/README.md index cff375d8e04770..c6ca2ea77de0dc 100644 --- a/samples/c/hello_nv12_input_classification/README.md +++ b/samples/c/hello_nv12_input_classification/README.md @@ -4,17 +4,17 @@ This sample demonstrates how to execute an inference of image classification net Hello NV12 Input Classification C Sample demonstrates how to use the NV12 automatic input pre-processing API in your applications. 
-For more detailed information on how this sample works, check the dedicated [article](https://docs.openvino.ai/2023.2/openvino_inference_engine_ie_bridges_c_samples_hello_nv12_input_classification_README.html) +For more detailed information on how this sample works, check the dedicated [article](https://docs.openvino.ai/2023.3/openvino_inference_engine_ie_bridges_c_samples_hello_nv12_input_classification_README.html) ## Requirements | Options | Values | | ----------------------------| ---------------------------------------------------------------------------------------------------------------------| -| Validated Models | [alexnet](https://docs.openvino.ai/2023.2/omz_models_model_alexnet.html) | +| Validated Models | [alexnet](https://docs.openvino.ai/2023.3/omz_models_model_alexnet.html) | | Model Format | Inference Engine Intermediate Representation (\*.xml + \*.bin), ONNX (\*.onnx) | | Validated images | An uncompressed image in the NV12 color format - \*.yuv | -| Supported devices | [All](https://docs.openvino.ai/2023.2/openvino_docs_OV_UG_supported_plugins_Supported_Devices.html) | -| Other language realization | [C++](https://docs.openvino.ai/2023.2/openvino_inference_engine_samples_hello_nv12_input_classification_README.html) | +| Supported devices | [All](https://docs.openvino.ai/2023.3/openvino_docs_OV_UG_supported_plugins_Supported_Devices.html) | +| Other language realization | [C++](https://docs.openvino.ai/2023.3/openvino_inference_engine_samples_hello_nv12_input_classification_README.html) | The following C++ API is used in the application: @@ -28,6 +28,6 @@ The following C++ API is used in the application: | | ``ov_preprocess_preprocess_steps_convert_color`` | | -Basic Inference Engine API is covered by [Hello Classification C sample](https://docs.openvino.ai/2023.2/openvino_inference_engine_ie_bridges_c_samples_hello_classification_README.html). +Basic Inference Engine API is covered by [Hello Classification C sample](https://docs.openvino.ai/2023.3/openvino_inference_engine_ie_bridges_c_samples_hello_classification_README.html). diff --git a/samples/cpp/benchmark/sync_benchmark/README.md b/samples/cpp/benchmark/sync_benchmark/README.md index 49877e0d839495..697d0efc439e05 100644 --- a/samples/cpp/benchmark/sync_benchmark/README.md +++ b/samples/cpp/benchmark/sync_benchmark/README.md @@ -1,8 +1,8 @@ # Sync Benchmark C++ Sample -This sample demonstrates how to estimate performance of a model using Synchronous Inference Request API. It makes sense to use synchronous inference only in latency oriented scenarios. Models with static input shapes are supported. Unlike [demos](https://docs.openvino.ai/2023.2/omz_demos.html) this sample doesn't have other configurable command line arguments. Feel free to modify sample's source code to try out different options. +This sample demonstrates how to estimate performance of a model using Synchronous Inference Request API. It makes sense to use synchronous inference only in latency oriented scenarios. Models with static input shapes are supported. Unlike [demos](https://docs.openvino.ai/2023.3/omz_demos.html) this sample doesn't have other configurable command line arguments. Feel free to modify sample's source code to try out different options. 
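The sample itself is C++, but the same latency-oriented idea (one synchronous request at a time) can be sketched with the Python API, assuming a static single-input model; the IR path is a placeholder.

```python
import time
import numpy as np
import openvino as ov

core = ov.Core()
compiled_model = core.compile_model(core.read_model("model.xml"), "CPU")  # placeholder IR
infer_request = compiled_model.create_infer_request()
data = np.zeros(compiled_model.input(0).shape, dtype=np.float32)

# Latency-oriented measurement: run synchronous requests back to back.
latencies = []
for _ in range(100):
    start = time.perf_counter()
    infer_request.infer({0: data})
    latencies.append((time.perf_counter() - start) * 1000)

print(f"median latency: {sorted(latencies)[len(latencies) // 2]:.2f} ms")
```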
-For more detailed information on how this sample works, check the dedicated [article](https://docs.openvino.ai/2023.2/openvino_inference_engine_samples_sync_benchmark_README.html) +For more detailed information on how this sample works, check the dedicated [article](https://docs.openvino.ai/2023.3/openvino_inference_engine_samples_sync_benchmark_README.html) ## Requirements @@ -14,8 +14,8 @@ For more detailed information on how this sample works, check the dedicated [art | | [face-detection-0200](https://docs.openvino.ai/nightly/omz_models_model_face_detection_0200.html) | | Model Format | OpenVINO™ toolkit Intermediate Representation | | | (\*.xml + \*.bin), ONNX (\*.onnx) | -| Supported devices | [All](https://docs.openvino.ai/2023.2/openvino_docs_OV_UG_supported_plugins_Supported_Devices.html) | -| Other language realization | [Python](https://docs.openvino.ai/2023.2/openvino_inference_engine_ie_bridges_python_sample_sync_benchmark_README.html) | +| Supported devices | [All](https://docs.openvino.ai/2023.3/openvino_docs_OV_UG_supported_plugins_Supported_Devices.html) | +| Other language realization | [Python](https://docs.openvino.ai/2023.3/openvino_inference_engine_ie_bridges_python_sample_sync_benchmark_README.html) | The following C++ API is used in the application: diff --git a/samples/cpp/benchmark/throughput_benchmark/README.md b/samples/cpp/benchmark/throughput_benchmark/README.md index 228524ea858cd4..0331a9d2ce7664 100644 --- a/samples/cpp/benchmark/throughput_benchmark/README.md +++ b/samples/cpp/benchmark/throughput_benchmark/README.md @@ -1,10 +1,10 @@ # Throughput Benchmark C++ Sample -This sample demonstrates how to estimate performance of a model using Asynchronous Inference Request API in throughput mode. Unlike [demos](https://docs.openvino.ai/2023.2/omz_demos.html) this sample doesn't have other configurable command line arguments. Feel free to modify sample's source code to try out different options. +This sample demonstrates how to estimate performance of a model using Asynchronous Inference Request API in throughput mode. Unlike [demos](https://docs.openvino.ai/2023.3/omz_demos.html) this sample doesn't have other configurable command line arguments. Feel free to modify sample's source code to try out different options. -The reported results may deviate from what [benchmark_app](https://docs.openvino.ai/2023.2/openvino_inference_engine_samples_benchmark_app_README.html) reports. One example is model input precision for computer vision tasks. benchmark_app sets ``uint8``, while the sample uses default model precision which is usually ``float32``. +The reported results may deviate from what [benchmark_app](https://docs.openvino.ai/2023.3/openvino_inference_engine_samples_benchmark_app_README.html) reports. One example is model input precision for computer vision tasks. benchmark_app sets ``uint8``, while the sample uses default model precision which is usually ``float32``. 
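A rough Python analogue of the throughput-mode approach, using an `AsyncInferQueue` to keep several requests in flight at once, might look like this; the IR path is a placeholder.

```python
import time
import numpy as np
import openvino as ov

core = ov.Core()
compiled_model = core.compile_model(
    core.read_model("model.xml"), "CPU", {"PERFORMANCE_HINT": "THROUGHPUT"}  # placeholder IR
)
data = np.zeros(compiled_model.input(0).shape, dtype=np.float32)

# The queue size defaults to a device-suggested number of parallel requests.
infer_queue = ov.AsyncInferQueue(compiled_model)

num_frames = 1000
start = time.perf_counter()
for _ in range(num_frames):
    infer_queue.start_async({0: data})
infer_queue.wait_all()
print(f"throughput: {num_frames / (time.perf_counter() - start):.1f} FPS")
```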
-For more detailed information on how this sample works, check the dedicated [article](https://docs.openvino.ai/2023.2/openvino_inference_engine_samples_throughput_benchmark_README.html) +For more detailed information on how this sample works, check the dedicated [article](https://docs.openvino.ai/2023.3/openvino_inference_engine_samples_throughput_benchmark_README.html) ## Requirements @@ -16,8 +16,8 @@ For more detailed information on how this sample works, check the dedicated [art | | [face-detection-](https://docs.openvino.ai/nightly/omz_models_model_face_detection_0200.html) | | Model Format | OpenVINO™ toolkit Intermediate Representation | | | (\*.xml + \*.bin), ONNX (\*.onnx) | -| Supported devices | [All](https://docs.openvino.ai/2023.2/openvino_docs_OV_UG_supported_plugins_Supported_Devices.html) | -| Other language realization | [Python](https://docs.openvino.ai/2023.2/openvino_inference_engine_ie_bridges_python_sample_throughput_benchmark_README.html) | +| Supported devices | [All](https://docs.openvino.ai/2023.3/openvino_docs_OV_UG_supported_plugins_Supported_Devices.html) | +| Other language realization | [Python](https://docs.openvino.ai/2023.3/openvino_inference_engine_ie_bridges_python_sample_throughput_benchmark_README.html) | The following C++ API is used in the application: diff --git a/samples/cpp/benchmark_app/README.md b/samples/cpp/benchmark_app/README.md index c128c7a93819af..ac100994de45af 100644 --- a/samples/cpp/benchmark_app/README.md +++ b/samples/cpp/benchmark_app/README.md @@ -2,14 +2,14 @@ This page demonstrates how to use the Benchmark C++ Tool to estimate deep learning inference performance on supported devices. -> **NOTE**: This page describes usage of the C++ implementation of the Benchmark Tool. For the Python implementation, refer to the [Benchmark Python Tool](https://docs.openvino.ai/2023.2/openvino_inference_engine_tools_benchmark_tool_README.html) page. The Python version is recommended for benchmarking models that will be used in Python applications, and the C++ version is recommended for benchmarking models that will be used in C++ applications. Both tools have a similar command interface and backend. +> **NOTE**: This page describes usage of the C++ implementation of the Benchmark Tool. For the Python implementation, refer to the [Benchmark Python Tool](https://docs.openvino.ai/2023.3/openvino_inference_engine_tools_benchmark_tool_README.html) page. The Python version is recommended for benchmarking models that will be used in Python applications, and the C++ version is recommended for benchmarking models that will be used in C++ applications. Both tools have a similar command interface and backend. -For more detailed information on how this sample works, check the dedicated [article](https://docs.openvino.ai/2023.2/openvino_inference_engine_samples_benchmark_app_README.html) +For more detailed information on how this sample works, check the dedicated [article](https://docs.openvino.ai/2023.3/openvino_inference_engine_samples_benchmark_app_README.html) ## Requriements -To use the C++ benchmark_app, you must first build it following the [Build the Sample Applications](https://docs.openvino.ai/2023.2/openvino_docs_OV_UG_Samples_Overview.html) instructions and then set up paths and environment variables by following the [Get Ready for Running the Sample Applications](https://docs.openvino.ai/2023.2/openvino_docs_get_started_get_started_demos.html) instructions. Navigate to the directory where the benchmark_app C++ sample binary was built. 
+To use the C++ benchmark_app, you must first build it following the [Build the Sample Applications](https://docs.openvino.ai/2023.3/openvino_docs_OV_UG_Samples_Overview.html) instructions and then set up paths and environment variables by following the [Get Ready for Running the Sample Applications](https://docs.openvino.ai/2023.3/openvino_docs_get_started_get_started_demos.html) instructions. Navigate to the directory where the benchmark_app C++ sample binary was built. -> **NOTE**: If you installed OpenVINO Runtime using PyPI or Anaconda Cloud, only the [Benchmark Python Tool](https://docs.openvino.ai/2023.2/openvino_inference_engine_tools_benchmark_tool_README.html) is available, and you should follow the usage instructions on that page instead. +> **NOTE**: If you installed OpenVINO Runtime using PyPI or Anaconda Cloud, only the [Benchmark Python Tool](https://docs.openvino.ai/2023.3/openvino_inference_engine_tools_benchmark_tool_README.html) is available, and you should follow the usage instructions on that page instead. -The benchmarking application works with models in the OpenVINO IR, TensorFlow, TensorFlow Lite, PaddlePaddle, PyTorch and ONNX formats. If you need it, OpenVINO also allows you to [convert your models](https://docs.openvino.ai/2023.2/openvino_docs_MO_DG_Deep_Learning_Model_Optimizer_DevGuide.html). +The benchmarking application works with models in the OpenVINO IR, TensorFlow, TensorFlow Lite, PaddlePaddle, PyTorch and ONNX formats. If you need it, OpenVINO also allows you to [convert your models](https://docs.openvino.ai/2023.3/openvino_docs_MO_DG_Deep_Learning_Model_Optimizer_DevGuide.html). diff --git a/samples/cpp/classification_sample_async/README.md b/samples/cpp/classification_sample_async/README.md index f13ca76d376a60..f0188c0342f39c 100644 --- a/samples/cpp/classification_sample_async/README.md +++ b/samples/cpp/classification_sample_async/README.md @@ -6,17 +6,17 @@ Models with only one input and output are supported. In addition to regular images, the sample also supports single-channel ``ubyte`` images as an input for LeNet model. 
-For more detailed information on how this sample works, check the dedicated [article](https://docs.openvino.ai/2023.2/openvino_inference_engine_samples_classification_sample_async_README.html) +For more detailed information on how this sample works, check the dedicated [article](https://docs.openvino.ai/2023.3/openvino_inference_engine_samples_classification_sample_async_README.html) ## Requirements | Options | Values | | ---------------------------| -------------------------------------------------------------------------------------------------------------------------------------| -| Validated Models | [alexnet](https://docs.openvino.ai/2023.2/omz_models_model_alexnet.html), | -| | [googlenet-v1](https://docs.openvino.ai/2023.2/omz_models_model_googlenet_v1.html) | +| Validated Models | [alexnet](https://docs.openvino.ai/2023.3/omz_models_model_alexnet.html), | +| | [googlenet-v1](https://docs.openvino.ai/2023.3/omz_models_model_googlenet_v1.html) | | Model Format | OpenVINO™ toolkit Intermediate Representation (\*.xml + \*.bin), ONNX (\*.onnx) | -| Supported devices | [All](https://docs.openvino.ai/2023.2/openvino_docs_OV_UG_supported_plugins_Supported_Devices.html) | -| Other language realization | [Python](https://docs.openvino.ai/2023.2/openvino_inference_engine_ie_bridges_python_sample_classification_sample_async_README.html) | +| Supported devices | [All](https://docs.openvino.ai/2023.3/openvino_docs_OV_UG_supported_plugins_Supported_Devices.html) | +| Other language realization | [Python](https://docs.openvino.ai/2023.3/openvino_inference_engine_ie_bridges_python_sample_classification_sample_async_README.html) | The following C++ API is used in the application: diff --git a/samples/cpp/hello_classification/README.md b/samples/cpp/hello_classification/README.md index a6a40f5231f596..7ed0affa099610 100644 --- a/samples/cpp/hello_classification/README.md +++ b/samples/cpp/hello_classification/README.md @@ -4,18 +4,18 @@ This sample demonstrates how to do inference of image classification models usin Models with only one input and output are supported. 
-For more detailed information on how this sample works, check the dedicated [article](https://docs.openvino.ai/2023.2/openvino_inference_engine_samples_hello_classification_README.html) +For more detailed information on how this sample works, check the dedicated [article](https://docs.openvino.ai/2023.3/openvino_inference_engine_samples_hello_classification_README.html) ## Requirements | Options | Values | | ----------------------------| ------------------------------------------------------------------------------------------------------------------------------| -| Validated Models | [alexnet](https://docs.openvino.ai/2023.2/omz_models_model_alexnet.html), | -| | [googlenet-v1](https://docs.openvino.ai/2023.2/omz_models_model_googlenet_v1.html) | +| Validated Models | [alexnet](https://docs.openvino.ai/2023.3/omz_models_model_alexnet.html), | +| | [googlenet-v1](https://docs.openvino.ai/2023.3/omz_models_model_googlenet_v1.html) | | Model Format | OpenVINO™ toolkit Intermediate Representation (\*.xml + \*.bin), ONNX (\*.onnx) | -| Supported devices | [All](https://docs.openvino.ai/2023.2/openvino_docs_OV_UG_supported_plugins_Supported_Devices.html) | -| Other language realization | [C](https://docs.openvino.ai/2023.2/openvino_inference_engine_ie_bridges_c_samples_hello_classification_README.html), | -| | [Python](https://docs.openvino.ai/2023.2/openvino_inference_engine_ie_bridges_python_sample_hello_classification_README.html) | +| Supported devices | [All](https://docs.openvino.ai/2023.3/openvino_docs_OV_UG_supported_plugins_Supported_Devices.html) | +| Other language realization | [C](https://docs.openvino.ai/2023.3/openvino_inference_engine_ie_bridges_c_samples_hello_classification_README.html), | +| | [Python](https://docs.openvino.ai/2023.3/openvino_inference_engine_ie_bridges_python_sample_hello_classification_README.html) | The following C++ API is used in the application: diff --git a/samples/cpp/hello_nv12_input_classification/README.md b/samples/cpp/hello_nv12_input_classification/README.md index cb863019ab5aa4..fa921a6e2c6702 100644 --- a/samples/cpp/hello_nv12_input_classification/README.md +++ b/samples/cpp/hello_nv12_input_classification/README.md @@ -2,17 +2,17 @@ This sample demonstrates how to execute an inference of image classification models with images in NV12 color format using Synchronous Inference Request API. -For more detailed information on how this sample works, check the dedicated [article](https://docs.openvino.ai/2023.2/openvino_inference_engine_samples_hello_nv12_input_classification_README.html) +For more detailed information on how this sample works, check the dedicated [article](https://docs.openvino.ai/2023.3/openvino_inference_engine_samples_hello_nv12_input_classification_README.html) ## Requirements | Options | Values | | ----------------------------| --------------------------------------------------------------------------------------------------------------------------------| -| Validated Models | [alexnet openvino - click ir "https://docs.openvino.ai/2023.0/openvino_docs_MO_DG_IR_and_opsets.html" + click ir "https://docs.openvino.ai/2023.3/openvino_docs_MO_DG_IR_and_opsets.html" ``` The primary function of the OpenVINO IR Frontend is to load an OpenVINO IR into memory. 
diff --git a/src/frontends/paddle/README.md b/src/frontends/paddle/README.md index 2abd536c6d15fa..e65b027d49e6fc 100644 --- a/src/frontends/paddle/README.md +++ b/src/frontends/paddle/README.md @@ -21,7 +21,7 @@ OpenVINO Paddle Frontend has the following structure: ## Debug capabilities -Developers can use OpenVINO Model debug capabilities that are described in the [OpenVINO Model User Guide](https://docs.openvino.ai/2023.2/openvino_docs_OV_UG_Model_Representation.html#model-debug-capabilities). +Developers can use OpenVINO Model debug capabilities that are described in the [OpenVINO Model User Guide](https://docs.openvino.ai/2023.3/openvino_docs_OV_UG_Model_Representation.html#model-debug-capabilities). ## Tutorials diff --git a/src/frontends/pytorch/README.md b/src/frontends/pytorch/README.md index 54ae5e5f254e50..46a483a6c3b201 100644 --- a/src/frontends/pytorch/README.md +++ b/src/frontends/pytorch/README.md @@ -115,7 +115,7 @@ In rare cases, converting PyTorch operations requires transformation. The main difference between transformation and translation is that transformation works on the graph rather than on the `NodeContext` of a single operation. This means that some functionality provided by `NodeContext` is not accessible in transformation and usually -requires working with `PtFramworkNode` directly. [General rules](https://docs.openvino.ai/2023.2/openvino_docs_transformations.html) +requires working with `PtFramworkNode` directly. [General rules](https://docs.openvino.ai/2023.3/openvino_docs_transformations.html) for writing transformations also apply to PT FE transformations. ### PyTorch Frontend Layer Tests diff --git a/src/frontends/tensorflow/README.md b/src/frontends/tensorflow/README.md index 67c9c39729a5b7..bf0f818e76e05b 100644 --- a/src/frontends/tensorflow/README.md +++ b/src/frontends/tensorflow/README.md @@ -140,15 +140,15 @@ The main rules for loaders implementation: In rare cases, TensorFlow operation conversion requires two transformations (`Loader` and `Internal Transformation`). In the first step, `Loader` must convert a TF operation into [Internal Operation](../tensorflow_common/helper_ops) that is used temporarily by the conversion pipeline. -The internal operation implementation must also contain the `validate_and_infer_types()` method as similar to [OpenVINO Core](https://docs.openvino.ai/2023.2/groupov_ops_cpp_api.html) operations. +The internal operation implementation must also contain the `validate_and_infer_types()` method as similar to [OpenVINO Core](https://docs.openvino.ai/2023.3/api/c_cpp_api/group__ov__ops__cpp__api.html) operations. Here is an example of an implementation for the internal operation `SparseFillEmptyRows` used to convert Wide and Deep models. https://github.com/openvinotoolkit/openvino/blob/7f3c95c161bc78ab2aefa6eab8b008142fb945bc/src/frontends/tensorflow/src/helper_ops/sparse_fill_empty_rows.hpp#L17-L55 In the second step, `Internal Transformation` based on `ov::pass::MatcherPass` must convert sub-graphs with internal operations into sub-graphs consisting only of the OpenVINO opset. -For more information about `ov::pass::MatcherPass` based transformations and their development, read [Overview of Transformations API](https://docs.openvino.ai/2023.2/openvino_docs_transformations.html) -and [OpenVINO Matcher Pass](https://docs.openvino.ai/2023.2/openvino_docs_Extensibility_UG_matcher_pass.html) documentation. 
+For more information about `ov::pass::MatcherPass` based transformations and their development, read [Overview of Transformations API](https://docs.openvino.ai/2023.3/openvino_docs_transformations.html) +and [OpenVINO Matcher Pass](https://docs.openvino.ai/2023.3/openvino_docs_Extensibility_UG_matcher_pass.html) documentation. The internal transformation must be called in the `ov::frontend::tensorflow::FrontEnd::normalize()` method. It is important to check the order of applying internal transformations to avoid situations when some internal operation breaks a graph pattern with an internal operation for another internal transformation. diff --git a/src/inference/docs/api_details.md b/src/inference/docs/api_details.md index 0ae42bbc8d280e..89a4ec9965196b 100644 --- a/src/inference/docs/api_details.md +++ b/src/inference/docs/api_details.md @@ -9,12 +9,12 @@ OpenVINO Inference API contains two folders: Public OpenVINO Inference API defines global header [openvino/openvino.hpp](../include/openvino/openvino.hpp) which includes all common OpenVINO headers. All Inference components are placed inside the [openvino/runtime](../include/openvino/runtime) folder. -To learn more about the Inference API usage, read [How to integrate OpenVINO with your application](https://docs.openvino.ai/2023.2/openvino_docs_OV_UG_Integrate_OV_with_your_application.html). +To learn more about the Inference API usage, read [How to integrate OpenVINO with your application](https://docs.openvino.ai/2023.3/openvino_docs_OV_UG_Integrate_OV_with_your_application.html). The diagram with dependencies is presented on the [OpenVINO Architecture page](../../docs/architecture.md#openvino-inference-pipeline). ## Components of OpenVINO Developer API -OpenVINO Developer API is required for OpenVINO plugin development. This process is described in the [OpenVINO Plugin Development Guide](https://docs.openvino.ai/2023.2/openvino_docs_ie_plugin_dg_overview.html). +OpenVINO Developer API is required for OpenVINO plugin development. This process is described in the [OpenVINO Plugin Development Guide](https://docs.openvino.ai/2023.3/openvino_docs_ie_plugin_dg_overview.html). ## See also * [OpenVINO™ Core README](../README.md) diff --git a/src/plugins/auto/README.md b/src/plugins/auto/README.md index 6b38a19953a50c..981f353b90dfd2 100644 --- a/src/plugins/auto/README.md +++ b/src/plugins/auto/README.md @@ -20,7 +20,7 @@ The AUTO plugin follows the OpenVINO™ plugin architecture and consists of seve * [src](./src/) - folder contains sources of the AUTO plugin. * [tests](./tests/) - tests for Auto Plugin components. -Learn more in the [OpenVINO™ Plugin Developer Guide](https://docs.openvino.ai/2023.2/openvino_docs_ie_plugin_dg_overview.html). +Learn more in the [OpenVINO™ Plugin Developer Guide](https://docs.openvino.ai/2023.3/openvino_docs_ie_plugin_dg_overview.html). ## Architecture The diagram below shows an overview of the components responsible for the basic inference flow: diff --git a/src/plugins/auto/docs/architecture.md b/src/plugins/auto/docs/architecture.md index 7b9bbad884e91d..a4db66d9481925 100644 --- a/src/plugins/auto/docs/architecture.md +++ b/src/plugins/auto/docs/architecture.md @@ -8,7 +8,7 @@ AUTO is a meta plugin in OpenVINO that doesn’t bind to a specific type of hard The logic behind the choice is as follows: * Check what supported devices are available. 
-* Check performance hint of input setting (For detailed information of performance hint, please read more on the [ov::hint::PerformanceMode](https://docs.openvino.ai/2023.2/openvino_docs_OV_UG_Performance_Hints.html)).
+* Check the performance hint of the input setting (for detailed information on performance hints, read more on the [ov::hint::PerformanceMode](https://docs.openvino.ai/2023.3/openvino_docs_OV_UG_Performance_Hints.html)).
 * Check precisions of the input model (for detailed information on precisions read more on the [ov::device::capabilities](https://docs.openvino.ai/2023.2/namespaceov_1_1device_1_1capability.html)).
 * Select the highest-priority device capable of supporting the given model for LATENCY hint and THROUGHPUT hint. Or Select all devices capable of supporting the given model for CUMULATIVE THROUGHPUT hint.
 * If model’s precision is FP32 but there is no device capable of supporting it, offload the model to a device supporting FP16.
@@ -21,7 +21,7 @@ The AUTO plugin is also the default plugin for OpenVINO, if the user does not se
 Compiling the model to accelerator-optimized kernels may take some time. When AUTO selects one accelerator, it can start inference with the system's CPU by default, as it provides very low latency and can start inference with no additional delays. While the CPU is performing inference, AUTO continues to load the model to the device best suited for the purpose and transfers the task to it when ready.
-![alt text](https://docs.openvino.ai/2023.2/_images/autoplugin_accelerate.svg "AUTO cuts first inference latency (FIL) by running inference on the CPU until the GPU is ready")
+![alt text](https://docs.openvino.ai/2023.3/_images/autoplugin_accelerate.svg "AUTO cuts first inference latency (FIL) by running inference on the CPU until the GPU is ready")
 The user can disable this acceleration feature by excluding CPU from the priority list or disabling `ov::intel_auto::enable_startup_fallback`. Its default value is `true`.
diff --git a/src/plugins/auto/docs/integration.md b/src/plugins/auto/docs/integration.md
index 9f7fb8aec751e2..e79730f0f4b919 100644
--- a/src/plugins/auto/docs/integration.md
+++ b/src/plugins/auto/docs/integration.md
@@ -1,7 +1,7 @@
 # AUTO Plugin Integration
 ## Implement a New Plugin
-Refer to [OpenVINO Plugin Developer Guide](https://docs.openvino.ai/2023.2/openvino_docs_ie_plugin_dg_overview.html) for detailed information on how to implement a new plugin.
+Refer to [OpenVINO Plugin Developer Guide](https://docs.openvino.ai/2023.3/openvino_docs_ie_plugin_dg_overview.html) for detailed information on how to implement a new plugin.
 Query model method `ov::IPlugin::query_model()` is recommended as it is important for AUTO to quickly make decisions and save selection time.
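To illustrate the AUTO selection behavior described above from the application side, here is a minimal Python sketch. It assumes a model file named `model.xml` and a machine with a GPU; the property values shown are the documented string forms of the performance hints.

```python
# Minimal sketch: steering AUTO device selection with a performance hint.
# "model.xml" and the GPU-only priority list are placeholders for illustration.
from openvino.runtime import Core

core = Core()
model = core.read_model("model.xml")

# AUTO picks the device; PERFORMANCE_HINT steers it toward latency or throughput.
compiled = core.compile_model(model, "AUTO", {"PERFORMANCE_HINT": "LATENCY"})

# Restricting the priority list (here to GPU only) also removes the CPU from the
# first-inference acceleration path described above.
compiled_gpu_only = core.compile_model(model, "AUTO:GPU", {"PERFORMANCE_HINT": "THROUGHPUT"})
```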
diff --git a/src/plugins/intel_cpu/docs/fake_quantize.md b/src/plugins/intel_cpu/docs/fake_quantize.md
index 15dd77582ae411..db2616450dd592 100644
--- a/src/plugins/intel_cpu/docs/fake_quantize.md
+++ b/src/plugins/intel_cpu/docs/fake_quantize.md
@@ -1,5 +1,5 @@
 # FakeQuantize in OpenVINO
-https://docs.openvino.ai/2023.2/openvino_docs_ops_quantization_FakeQuantize_1.html
+https://docs.openvino.ai/2023.3/openvino_docs_ops_quantization_FakeQuantize_1.html
 definition:
 ```
diff --git a/src/plugins/intel_cpu/docs/internal_cpu_plugin_optimization.md b/src/plugins/intel_cpu/docs/internal_cpu_plugin_optimization.md
index f72f2d46b7c882..c13d5ed25dbe76 100644
--- a/src/plugins/intel_cpu/docs/internal_cpu_plugin_optimization.md
+++ b/src/plugins/intel_cpu/docs/internal_cpu_plugin_optimization.md
@@ -3,7 +3,7 @@
 The CPU plugin supports several graph optimization algorithms, such as fusing or removing layers.
 Refer to the sections below for details.
-> **NOTE**: For layer descriptions, see the [IR Notation Reference](https://docs.openvino.ai/2023.2/openvino_docs_ops_opset.html).
+> **NOTE**: For layer descriptions, see the [IR Notation Reference](https://docs.openvino.ai/2023.3/openvino_docs_ops_opset.html).
 ## Fusing Convolution and Simple Layers
diff --git a/src/plugins/intel_gpu/docs/gpu_plugin_driver_troubleshooting.md b/src/plugins/intel_gpu/docs/gpu_plugin_driver_troubleshooting.md
index 3642748af18a7e..02b9e4eb636609 100644
--- a/src/plugins/intel_gpu/docs/gpu_plugin_driver_troubleshooting.md
+++ b/src/plugins/intel_gpu/docs/gpu_plugin_driver_troubleshooting.md
@@ -28,7 +28,7 @@ Some Intel® CPUs might not have integrated GPU, so if you want to run OpenVINO
 ## 2. Make sure that OpenCL® Runtime is installed
-OpenCL runtime is a part of the GPU driver on Windows, but on Linux it should be installed separately. For the installation tips, refer to [OpenVINO docs](https://docs.openvino.ai/2023.2/openvino_docs_install_guides_installing_openvino_linux_header.html) and [OpenCL Compute Runtime docs](https://github.com/intel/compute-runtime/tree/master/opencl/doc).
+OpenCL runtime is a part of the GPU driver on Windows, but on Linux it should be installed separately. For the installation tips, refer to [OpenVINO docs](https://docs.openvino.ai/2023.3/openvino_docs_install_guides_installing_openvino_linux_header.html) and [OpenCL Compute Runtime docs](https://github.com/intel/compute-runtime/tree/master/opencl/doc).
 To get the support of Intel® Iris® Xe MAX Graphics with Linux, follow the [driver installation guide](https://dgpu-docs.intel.com/devices/iris-xe-max-graphics/index.html)
 ## 3. Make sure that user has all required permissions to work with GPU device
@@ -59,7 +59,7 @@ For more details, see the [OpenCL on Linux](https://github.com/bashbaug/OpenCLPa
 ## 7. If you are using dGPU with XMX, ensure that HW_MATMUL feature is recognized
-OpenVINO contains *hello_query_device* sample application: [link](https://docs.openvino.ai/2023.2/openvino_inference_engine_ie_bridges_python_sample_hello_query_device_README.html)
+OpenVINO contains the *hello_query_device* sample application: [link](https://docs.openvino.ai/2023.3/openvino_inference_engine_ie_bridges_python_sample_hello_query_device_README.html)
 With this option, you can check whether Intel XMX(Xe Matrix Extension) feature is properly recognized or not. This is a hardware feature to accelerate matrix operations and available on some discrete GPUs.
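As a quick alternative to building the *hello_query_device* sample, a short Python sketch can list each available device and the capabilities it reports. Device names and capability strings depend on the installed hardware and drivers; `GPU_HW_MATMUL` is the capability expected to appear for XMX-enabled discrete GPUs.

```python
# Minimal sketch: report devices and their capabilities, similar to hello_query_device.
from openvino.runtime import Core

core = Core()
for device in core.available_devices:
    full_name = core.get_property(device, "FULL_DEVICE_NAME")
    capabilities = core.get_property(device, "OPTIMIZATION_CAPABILITIES")
    print(f"{device}: {full_name}")
    print(f"  capabilities: {capabilities}")
    # On discrete GPUs with XMX, the GPU capability list is expected to include GPU_HW_MATMUL.
```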
diff --git a/src/plugins/intel_gpu/docs/source_code_structure.md b/src/plugins/intel_gpu/docs/source_code_structure.md index c59133748b2db5..fec04e4321073b 100644 --- a/src/plugins/intel_gpu/docs/source_code_structure.md +++ b/src/plugins/intel_gpu/docs/source_code_structure.md @@ -5,7 +5,7 @@ but at some point clDNN became a part of OpenVINO, so now it's a part of overall via embedding of [oneDNN library](https://github.com/oneapi-src/oneDNN) OpenVINO GPU plugin is responsible for: - 1. [IE Plugin API](https://docs.openvino.ai/2023.2/openvino_docs_ie_plugin_dg_overview.html) implementation. + 1. [IE Plugin API](https://docs.openvino.ai/2023.3/openvino_docs_ie_plugin_dg_overview.html) implementation. 2. Translation of a model from common IE semantic (`ov::Function`) into plugin-specific one (`cldnn::topology`), which is then compiled into GPU graph representation (`cldnn::network`). 3. Implementation of OpenVINO operation set for Intel® GPU. diff --git a/src/plugins/proxy/README.md b/src/plugins/proxy/README.md index 711ffb51a3ad57..4e176ec164a4d3 100644 --- a/src/plugins/proxy/README.md +++ b/src/plugins/proxy/README.md @@ -47,5 +47,5 @@ After the creation the proxy plugin has next properties: * [OpenVINO Core Components](../../README.md) * [OpenVINO Plugins](../README.md) * [Developer documentation](../../../docs/dev/index.md) - * [OpenVINO Plugin Developer Guide](https://docs.openvino.ai/2023.2/openvino_docs_ie_plugin_dg_overview.html) + * [OpenVINO Plugin Developer Guide](https://docs.openvino.ai/2023.3/openvino_docs_ie_plugin_dg_overview.html) diff --git a/src/plugins/template/README.md b/src/plugins/template/README.md index ced5368f839a08..84ad2feff7b7fd 100644 --- a/src/plugins/template/README.md +++ b/src/plugins/template/README.md @@ -35,11 +35,11 @@ $ make -j8 ## Tutorials -* [OpenVINO Plugin Developer Guide](https://docs.openvino.ai/2023.2/openvino_docs_ie_plugin_dg_overview.html) +* [OpenVINO Plugin Developer Guide](https://docs.openvino.ai/2023.3/openvino_docs_ie_plugin_dg_overview.html) ## See also * [OpenVINO™ README](../../../README.md) * [OpenVINO Core Components](../../README.md) * [OpenVINO Plugins](../README.md) * [Developer documentation](../../../docs/dev/index.md) - * [OpenVINO Plugin Developer Guide](https://docs.openvino.ai/2023.2/openvino_docs_ie_plugin_dg_overview.html) + * [OpenVINO Plugin Developer Guide](https://docs.openvino.ai/2023.3/openvino_docs_ie_plugin_dg_overview.html) diff --git a/tools/benchmark_tool/README.md b/tools/benchmark_tool/README.md index 2885a329d8cfd3..0fde3e8d7612a7 100644 --- a/tools/benchmark_tool/README.md +++ b/tools/benchmark_tool/README.md @@ -2,13 +2,13 @@ This page demonstrates how to use the Benchmark Python Tool to estimate deep learning inference performance on supported devices. -> **NOTE**: This page describes usage of the Python implementation of the Benchmark Tool. For the C++ implementation, refer to the [Benchmark C++ Tool](https://docs.openvino.ai/2023.2/openvino_inference_engine_samples_benchmark_app_README.html) page. The Python version is recommended for benchmarking models that will be used in Python applications, and the C++ version is recommended for benchmarking models that will be used in C++ applications. Both tools have a similar command interface and backend. +> **NOTE**: This page describes usage of the Python implementation of the Benchmark Tool. For the C++ implementation, refer to the [Benchmark C++ Tool](https://docs.openvino.ai/2023.3/openvino_inference_engine_samples_benchmark_app_README.html) page. 
The Python version is recommended for benchmarking models that will be used in Python applications, and the C++ version is recommended for benchmarking models that will be used in C++ applications. Both tools have a similar command interface and backend.
-For more detailed information on how this sample works, check the dedicated [article](https://docs.openvino.ai/2023.2/openvino_inference_engine_tools_benchmark_tool_README.html)
+For more detailed information on how this sample works, check the dedicated [article](https://docs.openvino.ai/2023.3/openvino_inference_engine_tools_benchmark_tool_README.html)
-## Requriements
+## Requirements
-The Python benchmark_app is automatically installed when you install OpenVINO Developer Tools using [PyPI](https://docs.openvino.ai/2023.2/openvino_docs_install_guides_installing_openvino_pip.html) Before running ``benchmark_app``, make sure the ``openvino_env`` virtual environment is activated, and navigate to the directory where your model is located.
+The Python benchmark_app is automatically installed when you install OpenVINO Developer Tools using [PyPI](https://docs.openvino.ai/2023.3/openvino_docs_install_guides_installing_openvino_pip.html). Before running ``benchmark_app``, make sure the ``openvino_env`` virtual environment is activated, and navigate to the directory where your model is located.
 The benchmarking application works with models in the OpenVINO IR (``model.xml`` and ``model.bin``) and ONNX (``model.onnx``) formats.
-Make sure to [convert your models](https://docs.openvino.ai/2023.2/openvino_docs_MO_DG_Deep_Learning_Model_Optimizer_DevGuide.html) if necessary.
+Make sure to [convert your models](https://docs.openvino.ai/2023.3/openvino_docs_MO_DG_Deep_Learning_Model_Optimizer_DevGuide.html) if necessary.
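For example, an ONNX model can be converted to OpenVINO IR with a short Python sketch before benchmarking; the file names below are placeholders.

```python
# Minimal sketch: convert a model to OpenVINO IR for use with benchmark_app.
import openvino as ov

ov_model = ov.convert_model("model.onnx")   # source model path is a placeholder
ov.save_model(ov_model, "model.xml")        # writes model.xml and model.bin
```

The resulting ``model.xml`` can then be passed to ``benchmark_app`` with the ``-m`` option.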