README and code improvements (openvinotoolkit#299)
* README and code improvements

Remove the Binder button for notebook 102 from the READMEs
Remove the postBuild script for Binder
Incremental code style improvements for notebooks 202 and 205

* Fix and simplify read_network in notebook 202
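For reference, the `read_network` fix in the last bullet comes down to passing the IR path through the explicit `model` keyword, as the 202 diff below shows. A minimal sketch with the OpenVINO 2021.x Inference Engine Python API these notebooks use (the model path and device name here are placeholders, not values from this commit):

```python
from openvino.inference_engine import IECore

ie = IECore()
# Pass only the .xml path via the `model` keyword; the matching .bin weights
# file with the same base name is located automatically, so it no longer
# needs to be passed separately.
net = ie.read_network(model="model.xml")
exec_net = ie.load_network(network=net, device_name="CPU")
```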
helena-intel authored Oct 20, 2021
1 parent fcfb3c0 commit e618046
Showing 7 changed files with 138 additions and 181 deletions.
4 changes: 0 additions & 4 deletions .binder/postBuild

This file was deleted.

2 changes: 1 addition & 1 deletion README.md
@@ -28,7 +28,7 @@ Tutorials that explain how to optimize and quantize models with OpenVINO tools.
| Notebook | Description | Preview |
| :-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | :------------------------------------------------------------------------------- | :------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: |
| [101-tensorflow-to-openvino](notebooks/101-tensorflow-to-openvino/)<br>[![Binder](https://mybinder.org/badge_logo.svg)](https://mybinder.org/v2/gh/openvinotoolkit/openvino_notebooks/HEAD?filepath=notebooks%2F101-tensorflow-to-openvino%2F101-tensorflow-to-openvino.ipynb) | Convert TensorFlow models to OpenVINO IR | <img src="https://user-images.githubusercontent.com/15709723/127779167-9d33dcc6-9001-4d74-a089-8248310092fe.png" width=250> |
| [102-pytorch-onnx-to-openvino](notebooks/102-pytorch-onnx-to-openvino/)<br>[![Binder](https://mybinder.org/badge_logo.svg)](https://mybinder.org/v2/gh/openvinotoolkit/openvino_notebooks/HEAD?filepath=notebooks%2F102-pytorch-onnx-to-openvino%2F102-pytorch-onnx-to-openvino.ipynb) | Convert PyTorch models to OpenVINO IR | <img src="https://user-images.githubusercontent.com/15709723/127779246-32e7392b-2d72-4a7d-b871-e79e7bfdd2e9.png" width=300 > |
| [102-pytorch-onnx-to-openvino](notebooks/102-pytorch-onnx-to-openvino/) | Convert PyTorch models to OpenVINO IR | <img src="https://user-images.githubusercontent.com/15709723/127779246-32e7392b-2d72-4a7d-b871-e79e7bfdd2e9.png" width=300 > |
| [103-paddle-onnx-to-openvino](notebooks/103-paddle-onnx-to-openvino/)<br>[![Binder](https://mybinder.org/badge_logo.svg)](https://mybinder.org/v2/gh/openvinotoolkit/openvino_notebooks/HEAD?filepath=notebooks%2F103-paddle-onnx-to-openvino%2F103-paddle-onnx-to-openvino-classification.ipynb) | Convert PaddlePaddle models to OpenVINO IR | <img src="https://user-images.githubusercontent.com/15709723/127779326-dc14653f-a960-4877-b529-86908a6f2a61.png" width=300> |
| [104-model-tools](notebooks/104-model-tools/)<br>[![Binder](https://mybinder.org/badge_logo.svg)](https://mybinder.org/v2/gh/openvinotoolkit/openvino_notebooks/HEAD?filepath=notebooks%2F104-model-tools%2F104-model-tools.ipynb) | Download, convert and benchmark models from Open Model Zoo | |
| [105-language-quantize-bert](notebooks/105-language-quantize-bert/) | Optimize and quantize a pre-trained BERT model ||
Binary file not shown.
@@ -119,12 +119,8 @@
" x, y = org\n",
"\n",
" image = cv2.UMat(image)\n",
" (text_w, text_h), _ = cv2.getTextSize(\n",
" text, font, font_scale, font_thickness\n",
" )\n",
" result_im = cv2.rectangle(\n",
" image, org, (x + text_w, y + text_h), text_color_bg, -1\n",
" )\n",
" (text_w, text_h), _ = cv2.getTextSize(text, font, font_scale, font_thickness)\n",
" result_im = cv2.rectangle(image, org, (x + text_w, y + text_h), text_color_bg, -1)\n",
"\n",
" textim = cv2.putText(\n",
" result_im,\n",
@@ -149,9 +145,7 @@
" if path.startswith(\"http\"):\n",
" # Set User-Agent to Mozilla because some websites block requests\n",
" # with User-Agent Python\n",
" request = urllib.request.Request(\n",
" path, headers={\"User-Agent\": \"Mozilla/5.0\"}\n",
" )\n",
" request = urllib.request.Request(path, headers={\"User-Agent\": \"Mozilla/5.0\"})\n",
" response = urllib.request.urlopen(request)\n",
" array = np.asarray(bytearray(response.read()), dtype=\"uint8\")\n",
" image = cv2.imdecode(array, -1) # Loads the image as BGR\n",
@@ -218,7 +212,7 @@
"outputs": [],
"source": [
"ie = IECore()\n",
"net = ie.read_network(str(model_xml_path))\n",
"net = ie.read_network(model=str(model_xml_path))\n",
"exec_net = ie.load_network(network=net, device_name=DEVICE)"
]
},
@@ -248,23 +242,13 @@
"# Get the expected input and target shape. `.dims[2:]` returns the height\n",
"# and width. OpenCV's resize function expects the shape as (width, height),\n",
"# so we reverse the shape with `[::-1]` and convert it to a tuple\n",
"input_height, input_width = tuple(\n",
" exec_net.input_info[original_image_key].tensor_desc.dims[2:]\n",
")\n",
"target_height, target_width = tuple(\n",
" exec_net.input_info[bicubic_image_key].tensor_desc.dims[2:]\n",
")\n",
"input_height, input_width = tuple(exec_net.input_info[original_image_key].tensor_desc.dims[2:])\n",
"target_height, target_width = tuple(exec_net.input_info[bicubic_image_key].tensor_desc.dims[2:])\n",
"\n",
"upsample_factor = int(target_height / input_height)\n",
"\n",
"print(\n",
" f\"The network expects inputs with a width of {input_width}, \"\n",
" f\"height of {input_height}\"\n",
")\n",
"print(\n",
" f\"The network returns images with a width of {target_width}, \"\n",
" f\"height of {target_height}\"\n",
")\n",
"print(f\"The network expects inputs with a width of {input_width}, \" f\"height of {input_height}\")\n",
"print(f\"The network returns images with a width of {target_width}, \" f\"height of {target_height}\")\n",
"\n",
"print(\n",
" f\"The image sides are upsampled by a factor {upsample_factor}. \"\n",
@@ -310,10 +294,7 @@
"# full_image = raw.postprocess()[:,:,(2,1,0)]\n",
"\n",
"plt.imshow(to_rgb(full_image))\n",
"print(\n",
" f\"Showing full image with width {full_image.shape[1]} \"\n",
" f\"and height {full_image.shape[0]}\"\n",
")"
"print(f\"Showing full image with width {full_image.shape[1]} \" f\"and height {full_image.shape[0]}\")"
]
},
{
@@ -360,10 +341,7 @@
"]\n",
"\n",
"# Show the cropped image\n",
"print(\n",
" f\"Showing image crop with width {image_crop.shape[1]} and \"\n",
" f\"height {image_crop.shape[0]}.\"\n",
")\n",
"print(f\"Showing image crop with width {image_crop.shape[1]} and \" f\"height {image_crop.shape[0]}.\")\n",
"plt.imshow(to_rgb(image_crop));"
]
},
@@ -388,12 +366,12 @@
"source": [
"# Resize the image to the target shape with bicubic interpolation\n",
"bicubic_image = cv2.resize(\n",
" image_crop, (target_width, target_height), interpolation=cv2.INTER_CUBIC\n",
" src=image_crop, dsize=(target_width, target_height), interpolation=cv2.INTER_CUBIC\n",
")\n",
"\n",
"# If required, resize image to input image shape\n",
"if CROP_FACTOR > 1:\n",
" image_crop = cv2.resize(image_crop, (input_width, input_height))\n",
" image_crop = cv2.resize(src=image_crop, dsize=(input_width, input_height))\n",
"\n",
"# Reshape the images from (H,W,C) to (N,C,H,W)\n",
"input_image_original = np.expand_dims(image_crop.transpose(2, 0, 1), axis=0)\n",
@@ -448,7 +426,7 @@
},
"outputs": [],
"source": [
"fig, ax = plt.subplots(1, 2, figsize=(30, 15))\n",
"fig, ax = plt.subplots(nrows=1, ncols=2, figsize=(30, 15))\n",
"ax[0].imshow(to_rgb(bicubic_image))\n",
"ax[1].imshow(to_rgb(result_image))\n",
"ax[0].set_title(\"Bicubic\")\n",
@@ -475,19 +453,23 @@
"outputs": [],
"source": [
"# Add text with \"SUPER\" or \"BICUBIC\" to the superresolution or bicubic image\n",
"image_super = write_text_on_image(result_image, \"SUPER\")\n",
"image_bicubic = write_text_on_image(bicubic_image, \"BICUBIC\")\n",
"image_super = write_text_on_image(image=result_image, text=\"SUPER\")\n",
"image_bicubic = write_text_on_image(image=bicubic_image, text=\"BICUBIC\")\n",
"\n",
"# Store the image and the results\n",
"crop_image_path = Path(f\"{OUTPUT_PATH.stem}/{image_id}_{adjusted_upsample_factor}x_crop.png\")\n",
"superres_image_path = Path(f\"{OUTPUT_PATH.stem}/{image_id}_{adjusted_upsample_factor}x_crop_superres.png\")\n",
"bicubic_image_path = Path(f\"{OUTPUT_PATH.stem}/{image_id}_{adjusted_upsample_factor}x_crop_bicubic.png\")\n",
"cv2.imwrite(str(crop_image_path), image_crop, [cv2.IMWRITE_PNG_COMPRESSION, 0])\n",
"superres_image_path = Path(\n",
" f\"{OUTPUT_PATH.stem}/{image_id}_{adjusted_upsample_factor}x_crop_superres.png\"\n",
")\n",
"bicubic_image_path = Path(\n",
" f\"{OUTPUT_PATH.stem}/{image_id}_{adjusted_upsample_factor}x_crop_bicubic.png\"\n",
")\n",
"cv2.imwrite(filename=str(crop_image_path), img=image_crop, params=[cv2.IMWRITE_PNG_COMPRESSION, 0])\n",
"cv2.imwrite(\n",
" str(superres_image_path), image_super, [cv2.IMWRITE_PNG_COMPRESSION, 0]\n",
" filename=str(superres_image_path), img=image_super, params=[cv2.IMWRITE_PNG_COMPRESSION, 0]\n",
")\n",
"cv2.imwrite(\n",
" str(bicubic_image_path), image_bicubic, [cv2.IMWRITE_PNG_COMPRESSION, 0]\n",
" filename=str(bicubic_image_path), img=image_bicubic, params=[cv2.IMWRITE_PNG_COMPRESSION, 0]\n",
")\n",
"print(f\"Images written to directory: {OUTPUT_PATH}\")"
]
@@ -523,7 +505,7 @@
")\n",
"\n",
"# DisplayImage(str(gif_image_path)) doesn't work in Colab\n",
"DisplayImage(open(gif_image_path, \"rb\").read(), width=1920 // 2)"
"DisplayImage(data=open(gif_image_path, \"rb\").read(), width=1920 // 2)"
]
},
{
@@ -547,24 +529,24 @@
"source": [
"FOURCC = cv2.VideoWriter_fourcc(*\"MJPG\")\n",
"\n",
"result_video_path = Path(f\"{OUTPUT_PATH.stem}/{image_id}_crop_comparison_{adjusted_upsample_factor}x.avi\")\n",
"result_video_path = Path(\n",
" f\"{OUTPUT_PATH.stem}/{image_id}_crop_comparison_{adjusted_upsample_factor}x.avi\"\n",
")\n",
"video_target_height, video_target_width = (\n",
" result_image.shape[0] // 2,\n",
" result_image.shape[1] // 2,\n",
")\n",
"\n",
"out_video = cv2.VideoWriter(\n",
" str(result_video_path),\n",
" FOURCC,\n",
" 90,\n",
" (video_target_width, video_target_height),\n",
" filename=str(result_video_path),\n",
" fourcc=FOURCC,\n",
" fps=90,\n",
" frameSize=(video_target_width, video_target_height),\n",
")\n",
"\n",
"resized_result_image = cv2.resize(\n",
" result_image, (video_target_width, video_target_height)\n",
")\n",
"resized_result_image = cv2.resize(src=result_image, dsize=(video_target_width, video_target_height))\n",
"resized_bicubic_image = cv2.resize(\n",
" bicubic_image, (video_target_width, video_target_height)\n",
" src=bicubic_image, dsize=(video_target_width, video_target_height)\n",
")\n",
"\n",
"progress_bar = ProgressBar(total=video_target_width)\n",
@@ -583,7 +565,7 @@
" # create a small black border line between the superresolution\n",
" # and bicubic part of the image\n",
" comparison_frame[:, i - 1 : i + 1, :] = 0\n",
" out_video.write(comparison_frame)\n",
" out_video.write(image=comparison_frame)\n",
" progress_bar.progress = i\n",
" progress_bar.update()\n",
"out_video.release()\n",
@@ -632,14 +614,10 @@
"full_image_height, full_image_width = full_image.shape[:2]\n",
"\n",
"# Compute x and y coordinates of left top of image tiles\n",
"x_coords = list(\n",
" range(0, full_image_width, input_width * CROP_FACTOR - CROPLINES * 2)\n",
")\n",
"x_coords = list(range(0, full_image_width, input_width * CROP_FACTOR - CROPLINES * 2))\n",
"while full_image_width - x_coords[-1] < input_width * CROP_FACTOR:\n",
" x_coords.pop(-1)\n",
"y_coords = list(\n",
" range(0, full_image_height, input_height * CROP_FACTOR - CROPLINES * 2)\n",
")\n",
"y_coords = list(range(0, full_image_height, input_height * CROP_FACTOR - CROPLINES * 2))\n",
"while full_image_height - y_coords[-1] < input_height * CROP_FACTOR:\n",
" y_coords.pop(-1)\n",
"\n",
@@ -659,10 +637,7 @@
" + target_height\n",
" - CROPLINES * 2 * (upsample_factor // CROP_FACTOR)\n",
")\n",
"print(\n",
" f\"The output image will have a width of {new_width} \"\n",
" f\"and a height of {new_height}\"\n",
")"
"print(f\"The output image will have a width of {new_width} \" f\"and a height of {new_height}\")"
]
},
{
@@ -694,14 +669,12 @@
"full_image_crop = full_image.copy()[:crop_height, :crop_width, :]\n",
"\n",
"# Create empty array of target size.\n",
"full_superresolution_image = np.empty(\n",
" (new_height, new_width, 3), dtype=np.uint8\n",
")\n",
"full_superresolution_image = np.empty((new_height, new_width, 3), dtype=np.uint8)\n",
"\n",
"# Create bicubic upsampled image of target size for comparison\n",
"full_bicubic_image = cv2.resize(\n",
" full_image_crop[CROPLINES:-CROPLINES, CROPLINES:-CROPLINES, :],\n",
" (new_width, new_height),\n",
" src=full_image_crop[CROPLINES:-CROPLINES, CROPLINES:-CROPLINES, :],\n",
" dsize=(new_width, new_height),\n",
" interpolation=cv2.INTER_CUBIC,\n",
")\n",
"\n",
@@ -718,21 +691,17 @@
"\n",
" # Resize the images to the target shape with bicubic interpolation\n",
" bicubic_image = cv2.resize(\n",
" image_crop,\n",
" (target_width, target_height),\n",
" src=image_crop,\n",
" dsize=(target_width, target_height),\n",
" interpolation=cv2.INTER_CUBIC,\n",
" )\n",
"\n",
" if CROP_FACTOR > 1:\n",
" image_crop = cv2.resize(image_crop, (input_width, input_height))\n",
" image_crop = cv2.resize(src=image_crop, dsize=(input_width, input_height))\n",
"\n",
" input_image_original = np.expand_dims(\n",
" image_crop.transpose(2, 0, 1), axis=0\n",
" )\n",
" input_image_original = np.expand_dims(image_crop.transpose(2, 0, 1), axis=0)\n",
"\n",
" input_image_bicubic = np.expand_dims(\n",
" bicubic_image.transpose(2, 0, 1), axis=0\n",
" )\n",
" input_image_bicubic = np.expand_dims(bicubic_image.transpose(2, 0, 1), axis=0)\n",
"\n",
" # Do inference\n",
" inference_start_time = time.perf_counter()\n",
@@ -758,19 +727,11 @@
" new_y = y * adjusted_upsample_factor\n",
" new_x = x * adjusted_upsample_factor\n",
" full_superresolution_image[\n",
" new_y : new_y\n",
" + target_height\n",
" - CROPLINES * adjusted_upsample_factor * 2,\n",
" new_x : new_x\n",
" + target_width\n",
" - CROPLINES * adjusted_upsample_factor * 2,\n",
" new_y : new_y + target_height - CROPLINES * adjusted_upsample_factor * 2,\n",
" new_x : new_x + target_width - CROPLINES * adjusted_upsample_factor * 2,\n",
" ] = result_image[\n",
" CROPLINES\n",
" * adjusted_upsample_factor : -CROPLINES\n",
" * adjusted_upsample_factor,\n",
" CROPLINES\n",
" * adjusted_upsample_factor : -CROPLINES\n",
" * adjusted_upsample_factor,\n",
" CROPLINES * adjusted_upsample_factor : -CROPLINES * adjusted_upsample_factor,\n",
" CROPLINES * adjusted_upsample_factor : -CROPLINES * adjusted_upsample_factor,\n",
" :,\n",
" ]\n",
"\n",
@@ -816,7 +777,9 @@
},
"outputs": [],
"source": [
"full_superresolution_image_path = Path(f\"{OUTPUT_PATH.stem}/full_superres_{adjusted_upsample_factor}x.jpg\")\n",
"full_superresolution_image_path = Path(\n",
" f\"{OUTPUT_PATH.stem}/full_superres_{adjusted_upsample_factor}x.jpg\"\n",
")\n",
"full_bicubic_image_path = Path(f\"{OUTPUT_PATH.stem}/full_bicubic_{adjusted_upsample_factor}x.jpg\")\n",
"\n",
"cv2.imwrite(str(full_superresolution_image_path), full_superresolution_image)\n",