diff --git a/app.py b/app.py new file mode 100644 index 0000000000000000000000000000000000000000..128d516a0c91a6715b8bf8d03437c413e89b5d85 --- /dev/null +++ b/app.py @@ -0,0 +1,373 @@ +import os +os.system("cd open_flamingo && pip install .") +import numpy as np +import torch +from PIL import Image + + +import string +import cv2 + + +import gradio as gr +import torch +from PIL import Image +from huggingface_hub import hf_hub_download, login + +from open_flamingo.src.factory import create_model_and_transforms +flamingo, image_processor, tokenizer, vis_embed_size = create_model_and_transforms( + "ViT-L-14", + "datacomp_xl_s13b_b90k", + "EleutherAI/pythia-1.4b", + "EleutherAI/pythia-1.4b", + add_visual_grounding=True, + location_token_num=1000, + add_visual_token = True, + use_format_v2 = True, + ) + +checkpoint_path = hf_hub_download("chendl/compositional_test", "pythiaS.pt") +checkpoint = torch.load(checkpoint_path, map_location="cpu") +model_state_dict = {} +for key in checkpoint.keys(): + model_state_dict[key.replace("module.", "")] = checkpoint[key] +if "vision_encoder.logit_scale"in model_state_dict: + # previous checkpoint has some unnecessary weights + del model_state_dict["vision_encoder.logit_scale"] + del model_state_dict["vision_encoder.visual.proj"] + del model_state_dict["vision_encoder.visual.ln_post.weight"] + del model_state_dict["vision_encoder.visual.ln_post.bias"] +flamingo.load_state_dict(model_state_dict, strict=True) + +def get_outputs( + model, + batch_images, + attention_mask, + max_generation_length, + min_generation_length, + num_beams, + length_penalty, + input_ids, + image_start_index_list=None, + image_nums=None, + bad_words_ids=None, +): + # and torch.cuda.amp.autocast(dtype=torch.float16) + with torch.inference_mode(): + outputs = model.generate( + batch_images, + input_ids, + attention_mask=attention_mask, + max_new_tokens=max_generation_length, + min_length=min_generation_length, + num_beams=num_beams, + length_penalty=length_penalty, + image_start_index_list=image_start_index_list, + image_nums=image_nums, + bad_words_ids=bad_words_ids, + ) + + return outputs + + +def evaluate_refcoco( + model, + tokenizer, + image_processor, + batch_size, + tsvfile, + max_generation_length=20, + num_beams=3, + length_penalty=-2.0, + device=-1, + vis_embed_size=None, + rank=0, + world_size=1, + id=0, +): + model.eval().cuda() + loc_token_ids = [] + for i in range(1000): + loc_token_ids.append(int(tokenizer(f"", add_special_tokens=False)["input_ids"][-1])) + media_token_id = tokenizer("<|#image#|>", add_special_tokens=False)["input_ids"][-1] + endofmedia_token_id = tokenizer("<|#endofimage#|>", add_special_tokens=False)["input_ids"][-1] + pad_token_id = tokenizer(tokenizer.pad_token, add_special_tokens=False)["input_ids"][-1] + bos_token_id = tokenizer(tokenizer.bos_token, add_special_tokens=False)["input_ids"][-1] + prebox_token_id = tokenizer("<|#prebox#|>", add_special_tokens=False)["input_ids"][-1] + # all_ids = set(range(model.lang_encoder.lm_head.out_features)) + # bad_words_ids = list(all_ids - set(loc_token_ids)) + # bad_words_ids = [[b] for b in bad_words_ids] + # min_loc_token_id = min(loc_token_ids) + # max_loc_token_id = max(loc_token_ids) + total = 0 + correct = 0 + ious = [] + if "refcocog" in tsvfile: + dataset_name = "refcocog" + elif "refcocoplus" in tsvfile: + dataset_name = "refcocoplus" + else: + dataset_name = "refcoco" + with open(tsvfile, "r") as f: + lines = f.readlines() + pbar = tqdm(lines, disable=(rank != 0)) + for ii, line in enumerate(pbar): + if 
ii % world_size != rank: + continue + total += 1 + line = line.rstrip() + uniq_id, image_id, text, region_coord, image = line.split("\t") + + image = Image.open(BytesIO(base64.urlsafe_b64decode(image))).convert("RGB") + # image = Image.open("/gpfs/u/home/LMCG/LMCGljnn/scratch/code/multimodal2/yolo.png").convert("RGB") + # image = Image.open("/gpfs/u/home/LMCG/LMCGljnn/scratch/code/multimodal/temp/cat.png").convert("RGB") + # image = Image.open("/gpfs/u/home/LMCG/LMCGljnn/scratch/code/multimodal/temp/262148000.png") + + gt_box = np.array(list(map(float, region_coord.split(",")))) + width = image.width + height = image.height + image = image.resize((224, 224)) + gt_box = gt_box / np.array([width, height, width, height]) * 224 + batch_images = image_processor(image).unsqueeze(0).unsqueeze(1).unsqueeze(0) + prompt = [ + f"{tokenizer.bos_token}<|#image#|>{tokenizer.pad_token * vis_embed_size}<|#endofimage#|><|#object#|>{text.rstrip('.').strip()}<|#endofobject#|><|#visual#|>"] + # prompt = [f"<|#image#|>{tokenizer.pad_token*vis_embed_size}<|#endofimage#|>the cat<|#visual#|>"] + # prompt = [f"<|#image#|>{tokenizer.pad_token*vis_embed_size}<|#endofimage#|>"] + # prompt = [f"<|#image#|>{tokenizer.pad_token*vis_embed_size}<|#endofimage#|>a man<|#visual#|> is doing a trick on a skateboard<|#visual#|>"] + + encodings = tokenizer( + prompt, + padding="longest", + truncation=True, + return_tensors="pt", + max_length=2000, + ) + input_ids = encodings["input_ids"] + attention_mask = encodings["attention_mask"] + # attention_mask[input_ids == prebox_token_id] = 0 + image_start_index_list = ((input_ids == media_token_id).nonzero(as_tuple=True)[-1] + 1).tolist() + image_start_index_list = [[x] for x in image_start_index_list] + image_nums = [1] * len(input_ids) + vision_x = batch_images.cuda() + lang_x = input_ids.cuda() + attention_mask = attention_mask.cuda() + + model.debug_id = 0 + with torch.inference_mode() and torch.cuda.amp.autocast(dtype=torch.float16): + outputs = model( + vision_x=vision_x, + lang_x=lang_x, + attention_mask=attention_mask, + labels=None, + image_nums=image_nums, + image_start_index_list=image_start_index_list, + added_bbox_list=None, + add_box=False, + ) + boxes = outputs["boxes"] + scores = outputs["scores"] + if len(scores) > 0: + box = boxes[scores.argmax()] + iou = get_iou(box, gt_box) + else: + iou = 0.0 + # tqdm.write(f"output: {tokenizer.batch_decode(outputs)}") + tqdm.write(f"no output for: {uniq_id}, {image_id}, {text}") + if iou >= 0.5: + correct += 1 + pbar.set_description(f"iou: {iou:.2f} score: {correct / total:.4f}") + # open_cv_image = np.array(image) + # # Convert RGB to BGR + # open_cv_image = open_cv_image[:, :, ::-1].copy() + # for box, score in zip(boxes, scores): + # open_cv_image = cv2.rectangle(open_cv_image, box[:2].astype(int), box[2:].astype(int), (255, 0, 0), 2) + # cv2.imwrite("output.jpg", open_cv_image) + # print(boxes) + # print(scores) + # exit() + + +def generate( + idx, + image, + text, + vis_embed_size=256, + rank=0, + world_size=1, +): + if image is None: + raise gr.Error("Please upload an image.") + flamingo.eval() + loc_token_ids = [] + for i in range(1000): + loc_token_ids.append(int(tokenizer(f"", add_special_tokens=False)["input_ids"][-1])) + media_token_id = tokenizer("<|#image#|>", add_special_tokens=False)["input_ids"][-1] + endofmedia_token_id = tokenizer("<|#endofimage#|>", add_special_tokens=False)["input_ids"][-1] + pad_token_id = tokenizer(tokenizer.pad_token, add_special_tokens=False)["input_ids"][-1] + bos_token_id = 
tokenizer(tokenizer.bos_token, add_special_tokens=False)["input_ids"][-1] + prebox_token_id = tokenizer("<|#prebox#|>", add_special_tokens=False)["input_ids"][-1] + + image_ori = image + image = image.convert("RGB") + width = image.width + height = image.height + image = image.resize((224, 224)) + batch_images = image_processor(image).unsqueeze(0).unsqueeze(1).unsqueeze(0) + if idx == 1: + prompt = [f"{tokenizer.bos_token}<|#image#|>{tokenizer.pad_token * vis_embed_size}<|#endofimage#|><|#object#|>{text.rstrip('.').strip()}<|#endofobject#|><|#visual#|>"] + bad_words_ids = None + max_generation_length = 5 + else: + prompt = [f"<|#image#|>{tokenizer.pad_token * vis_embed_size}<|#endofimage#|>{text.rstrip('.')}"] + bad_words_ids = loc_word_ids + max_generation_length = 30 + encodings = tokenizer( + prompt, + padding="longest", + truncation=True, + return_tensors="pt", + max_length=2000, + ) + input_ids = encodings["input_ids"] + attention_mask = encodings["attention_mask"] + image_start_index_list = ((input_ids == media_token_id).nonzero(as_tuple=True)[-1] + 1).tolist() + image_start_index_list = [[x] for x in image_start_index_list] + image_nums = [1] * len(input_ids) + outputs = get_outputs( + model=flamingo, + batch_images=batch_images, + attention_mask=attention_mask, + max_generation_length=max_generation_length, + min_generation_length=4, + num_beams=1, + length_penalty=1.0, + input_ids=input_ids, + bad_words_ids=bad_words_ids, + image_start_index_list=image_start_index_list, + image_nums=image_nums, + ) + boxes = outputs["boxes"] + scores = outputs["scores"] + if len(scores) > 0: + box = boxes[scores.argmax()] + iou = get_iou(box, gt_box) + else: + iou = 0.0 + # tqdm.write(f"output: {tokenizer.batch_decode(outputs)}") + tqdm.write(f"no output for: {uniq_id}, {image_id}, {text}") + if iou >= 0.5: + correct += 1 + + + gen_text = tokenizer.batch_decode(outputs) + if idx == 1: + return f"Output:{gen_text}", out_image + elif idx == 2: + return (f"Question: {text.strip()} Answer: {gen_text}") + else: + return (f"Output:{gen_text}") + + +with gr.Blocks() as demo: + gr.Markdown( + """ + 🍜 Object Centric Pretraining Demo + In this demo we showcase the in-context learning and grounding capabilities of the Object-Centric Pretrained model, a large multimodal model. Note that we add two additional demonstrations to the ones presented to improve the demo experience. + The model is trained on an interleaved mixture of text, images and bounding box and is able to generate text conditioned on sequences of images/text. 
+ """ + ) + + with gr.Accordion("See terms and conditions"): + gr.Markdown( + """**Please read the following information carefully before proceeding.**This demo does NOT store any personal information on its users, and it does NOT store user queries.""") + + with gr.Tab("📷 Image Captioning"): + with gr.Row(): + + + query_image = gr.Image(type="pil") + with gr.Row(): + chat_input = gr.Textbox(lines=1, label="Chat Input") + text_output = gr.Textbox(value="Output:", label="Model output") + + run_btn = gr.Button("Run model") + + + + def on_click_fn(img,text): return generate(0, img, text) + + run_btn.click(on_click_fn, inputs=[query_image,chat_input], outputs=[text_output]) + + with gr.Tab("🦓 Grounding"): + with gr.Row(): + with gr.Column(scale=1): + query_image = gr.Image(type="pil") + with gr.Column(scale=1): + out_image = gr.Image(type="pil") + with gr.Row(): + chat_input = gr.Textbox(lines=1, label="Chat Input") + text_output = gr.Textbox(value="Output:", label="Model output") + + run_btn = gr.Button("Run model") + + + def on_click_fn(img, text): return generate(1, img, text) + + + run_btn.click(on_click_fn, inputs=[query_image, chat_input], outputs=[text_output, out_image]) + + with gr.Tab("🔢 Counting objects"): + with gr.Row(): + query_image = gr.Image(type="pil") + with gr.Row(): + chat_input = gr.Textbox(lines=1, label="Chat Input") + text_output = gr.Textbox(value="Output:", label="Model output") + + run_btn = gr.Button("Run model") + + + def on_click_fn(img,text): return generate(0, img, text) + + + run_btn.click(on_click_fn, inputs=[query_image, chat_input], outputs=[text_output]) + + with gr.Tab("🕵️ Visual Question Answering"): + with gr.Row(): + query_image = gr.Image(type="pil") + with gr.Row(): + question = gr.Textbox(lines=1, label="Question") + text_output = gr.Textbox(value="Output:", label="Model output") + + run_btn = gr.Button("Run model") + + + def on_click_fn(img, txt): return generate(2, img, txt) + + + run_btn.click( + on_click_fn, inputs=[query_image, question], outputs=[text_output] + ) + + with gr.Tab("🌎 Custom"): + gr.Markdown( + """### Customize the demonstration by uploading your own images and text samples. + ### **Note: Any text prompt you use will be prepended with an 'Output:', so you don't need to include it in your prompt.**""" + ) + with gr.Row(): + query_image = gr.Image(type="pil") + with gr.Row(): + question = gr.Textbox(lines=1, label="Question") + text_output = gr.Textbox(value="Output:", label="Model output") + + run_btn = gr.Button("Run model") + + + def on_click_fn(img, txt): return generate(2, img, txt) + + + run_btn.click( + on_click_fn, inputs=[query_image, question], outputs=[text_output] + ) + +demo.queue(concurrency_count=1) +demo.launch() diff --git a/multimodal/HISTORY.md b/multimodal/HISTORY.md new file mode 100644 index 0000000000000000000000000000000000000000..556720509176152deea697bddb9070a138143888 --- /dev/null +++ b/multimodal/HISTORY.md @@ -0,0 +1,3 @@ +## 1.0.0 + +* it works \ No newline at end of file diff --git a/multimodal/LICENSE b/multimodal/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..206be3ebbf3a41276af664447106615b0a954814 --- /dev/null +++ b/multimodal/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2023 Anas Awadalla, Irena Gao, Joshua Gardner, Jack Hessel, Yusuf Hanafy, Wanrong Zhu, Kalyani Marathe, Yonatan Bitton, Samir Gadre, Jenia Jitsev, Simon Kornblith, Pang Wei Koh, Gabriel Ilharco, Mitchell Wortsman, Ludwig Schmidt. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/multimodal/MODEL_CARD.md b/multimodal/MODEL_CARD.md new file mode 100644 index 0000000000000000000000000000000000000000..b1264ae72debb5cc083e995ceca73e8534422302 --- /dev/null +++ b/multimodal/MODEL_CARD.md @@ -0,0 +1,44 @@ +--- +language: en +datasets: +- laion2b +--- + +# OpenFlamingo-9B + +[Blog post]() | [Code](https://github.com/mlfoundations/open_flamingo) | [Demo](https://7164d2142d11.ngrok.app) + +OpenFlamingo is an open source implementation of DeepMind's [Flamingo](https://www.deepmind.com/blog/tackling-multiple-tasks-with-a-single-visual-language-model) models. +OpenFlamingo-9B is built off of [CLIP ViT-L/14](https://huggingface.co/openai/clip-vit-large-patch14) and [LLaMA-7B](https://ai.facebook.com/blog/large-language-model-llama-meta-ai/). + + +## Model Details +We freeze the pretrained vision encoder and language model, and then we train connecting Perceiver modules and cross-attention layers, following the original Flamingo paper. + +Our training data is a mixture of [LAION 2B](https://huggingface.co/datasets/laion/laion2B-en) and a large interleaved image-text dataset called Multimodal C4, which will be released soon. + +The current model is an early checkpoint of an ongoing effort. This checkpoint has seen 5 million interleaved image-text examples from Multimodal C4 and 10 million samples from LAION 2B. + +## Uses +OpenFlamingo-9B is intended to be used **for academic research purposes only.** Commercial use is prohibited, in line with LLaMA's non-commercial license. + +### Bias, Risks, and Limitations +This model may generate inaccurate or offensive outputs, reflecting biases in its training data and pretrained priors. + +In an effort to mitigate current potential biases and harms, we have deployed a text content filter on model outputs in the OpenFlamingo demo. We continue to red-team the model to understand and improve its safety. + +## Evaluation +We've evaluated this checkpoint on the validation sets for two vision-language tasks: COCO captioning and VQAv2. Results are displayed below. 
+ +**COCO (CIDEr)** + +|0-shot|4-shot|8-shot|16-shot|32-shot| +|--|--|--|--|--| +|65.52|74.28|79.26|81.84|84.52| + + +**VQAv2 (VQA accuracy)** + +|0-shot|4-shot|8-shot|16-shot|32-shot| +|---|---|---|---|---| +|43.55|44.05|47.5|48.87|50.34| diff --git a/multimodal/Makefile b/multimodal/Makefile new file mode 100644 index 0000000000000000000000000000000000000000..d5cc3840bce9ce0e5aebc435f63ffa5b534d4a8f --- /dev/null +++ b/multimodal/Makefile @@ -0,0 +1,19 @@ +install: ## [Local development] Upgrade pip, install requirements, install package. + python -m pip install -U pip + python -m pip install -e . + +install-dev: ## [Local development] Install test requirements + python -m pip install -r requirements-test.txt + +lint: ## [Local development] Run mypy, pylint and black + python -m mypy open_flamingo + python -m pylint open_flamingo + python -m black --check -l 120 open_flamingo + +black: ## [Local development] Auto-format python code using black + python -m black -l 120 . + +.PHONY: help + +help: # Run `make help` to get help on the make commands + @grep -E '^[0-9a-zA-Z_-]+:.*?## .*$$' $(MAKEFILE_LIST) | sort | awk 'BEGIN {FS = ":.*?## "}; {printf "\033[36m%-30s\033[0m %s\n", $$1, $$2}' \ No newline at end of file diff --git a/multimodal/README.md b/multimodal/README.md new file mode 100644 index 0000000000000000000000000000000000000000..e789675fd3dc0168eeb83756c6f24763451eeac7 --- /dev/null +++ b/multimodal/README.md @@ -0,0 +1,233 @@ +# 🦩 OpenFlamingo + +[![PyPI version](https://badge.fury.io/py/open_flamingo.svg)](https://badge.fury.io/py/open_flamingo) + +[Blog post](https://laion.ai/blog/open-flamingo/) | Paper (coming soon) + +Welcome to our open source version of DeepMind's [Flamingo](https://www.deepmind.com/blog/tackling-multiple-tasks-with-a-single-visual-language-model) model! In this repository, we provide a PyTorch implementation for training and evaluating OpenFlamingo models. We also provide an initial [OpenFlamingo 9B model](https://huggingface.co/openflamingo/OpenFlamingo-9B) trained on a new Multimodal C4 dataset (coming soon). Please refer to our blog post for more details. + +This repo is still under development, and we hope to release better performing and larger OpenFlamingo models soon. If you have any questions, please feel free to open an issue. We also welcome contributions! + +# Table of Contents +- [Installation](#installation) +- [Approach](#approach) + * [Model architecture](#model-architecture) +- [Usage](#usage) + * [Initializing an OpenFlamingo model](#initializing-an-openflamingo-model) + * [Generating text](#generating-text) +- [Training](#training) + * [Dataset](#dataset) +- [Evaluation](#evaluation) +- [Future plans](#future-plans) +- [Team](#team) +- [Acknowledgments](#acknowledgments) +- [Citing](#citing) + +# Installation + +To install the package in an existing environment, run +``` +pip install open-flamingo +``` + +or to create a conda environment for running OpenFlamingo, run +``` +conda env create -f environment.yml +``` + +# Usage +We provide an initial [OpenFlamingo 9B model](https://huggingface.co/openflamingo/OpenFlamingo-9B) using a CLIP ViT-Large vision encoder and a LLaMA-7B language model. In general, we support any [CLIP vision encoder](https://huggingface.co/models?search=clip). 
For the language model, we support [LLaMA](https://huggingface.co/models?search=llama), [OPT](https://huggingface.co/models?search=opt), [GPT-Neo](https://huggingface.co/models?search=gpt-neo), [GPT-J](https://huggingface.co/models?search=gptj), and [Pythia](https://huggingface.co/models?search=pythia) models. + +#### NOTE: To use LLaMA models, you will need to install the latest version of transformers via +``` +pip install git+https://github.com/huggingface/transformers +``` +Use this [script](https://github.com/huggingface/transformers/blob/main/src/transformers/models/llama/convert_llama_weights_to_hf.py) for converting LLaMA weights to HuggingFace format. + +## Initializing an OpenFlamingo model +``` python +from open_flamingo import create_model_and_transforms + +model, image_processor, tokenizer = create_model_and_transforms( + clip_vision_encoder_path="ViT-L-14", + clip_vision_encoder_pretrained="openai", + lang_encoder_path="", + tokenizer_path="", + cross_attn_every_n_layers=4 +) + +# grab model checkpoint from huggingface hub +from huggingface_hub import hf_hub_download +import torch + +checkpoint_path = hf_hub_download("openflamingo/OpenFlamingo-9B", "checkpoint.pt") +model.load_state_dict(torch.load(checkpoint_path), strict=False) +``` + +## Generating text +Here is an example of generating text conditioned on interleaved images/text, in this case we will do few-shot image captioning. + +``` python +from PIL import Image +import requests + +""" +Step 1: Load images +""" +demo_image_one = Image.open( + requests.get( + "http://images.cocodataset.org/val2017/000000039769.jpg", stream=True + ).raw +) + +demo_image_two = Image.open( + requests.get( + "http://images.cocodataset.org/test-stuff2017/000000028137.jpg", + stream=True + ).raw +) + +query_image = Image.open( + requests.get( + "http://images.cocodataset.org/test-stuff2017/000000028352.jpg", + stream=True + ).raw +) + + +""" +Step 2: Preprocessing images +Details: For OpenFlamingo, we expect the image to be a torch tensor of shape + batch_size x num_media x num_frames x channels x height x width. + In this case batch_size = 1, num_media = 3, num_frames = 1 + (this will always be one expect for video which we don't support yet), + channels = 3, height = 224, width = 224. +""" +vision_x = [image_processor(demo_image_one).unsqueeze(0), image_processor(demo_image_two).unsqueeze(0), image_processor(query_image).unsqueeze(0)] +vision_x = torch.cat(vision_x, dim=0) +vision_x = vision_x.unsqueeze(1).unsqueeze(0) + +""" +Step 3: Preprocessing text +Details: In the text we expect an <|#image#|> special token to indicate where an image is. + We also expect an <|endofchunk|> special token to indicate the end of the text + portion associated with an image. +""" +tokenizer.padding_side = "left" # For generation padding tokens should be on the left +lang_x = tokenizer( + ["<|#image#|>An image of two cats.<|endofchunk|><|#image#|>An image of a bathroom sink.<|endofchunk|><|#image#|>An image of"], + return_tensors="pt", +) + + +""" +Step 4: Generate text +""" +generated_text = model.generate( + vision_x=vision_x, + lang_x=lang_x["input_ids"], + attention_mask=lang_x["attention_mask"], + max_new_tokens=20, + num_beams=3, +) + +print("Generated text: ", tokenizer.decode(generated_text[0])) +``` + +# Approach +OpenFlamingo is a multimodal language model that can be used for a variety of tasks. It is trained on a large multimodal dataset (e.g. Multimodal C4) and can be used to generate text conditioned on interleaved images/text. 
For example, OpenFlamingo can be used to generate a caption for an image, or to generate a question given an image and a text passage. The benefit of this approach is that we are able to rapidly adapt to new tasks using in-context training. + +## Model architecture +OpenFlamingo seeks to fuse a pretrained vision encoder and a language model using cross attention layers. The model architecture is shown below. + +![OpenFlamingo architecture](docs/flamingo.png) +Credit: [Flamingo](https://www.deepmind.com/blog/tackling-multiple-tasks-with-a-single-visual-language-model) + +# Training +To train a model, modify the following example command, which uses OPT 1.3B as an example LM: +``` +torchrun --nnodes=1 --nproc_per_node=4 train.py \ +--run_name flamingo3B \ +--lm_path facebook/opt-1.3b \ +--tokenizer_path facebook/opt-1.3b \ +--dataset_resampled \ +--laion_shards "/path/to/shards/shard-{0000..0999}.tar" \ +--mmc4_shards "/path/to/shards/shard-{0000..0999}.tar" \ +--batch_size_mmc4 4 \ +--batch_size_laion 8 \ +--train_num_samples_mmc4 125000 \ +--train_num_samples_laion 250000 \ +--loss_multiplier_laion 0.2 \ +--workers=6 \ +--num_epochs 250 \ +--lr_scheduler constant \ +--warmup_steps 5000 \ +--use_media_placement_augmentation \ +--mmc4_textsim_threshold 30 +``` + +## Dataset +We expect all our training datasets to be [WebDataset](https://github.com/webdataset/webdataset) shards. +We train our models on the [LAION 2B](https://huggingface.co/datasets/laion/laion2B-en) and Multimodal C4 (coming soon) datasets. By default the LAION 2B dataset is in WebDataset format if it is downloaded using the [img2dataset tool](https://github.com/rom1504/img2dataset) and Multimodal C4 comes packaged in the WebDataset format. + + +# Evaluation +We currently support running evaluations on [COCO](https://cocodataset.org/#home), [VQAv2](https://visualqa.org/index.html), [OKVQA](https://okvqa.allenai.org), [Flickr30k](https://www.kaggle.com/datasets/hsankesara/flickr-image-dataset), and [ImageNet](https://image-net.org/index.php). Note that currently these evaluations are ran in validation mode (as specified in the Flamingo paper). We will be adding support for running evaluations in test mode in the future. 
+ +Before evaluating the model, you will need to install the coco evaluation package by running the following command: +``` +pip install pycocoevalcap +``` + +To run evaluations on OKVQA you will need to run the following command: +``` +import nltk +nltk.download('wordnet') +``` + +To evaluate the model, run the script at `open_flamingo/scripts/run_eval.sh` + +# Future plans +- [ ] Add support for video input +- [ ] Release better performing and larger OpenFlamingo models +- [ ] Expand our evaluation suite +- [ ] Add support for FSDP training + +# Team + +OpenFlamingo is developed by: + +[Anas Awadalla](https://anas-awadalla.streamlit.app/), [Irena Gao](https://i-gao.github.io/), [Joshua Gardner](https://homes.cs.washington.edu/~jpgard/), [Jack Hessel](https://jmhessel.com/), [Yusuf Hanafy](https://www.linkedin.com/in/yusufhanafy/), [Wanrong Zhu](https://wanrong-zhu.com/), [Kalyani Marathe](https://sites.google.com/uw.edu/kalyanimarathe/home?authuser=0), [Yonatan Bitton](https://yonatanbitton.github.io/), [Samir Gadre](https://sagadre.github.io/), [Jenia Jitsev](https://scholar.google.de/citations?user=p1FuAMkAAAAJ&hl=en), [Simon Kornblith](https://simonster.com/), [Pang Wei Koh](https://koh.pw/), [Gabriel Ilharco](https://gabrielilharco.com/), [Mitchell Wortsman](https://mitchellnw.github.io/), [Ludwig Schmidt](https://people.csail.mit.edu/ludwigs/). + +The team is primarily from the University of Washington, Stanford, AI2, UCSB, and Google. + +# Acknowledgments +This code is based on Lucidrains' [flamingo implementation](https://github.com/lucidrains/flamingo-pytorch) and David Hansmair's [flamingo-mini repo](https://github.com/dhansmair/flamingo-mini). Thank you for making your code public! We also thank the [OpenCLIP](https://github.com/mlfoundations/open_clip) team as we use their data loading code and take inspiration from their library design. + +We would also like to thank [Jean-Baptiste Alayrac](https://www.jbalayrac.com) and [Antoine Miech](https://antoine77340.github.io) for their advice, [Rohan Taori](https://www.rohantaori.com/), [Nicholas Schiefer](https://nicholasschiefer.com/), [Deep Ganguli](https://hai.stanford.edu/people/deep-ganguli), [Thomas Liao](https://thomasliao.com/), [Tatsunori Hashimoto](https://thashim.github.io/), and [Nicholas Carlini](https://nicholas.carlini.com/) for their help with assessing the safety risks of our release, and to [Stability AI](https://stability.ai) for providing us with compute resources to train these models. 
+ +# Citing +If you found this repository useful, please consider citing: + +``` +@software{anas_awadalla_2023_7733589, + author = {Awadalla, Anas and Gao, Irena and Gardner, Joshua and Hessel, Jack and Hanafy, Yusuf and Zhu, Wanrong and Marathe, Kalyani and Bitton, Yonatan and Gadre, Samir and Jitsev, Jenia and Kornblith, Simon and Koh, Pang Wei and Ilharco, Gabriel and Wortsman, Mitchell and Schmidt, Ludwig}, + title = {OpenFlamingo}, + month = mar, + year = 2023, + publisher = {Zenodo}, + version = {v0.1.1}, + doi = {10.5281/zenodo.7733589}, + url = {https://doi.org/10.5281/zenodo.7733589} +} +``` + +``` +@article{Alayrac2022FlamingoAV, + title={Flamingo: a Visual Language Model for Few-Shot Learning}, + author={Jean-Baptiste Alayrac and Jeff Donahue and Pauline Luc and Antoine Miech and Iain Barr and Yana Hasson and Karel Lenc and Arthur Mensch and Katie Millican and Malcolm Reynolds and Roman Ring and Eliza Rutherford and Serkan Cabi and Tengda Han and Zhitao Gong and Sina Samangooei and Marianne Monteiro and Jacob Menick and Sebastian Borgeaud and Andy Brock and Aida Nematzadeh and Sahand Sharifzadeh and Mikolaj Binkowski and Ricardo Barreira and Oriol Vinyals and Andrew Zisserman and Karen Simonyan}, + journal={ArXiv}, + year={2022}, + volume={abs/2204.14198} +} +``` diff --git a/multimodal/YOLOX/.gitignore b/multimodal/YOLOX/.gitignore new file mode 100644 index 0000000000000000000000000000000000000000..9842565a17ef40856a03a8cf7a4c7b672520a868 --- /dev/null +++ b/multimodal/YOLOX/.gitignore @@ -0,0 +1,228 @@ +### Linux ### +*~ + +# user experiments directory +YOLOX_outputs/ +datasets/ +# do not ignore datasets under yolox/data +!*yolox/data/datasets/ + +# temporary files which can be created if a process still has a handle open of a deleted file +.fuse_hidden* + +# KDE directory preferences +.directory + +# Linux trash folder which might appear on any partition or disk +.Trash-* + +# .nfs files are created when an open file is removed but is still being accessed +.nfs* + +### PyCharm ### +# User-specific stuff +.idea + +# CMake +cmake-build-*/ + +# Mongo Explorer plugin +.idea/**/mongoSettings.xml + +# File-based project format +*.iws + +# IntelliJ +out/ + +# mpeltonen/sbt-idea plugin +.idea_modules/ + +# JIRA plugin +atlassian-ide-plugin.xml + +# Cursive Clojure plugin +.idea/replstate.xml + +# Crashlytics plugin (for Android Studio and IntelliJ) +com_crashlytics_export_strings.xml +crashlytics.properties +crashlytics-build.properties +fabric.properties + +# Editor-based Rest Client +.idea/httpRequests + +# Android studio 3.1+ serialized cache file +.idea/caches/build_file_checksums.ser + +# JetBrains templates +**___jb_tmp___ + +### Python ### +# Byte-compiled / optimized / DLL files +__pycache__/ +*.py[cod] +*$py.class + +# C extensions +*.so + +# Distribution / packaging +.Python +build/ +develop-eggs/ +dist/ +downloads/ +eggs/ +.eggs/ +lib/ +lib64/ +parts/ +sdist/ +var/ +wheels/ +pip-wheel-metadata/ +share/python-wheels/ +*.egg-info/ +.installed.cfg +*.egg +MANIFEST + +# PyInstaller +# Usually these files are written by a python script from a template +# before PyInstaller builds the exe, so as to inject date/other infos into it. 
+*.manifest +*.spec + +# Installer logs +pip-log.txt +pip-delete-this-directory.txt + +# Unit test / coverage reports +htmlcov/ +.tox/ +.nox/ +.coverage +.coverage.* +.cache +nosetests.xml +coverage.xml +*.cover +.hypothesis/ +.pytest_cache/ + +# Translations +*.mo +*.pot + +# Django stuff: +*.log +local_settings.py +db.sqlite3 + +# Flask stuff: +instance/ +.webassets-cache + +# Scrapy stuff: +.scrapy + +# Sphinx documentation +docs/_build/ +docs/build/ + +# PyBuilder +target/ + +# Jupyter Notebook +.ipynb_checkpoints + +# IPython +profile_default/ +ipython_config.py + +# pyenv +.python-version + +# pipenv +# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. +# However, in case of collaboration, if having platform-specific dependencies or dependencies +# having no cross-platform support, pipenv may install dependencies that don’t work, or not +# install all needed dependencies. +#Pipfile.lock + +# celery beat schedule file +celerybeat-schedule + +# SageMath parsed files +*.sage.py + +# Environments +.env +.venv +env/ +venv/ +ENV/ +env.bak/ +venv.bak/ + +# Spyder project settings +.spyderproject +.spyproject + +# Rope project settings +.ropeproject + +# mkdocs documentation +/site + +# mypy +.mypy_cache/ +.dmypy.json +dmypy.json + +# Pyre type checker +.pyre/ + +### Vim ### +# Swap +[._]*.s[a-v][a-z] +[._]*.sw[a-p] +[._]s[a-rt-v][a-z] +[._]ss[a-gi-z] +[._]sw[a-p] + +# Session +Session.vim + +# Temporary +.netrwhist +# Auto-generated tag files +tags +# Persistent undo +[._]*.un~ + +# output +docs/api +.code-workspace.code-workspace +*.pkl +*.npy +*.pth +*.onnx +*.engine +events.out.tfevents* + +# vscode +*.code-workspace +.vscode + +# vim +.vim + +# OS generated files +.DS_Store +.DS_Store? +.Trashes +ehthumbs.db +Thumbs.db diff --git a/multimodal/YOLOX/.pre-commit-config.yaml b/multimodal/YOLOX/.pre-commit-config.yaml new file mode 100644 index 0000000000000000000000000000000000000000..5120983f908eae6415390f17c58194c671e59899 --- /dev/null +++ b/multimodal/YOLOX/.pre-commit-config.yaml @@ -0,0 +1,43 @@ +repos: + - repo: https://github.com/pycqa/flake8 + rev: 3.8.3 + hooks: + - id: flake8 + - repo: https://github.com/pre-commit/pre-commit-hooks + rev: v3.1.0 + hooks: + - id: check-added-large-files + - id: check-docstring-first + - id: check-executables-have-shebangs + - id: check-json + - id: check-yaml + args: ["--unsafe"] + - id: debug-statements + - id: end-of-file-fixer + - id: requirements-txt-fixer + - id: trailing-whitespace + - repo: https://github.com/jorisroovers/gitlint + rev: v0.15.1 + hooks: + - id: gitlint + - repo: https://github.com/pycqa/isort + rev: 4.3.21 + hooks: + - id: isort + + - repo: https://github.com/PyCQA/autoflake + rev: v1.4 + hooks: + - id: autoflake + name: Remove unused variables and imports + entry: autoflake + language: python + args: + [ + "--in-place", + "--remove-all-unused-imports", + "--remove-unused-variables", + "--expand-star-imports", + "--ignore-init-module-imports", + ] + files: \.py$ diff --git a/multimodal/YOLOX/.readthedocs.yaml b/multimodal/YOLOX/.readthedocs.yaml new file mode 100644 index 0000000000000000000000000000000000000000..7e77c229649f01859f5f2945ebd002e52d1f1835 --- /dev/null +++ b/multimodal/YOLOX/.readthedocs.yaml @@ -0,0 +1,21 @@ +# .readthedocs.yaml +# Read the Docs configuration file +# See https://docs.readthedocs.io/en/stable/config-file/v2.html for details + +# Required +version: 2 + +# Build documentation in the docs/ directory with Sphinx +sphinx: + configuration: docs/conf.py + 
+# Optionally build your docs in additional formats such as PDF +formats: + - pdf + +# Optionally set the version of Python and requirements required to build your docs +python: + version: "3.7" + install: + - requirements: docs/requirements-doc.txt + - requirements: requirements.txt diff --git a/multimodal/YOLOX/LICENSE b/multimodal/YOLOX/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..1d4dc763d3d33d3722c6d86054c01b8a459bb2ea --- /dev/null +++ b/multimodal/YOLOX/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright (c) 2021-2022 Megvii Inc. All rights reserved. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/multimodal/YOLOX/MANIFEST.in b/multimodal/YOLOX/MANIFEST.in new file mode 100644 index 0000000000000000000000000000000000000000..aea4f44a71f15b2faf0b70f0583ffdad7e557f3f --- /dev/null +++ b/multimodal/YOLOX/MANIFEST.in @@ -0,0 +1,2 @@ +include requirements.txt +recursive-include yolox *.cpp *.h *.cu *.cuh *.cc diff --git a/multimodal/YOLOX/README.md b/multimodal/YOLOX/README.md new file mode 100644 index 0000000000000000000000000000000000000000..18d9d404048c7a3b85a2e9a400ed2d6bfa772ae9 --- /dev/null +++ b/multimodal/YOLOX/README.md @@ -0,0 +1,255 @@ +
+## Introduction
+YOLOX is an anchor-free version of YOLO, with a simpler design but better performance! It aims to bridge the gap between research and industrial communities.
+For more details, please refer to our [report on Arxiv](https://arxiv.org/abs/2107.08430).
+
+This repo is a PyTorch implementation of YOLOX; there is also a [MegEngine implementation](https://github.com/MegEngine/YOLOX).
+
+## Updates!!
+* 【2023/02/28】 We support an assignment visualization tool, see doc [here](./docs/assignment_visualization.md).
+* 【2022/04/14】 We support the jit compile op.
+* 【2021/08/19】 We optimize the training process with **2x** faster training and **~1%** higher performance! See [notes](docs/updates_note.md) for more details.
+* 【2021/08/05】 We release [MegEngine version YOLOX](https://github.com/MegEngine/YOLOX).
+* 【2021/07/28】 We fix the fatal error of [memory leak](https://github.com/Megvii-BaseDetection/YOLOX/issues/103).
+* 【2021/07/26】 We now support [MegEngine](https://github.com/Megvii-BaseDetection/YOLOX/tree/main/demo/MegEngine) deployment.
+* 【2021/07/20】 We have released our technical report on [Arxiv](https://arxiv.org/abs/2107.08430).
+
+## Coming soon
+- [ ] YOLOX-P6 and larger models.
+- [ ] Objects365 pretraining.
+- [ ] Transformer modules.
+- [ ] More features as needed.
+
+## Benchmark
+
+#### Standard Models.
+
+|Model |size |mAP val 0.5:0.95 |mAP test 0.5:0.95 | Speed V100 (ms) | Params (M) |FLOPs (G)| weights |
+| ------ |:---: | :---: | :---: |:---: |:---: | :---: | :----: |
+|[YOLOX-s](./exps/default/yolox_s.py) |640 |40.5 |40.5 |9.8 |9.0 | 26.8 | [github](https://github.com/Megvii-BaseDetection/YOLOX/releases/download/0.1.1rc0/yolox_s.pth) |
+|[YOLOX-m](./exps/default/yolox_m.py) |640 |46.9 |47.2 |12.3 |25.3 |73.8| [github](https://github.com/Megvii-BaseDetection/YOLOX/releases/download/0.1.1rc0/yolox_m.pth) |
+|[YOLOX-l](./exps/default/yolox_l.py) |640 |49.7 |50.1 |14.5 |54.2| 155.6 | [github](https://github.com/Megvii-BaseDetection/YOLOX/releases/download/0.1.1rc0/yolox_l.pth) |
+|[YOLOX-x](./exps/default/yolox_x.py) |640 |51.1 |**51.5** | 17.3 |99.1 |281.9 | [github](https://github.com/Megvii-BaseDetection/YOLOX/releases/download/0.1.1rc0/yolox_x.pth) |
+|[YOLOX-Darknet53](./exps/default/yolov3.py) |640 | 47.7 | 48.0 | 11.1 |63.7 | 185.3 | [github](https://github.com/Megvii-BaseDetection/YOLOX/releases/download/0.1.1rc0/yolox_darknet.pth) |
+**Legacy models**
+
+|Model |size |mAP test 0.5:0.95 | Speed V100 (ms) | Params (M) |FLOPs (G)| weights |
+| ------ |:---: | :---: |:---: |:---: | :---: | :----: |
+|[YOLOX-s](./exps/default/yolox_s.py) |640 |39.6 |9.8 |9.0 | 26.8 | [onedrive](https://megvii-my.sharepoint.cn/:u:/g/personal/gezheng_megvii_com/EW62gmO2vnNNs5npxjzunVwB9p307qqygaCkXdTO88BLUg?e=NMTQYw)/[github](https://github.com/Megvii-BaseDetection/storage/releases/download/0.0.1/yolox_s.pth) |
+|[YOLOX-m](./exps/default/yolox_m.py) |640 |46.4 |12.3 |25.3 |73.8| [onedrive](https://megvii-my.sharepoint.cn/:u:/g/personal/gezheng_megvii_com/ERMTP7VFqrVBrXKMU7Vl4TcBQs0SUeCT7kvc-JdIbej4tQ?e=1MDo9y)/[github](https://github.com/Megvii-BaseDetection/storage/releases/download/0.0.1/yolox_m.pth) |
+|[YOLOX-l](./exps/default/yolox_l.py) |640 |50.0 |14.5 |54.2| 155.6 | [onedrive](https://megvii-my.sharepoint.cn/:u:/g/personal/gezheng_megvii_com/EWA8w_IEOzBKvuueBqfaZh0BeoG5sVzR-XYbOJO4YlOkRw?e=wHWOBE)/[github](https://github.com/Megvii-BaseDetection/storage/releases/download/0.0.1/yolox_l.pth) |
+|[YOLOX-x](./exps/default/yolox_x.py) |640 |**51.2** | 17.3 |99.1 |281.9 | [onedrive](https://megvii-my.sharepoint.cn/:u:/g/personal/gezheng_megvii_com/EdgVPHBziOVBtGAXHfeHI5kBza0q9yyueMGdT0wXZfI1rQ?e=tABO5u)/[github](https://github.com/Megvii-BaseDetection/storage/releases/download/0.0.1/yolox_x.pth) |
+|[YOLOX-Darknet53](./exps/default/yolov3.py) |640 | 47.4 | 11.1 |63.7 | 185.3 | [onedrive](https://megvii-my.sharepoint.cn/:u:/g/personal/gezheng_megvii_com/EZ-MV1r_fMFPkPrNjvbJEMoBLOLAnXH-XKEB77w8LhXL6Q?e=mf6wOc)/[github](https://github.com/Megvii-BaseDetection/storage/releases/download/0.0.1/yolox_darknet53.pth) |
+#### Light Models.
+
+|Model |size |mAP val 0.5:0.95 | Params (M) |FLOPs (G)| weights |
+| ------ |:---: | :---: |:---: |:---: | :---: |
+|[YOLOX-Nano](./exps/default/yolox_nano.py) |416 |25.8 | 0.91 |1.08 | [github](https://github.com/Megvii-BaseDetection/YOLOX/releases/download/0.1.1rc0/yolox_nano.pth) |
+|[YOLOX-Tiny](./exps/default/yolox_tiny.py) |416 |32.8 | 5.06 |6.45 | [github](https://github.com/Megvii-BaseDetection/YOLOX/releases/download/0.1.1rc0/yolox_tiny.pth) |
+**Legacy models**
+
+|Model |size |mAP val 0.5:0.95 | Params (M) |FLOPs (G)| weights |
+| ------ |:---: | :---: |:---: |:---: | :---: |
+|[YOLOX-Nano](./exps/default/yolox_nano.py) |416 |25.3 | 0.91 |1.08 | [github](https://github.com/Megvii-BaseDetection/storage/releases/download/0.0.1/yolox_nano.pth) |
+|[YOLOX-Tiny](./exps/default/yolox_tiny.py) |416 |32.8 | 5.06 |6.45 | [github](https://github.com/Megvii-BaseDetection/storage/releases/download/0.0.1/yolox_tiny_32dot8.pth) |
+ +## Quick Start + +
+**Installation**
+
+Step1. Install YOLOX from source.
+```shell
+git clone git@github.com:Megvii-BaseDetection/YOLOX.git
+cd YOLOX
+pip3 install -v -e .  # or python3 setup.py develop
+```
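+If the editable install worked, the package should be importable from Python. A minimal sanity check (assuming the package is exposed under the name `yolox`, as set up by `setup.py`) is:
+
+```python
+# Quick sanity check of the source install: import the package and
+# print its version string (assumes it was installed as "yolox").
+import yolox
+
+print(yolox.__version__)
+```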
+ +
+**Demo**
+
+Step1. Download a pretrained model from the benchmark table.
+
+Step2. Use either -n or -f to specify your detector's config. For example:
+
+```shell
+python tools/demo.py image -n yolox-s -c /path/to/your/yolox_s.pth --path assets/dog.jpg --conf 0.25 --nms 0.45 --tsize 640 --save_result --device [cpu/gpu]
+```
+or
+```shell
+python tools/demo.py image -f exps/default/yolox_s.py -c /path/to/your/yolox_s.pth --path assets/dog.jpg --conf 0.25 --nms 0.45 --tsize 640 --save_result --device [cpu/gpu]
+```
+Demo for video:
+```shell
+python tools/demo.py video -n yolox-s -c /path/to/your/yolox_s.pth --path /path/to/your/video --conf 0.25 --nms 0.45 --tsize 640 --save_result --device [cpu/gpu]
+```
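+If you prefer to load one of the pretrained checkpoints from Python instead of going through `tools/demo.py`, the sketch below shows one way to do it. It assumes the pip-installed `yolox` package, a downloaded `yolox_s.pth`, and the `get_exp`/`get_model` helpers that the demo script itself relies on:
+
+```python
+# Sketch: build YOLOX-s from its experiment file and load released weights.
+import torch
+from yolox.exp import get_exp
+
+exp = get_exp(exp_name="yolox-s")        # or get_exp(exp_file="exps/default/yolox_s.py")
+model = exp.get_model()
+ckpt = torch.load("yolox_s.pth", map_location="cpu")
+model.load_state_dict(ckpt["model"])     # released checkpoints keep weights under "model"
+model.eval()
+```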
+ +
+**Reproduce our results on COCO**
+
+Step1. Prepare the COCO dataset
+```shell
+cd <YOLOX_HOME>
+ln -s /path/to/your/COCO ./datasets/COCO
+```
+
+Step2. Reproduce our results on COCO by specifying -n:
+
+```shell
+python -m yolox.tools.train -n yolox-s -d 8 -b 64 --fp16 -o [--cache]
+                               yolox-m
+                               yolox-l
+                               yolox-x
+```
+* -d: number of GPU devices
+* -b: total batch size; the recommended value for -b is num-gpu * 8
+* --fp16: mixed precision training
+* --cache: cache images in RAM to accelerate training, which requires a large amount of system RAM.
+
+When using -f, the above commands are equivalent to:
+```shell
+python -m yolox.tools.train -f exps/default/yolox_s.py -d 8 -b 64 --fp16 -o [--cache]
+                               exps/default/yolox_m.py
+                               exps/default/yolox_l.py
+                               exps/default/yolox_x.py
+```
+
+**Multi Machine Training**
+
+We also support multi-node training. Just add the following args:
+* --num\_machines: the total number of training nodes
+* --machine\_rank: the rank of each node
+
+Suppose you want to train YOLOX on 2 machines, your master machine's IP is 123.123.123.123, and you use port 12312 with TCP.
+
+On the master machine, run
+```shell
+python tools/train.py -n yolox-s -b 128 --dist-url tcp://123.123.123.123:12312 --num_machines 2 --machine_rank 0
+```
+On the second machine, run
+```shell
+python tools/train.py -n yolox-s -b 128 --dist-url tcp://123.123.123.123:12312 --num_machines 2 --machine_rank 1
+```
+
+**Logging to Weights & Biases**
+
+To log metrics, predictions and model checkpoints to [W&B](https://docs.wandb.ai/guides/integrations/other/yolox), use the command line argument `--logger wandb` and use the prefix "wandb-" to specify arguments for initializing the wandb run.
+
+```shell
+python tools/train.py -n yolox-s -d 8 -b 64 --fp16 -o [--cache] --logger wandb wandb-project <project name>
+                         yolox-m
+                         yolox-l
+                         yolox-x
+```
+
+An example wandb dashboard is available [here](https://wandb.ai/manan-goel/yolox-nano/runs/3pzfeom0).
+
+**Others**
+
+See more information with the following command:
+```shell
+python -m yolox.tools.train --help
+```
+ + +
+**Evaluation**
+
+We support batch testing for fast evaluation:
+
+```shell
+python -m yolox.tools.eval -n  yolox-s -c yolox_s.pth -b 64 -d 8 --conf 0.001 [--fp16] [--fuse]
+                               yolox-m
+                               yolox-l
+                               yolox-x
+```
+* --fuse: fuse conv and bn
+* -d: number of GPUs used for evaluation. DEFAULT: all available GPUs will be used.
+* -b: total batch size across all GPUs
+
+To reproduce the speed test, we use the following command:
+```shell
+python -m yolox.tools.eval -n  yolox-s -c yolox_s.pth -b 1 -d 1 --conf 0.001 --fp16 --fuse
+                               yolox-m
+                               yolox-l
+                               yolox-x
+```
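+As a side note, the conv+bn fusing enabled by `--fuse` can also be applied directly in Python. A rough sketch, assuming the `fuse_model` helper in `yolox.utils` and the same checkpoint loading as in the demo section above:
+
+```python
+# Sketch: fuse Conv and BatchNorm layers before timing inference,
+# mirroring what the --fuse flag does (assumes yolox.utils.fuse_model).
+import torch
+from yolox.exp import get_exp
+from yolox.utils import fuse_model
+
+exp = get_exp(exp_name="yolox-s")
+model = exp.get_model()
+model.load_state_dict(torch.load("yolox_s.pth", map_location="cpu")["model"])
+model = fuse_model(model.eval())  # fuse after switching to eval mode
+```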
+ + +
+**Tutorials**
+
+* [Training on custom data](docs/train_custom_data.md)
+* [Caching for custom data](docs/cache.md)
+* [Manipulating training image size](docs/manipulate_training_image_size.md)
+* [Assignment visualization](docs/assignment_visualization.md)
+* [Freezing model](docs/freeze_module.md)
+ +## Deployment + + +1. [MegEngine in C++ and Python](./demo/MegEngine) +2. [ONNX export and an ONNXRuntime](./demo/ONNXRuntime) +3. [TensorRT in C++ and Python](./demo/TensorRT) +4. [ncnn in C++ and Java](./demo/ncnn) +5. [OpenVINO in C++ and Python](./demo/OpenVINO) +6. [Accelerate YOLOX inference with nebullvm in Python](./demo/nebullvm) + +## Third-party resources +* YOLOX for streaming perception: [StreamYOLO (CVPR 2022 Oral)](https://github.com/yancie-yjr/StreamYOLO) +* The YOLOX-s and YOLOX-nano are Integrated into [ModelScope](https://www.modelscope.cn/home). Try out the Online Demo at [YOLOX-s](https://www.modelscope.cn/models/damo/cv_cspnet_image-object-detection_yolox/summary) and [YOLOX-Nano](https://www.modelscope.cn/models/damo/cv_cspnet_image-object-detection_yolox_nano_coco/summary) respectively 🚀. +* Integrated into [Huggingface Spaces 🤗](https://huggingface.co/spaces) using [Gradio](https://github.com/gradio-app/gradio). Try out the Web Demo: [![Hugging Face Spaces](https://img.shields.io/badge/%F0%9F%A4%97%20Hugging%20Face-Spaces-blue)](https://huggingface.co/spaces/Sultannn/YOLOX-Demo) +* The ncnn android app with video support: [ncnn-android-yolox](https://github.com/FeiGeChuanShu/ncnn-android-yolox) from [FeiGeChuanShu](https://github.com/FeiGeChuanShu) +* YOLOX with Tengine support: [Tengine](https://github.com/OAID/Tengine/blob/tengine-lite/examples/tm_yolox.cpp) from [BUG1989](https://github.com/BUG1989) +* YOLOX + ROS2 Foxy: [YOLOX-ROS](https://github.com/Ar-Ray-code/YOLOX-ROS) from [Ar-Ray](https://github.com/Ar-Ray-code) +* YOLOX Deploy DeepStream: [YOLOX-deepstream](https://github.com/nanmi/YOLOX-deepstream) from [nanmi](https://github.com/nanmi) +* YOLOX MNN/TNN/ONNXRuntime: [YOLOX-MNN](https://github.com/DefTruth/lite.ai.toolkit/blob/main/lite/mnn/cv/mnn_yolox.cpp)、[YOLOX-TNN](https://github.com/DefTruth/lite.ai.toolkit/blob/main/lite/tnn/cv/tnn_yolox.cpp) and [YOLOX-ONNXRuntime C++](https://github.com/DefTruth/lite.ai.toolkit/blob/main/lite/ort/cv/yolox.cpp) from [DefTruth](https://github.com/DefTruth) +* Converting darknet or yolov5 datasets to COCO format for YOLOX: [YOLO2COCO](https://github.com/RapidAI/YOLO2COCO) from [Daniel](https://github.com/znsoftm) + +## Cite YOLOX +If you use YOLOX in your research, please cite our work by using the following BibTeX entry: + +```latex + @article{yolox2021, + title={YOLOX: Exceeding YOLO Series in 2021}, + author={Ge, Zheng and Liu, Songtao and Wang, Feng and Li, Zeming and Sun, Jian}, + journal={arXiv preprint arXiv:2107.08430}, + year={2021} +} +``` +## In memory of Dr. Jian Sun +Without the guidance of [Dr. Jian Sun](http://www.jiansun.org/), YOLOX would not have been released and open sourced to the community. +The passing away of Dr. Jian is a huge loss to the Computer Vision field. We add this section here to express our remembrance and condolences to our captain Dr. Jian. +It is hoped that every AI practitioner in the world will stick to the concept of "continuous innovation to expand cognitive boundaries, and extraordinary technology to achieve product value" and move forward all the way. + +
+没有孙剑博士的指导,YOLOX也不会问世并开源给社区使用。 +孙剑博士的离去是CV领域的一大损失,我们在此特别添加了这个部分来表达对我们的“船长”孙老师的纪念和哀思。 +希望世界上的每个AI从业者秉持着“持续创新拓展认知边界,非凡科技成就产品价值”的观念,一路向前。 diff --git a/multimodal/YOLOX/demo/MegEngine/cpp/README.md b/multimodal/YOLOX/demo/MegEngine/cpp/README.md new file mode 100644 index 0000000000000000000000000000000000000000..c639f27d96b731bbe58cd99f1347fdb9d11e2cb6 --- /dev/null +++ b/multimodal/YOLOX/demo/MegEngine/cpp/README.md @@ -0,0 +1,173 @@ +# YOLOX-CPP-MegEngine + +Cpp file compile of YOLOX object detection base on [MegEngine](https://github.com/MegEngine/MegEngine). + +## Tutorial + +### Step1: install toolchain + + * host: sudo apt install gcc/g++ (gcc/g++, which version >= 6) build-essential git git-lfs gfortran libgfortran-6-dev autoconf gnupg flex bison gperf curl zlib1g-dev gcc-multilib g++-multilib cmake + * cross build android: download [NDK](https://developer.android.com/ndk/downloads) + * after unzip download NDK, then export NDK_ROOT="path of NDK" + +### Step2: build MegEngine + +```shell +git clone https://github.com/MegEngine/MegEngine.git + +# then init third_party + +export megengine_root="path of MegEngine" +cd $megengine_root && ./third_party/prepare.sh && ./third_party/install-mkl.sh + +# build example: +# build host without cuda: +./scripts/cmake-build/host_build.sh +# or build host with cuda: +./scripts/cmake-build/host_build.sh -c +# or cross build for android aarch64: +./scripts/cmake-build/cross_build_android_arm_inference.sh +# or cross build for android aarch64(with V8.2+fp16): +./scripts/cmake-build/cross_build_android_arm_inference.sh -f + +# after build MegEngine, you need export the `MGE_INSTALL_PATH` +# host without cuda: +export MGE_INSTALL_PATH=${megengine_root}/build_dir/host/MGE_WITH_CUDA_OFF/MGE_INFERENCE_ONLY_ON/Release/install +# or host with cuda: +export MGE_INSTALL_PATH=${megengine_root}/build_dir/host/MGE_WITH_CUDA_ON/MGE_INFERENCE_ONLY_ON/Release/install +# or cross build for android aarch64: +export MGE_INSTALL_PATH=${megengine_root}/build_dir/android/arm64-v8a/Release/install +``` +* you can refs [build tutorial of MegEngine](https://github.com/MegEngine/MegEngine/blob/master/scripts/cmake-build/BUILD_README.md) to build other platform, eg, windows/macos/ etc! 
+ +### Step3: build OpenCV + +```shell +git clone https://github.com/opencv/opencv.git + +git checkout 3.4.15 (we test at 3.4.15, if test other version, may need modify some build) +``` + +- patch diff for android: + +``` +# ``` +# diff --git a/CMakeLists.txt b/CMakeLists.txt +# index f6a2da5310..10354312c9 100644 +# --- a/CMakeLists.txt +# +++ b/CMakeLists.txt +# @@ -643,7 +643,7 @@ if(UNIX) +# if(NOT APPLE) +# CHECK_INCLUDE_FILE(pthread.h HAVE_PTHREAD) +# if(ANDROID) +# - set(OPENCV_LINKER_LIBS ${OPENCV_LINKER_LIBS} dl m log) +# + set(OPENCV_LINKER_LIBS ${OPENCV_LINKER_LIBS} dl m log z) +# elseif(CMAKE_SYSTEM_NAME MATCHES "FreeBSD|NetBSD|DragonFly|OpenBSD|Haiku") +# set(OPENCV_LINKER_LIBS ${OPENCV_LINKER_LIBS} m pthread) +# elseif(EMSCRIPTEN) + +# ``` +``` + +- build for host + +```shell +cd root_dir_of_opencv +mkdir -p build/install +cd build +cmake -DBUILD_JAVA=OFF -DBUILD_SHARED_LIBS=ON -DCMAKE_INSTALL_PREFIX=$PWD/install +make install -j32 +``` + +* build for android-aarch64 + +```shell +cd root_dir_of_opencv +mkdir -p build_android/install +cd build_android + +cmake -DCMAKE_TOOLCHAIN_FILE="$NDK_ROOT/build/cmake/android.toolchain.cmake" -DANDROID_NDK="$NDK_ROOT" -DANDROID_ABI=arm64-v8a -DANDROID_NATIVE_API_LEVEL=21 -DBUILD_JAVA=OFF -DBUILD_ANDROID_PROJECTS=OFF -DBUILD_ANDROID_EXAMPLES=OFF -DBUILD_SHARED_LIBS=ON -DCMAKE_INSTALL_PREFIX=$PWD/install .. + +make install -j32 +``` + +* after build OpenCV, you need export `OPENCV_INSTALL_INCLUDE_PATH ` and `OPENCV_INSTALL_LIB_PATH` + +```shell +# host build: +export OPENCV_INSTALL_INCLUDE_PATH=${path of opencv}/build/install/include +export OPENCV_INSTALL_LIB_PATH=${path of opencv}/build/install/lib +# or cross build for android aarch64: +export OPENCV_INSTALL_INCLUDE_PATH=${path of opencv}/build_android/install/sdk/native/jni/include +export OPENCV_INSTALL_LIB_PATH=${path of opencv}/build_android/install/sdk/native/libs/arm64-v8a +``` + +### Step4: build test demo + +```shell +run build.sh + +# if host: +export CXX=g++ +./build.sh +# or cross android aarch64 +export CXX=aarch64-linux-android21-clang++ +./build.sh +``` + +### Step5: run demo + +> **Note**: two ways to get `yolox_s.mge` model file +> +> * reference to python demo's `dump.py` script. +> * For users with code before 0.1.0 version, wget yolox-s weights [here](https://github.com/Megvii-BaseDetection/storage/releases/download/0.0.1/yolox_s.mge). +> * For users with code after 0.1.0 version, use [python code in megengine](../python) to generate mge file. + +```shell +# if host: +LD_LIBRARY_PATH=$MGE_INSTALL_PATH/lib/:$OPENCV_INSTALL_LIB_PATH ./yolox yolox_s.mge ../../../assets/dog.jpg cuda/cpu/multithread + +# or cross android +adb push/scp $MGE_INSTALL_PATH/lib/libmegengine.so android_phone +adb push/scp $OPENCV_INSTALL_LIB_PATH/*.so android_phone +adb push/scp ./yolox yolox_s.mge android_phone +adb push/scp ../../../assets/dog.jpg android_phone + +# login in android_phone by adb or ssh +# then run: +LD_LIBRARY_PATH=. 
./yolox yolox_s.mge dog.jpg cpu/multithread + +# * means warmup count, valid number >=0 +# * means thread number, valid number >=1, only take effect `multithread` device +# * if >=1 , will use fastrun to choose best algo +# * if >=1, will handle weight preprocess before exe +# * if >=1, will run with fp16 mode +``` + +## Bechmark + +* model info: yolox-s @ input(1,3,640,640) + +* test devices + +``` + * x86_64 -- Intel(R) Xeon(R) CPU E5-2620 v4 @ 2.10GHz + * aarch64 -- xiamo phone mi9 + * cuda -- 1080TI @ cuda-10.1-cudnn-v7.6.3-TensorRT-6.0.1.5.sh @ Intel(R) Xeon(R) CPU E5-2620 v4 @ 2.10GHz +``` + + | megengine @ tag1.4(fastrun + weight\_preprocess)/sec | 1 thread | + | ---------------------------------------------------- | -------- | + | x86\_64 | 0.516245 | + | aarch64(fp32+chw44) | 0.587857 | + + | CUDA @ 1080TI/sec | 1 batch | 2 batch | 4 batch | 8 batch | 16 batch | 32 batch | 64 batch | + | ------------------- | ---------- | --------- | --------- | --------- | --------- | -------- | -------- | + | megengine(fp32+chw) | 0.00813703 | 0.0132893 | 0.0236633 | 0.0444699 | 0.0864917 | 0.16895 | 0.334248 | + +## Acknowledgement + +* [MegEngine](https://github.com/MegEngine/MegEngine) +* [OpenCV](https://github.com/opencv/opencv) +* [NDK](https://developer.android.com/ndk) +* [CMAKE](https://cmake.org/) diff --git a/multimodal/YOLOX/demo/MegEngine/cpp/build.sh b/multimodal/YOLOX/demo/MegEngine/cpp/build.sh new file mode 100755 index 0000000000000000000000000000000000000000..0954305ab4ee9c76c68567c0ed851749049f5bab --- /dev/null +++ b/multimodal/YOLOX/demo/MegEngine/cpp/build.sh @@ -0,0 +1,61 @@ +#!/usr/bin/env bash +set -e + +if [ -z $CXX ];then + echo "please export you c++ toolchain to CXX" + echo "for example:" + echo "build for host: export CXX=g++" + echo "cross build for aarch64-android(always locate in NDK): export CXX=aarch64-linux-android21-clang++" + echo "cross build for aarch64-linux: export CXX=aarch64-linux-gnu-g++" + exit -1 +fi + +if [ -z $MGE_INSTALL_PATH ];then + echo "please refsi ./README.md to init MGE_INSTALL_PATH env" + exit -1 +fi + +if [ -z $OPENCV_INSTALL_INCLUDE_PATH ];then + echo "please refs ./README.md to init OPENCV_INSTALL_INCLUDE_PATH env" + exit -1 +fi + +if [ -z $OPENCV_INSTALL_LIB_PATH ];then + echo "please refs ./README.md to init OPENCV_INSTALL_LIB_PATH env" + exit -1 +fi + +INCLUDE_FLAG="-I$MGE_INSTALL_PATH/include -I$OPENCV_INSTALL_INCLUDE_PATH" +LINK_FLAG="-L$MGE_INSTALL_PATH/lib/ -lmegengine -L$OPENCV_INSTALL_LIB_PATH -lopencv_core -lopencv_highgui -lopencv_imgproc -lopencv_imgcodecs" +BUILD_FLAG="-static-libstdc++ -O3 -pie -fPIE -g" + +if [[ $CXX =~ "android" ]]; then + LINK_FLAG="${LINK_FLAG} -llog -lz" +fi + +echo "CXX: $CXX" +echo "MGE_INSTALL_PATH: $MGE_INSTALL_PATH" +echo "INCLUDE_FLAG: $INCLUDE_FLAG" +echo "LINK_FLAG: $LINK_FLAG" +echo "BUILD_FLAG: $BUILD_FLAG" + +echo "[" > compile_commands.json +echo "{" >> compile_commands.json +echo "\"directory\": \"$PWD\"," >> compile_commands.json +echo "\"command\": \"$CXX yolox.cpp -o yolox ${INCLUDE_FLAG} ${LINK_FLAG}\"," >> compile_commands.json +echo "\"file\": \"$PWD/yolox.cpp\"," >> compile_commands.json +echo "}," >> compile_commands.json +echo "]" >> compile_commands.json +$CXX yolox.cpp -o yolox ${INCLUDE_FLAG} ${LINK_FLAG} ${BUILD_FLAG} + +echo "build success, output file: yolox" +if [[ $CXX =~ "android" ]]; then + echo "try command to run:" + echo "adb push/scp $MGE_INSTALL_PATH/lib/libmegengine.so android_phone" + echo "adb push/scp $OPENCV_INSTALL_LIB_PATH/*.so android_phone" + echo "adb 
push/scp ./yolox yolox_s.mge android_phone" + echo "adb push/scp ../../../assets/dog.jpg android_phone" + echo "adb/ssh to android_phone, then run: LD_LIBRARY_PATH=. ./yolox yolox_s.mge dog.jpg cpu/multithread " +else + echo "try command to run: LD_LIBRARY_PATH=$MGE_INSTALL_PATH/lib/:$OPENCV_INSTALL_LIB_PATH ./yolox yolox_s.mge ../../../assets/dog.jpg cuda/cpu/multithread " +fi diff --git a/multimodal/YOLOX/demo/MegEngine/cpp/yolox.cpp b/multimodal/YOLOX/demo/MegEngine/cpp/yolox.cpp new file mode 100644 index 0000000000000000000000000000000000000000..859e6dcd2d1c42165a901d07a20e60ac256da912 --- /dev/null +++ b/multimodal/YOLOX/demo/MegEngine/cpp/yolox.cpp @@ -0,0 +1,470 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 + +#include "megbrain/gopt/inference.h" +#include "megbrain/opr/search_policy/algo_chooser_helper.h" +#include "megbrain/serialization/serializer.h" +#include +#include +#include +#include +#include +#include +#include + +/** + * @brief Define names based depends on Unicode path support + */ +#define NMS_THRESH 0.45 +#define BBOX_CONF_THRESH 0.25 + +constexpr int INPUT_W = 640; +constexpr int INPUT_H = 640; + +using namespace mgb; + +cv::Mat static_resize(cv::Mat &img) { + float r = std::min(INPUT_W / (img.cols * 1.0), INPUT_H / (img.rows * 1.0)); + int unpad_w = r * img.cols; + int unpad_h = r * img.rows; + cv::Mat re(unpad_h, unpad_w, CV_8UC3); + cv::resize(img, re, re.size()); + cv::Mat out(INPUT_W, INPUT_H, CV_8UC3, cv::Scalar(114, 114, 114)); + re.copyTo(out(cv::Rect(0, 0, re.cols, re.rows))); + return out; +} + +void blobFromImage(cv::Mat &img, float *blob_data) { + int channels = 3; + int img_h = img.rows; + int img_w = img.cols; + for (size_t c = 0; c < channels; c++) { + for (size_t h = 0; h < img_h; h++) { + for (size_t w = 0; w < img_w; w++) { + blob_data[c * img_w * img_h + h * img_w + w] = + (float)img.at(h, w)[c]; + } + } + } +} + +struct Object { + cv::Rect_ rect; + int label; + float prob; +}; + +struct GridAndStride { + int grid0; + int grid1; + int stride; +}; + +static void +generate_grids_and_stride(const int target_size, std::vector &strides, + std::vector &grid_strides) { + for (auto stride : strides) { + int num_grid = target_size / stride; + for (int g1 = 0; g1 < num_grid; g1++) { + for (int g0 = 0; g0 < num_grid; g0++) { + grid_strides.push_back((GridAndStride){g0, g1, stride}); + } + } + } +} + +static void generate_yolox_proposals(std::vector grid_strides, + const float *feat_ptr, + float prob_threshold, + std::vector &objects) { + const int num_class = 80; + const int num_anchors = grid_strides.size(); + + for (int anchor_idx = 0; anchor_idx < num_anchors; anchor_idx++) { + const int grid0 = grid_strides[anchor_idx].grid0; + const int grid1 = grid_strides[anchor_idx].grid1; + const int stride = grid_strides[anchor_idx].stride; + + const int basic_pos = anchor_idx * 85; + + float x_center = (feat_ptr[basic_pos + 0] + grid0) * stride; + float y_center = (feat_ptr[basic_pos + 1] + grid1) * stride; + float w = exp(feat_ptr[basic_pos + 2]) * stride; + float h = exp(feat_ptr[basic_pos + 3]) * stride; + float x0 = x_center - w * 0.5f; + float y0 = y_center - h * 0.5f; + + float box_objectness = feat_ptr[basic_pos + 4]; + for (int class_idx = 0; class_idx < num_class; class_idx++) { + float box_cls_score = feat_ptr[basic_pos + 5 + class_idx]; + float box_prob = box_objectness * box_cls_score; + if (box_prob > prob_threshold) { + Object obj; + obj.rect.x = x0; + obj.rect.y = y0; + obj.rect.width = w; + obj.rect.height 
= h; + obj.label = class_idx; + obj.prob = box_prob; + + objects.push_back(obj); + } + + } // class loop + + } // point anchor loop +} + +static inline float intersection_area(const Object &a, const Object &b) { + cv::Rect_ inter = a.rect & b.rect; + return inter.area(); +} + +static void qsort_descent_inplace(std::vector &faceobjects, int left, + int right) { + int i = left; + int j = right; + float p = faceobjects[(left + right) / 2].prob; + + while (i <= j) { + while (faceobjects[i].prob > p) + i++; + + while (faceobjects[j].prob < p) + j--; + + if (i <= j) { + // swap + std::swap(faceobjects[i], faceobjects[j]); + + i++; + j--; + } + } + +#pragma omp parallel sections + { +#pragma omp section + { + if (left < j) + qsort_descent_inplace(faceobjects, left, j); + } +#pragma omp section + { + if (i < right) + qsort_descent_inplace(faceobjects, i, right); + } + } +} + +static void qsort_descent_inplace(std::vector &objects) { + if (objects.empty()) + return; + + qsort_descent_inplace(objects, 0, objects.size() - 1); +} + +static void nms_sorted_bboxes(const std::vector &faceobjects, + std::vector &picked, float nms_threshold) { + picked.clear(); + + const int n = faceobjects.size(); + + std::vector areas(n); + for (int i = 0; i < n; i++) { + areas[i] = faceobjects[i].rect.area(); + } + + for (int i = 0; i < n; i++) { + const Object &a = faceobjects[i]; + + int keep = 1; + for (int j = 0; j < (int)picked.size(); j++) { + const Object &b = faceobjects[picked[j]]; + + // intersection over union + float inter_area = intersection_area(a, b); + float union_area = areas[i] + areas[picked[j]] - inter_area; + // float IoU = inter_area / union_area + if (inter_area / union_area > nms_threshold) + keep = 0; + } + + if (keep) + picked.push_back(i); + } +} + +static void decode_outputs(const float *prob, std::vector &objects, + float scale, const int img_w, const int img_h) { + std::vector proposals; + std::vector strides = {8, 16, 32}; + std::vector grid_strides; + + generate_grids_and_stride(INPUT_W, strides, grid_strides); + generate_yolox_proposals(grid_strides, prob, BBOX_CONF_THRESH, proposals); + qsort_descent_inplace(proposals); + + std::vector picked; + nms_sorted_bboxes(proposals, picked, NMS_THRESH); + int count = picked.size(); + objects.resize(count); + + for (int i = 0; i < count; i++) { + objects[i] = proposals[picked[i]]; + + // adjust offset to original unpadded + float x0 = (objects[i].rect.x) / scale; + float y0 = (objects[i].rect.y) / scale; + float x1 = (objects[i].rect.x + objects[i].rect.width) / scale; + float y1 = (objects[i].rect.y + objects[i].rect.height) / scale; + + // clip + x0 = std::max(std::min(x0, (float)(img_w - 1)), 0.f); + y0 = std::max(std::min(y0, (float)(img_h - 1)), 0.f); + x1 = std::max(std::min(x1, (float)(img_w - 1)), 0.f); + y1 = std::max(std::min(y1, (float)(img_h - 1)), 0.f); + + objects[i].rect.x = x0; + objects[i].rect.y = y0; + objects[i].rect.width = x1 - x0; + objects[i].rect.height = y1 - y0; + } +} + +const float color_list[80][3] = { + {0.000, 0.447, 0.741}, {0.850, 0.325, 0.098}, {0.929, 0.694, 0.125}, + {0.494, 0.184, 0.556}, {0.466, 0.674, 0.188}, {0.301, 0.745, 0.933}, + {0.635, 0.078, 0.184}, {0.300, 0.300, 0.300}, {0.600, 0.600, 0.600}, + {1.000, 0.000, 0.000}, {1.000, 0.500, 0.000}, {0.749, 0.749, 0.000}, + {0.000, 1.000, 0.000}, {0.000, 0.000, 1.000}, {0.667, 0.000, 1.000}, + {0.333, 0.333, 0.000}, {0.333, 0.667, 0.000}, {0.333, 1.000, 0.000}, + {0.667, 0.333, 0.000}, {0.667, 0.667, 0.000}, {0.667, 1.000, 0.000}, + {1.000, 0.333, 0.000}, 
{1.000, 0.667, 0.000}, {1.000, 1.000, 0.000}, + {0.000, 0.333, 0.500}, {0.000, 0.667, 0.500}, {0.000, 1.000, 0.500}, + {0.333, 0.000, 0.500}, {0.333, 0.333, 0.500}, {0.333, 0.667, 0.500}, + {0.333, 1.000, 0.500}, {0.667, 0.000, 0.500}, {0.667, 0.333, 0.500}, + {0.667, 0.667, 0.500}, {0.667, 1.000, 0.500}, {1.000, 0.000, 0.500}, + {1.000, 0.333, 0.500}, {1.000, 0.667, 0.500}, {1.000, 1.000, 0.500}, + {0.000, 0.333, 1.000}, {0.000, 0.667, 1.000}, {0.000, 1.000, 1.000}, + {0.333, 0.000, 1.000}, {0.333, 0.333, 1.000}, {0.333, 0.667, 1.000}, + {0.333, 1.000, 1.000}, {0.667, 0.000, 1.000}, {0.667, 0.333, 1.000}, + {0.667, 0.667, 1.000}, {0.667, 1.000, 1.000}, {1.000, 0.000, 1.000}, + {1.000, 0.333, 1.000}, {1.000, 0.667, 1.000}, {0.333, 0.000, 0.000}, + {0.500, 0.000, 0.000}, {0.667, 0.000, 0.000}, {0.833, 0.000, 0.000}, + {1.000, 0.000, 0.000}, {0.000, 0.167, 0.000}, {0.000, 0.333, 0.000}, + {0.000, 0.500, 0.000}, {0.000, 0.667, 0.000}, {0.000, 0.833, 0.000}, + {0.000, 1.000, 0.000}, {0.000, 0.000, 0.167}, {0.000, 0.000, 0.333}, + {0.000, 0.000, 0.500}, {0.000, 0.000, 0.667}, {0.000, 0.000, 0.833}, + {0.000, 0.000, 1.000}, {0.000, 0.000, 0.000}, {0.143, 0.143, 0.143}, + {0.286, 0.286, 0.286}, {0.429, 0.429, 0.429}, {0.571, 0.571, 0.571}, + {0.714, 0.714, 0.714}, {0.857, 0.857, 0.857}, {0.000, 0.447, 0.741}, + {0.314, 0.717, 0.741}, {0.50, 0.5, 0}}; + +static void draw_objects(const cv::Mat &bgr, + const std::vector &objects) { + static const char *class_names[] = { + "person", "bicycle", "car", + "motorcycle", "airplane", "bus", + "train", "truck", "boat", + "traffic light", "fire hydrant", "stop sign", + "parking meter", "bench", "bird", + "cat", "dog", "horse", + "sheep", "cow", "elephant", + "bear", "zebra", "giraffe", + "backpack", "umbrella", "handbag", + "tie", "suitcase", "frisbee", + "skis", "snowboard", "sports ball", + "kite", "baseball bat", "baseball glove", + "skateboard", "surfboard", "tennis racket", + "bottle", "wine glass", "cup", + "fork", "knife", "spoon", + "bowl", "banana", "apple", + "sandwich", "orange", "broccoli", + "carrot", "hot dog", "pizza", + "donut", "cake", "chair", + "couch", "potted plant", "bed", + "dining table", "toilet", "tv", + "laptop", "mouse", "remote", + "keyboard", "cell phone", "microwave", + "oven", "toaster", "sink", + "refrigerator", "book", "clock", + "vase", "scissors", "teddy bear", + "hair drier", "toothbrush"}; + + cv::Mat image = bgr.clone(); + + for (size_t i = 0; i < objects.size(); i++) { + const Object &obj = objects[i]; + + fprintf(stderr, "%d = %.5f at %.2f %.2f %.2f x %.2f\n", obj.label, obj.prob, + obj.rect.x, obj.rect.y, obj.rect.width, obj.rect.height); + + cv::Scalar color = + cv::Scalar(color_list[obj.label][0], color_list[obj.label][1], + color_list[obj.label][2]); + float c_mean = cv::mean(color)[0]; + cv::Scalar txt_color; + if (c_mean > 0.5) { + txt_color = cv::Scalar(0, 0, 0); + } else { + txt_color = cv::Scalar(255, 255, 255); + } + + cv::rectangle(image, obj.rect, color * 255, 2); + + char text[256]; + sprintf(text, "%s %.1f%%", class_names[obj.label], obj.prob * 100); + + int baseLine = 0; + cv::Size label_size = + cv::getTextSize(text, cv::FONT_HERSHEY_SIMPLEX, 0.4, 1, &baseLine); + + cv::Scalar txt_bk_color = color * 0.7 * 255; + + int x = obj.rect.x; + int y = obj.rect.y + 1; + // int y = obj.rect.y - label_size.height - baseLine; + if (y > image.rows) + y = image.rows; + // if (x + label_size.width > image.cols) + // x = image.cols - label_size.width; + + cv::rectangle( + image, + cv::Rect(cv::Point(x, y), + 
cv::Size(label_size.width, label_size.height + baseLine)), + txt_bk_color, -1); + + cv::putText(image, text, cv::Point(x, y + label_size.height), + cv::FONT_HERSHEY_SIMPLEX, 0.4, txt_color, 1); + } + + cv::imwrite("out.jpg", image); + std::cout << "save output to out.jpg" << std::endl; +} + +cg::ComputingGraph::OutputSpecItem make_callback_copy(SymbolVar dev, + HostTensorND &host) { + auto cb = [&host](DeviceTensorND &d) { host.copy_from(d); }; + return {dev, cb}; +} + +int main(int argc, char *argv[]) { + serialization::GraphLoader::LoadConfig load_config; + load_config.comp_graph = ComputingGraph::make(); + auto &&graph_opt = load_config.comp_graph->options(); + graph_opt.graph_opt_level = 0; + + if (argc != 9) { + std::cout << "Usage : " << argv[0] + << " " + " " + "" + << std::endl; + return EXIT_FAILURE; + } + + const std::string input_model{argv[1]}; + const std::string input_image_path{argv[2]}; + const std::string device{argv[3]}; + const size_t warmup_count = atoi(argv[4]); + const size_t thread_number = atoi(argv[5]); + const size_t use_fast_run = atoi(argv[6]); + const size_t use_weight_preprocess = atoi(argv[7]); + const size_t run_with_fp16 = atoi(argv[8]); + + if (device == "cuda") { + load_config.comp_node_mapper = [](CompNode::Locator &loc) { + loc.type = CompNode::DeviceType::CUDA; + }; + } else if (device == "cpu") { + load_config.comp_node_mapper = [](CompNode::Locator &loc) { + loc.type = CompNode::DeviceType::CPU; + }; + } else if (device == "multithread") { + load_config.comp_node_mapper = [thread_number](CompNode::Locator &loc) { + loc.type = CompNode::DeviceType::MULTITHREAD; + loc.device = 0; + loc.stream = thread_number; + }; + std::cout << "use " << thread_number << " thread" << std::endl; + } else { + std::cout << "device only support cuda or cpu or multithread" << std::endl; + return EXIT_FAILURE; + } + + if (use_weight_preprocess) { + std::cout << "use weight preprocess" << std::endl; + graph_opt.graph_opt.enable_weight_preprocess(); + } + if (run_with_fp16) { + std::cout << "run with fp16" << std::endl; + graph_opt.graph_opt.enable_f16_io_comp(); + } + + if (device == "cuda") { + std::cout << "choose format for cuda" << std::endl; + } else { + std::cout << "choose format for non-cuda" << std::endl; +#if defined(__arm__) || defined(__aarch64__) + if (run_with_fp16) { + std::cout << "use chw format when enable fp16" << std::endl; + } else { + std::cout << "choose format for nchw44 for aarch64" << std::endl; + graph_opt.graph_opt.enable_nchw44(); + } +#endif +#if defined(__x86_64__) || defined(__amd64__) || defined(__i386__) + // graph_opt.graph_opt.enable_nchw88(); +#endif + } + + std::unique_ptr inp_file = + serialization::InputFile::make_fs(input_model.c_str()); + auto loader = serialization::GraphLoader::make(std::move(inp_file)); + serialization::GraphLoader::LoadResult network = + loader->load(load_config, false); + + if (use_fast_run) { + std::cout << "use fastrun" << std::endl; + using S = opr::mixin::AlgoChooserHelper::ExecutionPolicy::Strategy; + S strategy = static_cast(0); + strategy = S::PROFILE | S::OPTIMIZED | strategy; + mgb::gopt::modify_opr_algo_strategy_inplace(network.output_var_list, + strategy); + } + + auto data = network.tensor_map["data"]; + cv::Mat image = cv::imread(input_image_path); + cv::Mat pr_img = static_resize(image); + float *data_ptr = data->resize({1, 3, 640, 640}).ptr(); + blobFromImage(pr_img, data_ptr); + HostTensorND predict; + std::unique_ptr func = network.graph->compile( + 
{make_callback_copy(network.output_var_map.begin()->second, predict)}); + + for (auto i = 0; i < warmup_count; i++) { + std::cout << "warmup: " << i << std::endl; + func->execute(); + func->wait(); + } + auto start = std::chrono::system_clock::now(); + func->execute(); + func->wait(); + auto end = std::chrono::system_clock::now(); + std::chrono::duration exec_seconds = end - start; + std::cout << "elapsed time: " << exec_seconds.count() << "s" << std::endl; + + float *predict_ptr = predict.ptr(); + int img_w = image.cols; + int img_h = image.rows; + float scale = + std::min(INPUT_W / (image.cols * 1.0), INPUT_H / (image.rows * 1.0)); + std::vector objects; + + decode_outputs(predict_ptr, objects, scale, img_w, img_h); + draw_objects(image, objects); + + return EXIT_SUCCESS; +} diff --git a/multimodal/YOLOX/demo/MegEngine/python/README.md b/multimodal/YOLOX/demo/MegEngine/python/README.md new file mode 100644 index 0000000000000000000000000000000000000000..97ec25563229fcc2914deb80c1135cda8d49bfb2 --- /dev/null +++ b/multimodal/YOLOX/demo/MegEngine/python/README.md @@ -0,0 +1,33 @@ +# YOLOX-Python-MegEngine + +Python version of YOLOX object detection base on [MegEngine](https://github.com/MegEngine/MegEngine). + +## Tutorial + +### Step1: install requirements + +``` +python3 -m pip install megengine -f https://megengine.org.cn/whl/mge.html +``` + +### Step2: convert checkpoint weights from torch's path file + +``` +python3 convert_weights.py -w yolox_s.pth -o yolox_s_mge.pkl +``` + +### Step3: run demo + +This part is the same as torch's python demo, but no need to specify device. + +``` +python3 demo.py image -n yolox-s -c yolox_s_mge.pkl --path ../../../assets/dog.jpg --conf 0.25 --nms 0.45 --tsize 640 --save_result +``` + +### [Optional]Step4: dump model for cpp inference + +> **Note**: result model is dumped with `optimize_for_inference` and `enable_fuse_conv_bias_nonlinearity`. 
+ +``` +python3 dump.py -n yolox-s -c yolox_s_mge.pkl --dump_path yolox_s.mge +``` diff --git a/multimodal/YOLOX/demo/MegEngine/python/build.py b/multimodal/YOLOX/demo/MegEngine/python/build.py new file mode 100644 index 0000000000000000000000000000000000000000..139f4e7c7302e6ad9c3ae09c2918599b2b192a03 --- /dev/null +++ b/multimodal/YOLOX/demo/MegEngine/python/build.py @@ -0,0 +1,53 @@ +#!/usr/bin/env python3 +# -*- coding:utf-8 -*- + +import megengine as mge +import megengine.module as M + +from models.yolo_fpn import YOLOFPN +from models.yolo_head import YOLOXHead +from models.yolo_pafpn import YOLOPAFPN +from models.yolox import YOLOX + + +def build_yolox(name="yolox-s"): + num_classes = 80 + + # value meaning: depth, width + param_dict = { + "yolox-nano": (0.33, 0.25), + "yolox-tiny": (0.33, 0.375), + "yolox-s": (0.33, 0.50), + "yolox-m": (0.67, 0.75), + "yolox-l": (1.0, 1.0), + "yolox-x": (1.33, 1.25), + } + if name == "yolov3": + depth = 1.0 + width = 1.0 + backbone = YOLOFPN() + head = YOLOXHead(num_classes, width, in_channels=[128, 256, 512], act="lrelu") + model = YOLOX(backbone, head) + else: + assert name in param_dict + kwargs = {} + depth, width = param_dict[name] + if name == "yolox-nano": + kwargs["depthwise"] = True + in_channels = [256, 512, 1024] + backbone = YOLOPAFPN(depth, width, in_channels=in_channels, **kwargs) + head = YOLOXHead(num_classes, width, in_channels=in_channels, **kwargs) + model = YOLOX(backbone, head) + + for m in model.modules(): + if isinstance(m, M.BatchNorm2d): + m.eps = 1e-3 + + return model + + +def build_and_load(weight_file, name="yolox-s"): + model = build_yolox(name) + model_weights = mge.load(weight_file) + model.load_state_dict(model_weights, strict=False) + return model diff --git a/multimodal/YOLOX/demo/MegEngine/python/convert_weights.py b/multimodal/YOLOX/demo/MegEngine/python/convert_weights.py new file mode 100644 index 0000000000000000000000000000000000000000..198caeeb38efe5400323828e4c0e91ba94a99167 --- /dev/null +++ b/multimodal/YOLOX/demo/MegEngine/python/convert_weights.py @@ -0,0 +1,64 @@ +#!/usr/bin/env python3 +# -*- coding:utf-8 -*- +import argparse +from collections import OrderedDict + +import megengine as mge +import torch + + +def make_parser(): + parser = argparse.ArgumentParser() + parser.add_argument("-w", "--weights", type=str, help="path of weight file") + parser.add_argument( + "-o", + "--output", + default="weight_mge.pkl", + type=str, + help="path of weight file", + ) + return parser + + +def numpy_weights(weight_file): + torch_weights = torch.load(weight_file, map_location="cpu") + if "model" in torch_weights: + torch_weights = torch_weights["model"] + new_dict = OrderedDict() + for k, v in torch_weights.items(): + new_dict[k] = v.cpu().numpy() + return new_dict + + +def map_weights(weight_file, output_file): + torch_weights = numpy_weights(weight_file) + + new_dict = OrderedDict() + for k, v in torch_weights.items(): + if "num_batches_tracked" in k: + print("drop: {}".format(k)) + continue + if k.endswith("bias"): + print("bias key: {}".format(k)) + v = v.reshape(1, -1, 1, 1) + new_dict[k] = v + elif "dconv" in k and "conv.weight" in k: + print("depthwise conv key: {}".format(k)) + cout, cin, k1, k2 = v.shape + v = v.reshape(cout, 1, cin, k1, k2) + new_dict[k] = v + else: + new_dict[k] = v + + mge.save(new_dict, output_file) + print("save weights to {}".format(output_file)) + + +def main(): + parser = make_parser() + args = parser.parse_args() + map_weights(args.weights, args.output) + + +if __name__ == 
"__main__": + main() diff --git a/multimodal/YOLOX/demo/MegEngine/python/demo.py b/multimodal/YOLOX/demo/MegEngine/python/demo.py new file mode 100644 index 0000000000000000000000000000000000000000..6542853a1a0eb1f8882892fcf55fff8838bd1468 --- /dev/null +++ b/multimodal/YOLOX/demo/MegEngine/python/demo.py @@ -0,0 +1,237 @@ +#!/usr/bin/env python3 +# -*- coding:utf-8 -*- +# Copyright (c) Megvii, Inc. and its affiliates. + +import argparse +import os +import time + +import cv2 +import megengine as mge +import megengine.functional as F +from loguru import logger + +from yolox.data.datasets import COCO_CLASSES +from yolox.utils import vis +from yolox.data.data_augment import preproc as preprocess + +from build import build_and_load + +IMAGE_EXT = [".jpg", ".jpeg", ".webp", ".bmp", ".png"] + + +def make_parser(): + parser = argparse.ArgumentParser("YOLOX Demo!") + parser.add_argument( + "demo", default="image", help="demo type, eg. image, video and webcam" + ) + parser.add_argument("-n", "--name", type=str, default="yolox-s", help="model name") + parser.add_argument("--path", default="./test.png", help="path to images or video") + parser.add_argument("--camid", type=int, default=0, help="webcam demo camera id") + parser.add_argument( + "--save_result", + action="store_true", + help="whether to save the inference result of image/video", + ) + + parser.add_argument("-c", "--ckpt", default=None, type=str, help="ckpt for eval") + parser.add_argument("--conf", default=None, type=float, help="test conf") + parser.add_argument("--nms", default=None, type=float, help="test nms threshold") + parser.add_argument("--tsize", default=None, type=int, help="test img size") + return parser + + +def get_image_list(path): + image_names = [] + for maindir, subdir, file_name_list in os.walk(path): + for filename in file_name_list: + apath = os.path.join(maindir, filename) + ext = os.path.splitext(apath)[1] + if ext in IMAGE_EXT: + image_names.append(apath) + return image_names + + +def postprocess(prediction, num_classes, conf_thre=0.7, nms_thre=0.45): + box_corner = F.zeros_like(prediction) + box_corner[:, :, 0] = prediction[:, :, 0] - prediction[:, :, 2] / 2 + box_corner[:, :, 1] = prediction[:, :, 1] - prediction[:, :, 3] / 2 + box_corner[:, :, 2] = prediction[:, :, 0] + prediction[:, :, 2] / 2 + box_corner[:, :, 3] = prediction[:, :, 1] + prediction[:, :, 3] / 2 + prediction[:, :, :4] = box_corner[:, :, :4] + + output = [None for _ in range(len(prediction))] + for i, image_pred in enumerate(prediction): + + # If none are remaining => process next image + if not image_pred.shape[0]: + continue + # Get score and class with highest confidence + class_conf = F.max(image_pred[:, 5: 5 + num_classes], 1, keepdims=True) + class_pred = F.argmax(image_pred[:, 5: 5 + num_classes], 1, keepdims=True) + + class_conf_squeeze = F.squeeze(class_conf) + conf_mask = image_pred[:, 4] * class_conf_squeeze >= conf_thre + detections = F.concat((image_pred[:, :5], class_conf, class_pred), 1) + detections = detections[conf_mask] + if not detections.shape[0]: + continue + + nms_out_index = F.vision.nms( + detections[:, :4], detections[:, 4] * detections[:, 5], nms_thre, + ) + detections = detections[nms_out_index] + if output[i] is None: + output[i] = detections + else: + output[i] = F.concat((output[i], detections)) + + return output + + +class Predictor(object): + def __init__( + self, + model, + confthre=0.01, + nmsthre=0.65, + test_size=(640, 640), + cls_names=COCO_CLASSES, + trt_file=None, + decoder=None, + ): + self.model = model 
+ self.cls_names = cls_names + self.decoder = decoder + self.num_classes = 80 + self.confthre = confthre + self.nmsthre = nmsthre + self.test_size = test_size + + def inference(self, img): + img_info = {"id": 0} + if isinstance(img, str): + img_info["file_name"] = os.path.basename(img) + img = cv2.imread(img) + if img is None: + raise ValueError("test image path is invalid!") + else: + img_info["file_name"] = None + + height, width = img.shape[:2] + img_info["height"] = height + img_info["width"] = width + img_info["raw_img"] = img + + img, ratio = preprocess(img, self.test_size) + img_info["ratio"] = ratio + img = F.expand_dims(mge.tensor(img), 0) + + t0 = time.time() + outputs = self.model(img) + outputs = postprocess(outputs, self.num_classes, self.confthre, self.nmsthre) + logger.info("Infer time: {:.4f}s".format(time.time() - t0)) + return outputs, img_info + + def visual(self, output, img_info, cls_conf=0.35): + ratio = img_info["ratio"] + img = img_info["raw_img"] + if output is None: + return img + output = output.numpy() + + # preprocessing: resize + bboxes = output[:, 0:4] / ratio + + cls = output[:, 6] + scores = output[:, 4] * output[:, 5] + + vis_res = vis(img, bboxes, scores, cls, cls_conf, self.cls_names) + return vis_res + + +def image_demo(predictor, vis_folder, path, current_time, save_result): + if os.path.isdir(path): + files = get_image_list(path) + else: + files = [path] + files.sort() + for image_name in files: + outputs, img_info = predictor.inference(image_name) + result_image = predictor.visual(outputs[0], img_info) + if save_result: + save_folder = os.path.join( + vis_folder, time.strftime("%Y_%m_%d_%H_%M_%S", current_time) + ) + os.makedirs(save_folder, exist_ok=True) + save_file_name = os.path.join(save_folder, os.path.basename(image_name)) + logger.info("Saving detection result in {}".format(save_file_name)) + cv2.imwrite(save_file_name, result_image) + ch = cv2.waitKey(0) + if ch == 27 or ch == ord("q") or ch == ord("Q"): + break + + +def imageflow_demo(predictor, vis_folder, current_time, args): + cap = cv2.VideoCapture(args.path if args.demo == "video" else args.camid) + width = cap.get(cv2.CAP_PROP_FRAME_WIDTH) # float + height = cap.get(cv2.CAP_PROP_FRAME_HEIGHT) # float + fps = cap.get(cv2.CAP_PROP_FPS) + save_folder = os.path.join( + vis_folder, time.strftime("%Y_%m_%d_%H_%M_%S", current_time) + ) + os.makedirs(save_folder, exist_ok=True) + if args.demo == "video": + save_path = os.path.join(save_folder, os.path.basename(args.path)) + else: + save_path = os.path.join(save_folder, "camera.mp4") + logger.info(f"video save_path is {save_path}") + vid_writer = cv2.VideoWriter( + save_path, cv2.VideoWriter_fourcc(*"mp4v"), fps, (int(width), int(height)) + ) + while True: + ret_val, frame = cap.read() + if ret_val: + outputs, img_info = predictor.inference(frame) + result_frame = predictor.visual(outputs[0], img_info) + if args.save_result: + vid_writer.write(result_frame) + ch = cv2.waitKey(1) + if ch == 27 or ch == ord("q") or ch == ord("Q"): + break + else: + break + + +def main(args): + file_name = os.path.join("./yolox_outputs", args.name) + os.makedirs(file_name, exist_ok=True) + + if args.save_result: + vis_folder = os.path.join(file_name, "vis_res") + os.makedirs(vis_folder, exist_ok=True) + + confthre = 0.01 + nmsthre = 0.65 + test_size = (640, 640) + if args.conf is not None: + confthre = args.conf + if args.nms is not None: + nmsthre = args.nms + if args.tsize is not None: + test_size = (args.tsize, args.tsize) + + model = build_and_load(args.ckpt, 
name=args.name) + model.eval() + + predictor = Predictor(model, confthre, nmsthre, test_size, COCO_CLASSES, None, None) + current_time = time.localtime() + if args.demo == "image": + image_demo(predictor, vis_folder, args.path, current_time, args.save_result) + elif args.demo == "video" or args.demo == "webcam": + imageflow_demo(predictor, vis_folder, current_time, args) + + +if __name__ == "__main__": + args = make_parser().parse_args() + main(args) diff --git a/multimodal/YOLOX/demo/MegEngine/python/dump.py b/multimodal/YOLOX/demo/MegEngine/python/dump.py new file mode 100644 index 0000000000000000000000000000000000000000..9ca1215bccb2f450e7cba1d971998531c38366cf --- /dev/null +++ b/multimodal/YOLOX/demo/MegEngine/python/dump.py @@ -0,0 +1,51 @@ +#!/usr/bin/env python3 +# -*- coding:utf-8 -*- +# Copyright (c) Megvii, Inc. and its affiliates. + +import argparse + +import megengine as mge +import numpy as np +from megengine import jit + +from build import build_and_load + + +def make_parser(): + parser = argparse.ArgumentParser("YOLOX Demo Dump") + parser.add_argument("-n", "--name", type=str, default="yolox-s", help="model name") + parser.add_argument("-c", "--ckpt", default=None, type=str, help="ckpt for eval") + parser.add_argument( + "--dump_path", default="model.mge", help="path to save the dumped model" + ) + return parser + + +def dump_static_graph(model, graph_name="model.mge"): + model.eval() + model.head.decode_in_inference = False + + data = mge.Tensor(np.random.random((1, 3, 640, 640))) + + @jit.trace(capture_as_const=True) + def pred_func(data): + outputs = model(data) + return outputs + + pred_func(data) + pred_func.dump( + graph_name, + arg_names=["data"], + optimize_for_inference=True, + enable_fuse_conv_bias_nonlinearity=True, + ) + + +def main(args): + model = build_and_load(args.ckpt, name=args.name) + dump_static_graph(model, args.dump_path) + + +if __name__ == "__main__": + args = make_parser().parse_args() + main(args) diff --git a/multimodal/YOLOX/demo/MegEngine/python/models/__init__.py b/multimodal/YOLOX/demo/MegEngine/python/models/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e55d18e337f0f1630afef4312fb9c7a1cdd293e8 --- /dev/null +++ b/multimodal/YOLOX/demo/MegEngine/python/models/__init__.py @@ -0,0 +1,9 @@ +#!/usr/bin/env python3 +# -*- coding:utf-8 -*- +# Copyright (c) Megvii Inc. All rights reserved. + +from .darknet import CSPDarknet, Darknet +from .yolo_fpn import YOLOFPN +from .yolo_head import YOLOXHead +from .yolo_pafpn import YOLOPAFPN +from .yolox import YOLOX diff --git a/multimodal/YOLOX/demo/MegEngine/python/models/darknet.py b/multimodal/YOLOX/demo/MegEngine/python/models/darknet.py new file mode 100644 index 0000000000000000000000000000000000000000..47469aa683a91cdf88091956b71637cae7a97dc3 --- /dev/null +++ b/multimodal/YOLOX/demo/MegEngine/python/models/darknet.py @@ -0,0 +1,154 @@ +#!/usr/bin/env python3 +# -*- encoding: utf-8 -*- +# Copyright (c) Megvii Inc. All rights reserved. + +import megengine.module as M + +from .network_blocks import BaseConv, CSPLayer, DWConv, Focus, ResLayer, SPPBottleneck + + +class Darknet(M.Module): + # number of blocks from dark2 to dark5. + depth2blocks = {21: [1, 2, 2, 1], 53: [2, 8, 8, 4]} + + def __init__( + self, depth, in_channels=3, stem_out_channels=32, out_features=("dark3", "dark4", "dark5"), + ): + """ + Args: + depth (int): depth of darknet used in model, usually use [21, 53] for this param. 
+ in_channels (int): number of input channels, for example, use 3 for RGB image. + stem_out_channels (int): number of output channels of darknet stem. + It decides channels of darknet layer2 to layer5. + out_features (Tuple[str]): desired output layer name. + """ + super().__init__() + assert out_features, "please provide output features of Darknet" + self.out_features = out_features + self.stem = M.Sequential( + BaseConv(in_channels, stem_out_channels, ksize=3, stride=1, act="lrelu"), + *self.make_group_layer(stem_out_channels, num_blocks=1, stride=2), + ) + in_channels = stem_out_channels * 2 # 64 + + num_blocks = Darknet.depth2blocks[depth] + # create darknet with `stem_out_channels` and `num_blocks` layers. + # to make model structure more clear, we don't use `for` statement in python. + self.dark2 = M.Sequential(*self.make_group_layer(in_channels, num_blocks[0], stride=2)) + in_channels *= 2 # 128 + self.dark3 = M.Sequential(*self.make_group_layer(in_channels, num_blocks[1], stride=2)) + in_channels *= 2 # 256 + self.dark4 = M.Sequential(*self.make_group_layer(in_channels, num_blocks[2], stride=2)) + in_channels *= 2 # 512 + + self.dark5 = M.Sequential( + *self.make_group_layer(in_channels, num_blocks[3], stride=2), + *self.make_spp_block([in_channels, in_channels * 2], in_channels * 2), + ) + + def make_group_layer(self, in_channels: int, num_blocks: int, stride: int = 1): + "starts with conv layer then has `num_blocks` `ResLayer`" + return [ + BaseConv(in_channels, in_channels * 2, ksize=3, stride=stride, act="lrelu"), + *[(ResLayer(in_channels * 2)) for _ in range(num_blocks)] + ] + + def make_spp_block(self, filters_list, in_filters): + m = M.Sequential( + *[ + BaseConv(in_filters, filters_list[0], 1, stride=1, act="lrelu"), + BaseConv(filters_list[0], filters_list[1], 3, stride=1, act="lrelu"), + SPPBottleneck( + in_channels=filters_list[1], + out_channels=filters_list[0], + activation="lrelu" + ), + BaseConv(filters_list[0], filters_list[1], 3, stride=1, act="lrelu"), + BaseConv(filters_list[1], filters_list[0], 1, stride=1, act="lrelu"), + ] + ) + return m + + def forward(self, x): + outputs = {} + x = self.stem(x) + outputs["stem"] = x + x = self.dark2(x) + outputs["dark2"] = x + x = self.dark3(x) + outputs["dark3"] = x + x = self.dark4(x) + outputs["dark4"] = x + x = self.dark5(x) + outputs["dark5"] = x + return {k: v for k, v in outputs.items() if k in self.out_features} + + +class CSPDarknet(M.Module): + + def __init__( + self, dep_mul, wid_mul, + out_features=("dark3", "dark4", "dark5"), + depthwise=False, act="silu", + ): + super().__init__() + assert out_features, "please provide output features of Darknet" + self.out_features = out_features + Conv = DWConv if depthwise else BaseConv + + base_channels = int(wid_mul * 64) # 64 + base_depth = max(round(dep_mul * 3), 1) # 3 + + # stem + self.stem = Focus(3, base_channels, ksize=3, act=act) + + # dark2 + self.dark2 = M.Sequential( + Conv(base_channels, base_channels * 2, 3, 2, act=act), + CSPLayer( + base_channels * 2, base_channels * 2, + n=base_depth, depthwise=depthwise, act=act + ), + ) + + # dark3 + self.dark3 = M.Sequential( + Conv(base_channels * 2, base_channels * 4, 3, 2, act=act), + CSPLayer( + base_channels * 4, base_channels * 4, + n=base_depth * 3, depthwise=depthwise, act=act, + ), + ) + + # dark4 + self.dark4 = M.Sequential( + Conv(base_channels * 4, base_channels * 8, 3, 2, act=act), + CSPLayer( + base_channels * 8, base_channels * 8, + n=base_depth * 3, depthwise=depthwise, act=act, + ), + ) + + # dark5 + 
self.dark5 = M.Sequential( + Conv(base_channels * 8, base_channels * 16, 3, 2, act=act), + SPPBottleneck(base_channels * 16, base_channels * 16, activation=act), + CSPLayer( + base_channels * 16, base_channels * 16, n=base_depth, + shortcut=False, depthwise=depthwise, act=act, + ), + ) + + def forward(self, x): + outputs = {} + x = self.stem(x) + outputs["stem"] = x + x = self.dark2(x) + outputs["dark2"] = x + x = self.dark3(x) + outputs["dark3"] = x + x = self.dark4(x) + outputs["dark4"] = x + x = self.dark5(x) + outputs["dark5"] = x + return {k: v for k, v in outputs.items() if k in self.out_features} diff --git a/multimodal/YOLOX/demo/MegEngine/python/models/network_blocks.py b/multimodal/YOLOX/demo/MegEngine/python/models/network_blocks.py new file mode 100644 index 0000000000000000000000000000000000000000..f0e40d3f2aea5bbd00493311219821a7e5d5e8be --- /dev/null +++ b/multimodal/YOLOX/demo/MegEngine/python/models/network_blocks.py @@ -0,0 +1,183 @@ +#!/usr/bin/env python3 +# -*- encoding: utf-8 -*- +# Copyright (c) Megvii Inc. All rights reserved. + +import megengine.functional as F +import megengine.module as M + + +class UpSample(M.Module): + + def __init__(self, scale_factor=2, mode="bilinear"): + super().__init__() + self.scale_factor = scale_factor + self.mode = mode + + def forward(self, x): + return F.vision.interpolate(x, scale_factor=self.scale_factor, mode=self.mode) + + +class SiLU(M.Module): + """export-friendly version of M.SiLU()""" + + @staticmethod + def forward(x): + return x * F.sigmoid(x) + + +def get_activation(name="silu"): + if name == "silu": + module = SiLU() + elif name == "relu": + module = M.ReLU() + elif name == "lrelu": + module = M.LeakyReLU(0.1) + else: + raise AttributeError("Unsupported act type: {}".format(name)) + return module + + +class BaseConv(M.Module): + """A Conv2d -> Batchnorm -> silu/leaky relu block""" + + def __init__(self, in_channels, out_channels, ksize, stride, groups=1, bias=False, act="silu"): + super().__init__() + # same padding + pad = (ksize - 1) // 2 + self.conv = M.Conv2d( + in_channels, + out_channels, + kernel_size=ksize, + stride=stride, + padding=pad, + groups=groups, + bias=bias, + ) + self.bn = M.BatchNorm2d(out_channels) + self.act = get_activation(act) + + def forward(self, x): + return self.act(self.bn(self.conv(x))) + + def fuseforward(self, x): + return self.act(self.conv(x)) + + +class DWConv(M.Module): + """Depthwise Conv + Conv""" + def __init__(self, in_channels, out_channels, ksize, stride=1, act="silu"): + super().__init__() + self.dconv = BaseConv( + in_channels, in_channels, ksize=ksize, + stride=stride, groups=in_channels, act=act + ) + self.pconv = BaseConv( + in_channels, out_channels, ksize=1, + stride=1, groups=1, act=act + ) + + def forward(self, x): + x = self.dconv(x) + return self.pconv(x) + + +class Bottleneck(M.Module): + # Standard bottleneck + def __init__( + self, in_channels, out_channels, shortcut=True, + expansion=0.5, depthwise=False, act="silu" + ): + super().__init__() + hidden_channels = int(out_channels * expansion) + Conv = DWConv if depthwise else BaseConv + self.conv1 = BaseConv(in_channels, hidden_channels, 1, stride=1, act=act) + self.conv2 = Conv(hidden_channels, out_channels, 3, stride=1, act=act) + self.use_add = shortcut and in_channels == out_channels + + def forward(self, x): + y = self.conv2(self.conv1(x)) + if self.use_add: + y = y + x + return y + + +class ResLayer(M.Module): + "Residual layer with `in_channels` inputs." 
+ def __init__(self, in_channels: int): + super().__init__() + mid_channels = in_channels // 2 + self.layer1 = BaseConv(in_channels, mid_channels, ksize=1, stride=1, act="lrelu") + self.layer2 = BaseConv(mid_channels, in_channels, ksize=3, stride=1, act="lrelu") + + def forward(self, x): + out = self.layer2(self.layer1(x)) + return x + out + + +class SPPBottleneck(M.Module): + """Spatial pyramid pooling layer used in YOLOv3-SPP""" + def __init__(self, in_channels, out_channels, kernel_sizes=(5, 9, 13), activation="silu"): + super().__init__() + hidden_channels = in_channels // 2 + self.conv1 = BaseConv(in_channels, hidden_channels, 1, stride=1, act=activation) + self.m = [M.MaxPool2d(kernel_size=ks, stride=1, padding=ks // 2) for ks in kernel_sizes] + conv2_channels = hidden_channels * (len(kernel_sizes) + 1) + self.conv2 = BaseConv(conv2_channels, out_channels, 1, stride=1, act=activation) + + def forward(self, x): + x = self.conv1(x) + x = F.concat([x] + [m(x) for m in self.m], axis=1) + x = self.conv2(x) + return x + + +class CSPLayer(M.Module): + """C3 in yolov5, CSP Bottleneck with 3 convolutions""" + + def __init__( + self, in_channels, out_channels, n=1, + shortcut=True, expansion=0.5, depthwise=False, act="silu" + ): + """ + Args: + in_channels (int): input channels. + out_channels (int): output channels. + n (int): number of Bottlenecks. Default value: 1. + """ + # ch_in, ch_out, number, shortcut, groups, expansion + super().__init__() + hidden_channels = int(out_channels * expansion) # hidden channels + self.conv1 = BaseConv(in_channels, hidden_channels, 1, stride=1, act=act) + self.conv2 = BaseConv(in_channels, hidden_channels, 1, stride=1, act=act) + self.conv3 = BaseConv(2 * hidden_channels, out_channels, 1, stride=1, act=act) + module_list = [ + Bottleneck(hidden_channels, hidden_channels, shortcut, 1.0, depthwise, act=act) + for _ in range(n) + ] + self.m = M.Sequential(*module_list) + + def forward(self, x): + x_1 = self.conv1(x) + x_2 = self.conv2(x) + x_1 = self.m(x_1) + x = F.concat((x_1, x_2), axis=1) + return self.conv3(x) + + +class Focus(M.Module): + """Focus width and height information into channel space.""" + + def __init__(self, in_channels, out_channels, ksize=1, stride=1, act="silu"): + super().__init__() + self.conv = BaseConv(in_channels * 4, out_channels, ksize, stride, act=act) + + def forward(self, x): + # shape of x (b,c,w,h) -> y(b,4c,w/2,h/2) + patch_top_left = x[..., ::2, ::2] + patch_top_right = x[..., ::2, 1::2] + patch_bot_left = x[..., 1::2, ::2] + patch_bot_right = x[..., 1::2, 1::2] + x = F.concat( + (patch_top_left, patch_bot_left, patch_top_right, patch_bot_right,), axis=1, + ) + return self.conv(x) diff --git a/multimodal/YOLOX/demo/MegEngine/python/models/yolo_fpn.py b/multimodal/YOLOX/demo/MegEngine/python/models/yolo_fpn.py new file mode 100644 index 0000000000000000000000000000000000000000..675a7f6e6b8e42ecc8eaf90cfb5b20939b1c3e0d --- /dev/null +++ b/multimodal/YOLOX/demo/MegEngine/python/models/yolo_fpn.py @@ -0,0 +1,78 @@ +#!/usr/bin/env python3 +# -*- encoding: utf-8 -*- +# Copyright (c) Megvii Inc. All rights reserved. + +import megengine.functional as F +import megengine.module as M + +from .darknet import Darknet +from .network_blocks import BaseConv, UpSample + + +class YOLOFPN(M.Module): + """ + YOLOFPN module. Darknet 53 is the default backbone of this model. 
+ """ + + def __init__( + self, depth=53, in_features=["dark3", "dark4", "dark5"], + ): + super().__init__() + + self.backbone = Darknet(depth) + self.in_features = in_features + + # out 1 + self.out1_cbl = self._make_cbl(512, 256, 1) + self.out1 = self._make_embedding([256, 512], 512 + 256) + + # out 2 + self.out2_cbl = self._make_cbl(256, 128, 1) + self.out2 = self._make_embedding([128, 256], 256 + 128) + + # upsample + self.upsample = UpSample(scale_factor=2, mode="bilinear") + + def _make_cbl(self, _in, _out, ks): + return BaseConv(_in, _out, ks, stride=1, act="lrelu") + + def _make_embedding(self, filters_list, in_filters): + m = M.Sequential( + *[ + self._make_cbl(in_filters, filters_list[0], 1), + self._make_cbl(filters_list[0], filters_list[1], 3), + + self._make_cbl(filters_list[1], filters_list[0], 1), + + self._make_cbl(filters_list[0], filters_list[1], 3), + self._make_cbl(filters_list[1], filters_list[0], 1), + ] + ) + return m + + def forward(self, inputs): + """ + Args: + inputs (Tensor): input image. + + Returns: + Tuple[Tensor]: FPN output features.. + """ + # backbone + out_features = self.backbone(inputs) + x2, x1, x0 = [out_features[f] for f in self.in_features] + + # yolo branch 1 + x1_in = self.out1_cbl(x0) + x1_in = self.upsample(x1_in) + x1_in = F.concat([x1_in, x1], 1) + out_dark4 = self.out1(x1_in) + + # yolo branch 2 + x2_in = self.out2_cbl(out_dark4) + x2_in = self.upsample(x2_in) + x2_in = F.concat([x2_in, x2], 1) + out_dark3 = self.out2(x2_in) + + outputs = (out_dark3, out_dark4, x0) + return outputs diff --git a/multimodal/YOLOX/demo/MegEngine/python/models/yolo_head.py b/multimodal/YOLOX/demo/MegEngine/python/models/yolo_head.py new file mode 100644 index 0000000000000000000000000000000000000000..7bba674d55824bd166389453f7074f9613b49b28 --- /dev/null +++ b/multimodal/YOLOX/demo/MegEngine/python/models/yolo_head.py @@ -0,0 +1,192 @@ +#!/usr/bin/env python3 +# -*- coding:utf-8 -*- +# Copyright (c) Megvii Inc. All rights reserved. + +import megengine.functional as F +import megengine.module as M + +from .network_blocks import BaseConv, DWConv + + +def meshgrid(x, y): + """meshgrid wrapper for megengine""" + assert len(x.shape) == 1 + assert len(y.shape) == 1 + mesh_shape = (y.shape[0], x.shape[0]) + mesh_x = F.broadcast_to(x, mesh_shape) + mesh_y = F.broadcast_to(y.reshape(-1, 1), mesh_shape) + return mesh_x, mesh_y + + +class YOLOXHead(M.Module): + def __init__( + self, num_classes, width=1.0, strides=[8, 16, 32], + in_channels=[256, 512, 1024], act="silu", depthwise=False + ): + """ + Args: + act (str): activation type of conv. Defalut value: "silu". + depthwise (bool): whether apply depthwise conv in conv branch. Defalut value: False. 
+ """ + super().__init__() + + self.n_anchors = 1 + self.num_classes = num_classes + self.decode_in_inference = True # save for matching + + self.cls_convs = [] + self.reg_convs = [] + self.cls_preds = [] + self.reg_preds = [] + self.obj_preds = [] + self.stems = [] + Conv = DWConv if depthwise else BaseConv + + for i in range(len(in_channels)): + self.stems.append( + BaseConv( + in_channels=int(in_channels[i] * width), + out_channels=int(256 * width), + ksize=1, + stride=1, + act=act, + ) + ) + self.cls_convs.append( + M.Sequential( + *[ + Conv( + in_channels=int(256 * width), + out_channels=int(256 * width), + ksize=3, + stride=1, + act=act, + ), + Conv( + in_channels=int(256 * width), + out_channels=int(256 * width), + ksize=3, + stride=1, + act=act, + ), + ] + ) + ) + self.reg_convs.append( + M.Sequential( + *[ + Conv( + in_channels=int(256 * width), + out_channels=int(256 * width), + ksize=3, + stride=1, + act=act, + ), + Conv( + in_channels=int(256 * width), + out_channels=int(256 * width), + ksize=3, + stride=1, + act=act, + ), + ] + ) + ) + self.cls_preds.append( + M.Conv2d( + in_channels=int(256 * width), + out_channels=self.n_anchors * self.num_classes, + kernel_size=1, + stride=1, + padding=0, + ) + ) + self.reg_preds.append( + M.Conv2d( + in_channels=int(256 * width), + out_channels=4, + kernel_size=1, + stride=1, + padding=0, + ) + ) + self.obj_preds.append( + M.Conv2d( + in_channels=int(256 * width), + out_channels=self.n_anchors * 1, + kernel_size=1, + stride=1, + padding=0, + ) + ) + + self.use_l1 = False + self.strides = strides + self.grids = [F.zeros(1)] * len(in_channels) + + def forward(self, xin, labels=None, imgs=None): + outputs = [] + assert not self.training + + for k, (cls_conv, reg_conv, stride_this_level, x) in enumerate( + zip(self.cls_convs, self.reg_convs, self.strides, xin) + ): + x = self.stems[k](x) + cls_x = x + reg_x = x + + cls_feat = cls_conv(cls_x) + cls_output = self.cls_preds[k](cls_feat) + + reg_feat = reg_conv(reg_x) + reg_output = self.reg_preds[k](reg_feat) + obj_output = self.obj_preds[k](reg_feat) + output = F.concat([reg_output, F.sigmoid(obj_output), F.sigmoid(cls_output)], 1) + outputs.append(output) + + self.hw = [x.shape[-2:] for x in outputs] + # [batch, n_anchors_all, 85] + outputs = F.concat([F.flatten(x, start_axis=2) for x in outputs], axis=2) + outputs = F.transpose(outputs, (0, 2, 1)) + if self.decode_in_inference: + return self.decode_outputs(outputs) + else: + return outputs + + def get_output_and_grid(self, output, k, stride, dtype): + grid = self.grids[k] + + batch_size = output.shape[0] + n_ch = 5 + self.num_classes + hsize, wsize = output.shape[-2:] + if grid.shape[2:4] != output.shape[2:4]: + yv, xv = meshgrid([F.arange(hsize), F.arange(wsize)]) + grid = F.stack((xv, yv), 2).reshape(1, 1, hsize, wsize, 2).type(dtype) + self.grids[k] = grid + + output = output.view(batch_size, self.n_anchors, n_ch, hsize, wsize) + output = ( + output.permute(0, 1, 3, 4, 2) + .reshape(batch_size, self.n_anchors * hsize * wsize, -1) + ) + grid = grid.view(1, -1, 2) + output[..., :2] = (output[..., :2] + grid) * stride + output[..., 2:4] = F.exp(output[..., 2:4]) * stride + return output, grid + + def decode_outputs(self, outputs): + grids = [] + strides = [] + for (hsize, wsize), stride in zip(self.hw, self.strides): + xv, yv = meshgrid(F.arange(hsize), F.arange(wsize)) + grid = F.stack((xv, yv), 2).reshape(1, -1, 2) + grids.append(grid) + shape = grid.shape[:2] + strides.append(F.full((*shape, 1), stride)) + + grids = F.concat(grids, axis=1) 
+ strides = F.concat(strides, axis=1) + + outputs[..., :2] = (outputs[..., :2] + grids) * strides + outputs[..., 2:4] = F.exp(outputs[..., 2:4]) * strides + return outputs diff --git a/multimodal/YOLOX/demo/MegEngine/python/models/yolo_pafpn.py b/multimodal/YOLOX/demo/MegEngine/python/models/yolo_pafpn.py new file mode 100644 index 0000000000000000000000000000000000000000..86154bfa92e8da44042fb2d152322725d0039040 --- /dev/null +++ b/multimodal/YOLOX/demo/MegEngine/python/models/yolo_pafpn.py @@ -0,0 +1,111 @@ +#!/usr/bin/env python3 +# -*- encoding: utf-8 -*- +# Copyright (c) Megvii Inc. All rights reserved. + +import megengine.module as M +import megengine.functional as F + +from .darknet import CSPDarknet +from .network_blocks import BaseConv, CSPLayer, DWConv, UpSample + + +class YOLOPAFPN(M.Module): + """ + YOLOv3 model. Darknet 53 is the default backbone of this model. + """ + + def __init__( + self, depth=1.0, width=1.0, in_features=("dark3", "dark4", "dark5"), + in_channels=[256, 512, 1024], depthwise=False, act="silu", + ): + super().__init__() + self.backbone = CSPDarknet(depth, width, depthwise=depthwise, act=act) + self.in_features = in_features + self.in_channels = in_channels + Conv = DWConv if depthwise else BaseConv + + self.upsample = UpSample(scale_factor=2, mode="bilinear") + self.lateral_conv0 = BaseConv( + int(in_channels[2] * width), int(in_channels[1] * width), 1, 1, act=act + ) + self.C3_p4 = CSPLayer( + int(2 * in_channels[1] * width), + int(in_channels[1] * width), + round(3 * depth), + False, + depthwise=depthwise, + act=act, + ) # cat + + self.reduce_conv1 = BaseConv( + int(in_channels[1] * width), int(in_channels[0] * width), 1, 1, act=act + ) + self.C3_p3 = CSPLayer( + int(2 * in_channels[0] * width), + int(in_channels[0] * width), + round(3 * depth), + False, + depthwise=depthwise, + act=act, + ) + + # bottom-up conv + self.bu_conv2 = Conv( + int(in_channels[0] * width), int(in_channels[0] * width), 3, 2, act=act + ) + self.C3_n3 = CSPLayer( + int(2 * in_channels[0] * width), + int(in_channels[1] * width), + round(3 * depth), + False, + depthwise=depthwise, + act=act, + ) + + # bottom-up conv + self.bu_conv1 = Conv( + int(in_channels[1] * width), int(in_channels[1] * width), 3, 2, act=act + ) + self.C3_n4 = CSPLayer( + int(2 * in_channels[1] * width), + int(in_channels[2] * width), + round(3 * depth), + False, + depthwise=depthwise, + act=act, + ) + + def forward(self, input): + """ + Args: + inputs: input images. + + Returns: + Tuple[Tensor]: FPN feature. 
+ """ + + # backbone + out_features = self.backbone(input) + features = [out_features[f] for f in self.in_features] + [x2, x1, x0] = features + + fpn_out0 = self.lateral_conv0(x0) # 1024->512/32 + f_out0 = self.upsample(fpn_out0) # 512/16 + f_out0 = F.concat([f_out0, x1], 1) # 512->1024/16 + f_out0 = self.C3_p4(f_out0) # 1024->512/16 + + fpn_out1 = self.reduce_conv1(f_out0) # 512->256/16 + f_out1 = self.upsample(fpn_out1) # 256/8 + f_out1 = F.concat([f_out1, x2], 1) # 256->512/8 + pan_out2 = self.C3_p3(f_out1) # 512->256/8 + + p_out1 = self.bu_conv2(pan_out2) # 256->256/16 + p_out1 = F.concat([p_out1, fpn_out1], 1) # 256->512/16 + pan_out1 = self.C3_n3(p_out1) # 512->512/16 + + p_out0 = self.bu_conv1(pan_out1) # 512->512/32 + p_out0 = F.concat([p_out0, fpn_out0], 1) # 512->1024/32 + pan_out0 = self.C3_n4(p_out0) # 1024->1024/32 + + outputs = (pan_out2, pan_out1, pan_out0) + return outputs diff --git a/multimodal/YOLOX/demo/MegEngine/python/models/yolox.py b/multimodal/YOLOX/demo/MegEngine/python/models/yolox.py new file mode 100644 index 0000000000000000000000000000000000000000..657049fd36340381224938e224ffe729f39c9d90 --- /dev/null +++ b/multimodal/YOLOX/demo/MegEngine/python/models/yolox.py @@ -0,0 +1,34 @@ +#!/usr/bin/env python3 +# -*- encoding: utf-8 -*- +# Copyright (c) Megvii Inc. All rights reserved. + +import megengine.module as M + +from .yolo_head import YOLOXHead +from .yolo_pafpn import YOLOPAFPN + + +class YOLOX(M.Module): + """ + YOLOX model module. The module list is defined by create_yolov3_modules function. + The network returns loss values from three YOLO layers during training + and detection results during test. + """ + + def __init__(self, backbone=None, head=None): + super().__init__() + if backbone is None: + backbone = YOLOPAFPN() + if head is None: + head = YOLOXHead(80) + + self.backbone = backbone + self.head = head + + def forward(self, x): + # fpn output content features of [dark3, dark4, dark5] + fpn_outs = self.backbone(x) + assert not self.training + outputs = self.head(fpn_outs) + + return outputs diff --git a/multimodal/YOLOX/demo/ONNXRuntime/README.md b/multimodal/YOLOX/demo/ONNXRuntime/README.md new file mode 100644 index 0000000000000000000000000000000000000000..6af0944a6b3a984045daf2d4215f96290ed5e9af --- /dev/null +++ b/multimodal/YOLOX/demo/ONNXRuntime/README.md @@ -0,0 +1,78 @@ +## YOLOX-ONNXRuntime in Python + +This doc introduces how to convert your pytorch model into onnx, and how to run an onnxruntime demo to verify your convertion. + +### Step1: Install onnxruntime + +run the following command to install onnxruntime: +```shell +pip install onnxruntime +``` + +### Step2: Get ONNX models + +Users might download our pre-generated ONNX models or convert their own models to ONNX. + +#### Download ONNX models. 
+
+| Model | Parameters | GFLOPs | Test Size | mAP | Weights |
+|:------| :----: | :----: | :---: | :---: | :---: |
+| YOLOX-Nano | 0.91M | 1.08 | 416x416 | 25.8 | [github](https://github.com/Megvii-BaseDetection/YOLOX/releases/download/0.1.1rc0/yolox_nano.onnx) |
+| YOLOX-Tiny | 5.06M | 6.45 | 416x416 | 32.8 | [github](https://github.com/Megvii-BaseDetection/YOLOX/releases/download/0.1.1rc0/yolox_tiny.onnx) |
+| YOLOX-S | 9.0M | 26.8 | 640x640 | 40.5 | [github](https://github.com/Megvii-BaseDetection/YOLOX/releases/download/0.1.1rc0/yolox_s.onnx) |
+| YOLOX-M | 25.3M | 73.8 | 640x640 | 47.2 | [github](https://github.com/Megvii-BaseDetection/YOLOX/releases/download/0.1.1rc0/yolox_m.onnx) |
+| YOLOX-L | 54.2M | 155.6 | 640x640 | 50.1 | [github](https://github.com/Megvii-BaseDetection/YOLOX/releases/download/0.1.1rc0/yolox_l.onnx) |
+| YOLOX-Darknet53 | 63.72M | 185.3 | 640x640 | 48.0 | [github](https://github.com/Megvii-BaseDetection/YOLOX/releases/download/0.1.1rc0/yolox_darknet.onnx) |
+| YOLOX-X | 99.1M | 281.9 | 640x640 | 51.5 | [github](https://github.com/Megvii-BaseDetection/YOLOX/releases/download/0.1.1rc0/yolox_x.onnx) |
+
+#### Convert Your Model to ONNX
+
+First, move to your YOLOX home directory:
+```shell
+cd <YOLOX_HOME>
+```
+Then, you can:
+
+1. Convert a standard YOLOX model by -n:
+```shell
+python3 tools/export_onnx.py --output-name yolox_s.onnx -n yolox-s -c yolox_s.pth
+```
+Notes:
+* -n: specify a model name. The model name must be one of yolox-s, yolox-m, yolox-l, yolox-x, yolox-nano, yolox-tiny, and yolov3.
+* -c: the checkpoint of the model you have trained
+* -o: opset version, default 11. **However, if you will further convert your onnx model to [OpenVINO](https://github.com/Megvii-BaseDetection/YOLOX/demo/OpenVINO/), please specify the opset version to 10.**
+* --no-onnxsim: disable onnxsim
+* To customize an input shape for the onnx model, modify the following code in tools/export_onnx.py:
+
+    ```python
+    dummy_input = torch.randn(1, 3, exp.test_size[0], exp.test_size[1])
+    ```
+
+2. Convert a standard YOLOX model by -f. When using -f, the above command is equivalent to:
+
+```shell
+python3 tools/export_onnx.py --output-name yolox_s.onnx -f exps/default/yolox_s.py -c yolox_s.pth
+```
+
+3. To convert your customized model, please use -f:
+
+```shell
+python3 tools/export_onnx.py --output-name your_yolox.onnx -f exps/your_dir/your_yolox.py -c your_yolox.pth
+```
+
+### Step3: ONNXRuntime Demo
+
+Step1.
+```shell
+cd <YOLOX_HOME>/demo/ONNXRuntime
+```
+
+Step2.
+```shell
+python3 onnx_inference.py -m <ONNX_MODEL_PATH> -i <IMAGE_PATH> -o <OUTPUT_DIR> -s 0.3 --input_shape 640,640
+```
+Notes:
+* -m: your converted onnx model
+* -i: input image
+* -s: score threshold for visualization.
+* --input_shape: should be consistent with the shape you used for onnx conversion.
diff --git a/multimodal/YOLOX/demo/ONNXRuntime/onnx_inference.py b/multimodal/YOLOX/demo/ONNXRuntime/onnx_inference.py
new file mode 100644
index 0000000000000000000000000000000000000000..07654dc9b981d5640274254cc945bad0bbaa1cdf
--- /dev/null
+++ b/multimodal/YOLOX/demo/ONNXRuntime/onnx_inference.py
@@ -0,0 +1,86 @@
+#!/usr/bin/env python3
+# Copyright (c) Megvii, Inc. and its affiliates.
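+
+# ONNX Runtime demo script: letterbox-preprocess the input image, run the
+# exported YOLOX model with onnxruntime, decode the raw grid outputs, apply
+# class-aware NMS and write a visualization of the detections to the output dir.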
+ +import argparse +import os + +import cv2 +import numpy as np + +import onnxruntime + +from yolox.data.data_augment import preproc as preprocess +from yolox.data.datasets import COCO_CLASSES +from yolox.utils import mkdir, multiclass_nms, demo_postprocess, vis + + +def make_parser(): + parser = argparse.ArgumentParser("onnxruntime inference sample") + parser.add_argument( + "-m", + "--model", + type=str, + default="yolox.onnx", + help="Input your onnx model.", + ) + parser.add_argument( + "-i", + "--image_path", + type=str, + default='test_image.png', + help="Path to your input image.", + ) + parser.add_argument( + "-o", + "--output_dir", + type=str, + default='demo_output', + help="Path to your output directory.", + ) + parser.add_argument( + "-s", + "--score_thr", + type=float, + default=0.3, + help="Score threshould to filter the result.", + ) + parser.add_argument( + "--input_shape", + type=str, + default="640,640", + help="Specify an input shape for inference.", + ) + return parser + + +if __name__ == '__main__': + args = make_parser().parse_args() + + input_shape = tuple(map(int, args.input_shape.split(','))) + origin_img = cv2.imread(args.image_path) + img, ratio = preprocess(origin_img, input_shape) + + session = onnxruntime.InferenceSession(args.model) + + ort_inputs = {session.get_inputs()[0].name: img[None, :, :, :]} + output = session.run(None, ort_inputs) + predictions = demo_postprocess(output[0], input_shape)[0] + + boxes = predictions[:, :4] + scores = predictions[:, 4:5] * predictions[:, 5:] + + boxes_xyxy = np.ones_like(boxes) + boxes_xyxy[:, 0] = boxes[:, 0] - boxes[:, 2]/2. + boxes_xyxy[:, 1] = boxes[:, 1] - boxes[:, 3]/2. + boxes_xyxy[:, 2] = boxes[:, 0] + boxes[:, 2]/2. + boxes_xyxy[:, 3] = boxes[:, 1] + boxes[:, 3]/2. + boxes_xyxy /= ratio + dets = multiclass_nms(boxes_xyxy, scores, nms_thr=0.45, score_thr=0.1) + if dets is not None: + final_boxes, final_scores, final_cls_inds = dets[:, :4], dets[:, 4], dets[:, 5] + origin_img = vis(origin_img, final_boxes, final_scores, final_cls_inds, + conf=args.score_thr, class_names=COCO_CLASSES) + + mkdir(args.output_dir) + output_path = os.path.join(args.output_dir, os.path.basename(args.image_path)) + cv2.imwrite(output_path, origin_img) diff --git a/multimodal/YOLOX/demo/OpenVINO/README.md b/multimodal/YOLOX/demo/OpenVINO/README.md new file mode 100644 index 0000000000000000000000000000000000000000..559708f13f2f21bbb16ae331f50a625014a7b28b --- /dev/null +++ b/multimodal/YOLOX/demo/OpenVINO/README.md @@ -0,0 +1,4 @@ +## YOLOX for OpenVINO + +* [C++ Demo](./cpp) +* [Python Demo](./python) \ No newline at end of file diff --git a/multimodal/YOLOX/demo/OpenVINO/cpp/CMakeLists.txt b/multimodal/YOLOX/demo/OpenVINO/cpp/CMakeLists.txt new file mode 100644 index 0000000000000000000000000000000000000000..614739bda584016b5b46cfc356ba94d23be43464 --- /dev/null +++ b/multimodal/YOLOX/demo/OpenVINO/cpp/CMakeLists.txt @@ -0,0 +1,23 @@ +cmake_minimum_required(VERSION 3.4.1) +set(CMAKE_CXX_STANDARD 14) + +project(yolox_openvino_demo) + +find_package(OpenCV REQUIRED) +find_package(InferenceEngine REQUIRED) +find_package(ngraph REQUIRED) + +include_directories( + ${OpenCV_INCLUDE_DIRS} + ${CMAKE_CURRENT_SOURCE_DIR} + ${CMAKE_CURRENT_BINARY_DIR} +) + +add_executable(yolox_openvino yolox_openvino.cpp) + +target_link_libraries( + yolox_openvino + ${InferenceEngine_LIBRARIES} + ${NGRAPH_LIBRARIES} + ${OpenCV_LIBS} +) \ No newline at end of file diff --git a/multimodal/YOLOX/demo/OpenVINO/cpp/README.md 
b/multimodal/YOLOX/demo/OpenVINO/cpp/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..c877d94c2834da117c49df41aa936614c175c6df
--- /dev/null
+++ b/multimodal/YOLOX/demo/OpenVINO/cpp/README.md
@@ -0,0 +1,97 @@
+# YOLOX-OpenVINO in C++
+
+This tutorial includes a C++ demo for OpenVINO, as well as some converted models.
+
+### Download OpenVINO models.
+
+| Model | Parameters | GFLOPs | Test Size | mAP | Weights |
+|:------| :----: | :----: | :---: | :---: | :---: |
+| [YOLOX-Nano](../../../exps/default/nano.py) | 0.91M | 1.08 | 416x416 | 25.8 | [github](https://github.com/Megvii-BaseDetection/YOLOX/releases/download/0.1.1rc0/yolox_nano_openvino.tar.gz) |
+| [YOLOX-Tiny](../../../exps/default/yolox_tiny.py) | 5.06M | 6.45 | 416x416 | 32.8 | [github](https://github.com/Megvii-BaseDetection/YOLOX/releases/download/0.1.1rc0/yolox_tiny_openvino.tar.gz) |
+| [YOLOX-S](../../../exps/default/yolox_s.py) | 9.0M | 26.8 | 640x640 | 40.5 | [github](https://github.com/Megvii-BaseDetection/YOLOX/releases/download/0.1.1rc0/yolox_s_openvino.tar.gz) |
+| [YOLOX-M](../../../exps/default/yolox_m.py) | 25.3M | 73.8 | 640x640 | 47.2 | [github](https://github.com/Megvii-BaseDetection/YOLOX/releases/download/0.1.1rc0/yolox_m_openvino.tar.gz) |
+| [YOLOX-L](../../../exps/default/yolox_l.py) | 54.2M | 155.6 | 640x640 | 50.1 | [github](https://github.com/Megvii-BaseDetection/YOLOX/releases/download/0.1.1rc0/yolox_l_openvino.tar.gz) |
+| [YOLOX-Darknet53](../../../exps/default/yolov3.py) | 63.72M | 185.3 | 640x640 | 48.0 | [github](https://github.com/Megvii-BaseDetection/YOLOX/releases/download/0.1.1rc0/yolox_dark_openvino.tar.gz) |
+| [YOLOX-X](../../../exps/default/yolox_x.py) | 99.1M | 281.9 | 640x640 | 51.5 | [github](https://github.com/Megvii-BaseDetection/YOLOX/releases/download/0.1.1rc0/yolox_x_openvino.tar.gz) |
+
+## Install OpenVINO Toolkit
+
+Please visit the [OpenVINO Homepage](https://docs.openvinotoolkit.org/latest/get_started_guides.html) for more details.
+
+## Set up the Environment
+
+### For Linux
+
+**Option1. Set up the environment temporarily. You need to run this command every time you start a new shell window.**
+
+```shell
+source /opt/intel/openvino_2021/bin/setupvars.sh
+```
+
+**Option2. Set up the environment permanently.**
+
+*Step1.* For Linux:
+```shell
+vim ~/.bashrc
+```
+
+*Step2.* Add the following line into your file:
+
+```shell
+source /opt/intel/openvino_2021/bin/setupvars.sh
+```
+
+*Step3.* Save and exit the file, then run:
+
+```shell
+source ~/.bashrc
+```
+
+
+## Convert model
+
+1. Export ONNX model
+
+   Please refer to the [ONNX tutorial](../../ONNXRuntime). **Note that you should set --opset to 10, otherwise your next step will fail.**
+
+2. Convert ONNX to OpenVINO
+
+   ```shell
+   cd <INSTALL_DIR>/openvino_2021/deployment_tools/model_optimizer
+   ```
+
+   Install the requirements for the conversion tool:
+
+   ```shell
+   sudo ./install_prerequisites/install_prerequisites_onnx.sh
+   ```
+
+   Then convert the model:
+   ```shell
+   python3 mo.py --input_model <ONNX_MODEL> --input_shape <INPUT_SHAPE> [--data_type FP16]
+   ```
+   For example:
+   ```shell
+   python3 mo.py --input_model yolox_tiny.onnx --input_shape [1,3,416,416] --data_type FP16
+   ```
+
+   Make sure the input shape is consistent with [those](yolox_openvino.cpp#L24-L25) in the cpp file.
+
+## Build
+
+### Linux
+```shell
+source /opt/intel/openvino_2021/bin/setupvars.sh
+mkdir build
+cd build
+cmake ..
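+# the build produces the yolox_openvino binary used in the Demo section below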
+make +``` + +## Demo + +### c++ + +```shell +./yolox_openvino +``` diff --git a/multimodal/YOLOX/demo/OpenVINO/cpp/yolox_openvino.cpp b/multimodal/YOLOX/demo/OpenVINO/cpp/yolox_openvino.cpp new file mode 100644 index 0000000000000000000000000000000000000000..f42344141cd760737e9d2b617d776480d4379a7d --- /dev/null +++ b/multimodal/YOLOX/demo/OpenVINO/cpp/yolox_openvino.cpp @@ -0,0 +1,529 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include +#include +#include +#include +#include +#include +#include + +using namespace InferenceEngine; + +/** + * @brief Define names based depends on Unicode path support + */ +#define tcout std::cout +#define file_name_t std::string +#define imread_t cv::imread +#define NMS_THRESH 0.45 +#define BBOX_CONF_THRESH 0.3 + +static const int INPUT_W = 416; +static const int INPUT_H = 416; +static const int NUM_CLASSES = 80; // COCO has 80 classes. Modify this value on your own dataset. + +cv::Mat static_resize(cv::Mat& img) { + float r = std::min(INPUT_W / (img.cols*1.0), INPUT_H / (img.rows*1.0)); + // r = std::min(r, 1.0f); + int unpad_w = r * img.cols; + int unpad_h = r * img.rows; + cv::Mat re(unpad_h, unpad_w, CV_8UC3); + cv::resize(img, re, re.size()); + //cv::Mat out(INPUT_W, INPUT_H, CV_8UC3, cv::Scalar(114, 114, 114)); + cv::Mat out(INPUT_H, INPUT_W, CV_8UC3, cv::Scalar(114, 114, 114)); + re.copyTo(out(cv::Rect(0, 0, re.cols, re.rows))); + return out; +} + +void blobFromImage(cv::Mat& img, Blob::Ptr& blob){ + int channels = 3; + int img_h = img.rows; + int img_w = img.cols; + InferenceEngine::MemoryBlob::Ptr mblob = InferenceEngine::as(blob); + if (!mblob) + { + THROW_IE_EXCEPTION << "We expect blob to be inherited from MemoryBlob in matU8ToBlob, " + << "but by fact we were not able to cast inputBlob to MemoryBlob"; + } + // locked memory holder should be alive all time while access to its buffer happens + auto mblobHolder = mblob->wmap(); + + float *blob_data = mblobHolder.as(); + + for (size_t c = 0; c < channels; c++) + { + for (size_t h = 0; h < img_h; h++) + { + for (size_t w = 0; w < img_w; w++) + { + blob_data[c * img_w * img_h + h * img_w + w] = + (float)img.at(h, w)[c]; + } + } + } +} + + +struct Object +{ + cv::Rect_ rect; + int label; + float prob; +}; + +struct GridAndStride +{ + int grid0; + int grid1; + int stride; +}; + +static void generate_grids_and_stride(const int target_w, const int target_h, std::vector& strides, std::vector& grid_strides) +{ + for (auto stride : strides) + { + int num_grid_w = target_w / stride; + int num_grid_h = target_h / stride; + for (int g1 = 0; g1 < num_grid_h; g1++) + { + for (int g0 = 0; g0 < num_grid_w; g0++) + { + grid_strides.push_back((GridAndStride){g0, g1, stride}); + } + } + } +} + + +static void generate_yolox_proposals(std::vector grid_strides, const float* feat_ptr, float prob_threshold, std::vector& objects) +{ + + const int num_anchors = grid_strides.size(); + + for (int anchor_idx = 0; anchor_idx < num_anchors; anchor_idx++) + { + const int grid0 = grid_strides[anchor_idx].grid0; + const int grid1 = grid_strides[anchor_idx].grid1; + const int stride = grid_strides[anchor_idx].stride; + + const int basic_pos = anchor_idx * (NUM_CLASSES + 5); + + // yolox/models/yolo_head.py decode logic + // outputs[..., :2] = (outputs[..., :2] + grids) * strides + // outputs[..., 2:4] = torch.exp(outputs[..., 2:4]) * strides + float x_center = (feat_ptr[basic_pos + 0] + grid0) * stride; + float y_center = (feat_ptr[basic_pos + 1] + grid1) * stride; + float w = 
exp(feat_ptr[basic_pos + 2]) * stride; + float h = exp(feat_ptr[basic_pos + 3]) * stride; + float x0 = x_center - w * 0.5f; + float y0 = y_center - h * 0.5f; + + float box_objectness = feat_ptr[basic_pos + 4]; + for (int class_idx = 0; class_idx < NUM_CLASSES; class_idx++) + { + float box_cls_score = feat_ptr[basic_pos + 5 + class_idx]; + float box_prob = box_objectness * box_cls_score; + if (box_prob > prob_threshold) + { + Object obj; + obj.rect.x = x0; + obj.rect.y = y0; + obj.rect.width = w; + obj.rect.height = h; + obj.label = class_idx; + obj.prob = box_prob; + + objects.push_back(obj); + } + + } // class loop + + } // point anchor loop +} + +static inline float intersection_area(const Object& a, const Object& b) +{ + cv::Rect_ inter = a.rect & b.rect; + return inter.area(); +} + +static void qsort_descent_inplace(std::vector& faceobjects, int left, int right) +{ + int i = left; + int j = right; + float p = faceobjects[(left + right) / 2].prob; + + while (i <= j) + { + while (faceobjects[i].prob > p) + i++; + + while (faceobjects[j].prob < p) + j--; + + if (i <= j) + { + // swap + std::swap(faceobjects[i], faceobjects[j]); + + i++; + j--; + } + } + + #pragma omp parallel sections + { + #pragma omp section + { + if (left < j) qsort_descent_inplace(faceobjects, left, j); + } + #pragma omp section + { + if (i < right) qsort_descent_inplace(faceobjects, i, right); + } + } +} + + +static void qsort_descent_inplace(std::vector& objects) +{ + if (objects.empty()) + return; + + qsort_descent_inplace(objects, 0, objects.size() - 1); +} + +static void nms_sorted_bboxes(const std::vector& faceobjects, std::vector& picked, float nms_threshold) +{ + picked.clear(); + + const int n = faceobjects.size(); + + std::vector areas(n); + for (int i = 0; i < n; i++) + { + areas[i] = faceobjects[i].rect.area(); + } + + for (int i = 0; i < n; i++) + { + const Object& a = faceobjects[i]; + + int keep = 1; + for (int j = 0; j < (int)picked.size(); j++) + { + const Object& b = faceobjects[picked[j]]; + + // intersection over union + float inter_area = intersection_area(a, b); + float union_area = areas[i] + areas[picked[j]] - inter_area; + // float IoU = inter_area / union_area + if (inter_area / union_area > nms_threshold) + keep = 0; + } + + if (keep) + picked.push_back(i); + } +} + + +static void decode_outputs(const float* prob, std::vector& objects, float scale, const int img_w, const int img_h) { + std::vector proposals; + std::vector strides = {8, 16, 32}; + std::vector grid_strides; + + generate_grids_and_stride(INPUT_W, INPUT_H, strides, grid_strides); + generate_yolox_proposals(grid_strides, prob, BBOX_CONF_THRESH, proposals); + qsort_descent_inplace(proposals); + + std::vector picked; + nms_sorted_bboxes(proposals, picked, NMS_THRESH); + int count = picked.size(); + objects.resize(count); + + for (int i = 0; i < count; i++) + { + objects[i] = proposals[picked[i]]; + + // adjust offset to original unpadded + float x0 = (objects[i].rect.x) / scale; + float y0 = (objects[i].rect.y) / scale; + float x1 = (objects[i].rect.x + objects[i].rect.width) / scale; + float y1 = (objects[i].rect.y + objects[i].rect.height) / scale; + + // clip + x0 = std::max(std::min(x0, (float)(img_w - 1)), 0.f); + y0 = std::max(std::min(y0, (float)(img_h - 1)), 0.f); + x1 = std::max(std::min(x1, (float)(img_w - 1)), 0.f); + y1 = std::max(std::min(y1, (float)(img_h - 1)), 0.f); + + objects[i].rect.x = x0; + objects[i].rect.y = y0; + objects[i].rect.width = x1 - x0; + objects[i].rect.height = y1 - y0; + } +} + +const float 
color_list[80][3] = +{ + {0.000, 0.447, 0.741}, + {0.850, 0.325, 0.098}, + {0.929, 0.694, 0.125}, + {0.494, 0.184, 0.556}, + {0.466, 0.674, 0.188}, + {0.301, 0.745, 0.933}, + {0.635, 0.078, 0.184}, + {0.300, 0.300, 0.300}, + {0.600, 0.600, 0.600}, + {1.000, 0.000, 0.000}, + {1.000, 0.500, 0.000}, + {0.749, 0.749, 0.000}, + {0.000, 1.000, 0.000}, + {0.000, 0.000, 1.000}, + {0.667, 0.000, 1.000}, + {0.333, 0.333, 0.000}, + {0.333, 0.667, 0.000}, + {0.333, 1.000, 0.000}, + {0.667, 0.333, 0.000}, + {0.667, 0.667, 0.000}, + {0.667, 1.000, 0.000}, + {1.000, 0.333, 0.000}, + {1.000, 0.667, 0.000}, + {1.000, 1.000, 0.000}, + {0.000, 0.333, 0.500}, + {0.000, 0.667, 0.500}, + {0.000, 1.000, 0.500}, + {0.333, 0.000, 0.500}, + {0.333, 0.333, 0.500}, + {0.333, 0.667, 0.500}, + {0.333, 1.000, 0.500}, + {0.667, 0.000, 0.500}, + {0.667, 0.333, 0.500}, + {0.667, 0.667, 0.500}, + {0.667, 1.000, 0.500}, + {1.000, 0.000, 0.500}, + {1.000, 0.333, 0.500}, + {1.000, 0.667, 0.500}, + {1.000, 1.000, 0.500}, + {0.000, 0.333, 1.000}, + {0.000, 0.667, 1.000}, + {0.000, 1.000, 1.000}, + {0.333, 0.000, 1.000}, + {0.333, 0.333, 1.000}, + {0.333, 0.667, 1.000}, + {0.333, 1.000, 1.000}, + {0.667, 0.000, 1.000}, + {0.667, 0.333, 1.000}, + {0.667, 0.667, 1.000}, + {0.667, 1.000, 1.000}, + {1.000, 0.000, 1.000}, + {1.000, 0.333, 1.000}, + {1.000, 0.667, 1.000}, + {0.333, 0.000, 0.000}, + {0.500, 0.000, 0.000}, + {0.667, 0.000, 0.000}, + {0.833, 0.000, 0.000}, + {1.000, 0.000, 0.000}, + {0.000, 0.167, 0.000}, + {0.000, 0.333, 0.000}, + {0.000, 0.500, 0.000}, + {0.000, 0.667, 0.000}, + {0.000, 0.833, 0.000}, + {0.000, 1.000, 0.000}, + {0.000, 0.000, 0.167}, + {0.000, 0.000, 0.333}, + {0.000, 0.000, 0.500}, + {0.000, 0.000, 0.667}, + {0.000, 0.000, 0.833}, + {0.000, 0.000, 1.000}, + {0.000, 0.000, 0.000}, + {0.143, 0.143, 0.143}, + {0.286, 0.286, 0.286}, + {0.429, 0.429, 0.429}, + {0.571, 0.571, 0.571}, + {0.714, 0.714, 0.714}, + {0.857, 0.857, 0.857}, + {0.000, 0.447, 0.741}, + {0.314, 0.717, 0.741}, + {0.50, 0.5, 0} +}; + +static void draw_objects(const cv::Mat& bgr, const std::vector& objects) +{ + static const char* class_names[] = { + "person", "bicycle", "car", "motorcycle", "airplane", "bus", "train", "truck", "boat", "traffic light", + "fire hydrant", "stop sign", "parking meter", "bench", "bird", "cat", "dog", "horse", "sheep", "cow", + "elephant", "bear", "zebra", "giraffe", "backpack", "umbrella", "handbag", "tie", "suitcase", "frisbee", + "skis", "snowboard", "sports ball", "kite", "baseball bat", "baseball glove", "skateboard", "surfboard", + "tennis racket", "bottle", "wine glass", "cup", "fork", "knife", "spoon", "bowl", "banana", "apple", + "sandwich", "orange", "broccoli", "carrot", "hot dog", "pizza", "donut", "cake", "chair", "couch", + "potted plant", "bed", "dining table", "toilet", "tv", "laptop", "mouse", "remote", "keyboard", "cell phone", + "microwave", "oven", "toaster", "sink", "refrigerator", "book", "clock", "vase", "scissors", "teddy bear", + "hair drier", "toothbrush" + }; + + cv::Mat image = bgr.clone(); + + for (size_t i = 0; i < objects.size(); i++) + { + const Object& obj = objects[i]; + + fprintf(stderr, "%d = %.5f at %.2f %.2f %.2f x %.2f\n", obj.label, obj.prob, + obj.rect.x, obj.rect.y, obj.rect.width, obj.rect.height); + + cv::Scalar color = cv::Scalar(color_list[obj.label][0], color_list[obj.label][1], color_list[obj.label][2]); + float c_mean = cv::mean(color)[0]; + cv::Scalar txt_color; + if (c_mean > 0.5){ + txt_color = cv::Scalar(0, 0, 0); + }else{ + txt_color = cv::Scalar(255, 255, 
255); + } + + cv::rectangle(image, obj.rect, color * 255, 2); + + char text[256]; + sprintf(text, "%s %.1f%%", class_names[obj.label], obj.prob * 100); + + int baseLine = 0; + cv::Size label_size = cv::getTextSize(text, cv::FONT_HERSHEY_SIMPLEX, 0.4, 1, &baseLine); + + cv::Scalar txt_bk_color = color * 0.7 * 255; + + int x = obj.rect.x; + int y = obj.rect.y + 1; + //int y = obj.rect.y - label_size.height - baseLine; + if (y > image.rows) + y = image.rows; + //if (x + label_size.width > image.cols) + //x = image.cols - label_size.width; + + cv::rectangle(image, cv::Rect(cv::Point(x, y), cv::Size(label_size.width, label_size.height + baseLine)), + txt_bk_color, -1); + + cv::putText(image, text, cv::Point(x, y + label_size.height), + cv::FONT_HERSHEY_SIMPLEX, 0.4, txt_color, 1); + } + + cv::imwrite("_demo.jpg" , image); + fprintf(stderr, "save vis file\n"); + /* cv::imshow("image", image); */ + /* cv::waitKey(0); */ +} + + +int main(int argc, char* argv[]) { + try { + // ------------------------------ Parsing and validation of input arguments + // --------------------------------- + if (argc != 4) { + tcout << "Usage : " << argv[0] << " " << std::endl; + return EXIT_FAILURE; + } + + const file_name_t input_model {argv[1]}; + const file_name_t input_image_path {argv[2]}; + const std::string device_name {argv[3]}; + // ----------------------------------------------------------------------------------------------------- + + // --------------------------- Step 1. Initialize inference engine core + // ------------------------------------- + Core ie; + // ----------------------------------------------------------------------------------------------------- + + // Step 2. Read a model in OpenVINO Intermediate Representation (.xml and + // .bin files) or ONNX (.onnx file) format + CNNNetwork network = ie.ReadNetwork(input_model); + if (network.getOutputsInfo().size() != 1) + throw std::logic_error("Sample supports topologies with 1 output only"); + if (network.getInputsInfo().size() != 1) + throw std::logic_error("Sample supports topologies with 1 input only"); + // ----------------------------------------------------------------------------------------------------- + + // --------------------------- Step 3. Configure input & output + // --------------------------------------------- + // --------------------------- Prepare input blobs + // ----------------------------------------------------- + InputInfo::Ptr input_info = network.getInputsInfo().begin()->second; + std::string input_name = network.getInputsInfo().begin()->first; + + /* Mark input as resizable by setting of a resize algorithm. + * In this case we will be able to set an input blob of any shape to an + * infer request. Resize and layout conversions are executed automatically + * during inference */ + //input_info->getPreProcess().setResizeAlgorithm(RESIZE_BILINEAR); + //input_info->setLayout(Layout::NHWC); + //input_info->setPrecision(Precision::FP32); + + // --------------------------- Prepare output blobs + // ---------------------------------------------------- + if (network.getOutputsInfo().empty()) { + std::cerr << "Network outputs info is empty" << std::endl; + return EXIT_FAILURE; + } + DataPtr output_info = network.getOutputsInfo().begin()->second; + std::string output_name = network.getOutputsInfo().begin()->first; + + output_info->setPrecision(Precision::FP32); + // ----------------------------------------------------------------------------------------------------- + + // --------------------------- Step 4. 
Loading a model to the device + // ------------------------------------------ + ExecutableNetwork executable_network = ie.LoadNetwork(network, device_name); + // ----------------------------------------------------------------------------------------------------- + + // --------------------------- Step 5. Create an infer request + // ------------------------------------------------- + InferRequest infer_request = executable_network.CreateInferRequest(); + // ----------------------------------------------------------------------------------------------------- + + // --------------------------- Step 6. Prepare input + // -------------------------------------------------------- + /* Read input image to a blob and set it to an infer request without resize + * and layout conversions. */ + cv::Mat image = imread_t(input_image_path); + cv::Mat pr_img = static_resize(image); + Blob::Ptr imgBlob = infer_request.GetBlob(input_name); // just wrap Mat data by Blob::Ptr + blobFromImage(pr_img, imgBlob); + + // infer_request.SetBlob(input_name, imgBlob); // infer_request accepts input blob of any size + // ----------------------------------------------------------------------------------------------------- + + // --------------------------- Step 7. Do inference + // -------------------------------------------------------- + /* Running the request synchronously */ + infer_request.Infer(); + // ----------------------------------------------------------------------------------------------------- + + // --------------------------- Step 8. Process output + // ------------------------------------------------------ + const Blob::Ptr output_blob = infer_request.GetBlob(output_name); + MemoryBlob::CPtr moutput = as(output_blob); + if (!moutput) { + throw std::logic_error("We expect output to be inherited from MemoryBlob, " + "but by fact we were not able to cast output to MemoryBlob"); + } + // locked memory holder should be alive all time while access to its buffer + // happens + auto moutputHolder = moutput->rmap(); + const float* net_pred = moutputHolder.as::value_type*>(); + + int img_w = image.cols; + int img_h = image.rows; + float scale = std::min(INPUT_W / (image.cols*1.0), INPUT_H / (image.rows*1.0)); + std::vector objects; + + decode_outputs(net_pred, objects, scale, img_w, img_h); + draw_objects(image, objects); + + // ----------------------------------------------------------------------------------------------------- + } catch (const std::exception& ex) { + std::cerr << ex.what() << std::endl; + return EXIT_FAILURE; + } + return EXIT_SUCCESS; +} diff --git a/multimodal/YOLOX/demo/OpenVINO/python/README.md b/multimodal/YOLOX/demo/OpenVINO/python/README.md new file mode 100644 index 0000000000000000000000000000000000000000..bbaf5aca44e86523c428735745848d2839351552 --- /dev/null +++ b/multimodal/YOLOX/demo/OpenVINO/python/README.md @@ -0,0 +1,89 @@ +# YOLOX-OpenVINO in Python + +This tutorial includes a Python demo for OpenVINO, as well as some converted models. + +### Download OpenVINO models. 
+
+| Model | Parameters | GFLOPs | Test Size | mAP | Weights |
+|:------| :----: | :----: | :---: | :---: | :---: |
+| [YOLOX-Nano](../../../exps/default/nano.py) | 0.91M | 1.08 | 416x416 | 25.8 | [github](https://github.com/Megvii-BaseDetection/YOLOX/releases/download/0.1.1rc0/yolox_nano_openvino.tar.gz) |
+| [YOLOX-Tiny](../../../exps/default/yolox_tiny.py) | 5.06M | 6.45 | 416x416 | 32.8 | [github](https://github.com/Megvii-BaseDetection/YOLOX/releases/download/0.1.1rc0/yolox_tiny_openvino.tar.gz) |
+| [YOLOX-S](../../../exps/default/yolox_s.py) | 9.0M | 26.8 | 640x640 | 40.5 | [github](https://github.com/Megvii-BaseDetection/YOLOX/releases/download/0.1.1rc0/yolox_s_openvino.tar.gz) |
+| [YOLOX-M](../../../exps/default/yolox_m.py) | 25.3M | 73.8 | 640x640 | 47.2 | [github](https://github.com/Megvii-BaseDetection/YOLOX/releases/download/0.1.1rc0/yolox_m_openvino.tar.gz) |
+| [YOLOX-L](../../../exps/default/yolox_l.py) | 54.2M | 155.6 | 640x640 | 50.1 | [github](https://github.com/Megvii-BaseDetection/YOLOX/releases/download/0.1.1rc0/yolox_l_openvino.tar.gz) |
+| [YOLOX-Darknet53](../../../exps/default/yolov3.py) | 63.72M | 185.3 | 640x640 | 48.0 | [github](https://github.com/Megvii-BaseDetection/YOLOX/releases/download/0.1.1rc0/yolox_dark_openvino.tar.gz) |
+| [YOLOX-X](../../../exps/default/yolox_x.py) | 99.1M | 281.9 | 640x640 | 51.5 | [github](https://github.com/Megvii-BaseDetection/YOLOX/releases/download/0.1.1rc0/yolox_x_openvino.tar.gz) |
+
+## Install OpenVINO Toolkit
+
+Please visit the [OpenVINO Homepage](https://docs.openvinotoolkit.org/latest/get_started_guides.html) for more details.
+
+## Set up the Environment
+
+### For Linux
+
+**Option1. Set up the environment temporarily. You need to run this command every time you start a new shell window.**
+
+```shell
+source /opt/intel/openvino_2021/bin/setupvars.sh
+```
+
+**Option2. Set up the environment permanently.**
+
+*Step1.* For Linux:
+```shell
+vim ~/.bashrc
+```
+
+*Step2.* Add the following line into your file:
+
+```shell
+source /opt/intel/openvino_2021/bin/setupvars.sh
+```
+
+*Step3.* Save and exit the file, then run:
+
+```shell
+source ~/.bashrc
+```
+
+
+## Convert model
+
+1. Export ONNX model
+
+   Please refer to the [ONNX tutorial](https://github.com/Megvii-BaseDetection/YOLOX/demo/ONNXRuntime). **Note that you should set --opset to 10, otherwise your next step will fail.**
+
+2. Convert ONNX to OpenVINO
+
+   ```shell
+   cd <INSTALL_DIR>/openvino_2021/deployment_tools/model_optimizer
+   ```
+
+   Install the requirements for the conversion tool:
+
+   ```shell
+   sudo ./install_prerequisites/install_prerequisites_onnx.sh
+   ```
+
+   Then convert the model:
+   ```shell
+   python3 mo.py --input_model <ONNX_MODEL> --input_shape <INPUT_SHAPE> [--data_type FP16]
+   ```
+   For example:
+   ```shell
+   python3 mo.py --input_model yolox.onnx --input_shape [1,3,640,640] --data_type FP16 --output_dir converted_output
+   ```
+
+## Demo
+
+### python
+
+```shell
+python openvino_inference.py -m <XML_MODEL_PATH> -i <IMAGE_PATH>
+```
+or
+```shell
+python openvino_inference.py -m <XML_MODEL_PATH> -i <IMAGE_PATH> -o <OUTPUT_DIR> -s <SCORE_THR> -d <DEVICE>
+```
+
diff --git a/multimodal/YOLOX/demo/OpenVINO/python/openvino_inference.py b/multimodal/YOLOX/demo/OpenVINO/python/openvino_inference.py
new file mode 100644
index 0000000000000000000000000000000000000000..00952880043c8b24c738324ee3f527aca7774f75
--- /dev/null
+++ b/multimodal/YOLOX/demo/OpenVINO/python/openvino_inference.py
@@ -0,0 +1,156 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+# Copyright (C) 2018-2021 Intel Corporation
+# SPDX-License-Identifier: Apache-2.0
+# Copyright (c) Megvii, Inc. and its affiliates.
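+
+# OpenVINO demo script: read the converted network with the Inference Engine,
+# run a synchronous inference request on the letterboxed image, then decode,
+# NMS and visualize the detections in the same way as the ONNX Runtime demo.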
+ +import argparse +import logging as log +import os +import sys + +import cv2 +import numpy as np + +from openvino.inference_engine import IECore + +from yolox.data.data_augment import preproc as preprocess +from yolox.data.datasets import COCO_CLASSES +from yolox.utils import mkdir, multiclass_nms, demo_postprocess, vis + + +def parse_args() -> argparse.Namespace: + """Parse and return command line arguments""" + parser = argparse.ArgumentParser(add_help=False) + args = parser.add_argument_group('Options') + args.add_argument( + '-h', + '--help', + action='help', + help='Show this help message and exit.') + args.add_argument( + '-m', + '--model', + required=True, + type=str, + help='Required. Path to an .xml or .onnx file with a trained model.') + args.add_argument( + '-i', + '--input', + required=True, + type=str, + help='Required. Path to an image file.') + args.add_argument( + '-o', + '--output_dir', + type=str, + default='demo_output', + help='Path to your output dir.') + args.add_argument( + '-s', + '--score_thr', + type=float, + default=0.3, + help="Score threshould to visualize the result.") + args.add_argument( + '-d', + '--device', + default='CPU', + type=str, + help='Optional. Specify the target device to infer on; CPU, GPU, \ + MYRIAD, HDDL or HETERO: is acceptable. The sample will look \ + for a suitable plugin for device specified. Default value \ + is CPU.') + args.add_argument( + '--labels', + default=None, + type=str, + help='Option:al. Path to a labels mapping file.') + args.add_argument( + '-nt', + '--number_top', + default=10, + type=int, + help='Optional. Number of top results.') + return parser.parse_args() + + +def main(): + log.basicConfig(format='[ %(levelname)s ] %(message)s', level=log.INFO, stream=sys.stdout) + args = parse_args() + + # ---------------------------Step 1. Initialize inference engine core-------------------------------------------------- + log.info('Creating Inference Engine') + ie = IECore() + + # ---------------------------Step 2. Read a model in OpenVINO Intermediate Representation or ONNX format--------------- + log.info(f'Reading the network: {args.model}') + # (.xml and .bin files) or (.onnx file) + net = ie.read_network(model=args.model) + + if len(net.input_info) != 1: + log.error('Sample supports only single input topologies') + return -1 + if len(net.outputs) != 1: + log.error('Sample supports only single output topologies') + return -1 + + # ---------------------------Step 3. Configure input & output---------------------------------------------------------- + log.info('Configuring input and output blobs') + # Get names of input and output blobs + input_blob = next(iter(net.input_info)) + out_blob = next(iter(net.outputs)) + + # Set input and output precision manually + net.input_info[input_blob].precision = 'FP32' + net.outputs[out_blob].precision = 'FP16' + + # Get a number of classes recognized by a model + num_of_classes = max(net.outputs[out_blob].shape) + + # ---------------------------Step 4. Loading model to the device------------------------------------------------------- + log.info('Loading the model to the plugin') + exec_net = ie.load_network(network=net, device_name=args.device) + + # ---------------------------Step 5. Create infer request-------------------------------------------------------------- + # load_network() method of the IECore class with a specified number of requests (default 1) returns an ExecutableNetwork + # instance which stores infer requests. So you already created Infer requests in the previous step. 
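+    # The implicit request created by load_network() is what exec_net.infer()
+    # uses below; the letterbox ratio returned by preprocess() is kept so the
+    # predicted boxes can be rescaled to the original image resolution.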
+ + # ---------------------------Step 6. Prepare input--------------------------------------------------------------------- + origin_img = cv2.imread(args.input) + _, _, h, w = net.input_info[input_blob].input_data.shape + image, ratio = preprocess(origin_img, (h, w)) + + # ---------------------------Step 7. Do inference---------------------------------------------------------------------- + log.info('Starting inference in synchronous mode') + res = exec_net.infer(inputs={input_blob: image}) + + # ---------------------------Step 8. Process output-------------------------------------------------------------------- + res = res[out_blob] + + predictions = demo_postprocess(res, (h, w))[0] + + boxes = predictions[:, :4] + scores = predictions[:, 4, None] * predictions[:, 5:] + + boxes_xyxy = np.ones_like(boxes) + boxes_xyxy[:, 0] = boxes[:, 0] - boxes[:, 2]/2. + boxes_xyxy[:, 1] = boxes[:, 1] - boxes[:, 3]/2. + boxes_xyxy[:, 2] = boxes[:, 0] + boxes[:, 2]/2. + boxes_xyxy[:, 3] = boxes[:, 1] + boxes[:, 3]/2. + boxes_xyxy /= ratio + dets = multiclass_nms(boxes_xyxy, scores, nms_thr=0.45, score_thr=0.1) + + if dets is not None: + final_boxes = dets[:, :4] + final_scores, final_cls_inds = dets[:, 4], dets[:, 5] + origin_img = vis(origin_img, final_boxes, final_scores, final_cls_inds, + conf=args.score_thr, class_names=COCO_CLASSES) + + mkdir(args.output_dir) + output_path = os.path.join(args.output_dir, os.path.basename(args.input)) + cv2.imwrite(output_path, origin_img) + + +if __name__ == '__main__': + sys.exit(main()) diff --git a/multimodal/YOLOX/demo/TensorRT/cpp/CMakeLists.txt b/multimodal/YOLOX/demo/TensorRT/cpp/CMakeLists.txt new file mode 100644 index 0000000000000000000000000000000000000000..5f14edd594a5c9106bc5d50bc352c5bb14f716a4 --- /dev/null +++ b/multimodal/YOLOX/demo/TensorRT/cpp/CMakeLists.txt @@ -0,0 +1,36 @@ +cmake_minimum_required(VERSION 2.6) + +project(yolox) + +add_definitions(-std=c++11) + +option(CUDA_USE_STATIC_CUDA_RUNTIME OFF) +set(CMAKE_CXX_STANDARD 11) +set(CMAKE_BUILD_TYPE Debug) + +find_package(CUDA REQUIRED) + +include_directories(${PROJECT_SOURCE_DIR}/include) +# include and link dirs of cuda and tensorrt, you need adapt them if yours are different +# cuda +include_directories(/data/cuda/cuda-10.2/cuda/include) +link_directories(/data/cuda/cuda-10.2/cuda/lib64) +# cudnn +include_directories(/data/cuda/cuda-10.2/cudnn/v8.0.4/include) +link_directories(/data/cuda/cuda-10.2/cudnn/v8.0.4/lib64) +# tensorrt +include_directories(/data/cuda/cuda-10.2/TensorRT/v7.2.1.6/include) +link_directories(/data/cuda/cuda-10.2/TensorRT/v7.2.1.6/lib) + +set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++11 -Wall -Ofast -Wfatal-errors -D_MWAITXINTRIN_H_INCLUDED") + +find_package(OpenCV) +include_directories(${OpenCV_INCLUDE_DIRS}) + +add_executable(yolox ${PROJECT_SOURCE_DIR}/yolox.cpp) +target_link_libraries(yolox nvinfer) +target_link_libraries(yolox cudart) +target_link_libraries(yolox ${OpenCV_LIBS}) + +add_definitions(-O2 -pthread) + diff --git a/multimodal/YOLOX/demo/TensorRT/cpp/README.md b/multimodal/YOLOX/demo/TensorRT/cpp/README.md new file mode 100644 index 0000000000000000000000000000000000000000..0158e7dacdf0af0d427a917e83adf8e7b4e02fac --- /dev/null +++ b/multimodal/YOLOX/demo/TensorRT/cpp/README.md @@ -0,0 +1,48 @@ +# YOLOX-TensorRT in C++ + +As YOLOX models are easy to convert to tensorrt using [torch2trt gitrepo](https://github.com/NVIDIA-AI-IOT/torch2trt), +our C++ demo does not include the model converting or constructing like other tenorrt demos. 
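+
+For reference, the conversion amounts to only a few lines of Python with torch2trt. The snippet below is a sketch rather than the official tool: it assumes `model` is a YOLOX torch module already loaded with trained weights, in eval mode on the GPU, that box decoding is left to this C++ demo, and that the engine is saved under the `model_trt.engine` name expected in Step 1.
+
+```python
+import torch
+from torch2trt import torch2trt
+
+model.head.decode_in_inference = False           # emit raw grid outputs; this demo decodes in C++
+x = torch.ones(1, 3, 640, 640).cuda()            # dummy input at the export resolution
+model_trt = torch2trt(model, [x], fp16_mode=True, max_workspace_size=1 << 32)
+
+with open("model_trt.engine", "wb") as f:        # serialized engine consumed by yolox.cpp
+    f.write(model_trt.engine.serialize())
+```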
+ + +## Step 1: Prepare serialized engine file + +Follow the trt [python demo README](https://github.com/Megvii-BaseDetection/YOLOX/blob/main/demo/TensorRT/python/README.md) to convert and save the serialized engine file. + +Check the 'model_trt.engine' file generated from Step 1, which will be automatically saved at the current demo dir. + + +## Step 2: build the demo + +Please follow the [TensorRT Installation Guide](https://docs.nvidia.com/deeplearning/tensorrt/install-guide/index.html) to install TensorRT. + +And you should set the TensorRT path and CUDA path in CMakeLists.txt. + +If you train your custom dataset, you may need to modify the value of `num_class`. + +```c++ +const int num_class = 80; +``` + +Install opencv with ```sudo apt-get install libopencv-dev``` (we don't need a higher version of opencv like v3.3+). + +build the demo: + +```shell +mkdir build +cd build +cmake .. +make +``` + +Then run the demo: + +```shell +./yolox ../model_trt.engine -i ../../../../assets/dog.jpg +``` + +or + +```shell +./yolox -i +``` + diff --git a/multimodal/YOLOX/demo/TensorRT/cpp/logging.h b/multimodal/YOLOX/demo/TensorRT/cpp/logging.h new file mode 100644 index 0000000000000000000000000000000000000000..0edb75fab69b539b755422263c6f474576e21ee6 --- /dev/null +++ b/multimodal/YOLOX/demo/TensorRT/cpp/logging.h @@ -0,0 +1,503 @@ +/* + * Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef TENSORRT_LOGGING_H +#define TENSORRT_LOGGING_H + +#include "NvInferRuntimeCommon.h" +#include +#include +#include +#include +#include +#include +#include + +using Severity = nvinfer1::ILogger::Severity; + +class LogStreamConsumerBuffer : public std::stringbuf +{ +public: + LogStreamConsumerBuffer(std::ostream& stream, const std::string& prefix, bool shouldLog) + : mOutput(stream) + , mPrefix(prefix) + , mShouldLog(shouldLog) + { + } + + LogStreamConsumerBuffer(LogStreamConsumerBuffer&& other) + : mOutput(other.mOutput) + { + } + + ~LogStreamConsumerBuffer() + { + // std::streambuf::pbase() gives a pointer to the beginning of the buffered part of the output sequence + // std::streambuf::pptr() gives a pointer to the current position of the output sequence + // if the pointer to the beginning is not equal to the pointer to the current position, + // call putOutput() to log the output to the stream + if (pbase() != pptr()) + { + putOutput(); + } + } + + // synchronizes the stream buffer and returns 0 on success + // synchronizing the stream buffer consists of inserting the buffer contents into the stream, + // resetting the buffer and flushing the stream + virtual int sync() + { + putOutput(); + return 0; + } + + void putOutput() + { + if (mShouldLog) + { + // prepend timestamp + std::time_t timestamp = std::time(nullptr); + tm* tm_local = std::localtime(×tamp); + std::cout << "["; + std::cout << std::setw(2) << std::setfill('0') << 1 + tm_local->tm_mon << "/"; + std::cout << std::setw(2) << std::setfill('0') << tm_local->tm_mday << "/"; + std::cout << std::setw(4) << std::setfill('0') << 1900 + tm_local->tm_year << "-"; + std::cout << std::setw(2) << std::setfill('0') << tm_local->tm_hour << ":"; + std::cout << std::setw(2) << std::setfill('0') << tm_local->tm_min << ":"; + std::cout << std::setw(2) << std::setfill('0') << tm_local->tm_sec << "] "; + // std::stringbuf::str() gets the string contents of the buffer + // insert the buffer contents pre-appended by the appropriate prefix into the stream + mOutput << mPrefix << str(); + // set the buffer to empty + str(""); + // flush the stream + mOutput.flush(); + } + } + + void setShouldLog(bool shouldLog) + { + mShouldLog = shouldLog; + } + +private: + std::ostream& mOutput; + std::string mPrefix; + bool mShouldLog; +}; + +//! +//! \class LogStreamConsumerBase +//! \brief Convenience object used to initialize LogStreamConsumerBuffer before std::ostream in LogStreamConsumer +//! +class LogStreamConsumerBase +{ +public: + LogStreamConsumerBase(std::ostream& stream, const std::string& prefix, bool shouldLog) + : mBuffer(stream, prefix, shouldLog) + { + } + +protected: + LogStreamConsumerBuffer mBuffer; +}; + +//! +//! \class LogStreamConsumer +//! \brief Convenience object used to facilitate use of C++ stream syntax when logging messages. +//! Order of base classes is LogStreamConsumerBase and then std::ostream. +//! This is because the LogStreamConsumerBase class is used to initialize the LogStreamConsumerBuffer member field +//! in LogStreamConsumer and then the address of the buffer is passed to std::ostream. +//! This is necessary to prevent the address of an uninitialized buffer from being passed to std::ostream. +//! Please do not change the order of the parent classes. +//! +class LogStreamConsumer : protected LogStreamConsumerBase, public std::ostream +{ +public: + //! \brief Creates a LogStreamConsumer which logs messages with level severity. + //! 
Reportable severity determines if the messages are severe enough to be logged. + LogStreamConsumer(Severity reportableSeverity, Severity severity) + : LogStreamConsumerBase(severityOstream(severity), severityPrefix(severity), severity <= reportableSeverity) + , std::ostream(&mBuffer) // links the stream buffer with the stream + , mShouldLog(severity <= reportableSeverity) + , mSeverity(severity) + { + } + + LogStreamConsumer(LogStreamConsumer&& other) + : LogStreamConsumerBase(severityOstream(other.mSeverity), severityPrefix(other.mSeverity), other.mShouldLog) + , std::ostream(&mBuffer) // links the stream buffer with the stream + , mShouldLog(other.mShouldLog) + , mSeverity(other.mSeverity) + { + } + + void setReportableSeverity(Severity reportableSeverity) + { + mShouldLog = mSeverity <= reportableSeverity; + mBuffer.setShouldLog(mShouldLog); + } + +private: + static std::ostream& severityOstream(Severity severity) + { + return severity >= Severity::kINFO ? std::cout : std::cerr; + } + + static std::string severityPrefix(Severity severity) + { + switch (severity) + { + case Severity::kINTERNAL_ERROR: return "[F] "; + case Severity::kERROR: return "[E] "; + case Severity::kWARNING: return "[W] "; + case Severity::kINFO: return "[I] "; + case Severity::kVERBOSE: return "[V] "; + default: assert(0); return ""; + } + } + + bool mShouldLog; + Severity mSeverity; +}; + +//! \class Logger +//! +//! \brief Class which manages logging of TensorRT tools and samples +//! +//! \details This class provides a common interface for TensorRT tools and samples to log information to the console, +//! and supports logging two types of messages: +//! +//! - Debugging messages with an associated severity (info, warning, error, or internal error/fatal) +//! - Test pass/fail messages +//! +//! The advantage of having all samples use this class for logging as opposed to emitting directly to stdout/stderr is +//! that the logic for controlling the verbosity and formatting of sample output is centralized in one location. +//! +//! In the future, this class could be extended to support dumping test results to a file in some standard format +//! (for example, JUnit XML), and providing additional metadata (e.g. timing the duration of a test run). +//! +//! TODO: For backwards compatibility with existing samples, this class inherits directly from the nvinfer1::ILogger +//! interface, which is problematic since there isn't a clean separation between messages coming from the TensorRT +//! library and messages coming from the sample. +//! +//! In the future (once all samples are updated to use Logger::getTRTLogger() to access the ILogger) we can refactor the +//! class to eliminate the inheritance and instead make the nvinfer1::ILogger implementation a member of the Logger +//! object. + +class Logger : public nvinfer1::ILogger +{ +public: + Logger(Severity severity = Severity::kWARNING) + : mReportableSeverity(severity) + { + } + + //! + //! \enum TestResult + //! \brief Represents the state of a given test + //! + enum class TestResult + { + kRUNNING, //!< The test is running + kPASSED, //!< The test passed + kFAILED, //!< The test failed + kWAIVED //!< The test was waived + }; + + //! + //! \brief Forward-compatible method for retrieving the nvinfer::ILogger associated with this Logger + //! \return The nvinfer1::ILogger associated with this Logger + //! + //! TODO Once all samples are updated to use this method to register the logger with TensorRT, + //! we can eliminate the inheritance of Logger from ILogger + //! 
+ nvinfer1::ILogger& getTRTLogger() + { + return *this; + } + + //! + //! \brief Implementation of the nvinfer1::ILogger::log() virtual method + //! + //! Note samples should not be calling this function directly; it will eventually go away once we eliminate the + //! inheritance from nvinfer1::ILogger + //! + void log(Severity severity, const char* msg) noexcept override + { + LogStreamConsumer(mReportableSeverity, severity) << "[TRT] " << std::string(msg) << std::endl; + } + + //! + //! \brief Method for controlling the verbosity of logging output + //! + //! \param severity The logger will only emit messages that have severity of this level or higher. + //! + void setReportableSeverity(Severity severity) + { + mReportableSeverity = severity; + } + + //! + //! \brief Opaque handle that holds logging information for a particular test + //! + //! This object is an opaque handle to information used by the Logger to print test results. + //! The sample must call Logger::defineTest() in order to obtain a TestAtom that can be used + //! with Logger::reportTest{Start,End}(). + //! + class TestAtom + { + public: + TestAtom(TestAtom&&) = default; + + private: + friend class Logger; + + TestAtom(bool started, const std::string& name, const std::string& cmdline) + : mStarted(started) + , mName(name) + , mCmdline(cmdline) + { + } + + bool mStarted; + std::string mName; + std::string mCmdline; + }; + + //! + //! \brief Define a test for logging + //! + //! \param[in] name The name of the test. This should be a string starting with + //! "TensorRT" and containing dot-separated strings containing + //! the characters [A-Za-z0-9_]. + //! For example, "TensorRT.sample_googlenet" + //! \param[in] cmdline The command line used to reproduce the test + // + //! \return a TestAtom that can be used in Logger::reportTest{Start,End}(). + //! + static TestAtom defineTest(const std::string& name, const std::string& cmdline) + { + return TestAtom(false, name, cmdline); + } + + //! + //! \brief A convenience overloaded version of defineTest() that accepts an array of command-line arguments + //! as input + //! + //! \param[in] name The name of the test + //! \param[in] argc The number of command-line arguments + //! \param[in] argv The array of command-line arguments (given as C strings) + //! + //! \return a TestAtom that can be used in Logger::reportTest{Start,End}(). + static TestAtom defineTest(const std::string& name, int argc, char const* const* argv) + { + auto cmdline = genCmdlineString(argc, argv); + return defineTest(name, cmdline); + } + + //! + //! \brief Report that a test has started. + //! + //! \pre reportTestStart() has not been called yet for the given testAtom + //! + //! \param[in] testAtom The handle to the test that has started + //! + static void reportTestStart(TestAtom& testAtom) + { + reportTestResult(testAtom, TestResult::kRUNNING); + assert(!testAtom.mStarted); + testAtom.mStarted = true; + } + + //! + //! \brief Report that a test has ended. + //! + //! \pre reportTestStart() has been called for the given testAtom + //! + //! \param[in] testAtom The handle to the test that has ended + //! \param[in] result The result of the test. Should be one of TestResult::kPASSED, + //! TestResult::kFAILED, TestResult::kWAIVED + //! 
+ static void reportTestEnd(const TestAtom& testAtom, TestResult result) + { + assert(result != TestResult::kRUNNING); + assert(testAtom.mStarted); + reportTestResult(testAtom, result); + } + + static int reportPass(const TestAtom& testAtom) + { + reportTestEnd(testAtom, TestResult::kPASSED); + return EXIT_SUCCESS; + } + + static int reportFail(const TestAtom& testAtom) + { + reportTestEnd(testAtom, TestResult::kFAILED); + return EXIT_FAILURE; + } + + static int reportWaive(const TestAtom& testAtom) + { + reportTestEnd(testAtom, TestResult::kWAIVED); + return EXIT_SUCCESS; + } + + static int reportTest(const TestAtom& testAtom, bool pass) + { + return pass ? reportPass(testAtom) : reportFail(testAtom); + } + + Severity getReportableSeverity() const + { + return mReportableSeverity; + } + +private: + //! + //! \brief returns an appropriate string for prefixing a log message with the given severity + //! + static const char* severityPrefix(Severity severity) + { + switch (severity) + { + case Severity::kINTERNAL_ERROR: return "[F] "; + case Severity::kERROR: return "[E] "; + case Severity::kWARNING: return "[W] "; + case Severity::kINFO: return "[I] "; + case Severity::kVERBOSE: return "[V] "; + default: assert(0); return ""; + } + } + + //! + //! \brief returns an appropriate string for prefixing a test result message with the given result + //! + static const char* testResultString(TestResult result) + { + switch (result) + { + case TestResult::kRUNNING: return "RUNNING"; + case TestResult::kPASSED: return "PASSED"; + case TestResult::kFAILED: return "FAILED"; + case TestResult::kWAIVED: return "WAIVED"; + default: assert(0); return ""; + } + } + + //! + //! \brief returns an appropriate output stream (cout or cerr) to use with the given severity + //! + static std::ostream& severityOstream(Severity severity) + { + return severity >= Severity::kINFO ? std::cout : std::cerr; + } + + //! + //! \brief method that implements logging test results + //! + static void reportTestResult(const TestAtom& testAtom, TestResult result) + { + severityOstream(Severity::kINFO) << "&&&& " << testResultString(result) << " " << testAtom.mName << " # " + << testAtom.mCmdline << std::endl; + } + + //! + //! \brief generate a command line string from the given (argc, argv) values + //! + static std::string genCmdlineString(int argc, char const* const* argv) + { + std::stringstream ss; + for (int i = 0; i < argc; i++) + { + if (i > 0) + ss << " "; + ss << argv[i]; + } + return ss.str(); + } + + Severity mReportableSeverity; +}; + +namespace +{ + +//! +//! \brief produces a LogStreamConsumer object that can be used to log messages of severity kVERBOSE +//! +//! Example usage: +//! +//! LOG_VERBOSE(logger) << "hello world" << std::endl; +//! +inline LogStreamConsumer LOG_VERBOSE(const Logger& logger) +{ + return LogStreamConsumer(logger.getReportableSeverity(), Severity::kVERBOSE); +} + +//! +//! \brief produces a LogStreamConsumer object that can be used to log messages of severity kINFO +//! +//! Example usage: +//! +//! LOG_INFO(logger) << "hello world" << std::endl; +//! +inline LogStreamConsumer LOG_INFO(const Logger& logger) +{ + return LogStreamConsumer(logger.getReportableSeverity(), Severity::kINFO); +} + +//! +//! \brief produces a LogStreamConsumer object that can be used to log messages of severity kWARNING +//! +//! Example usage: +//! +//! LOG_WARN(logger) << "hello world" << std::endl; +//! 
+inline LogStreamConsumer LOG_WARN(const Logger& logger) +{ + return LogStreamConsumer(logger.getReportableSeverity(), Severity::kWARNING); +} + +//! +//! \brief produces a LogStreamConsumer object that can be used to log messages of severity kERROR +//! +//! Example usage: +//! +//! LOG_ERROR(logger) << "hello world" << std::endl; +//! +inline LogStreamConsumer LOG_ERROR(const Logger& logger) +{ + return LogStreamConsumer(logger.getReportableSeverity(), Severity::kERROR); +} + +//! +//! \brief produces a LogStreamConsumer object that can be used to log messages of severity kINTERNAL_ERROR +// ("fatal" severity) +//! +//! Example usage: +//! +//! LOG_FATAL(logger) << "hello world" << std::endl; +//! +inline LogStreamConsumer LOG_FATAL(const Logger& logger) +{ + return LogStreamConsumer(logger.getReportableSeverity(), Severity::kINTERNAL_ERROR); +} + +} // anonymous namespace + +#endif // TENSORRT_LOGGING_H diff --git a/multimodal/YOLOX/demo/TensorRT/cpp/yolox.cpp b/multimodal/YOLOX/demo/TensorRT/cpp/yolox.cpp new file mode 100644 index 0000000000000000000000000000000000000000..ed423380ef35b4c39bf3231bac6e0079f7eea589 --- /dev/null +++ b/multimodal/YOLOX/demo/TensorRT/cpp/yolox.cpp @@ -0,0 +1,530 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include "NvInfer.h" +#include "cuda_runtime_api.h" +#include "logging.h" + +#define CHECK(status) \ + do\ + {\ + auto ret = (status);\ + if (ret != 0)\ + {\ + std::cerr << "Cuda failure: " << ret << std::endl;\ + abort();\ + }\ + } while (0) + +#define DEVICE 0 // GPU id +#define NMS_THRESH 0.45 +#define BBOX_CONF_THRESH 0.3 + +using namespace nvinfer1; + +// stuff we know about the network and the input/output blobs +static const int INPUT_W = 640; +static const int INPUT_H = 640; +static const int NUM_CLASSES = 80; +const char* INPUT_BLOB_NAME = "input_0"; +const char* OUTPUT_BLOB_NAME = "output_0"; +static Logger gLogger; + +cv::Mat static_resize(cv::Mat& img) { + float r = std::min(INPUT_W / (img.cols*1.0), INPUT_H / (img.rows*1.0)); + // r = std::min(r, 1.0f); + int unpad_w = r * img.cols; + int unpad_h = r * img.rows; + cv::Mat re(unpad_h, unpad_w, CV_8UC3); + cv::resize(img, re, re.size()); + cv::Mat out(INPUT_H, INPUT_W, CV_8UC3, cv::Scalar(114, 114, 114)); + re.copyTo(out(cv::Rect(0, 0, re.cols, re.rows))); + return out; +} + +struct Object +{ + cv::Rect_ rect; + int label; + float prob; +}; + +struct GridAndStride +{ + int grid0; + int grid1; + int stride; +}; + +static void generate_grids_and_stride(std::vector& strides, std::vector& grid_strides) +{ + for (auto stride : strides) + { + int num_grid_y = INPUT_H / stride; + int num_grid_x = INPUT_W / stride; + for (int g1 = 0; g1 < num_grid_y; g1++) + { + for (int g0 = 0; g0 < num_grid_x; g0++) + { + grid_strides.push_back((GridAndStride){g0, g1, stride}); + } + } + } +} + +static inline float intersection_area(const Object& a, const Object& b) +{ + cv::Rect_ inter = a.rect & b.rect; + return inter.area(); +} + +static void qsort_descent_inplace(std::vector& faceobjects, int left, int right) +{ + int i = left; + int j = right; + float p = faceobjects[(left + right) / 2].prob; + + while (i <= j) + { + while (faceobjects[i].prob > p) + i++; + + while (faceobjects[j].prob < p) + j--; + + if (i <= j) + { + // swap + std::swap(faceobjects[i], faceobjects[j]); + + i++; + j--; + } + } + + #pragma omp parallel sections + { + #pragma omp section + { + if (left < j) qsort_descent_inplace(faceobjects, left, j); + } + #pragma omp section + { + if (i < right) 
qsort_descent_inplace(faceobjects, i, right); + } + } +} + +static void qsort_descent_inplace(std::vector& objects) +{ + if (objects.empty()) + return; + + qsort_descent_inplace(objects, 0, objects.size() - 1); +} + +static void nms_sorted_bboxes(const std::vector& faceobjects, std::vector& picked, float nms_threshold) +{ + picked.clear(); + + const int n = faceobjects.size(); + + std::vector areas(n); + for (int i = 0; i < n; i++) + { + areas[i] = faceobjects[i].rect.area(); + } + + for (int i = 0; i < n; i++) + { + const Object& a = faceobjects[i]; + + int keep = 1; + for (int j = 0; j < (int)picked.size(); j++) + { + const Object& b = faceobjects[picked[j]]; + + // intersection over union + float inter_area = intersection_area(a, b); + float union_area = areas[i] + areas[picked[j]] - inter_area; + // float IoU = inter_area / union_area + if (inter_area / union_area > nms_threshold) + keep = 0; + } + + if (keep) + picked.push_back(i); + } +} + + +static void generate_yolox_proposals(std::vector grid_strides, float* feat_blob, float prob_threshold, std::vector& objects) +{ + + const int num_anchors = grid_strides.size(); + + for (int anchor_idx = 0; anchor_idx < num_anchors; anchor_idx++) + { + const int grid0 = grid_strides[anchor_idx].grid0; + const int grid1 = grid_strides[anchor_idx].grid1; + const int stride = grid_strides[anchor_idx].stride; + + const int basic_pos = anchor_idx * (NUM_CLASSES + 5); + + // yolox/models/yolo_head.py decode logic + float x_center = (feat_blob[basic_pos+0] + grid0) * stride; + float y_center = (feat_blob[basic_pos+1] + grid1) * stride; + float w = exp(feat_blob[basic_pos+2]) * stride; + float h = exp(feat_blob[basic_pos+3]) * stride; + float x0 = x_center - w * 0.5f; + float y0 = y_center - h * 0.5f; + + float box_objectness = feat_blob[basic_pos+4]; + for (int class_idx = 0; class_idx < NUM_CLASSES; class_idx++) + { + float box_cls_score = feat_blob[basic_pos + 5 + class_idx]; + float box_prob = box_objectness * box_cls_score; + if (box_prob > prob_threshold) + { + Object obj; + obj.rect.x = x0; + obj.rect.y = y0; + obj.rect.width = w; + obj.rect.height = h; + obj.label = class_idx; + obj.prob = box_prob; + + objects.push_back(obj); + } + + } // class loop + + } // point anchor loop +} + +float* blobFromImage(cv::Mat& img){ + float* blob = new float[img.total()*3]; + int channels = 3; + int img_h = img.rows; + int img_w = img.cols; + for (size_t c = 0; c < channels; c++) + { + for (size_t h = 0; h < img_h; h++) + { + for (size_t w = 0; w < img_w; w++) + { + blob[c * img_w * img_h + h * img_w + w] = + (float)img.at(h, w)[c]; + } + } + } + return blob; +} + + +static void decode_outputs(float* prob, std::vector& objects, float scale, const int img_w, const int img_h) { + std::vector proposals; + std::vector strides = {8, 16, 32}; + std::vector grid_strides; + generate_grids_and_stride(strides, grid_strides); + generate_yolox_proposals(grid_strides, prob, BBOX_CONF_THRESH, proposals); + std::cout << "num of boxes before nms: " << proposals.size() << std::endl; + + qsort_descent_inplace(proposals); + + std::vector picked; + nms_sorted_bboxes(proposals, picked, NMS_THRESH); + + + int count = picked.size(); + + std::cout << "num of boxes: " << count << std::endl; + + objects.resize(count); + for (int i = 0; i < count; i++) + { + objects[i] = proposals[picked[i]]; + + // adjust offset to original unpadded + float x0 = (objects[i].rect.x) / scale; + float y0 = (objects[i].rect.y) / scale; + float x1 = (objects[i].rect.x + objects[i].rect.width) / scale; + 
float y1 = (objects[i].rect.y + objects[i].rect.height) / scale; + + // clip + x0 = std::max(std::min(x0, (float)(img_w - 1)), 0.f); + y0 = std::max(std::min(y0, (float)(img_h - 1)), 0.f); + x1 = std::max(std::min(x1, (float)(img_w - 1)), 0.f); + y1 = std::max(std::min(y1, (float)(img_h - 1)), 0.f); + + objects[i].rect.x = x0; + objects[i].rect.y = y0; + objects[i].rect.width = x1 - x0; + objects[i].rect.height = y1 - y0; + } +} + +const float color_list[80][3] = +{ + {0.000, 0.447, 0.741}, + {0.850, 0.325, 0.098}, + {0.929, 0.694, 0.125}, + {0.494, 0.184, 0.556}, + {0.466, 0.674, 0.188}, + {0.301, 0.745, 0.933}, + {0.635, 0.078, 0.184}, + {0.300, 0.300, 0.300}, + {0.600, 0.600, 0.600}, + {1.000, 0.000, 0.000}, + {1.000, 0.500, 0.000}, + {0.749, 0.749, 0.000}, + {0.000, 1.000, 0.000}, + {0.000, 0.000, 1.000}, + {0.667, 0.000, 1.000}, + {0.333, 0.333, 0.000}, + {0.333, 0.667, 0.000}, + {0.333, 1.000, 0.000}, + {0.667, 0.333, 0.000}, + {0.667, 0.667, 0.000}, + {0.667, 1.000, 0.000}, + {1.000, 0.333, 0.000}, + {1.000, 0.667, 0.000}, + {1.000, 1.000, 0.000}, + {0.000, 0.333, 0.500}, + {0.000, 0.667, 0.500}, + {0.000, 1.000, 0.500}, + {0.333, 0.000, 0.500}, + {0.333, 0.333, 0.500}, + {0.333, 0.667, 0.500}, + {0.333, 1.000, 0.500}, + {0.667, 0.000, 0.500}, + {0.667, 0.333, 0.500}, + {0.667, 0.667, 0.500}, + {0.667, 1.000, 0.500}, + {1.000, 0.000, 0.500}, + {1.000, 0.333, 0.500}, + {1.000, 0.667, 0.500}, + {1.000, 1.000, 0.500}, + {0.000, 0.333, 1.000}, + {0.000, 0.667, 1.000}, + {0.000, 1.000, 1.000}, + {0.333, 0.000, 1.000}, + {0.333, 0.333, 1.000}, + {0.333, 0.667, 1.000}, + {0.333, 1.000, 1.000}, + {0.667, 0.000, 1.000}, + {0.667, 0.333, 1.000}, + {0.667, 0.667, 1.000}, + {0.667, 1.000, 1.000}, + {1.000, 0.000, 1.000}, + {1.000, 0.333, 1.000}, + {1.000, 0.667, 1.000}, + {0.333, 0.000, 0.000}, + {0.500, 0.000, 0.000}, + {0.667, 0.000, 0.000}, + {0.833, 0.000, 0.000}, + {1.000, 0.000, 0.000}, + {0.000, 0.167, 0.000}, + {0.000, 0.333, 0.000}, + {0.000, 0.500, 0.000}, + {0.000, 0.667, 0.000}, + {0.000, 0.833, 0.000}, + {0.000, 1.000, 0.000}, + {0.000, 0.000, 0.167}, + {0.000, 0.000, 0.333}, + {0.000, 0.000, 0.500}, + {0.000, 0.000, 0.667}, + {0.000, 0.000, 0.833}, + {0.000, 0.000, 1.000}, + {0.000, 0.000, 0.000}, + {0.143, 0.143, 0.143}, + {0.286, 0.286, 0.286}, + {0.429, 0.429, 0.429}, + {0.571, 0.571, 0.571}, + {0.714, 0.714, 0.714}, + {0.857, 0.857, 0.857}, + {0.000, 0.447, 0.741}, + {0.314, 0.717, 0.741}, + {0.50, 0.5, 0} +}; + +static void draw_objects(const cv::Mat& bgr, const std::vector& objects, std::string f) +{ + static const char* class_names[] = { + "person", "bicycle", "car", "motorcycle", "airplane", "bus", "train", "truck", "boat", "traffic light", + "fire hydrant", "stop sign", "parking meter", "bench", "bird", "cat", "dog", "horse", "sheep", "cow", + "elephant", "bear", "zebra", "giraffe", "backpack", "umbrella", "handbag", "tie", "suitcase", "frisbee", + "skis", "snowboard", "sports ball", "kite", "baseball bat", "baseball glove", "skateboard", "surfboard", + "tennis racket", "bottle", "wine glass", "cup", "fork", "knife", "spoon", "bowl", "banana", "apple", + "sandwich", "orange", "broccoli", "carrot", "hot dog", "pizza", "donut", "cake", "chair", "couch", + "potted plant", "bed", "dining table", "toilet", "tv", "laptop", "mouse", "remote", "keyboard", "cell phone", + "microwave", "oven", "toaster", "sink", "refrigerator", "book", "clock", "vase", "scissors", "teddy bear", + "hair drier", "toothbrush" + }; + + cv::Mat image = bgr.clone(); + + for (size_t i = 0; i < 
objects.size(); i++) + { + const Object& obj = objects[i]; + + fprintf(stderr, "%d = %.5f at %.2f %.2f %.2f x %.2f\n", obj.label, obj.prob, + obj.rect.x, obj.rect.y, obj.rect.width, obj.rect.height); + + cv::Scalar color = cv::Scalar(color_list[obj.label][0], color_list[obj.label][1], color_list[obj.label][2]); + float c_mean = cv::mean(color)[0]; + cv::Scalar txt_color; + if (c_mean > 0.5){ + txt_color = cv::Scalar(0, 0, 0); + }else{ + txt_color = cv::Scalar(255, 255, 255); + } + + cv::rectangle(image, obj.rect, color * 255, 2); + + char text[256]; + sprintf(text, "%s %.1f%%", class_names[obj.label], obj.prob * 100); + + int baseLine = 0; + cv::Size label_size = cv::getTextSize(text, cv::FONT_HERSHEY_SIMPLEX, 0.4, 1, &baseLine); + + cv::Scalar txt_bk_color = color * 0.7 * 255; + + int x = obj.rect.x; + int y = obj.rect.y + 1; + //int y = obj.rect.y - label_size.height - baseLine; + if (y > image.rows) + y = image.rows; + //if (x + label_size.width > image.cols) + //x = image.cols - label_size.width; + + cv::rectangle(image, cv::Rect(cv::Point(x, y), cv::Size(label_size.width, label_size.height + baseLine)), + txt_bk_color, -1); + + cv::putText(image, text, cv::Point(x, y + label_size.height), + cv::FONT_HERSHEY_SIMPLEX, 0.4, txt_color, 1); + } + + cv::imwrite("det_res.jpg", image); + fprintf(stderr, "save vis file\n"); + /* cv::imshow("image", image); */ + /* cv::waitKey(0); */ +} + + +void doInference(IExecutionContext& context, float* input, float* output, const int output_size, cv::Size input_shape) { + const ICudaEngine& engine = context.getEngine(); + + // Pointers to input and output device buffers to pass to engine. + // Engine requires exactly IEngine::getNbBindings() number of buffers. + assert(engine.getNbBindings() == 2); + void* buffers[2]; + + // In order to bind the buffers, we need to know the names of the input and output tensors. 
+ // Note that indices are guaranteed to be less than IEngine::getNbBindings() + const int inputIndex = engine.getBindingIndex(INPUT_BLOB_NAME); + + assert(engine.getBindingDataType(inputIndex) == nvinfer1::DataType::kFLOAT); + const int outputIndex = engine.getBindingIndex(OUTPUT_BLOB_NAME); + assert(engine.getBindingDataType(outputIndex) == nvinfer1::DataType::kFLOAT); + int mBatchSize = engine.getMaxBatchSize(); + + // Create GPU buffers on device + CHECK(cudaMalloc(&buffers[inputIndex], 3 * input_shape.height * input_shape.width * sizeof(float))); + CHECK(cudaMalloc(&buffers[outputIndex], output_size*sizeof(float))); + + // Create stream + cudaStream_t stream; + CHECK(cudaStreamCreate(&stream)); + + // DMA input batch data to device, infer on the batch asynchronously, and DMA output back to host + CHECK(cudaMemcpyAsync(buffers[inputIndex], input, 3 * input_shape.height * input_shape.width * sizeof(float), cudaMemcpyHostToDevice, stream)); + context.enqueue(1, buffers, stream, nullptr); + CHECK(cudaMemcpyAsync(output, buffers[outputIndex], output_size * sizeof(float), cudaMemcpyDeviceToHost, stream)); + cudaStreamSynchronize(stream); + + // Release stream and buffers + cudaStreamDestroy(stream); + CHECK(cudaFree(buffers[inputIndex])); + CHECK(cudaFree(buffers[outputIndex])); +} + +int main(int argc, char** argv) { + cudaSetDevice(DEVICE); + // create a model using the API directly and serialize it to a stream + char *trtModelStream{nullptr}; + size_t size{0}; + + if (argc == 4 && std::string(argv[2]) == "-i") { + const std::string engine_file_path {argv[1]}; + std::ifstream file(engine_file_path, std::ios::binary); + if (file.good()) { + file.seekg(0, file.end); + size = file.tellg(); + file.seekg(0, file.beg); + trtModelStream = new char[size]; + assert(trtModelStream); + file.read(trtModelStream, size); + file.close(); + } + } else { + std::cerr << "arguments not right!" << std::endl; + std::cerr << "run 'python3 yolox/deploy/trt.py -n yolox-{tiny, s, m, l, x}' to serialize model first!" << std::endl; + std::cerr << "Then use the following command:" << std::endl; + std::cerr << "./yolox ../model_trt.engine -i ../../../assets/dog.jpg // deserialize file and run inference" << std::endl; + return -1; + } + const std::string input_image_path {argv[3]}; + + //std::vector file_names; + //if (read_files_in_dir(argv[2], file_names) < 0) { + //std::cout << "read_files_in_dir failed." 
<< std::endl; + //return -1; + //} + + IRuntime* runtime = createInferRuntime(gLogger); + assert(runtime != nullptr); + ICudaEngine* engine = runtime->deserializeCudaEngine(trtModelStream, size); + assert(engine != nullptr); + IExecutionContext* context = engine->createExecutionContext(); + assert(context != nullptr); + delete[] trtModelStream; + auto out_dims = engine->getBindingDimensions(1); + auto output_size = 1; + for(int j=0;j(end - start).count() << "ms" << std::endl; + + std::vector objects; + decode_outputs(prob, objects, scale, img_w, img_h); + draw_objects(img, objects, input_image_path); + // delete the pointer to the float + delete blob; + // destroy the engine + context->destroy(); + engine->destroy(); + runtime->destroy(); + return 0; +} diff --git a/multimodal/YOLOX/demo/TensorRT/python/README.md b/multimodal/YOLOX/demo/TensorRT/python/README.md new file mode 100644 index 0000000000000000000000000000000000000000..236eeb1265344b68e24616293c96fffee9a17262 --- /dev/null +++ b/multimodal/YOLOX/demo/TensorRT/python/README.md @@ -0,0 +1,46 @@ +# YOLOX-TensorRT in Python + +This tutorial includes a Python demo for TensorRT. + +## Install TensorRT Toolkit + +Please follow the [TensorRT Installation Guide](https://docs.nvidia.com/deeplearning/tensorrt/install-guide/index.html) and [torch2trt gitrepo](https://github.com/NVIDIA-AI-IOT/torch2trt) to install TensorRT and torch2trt. + +## Convert model + +YOLOX models can be easily conveted to TensorRT models using torch2trt + + If you want to convert our model, use the flag -n to specify a model name: + ```shell + python tools/trt.py -n -c + ``` + For example: + ```shell + python tools/trt.py -n yolox-s -c your_ckpt.pth + ``` + can be: yolox-nano, yolox-tiny. yolox-s, yolox-m, yolox-l, yolox-x. + + If you want to convert your customized model, use the flag -f to specify you exp file: + ```shell + python tools/trt.py -f -c + ``` + For example: + ```shell + python tools/trt.py -f /path/to/your/yolox/exps/yolox_s.py -c your_ckpt.pth + ``` + *yolox_s.py* can be any exp file modified by you. + +The converted model and the serialized engine file (for C++ demo) will be saved on your experiment output dir. + +## Demo + +The TensorRT python demo is merged on our pytorch demo file, so you can run the pytorch demo command with ```--trt```. + +```shell +python tools/demo.py image -n yolox-s --trt --save_result +``` +or +```shell +python tools/demo.py image -f exps/default/yolox_s.py --trt --save_result +``` + diff --git a/multimodal/YOLOX/demo/ncnn/README.md b/multimodal/YOLOX/demo/ncnn/README.md new file mode 100644 index 0000000000000000000000000000000000000000..a607abd8caad31c51749884cc433082202ba01af --- /dev/null +++ b/multimodal/YOLOX/demo/ncnn/README.md @@ -0,0 +1,8 @@ +# YOLOX-ncnn + +Compile files of YOLOX object detection base on [ncnn](https://github.com/Tencent/ncnn). +YOLOX is included in ncnn now, you could also try building from ncnn, it's better. 
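+
+For orientation, here is a minimal C++ sketch of running a converted model through the ncnn API. This is not part of the official demo code: it assumes the converted files are named `yolox.param` / `yolox.bin` and that the input and output blobs are called `images` and `output`, as in the Android demo in this directory, and it omits the registration of the custom `YoloV5Focus` layer (see the JNI demo for its implementation), without which `load_param` will fail.
+
+```cpp
+#include "net.h"   // ncnn
+
+int main()
+{
+    ncnn::Net yolox;
+    // NOTE: register the custom YoloV5Focus layer here before load_param(),
+    // exactly as the Android JNI demo does, otherwise loading fails.
+    if (yolox.load_param("yolox.param") != 0)
+        return -1;
+    if (yolox.load_model("yolox.bin") != 0)
+        return -1;
+
+    // 640x640 letterboxed BGR input, filled from your image elsewhere
+    ncnn::Mat in(640, 640, 3);
+
+    ncnn::Extractor ex = yolox.create_extractor();
+    ex.input("images", in);
+
+    ncnn::Mat out;
+    ex.extract("output", out);   // raw [num_anchors x 85] predictions
+    return 0;
+}
+```
+
+The raw output is then decoded the same way as in the C++ and Android demos: grid/stride decode, confidence threshold, and NMS.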
+ +## Acknowledgement + +* [ncnn](https://github.com/Tencent/ncnn) diff --git a/multimodal/YOLOX/demo/ncnn/android/README.md b/multimodal/YOLOX/demo/ncnn/android/README.md new file mode 100644 index 0000000000000000000000000000000000000000..2197ffe9a348d20f541d0e664363e07dfaf425ac --- /dev/null +++ b/multimodal/YOLOX/demo/ncnn/android/README.md @@ -0,0 +1,27 @@ +# YOLOX-Android-ncnn + +Andoird app of YOLOX object detection base on [ncnn](https://github.com/Tencent/ncnn) + + +## Tutorial + +### Step1 + +Download ncnn-android-vulkan.zip from [releases of ncnn](https://github.com/Tencent/ncnn/releases). This repo uses +[20210525 release](https://github.com/Tencent/ncnn/releases/download/20210525/ncnn-20210525-android-vulkan.zip) for building. + +### Step2 + +After downloading, please extract your zip file. Then, there are two ways to finish this step: +* put your extracted directory into **app/src/main/jni** +* change the **ncnn_DIR** path in **app/src/main/jni/CMakeLists.txt** to your extracted directory + +### Step3 +Download example param and bin file from [onedrive](https://megvii-my.sharepoint.cn/:u:/g/personal/gezheng_megvii_com/ESXBH_GSSmFMszWJ6YG2VkQB5cWDfqVWXgk0D996jH0rpQ?e=qzEqUh) or [github](https://github.com/Megvii-BaseDetection/storage/releases/download/0.0.1/yolox_s_ncnn.tar.gz). Unzip the file to **app/src/main/assets**. + +### Step4 +Open this project with Android Studio, build it and enjoy! + +## Reference + +* [ncnn-android-yolov5](https://github.com/nihui/ncnn-android-yolov5) diff --git a/multimodal/YOLOX/demo/ncnn/android/app/build.gradle b/multimodal/YOLOX/demo/ncnn/android/app/build.gradle new file mode 100644 index 0000000000000000000000000000000000000000..72e5ce088e9656749644edddfb7ca5d39f2b67f1 --- /dev/null +++ b/multimodal/YOLOX/demo/ncnn/android/app/build.gradle @@ -0,0 +1,24 @@ +apply plugin: 'com.android.application' + +android { + compileSdkVersion 24 + buildToolsVersion "29.0.2" + + defaultConfig { + applicationId "com.megvii.yoloXncnn" + archivesBaseName = "$applicationId" + + ndk { + moduleName "ncnn" + abiFilters "armeabi-v7a", "arm64-v8a" + } + minSdkVersion 24 + } + + externalNativeBuild { + cmake { + version "3.10.2" + path file('src/main/jni/CMakeLists.txt') + } + } +} diff --git a/multimodal/YOLOX/demo/ncnn/android/app/src/main/AndroidManifest.xml b/multimodal/YOLOX/demo/ncnn/android/app/src/main/AndroidManifest.xml new file mode 100644 index 0000000000000000000000000000000000000000..f69b9a0f1891adae1bd88df713980f1cfd0d1e92 --- /dev/null +++ b/multimodal/YOLOX/demo/ncnn/android/app/src/main/AndroidManifest.xml @@ -0,0 +1,15 @@ + + + + + + + + + + + diff --git a/multimodal/YOLOX/demo/ncnn/android/app/src/main/assets/yolox.param b/multimodal/YOLOX/demo/ncnn/android/app/src/main/assets/yolox.param new file mode 100644 index 0000000000000000000000000000000000000000..f7990f7ae9a71451bf8abb14cfedc74d9cbc38cc --- /dev/null +++ b/multimodal/YOLOX/demo/ncnn/android/app/src/main/assets/yolox.param @@ -0,0 +1,222 @@ +7767517 +220 250 +Input images 0 1 images +YoloV5Focus focus 1 1 images 503 +Convolution Conv_41 1 1 503 877 0=32 1=3 4=1 5=1 6=3456 +Swish Mul_43 1 1 877 507 +Convolution Conv_44 1 1 507 880 0=64 1=3 3=2 4=1 5=1 6=18432 +Swish Mul_46 1 1 880 511 +Split splitncnn_0 1 2 511 511_splitncnn_0 511_splitncnn_1 +Convolution Conv_47 1 1 511_splitncnn_1 883 0=32 1=1 5=1 6=2048 +Swish Mul_49 1 1 883 515 +Split splitncnn_1 1 2 515 515_splitncnn_0 515_splitncnn_1 +Convolution Conv_50 1 1 511_splitncnn_0 886 0=32 1=1 5=1 6=2048 +Swish Mul_52 1 1 886 519 
+Convolution Conv_53 1 1 515_splitncnn_1 889 0=32 1=1 5=1 6=1024 +Swish Mul_55 1 1 889 523 +Convolution Conv_56 1 1 523 892 0=32 1=3 4=1 5=1 6=9216 +Swish Mul_58 1 1 892 527 +BinaryOp Add_59 2 1 527 515_splitncnn_0 528 +Concat Concat_60 2 1 528 519 529 +Convolution Conv_61 1 1 529 895 0=64 1=1 5=1 6=4096 +Swish Mul_63 1 1 895 533 +Convolution Conv_64 1 1 533 898 0=128 1=3 3=2 4=1 5=1 6=73728 +Swish Mul_66 1 1 898 537 +Split splitncnn_2 1 2 537 537_splitncnn_0 537_splitncnn_1 +Convolution Conv_67 1 1 537_splitncnn_1 901 0=64 1=1 5=1 6=8192 +Swish Mul_69 1 1 901 541 +Split splitncnn_3 1 2 541 541_splitncnn_0 541_splitncnn_1 +Convolution Conv_70 1 1 537_splitncnn_0 904 0=64 1=1 5=1 6=8192 +Swish Mul_72 1 1 904 545 +Convolution Conv_73 1 1 541_splitncnn_1 907 0=64 1=1 5=1 6=4096 +Swish Mul_75 1 1 907 549 +Convolution Conv_76 1 1 549 910 0=64 1=3 4=1 5=1 6=36864 +Swish Mul_78 1 1 910 553 +BinaryOp Add_79 2 1 553 541_splitncnn_0 554 +Split splitncnn_4 1 2 554 554_splitncnn_0 554_splitncnn_1 +Convolution Conv_80 1 1 554_splitncnn_1 913 0=64 1=1 5=1 6=4096 +Swish Mul_82 1 1 913 558 +Convolution Conv_83 1 1 558 916 0=64 1=3 4=1 5=1 6=36864 +Swish Mul_85 1 1 916 562 +BinaryOp Add_86 2 1 562 554_splitncnn_0 563 +Split splitncnn_5 1 2 563 563_splitncnn_0 563_splitncnn_1 +Convolution Conv_87 1 1 563_splitncnn_1 919 0=64 1=1 5=1 6=4096 +Swish Mul_89 1 1 919 567 +Convolution Conv_90 1 1 567 922 0=64 1=3 4=1 5=1 6=36864 +Swish Mul_92 1 1 922 571 +BinaryOp Add_93 2 1 571 563_splitncnn_0 572 +Concat Concat_94 2 1 572 545 573 +Convolution Conv_95 1 1 573 925 0=128 1=1 5=1 6=16384 +Swish Mul_97 1 1 925 577 +Split splitncnn_6 1 2 577 577_splitncnn_0 577_splitncnn_1 +Convolution Conv_98 1 1 577_splitncnn_1 928 0=256 1=3 3=2 4=1 5=1 6=294912 +Swish Mul_100 1 1 928 581 +Split splitncnn_7 1 2 581 581_splitncnn_0 581_splitncnn_1 +Convolution Conv_101 1 1 581_splitncnn_1 931 0=128 1=1 5=1 6=32768 +Swish Mul_103 1 1 931 585 +Split splitncnn_8 1 2 585 585_splitncnn_0 585_splitncnn_1 +Convolution Conv_104 1 1 581_splitncnn_0 934 0=128 1=1 5=1 6=32768 +Swish Mul_106 1 1 934 589 +Convolution Conv_107 1 1 585_splitncnn_1 937 0=128 1=1 5=1 6=16384 +Swish Mul_109 1 1 937 593 +Convolution Conv_110 1 1 593 940 0=128 1=3 4=1 5=1 6=147456 +Swish Mul_112 1 1 940 597 +BinaryOp Add_113 2 1 597 585_splitncnn_0 598 +Split splitncnn_9 1 2 598 598_splitncnn_0 598_splitncnn_1 +Convolution Conv_114 1 1 598_splitncnn_1 943 0=128 1=1 5=1 6=16384 +Swish Mul_116 1 1 943 602 +Convolution Conv_117 1 1 602 946 0=128 1=3 4=1 5=1 6=147456 +Swish Mul_119 1 1 946 606 +BinaryOp Add_120 2 1 606 598_splitncnn_0 607 +Split splitncnn_10 1 2 607 607_splitncnn_0 607_splitncnn_1 +Convolution Conv_121 1 1 607_splitncnn_1 949 0=128 1=1 5=1 6=16384 +Swish Mul_123 1 1 949 611 +Convolution Conv_124 1 1 611 952 0=128 1=3 4=1 5=1 6=147456 +Swish Mul_126 1 1 952 615 +BinaryOp Add_127 2 1 615 607_splitncnn_0 616 +Concat Concat_128 2 1 616 589 617 +Convolution Conv_129 1 1 617 955 0=256 1=1 5=1 6=65536 +Swish Mul_131 1 1 955 621 +Split splitncnn_11 1 2 621 621_splitncnn_0 621_splitncnn_1 +Convolution Conv_132 1 1 621_splitncnn_1 958 0=512 1=3 3=2 4=1 5=1 6=1179648 +Swish Mul_134 1 1 958 625 +Convolution Conv_135 1 1 625 961 0=256 1=1 5=1 6=131072 +Swish Mul_137 1 1 961 629 +Split splitncnn_12 1 4 629 629_splitncnn_0 629_splitncnn_1 629_splitncnn_2 629_splitncnn_3 +Pooling MaxPool_138 1 1 629_splitncnn_3 630 1=5 3=2 5=1 +Pooling MaxPool_139 1 1 629_splitncnn_2 631 1=9 3=4 5=1 +Pooling MaxPool_140 1 1 629_splitncnn_1 632 1=13 3=6 5=1 +Concat Concat_141 4 1 
629_splitncnn_0 630 631 632 633 +Convolution Conv_142 1 1 633 964 0=512 1=1 5=1 6=524288 +Swish Mul_144 1 1 964 637 +Split splitncnn_13 1 2 637 637_splitncnn_0 637_splitncnn_1 +Convolution Conv_145 1 1 637_splitncnn_1 967 0=256 1=1 5=1 6=131072 +Swish Mul_147 1 1 967 641 +Convolution Conv_148 1 1 637_splitncnn_0 970 0=256 1=1 5=1 6=131072 +Swish Mul_150 1 1 970 645 +Convolution Conv_151 1 1 641 973 0=256 1=1 5=1 6=65536 +Swish Mul_153 1 1 973 649 +Convolution Conv_154 1 1 649 976 0=256 1=3 4=1 5=1 6=589824 +Swish Mul_156 1 1 976 653 +Concat Concat_157 2 1 653 645 654 +Convolution Conv_158 1 1 654 979 0=512 1=1 5=1 6=262144 +Swish Mul_160 1 1 979 658 +Convolution Conv_161 1 1 658 982 0=256 1=1 5=1 6=131072 +Swish Mul_163 1 1 982 662 +Split splitncnn_14 1 2 662 662_splitncnn_0 662_splitncnn_1 +Interp Resize_165 1 1 662_splitncnn_1 667 0=1 1=2.000000e+00 2=2.000000e+00 +Concat Concat_166 2 1 667 621_splitncnn_0 668 +Split splitncnn_15 1 2 668 668_splitncnn_0 668_splitncnn_1 +Convolution Conv_167 1 1 668_splitncnn_1 985 0=128 1=1 5=1 6=65536 +Swish Mul_169 1 1 985 672 +Convolution Conv_170 1 1 668_splitncnn_0 988 0=128 1=1 5=1 6=65536 +Swish Mul_172 1 1 988 676 +Convolution Conv_173 1 1 672 991 0=128 1=1 5=1 6=16384 +Swish Mul_175 1 1 991 680 +Convolution Conv_176 1 1 680 994 0=128 1=3 4=1 5=1 6=147456 +Swish Mul_178 1 1 994 684 +Concat Concat_179 2 1 684 676 685 +Convolution Conv_180 1 1 685 997 0=256 1=1 5=1 6=65536 +Swish Mul_182 1 1 997 689 +Convolution Conv_183 1 1 689 1000 0=128 1=1 5=1 6=32768 +Swish Mul_185 1 1 1000 693 +Split splitncnn_16 1 2 693 693_splitncnn_0 693_splitncnn_1 +Interp Resize_187 1 1 693_splitncnn_1 698 0=1 1=2.000000e+00 2=2.000000e+00 +Concat Concat_188 2 1 698 577_splitncnn_0 699 +Split splitncnn_17 1 2 699 699_splitncnn_0 699_splitncnn_1 +Convolution Conv_189 1 1 699_splitncnn_1 1003 0=64 1=1 5=1 6=16384 +Swish Mul_191 1 1 1003 703 +Convolution Conv_192 1 1 699_splitncnn_0 1006 0=64 1=1 5=1 6=16384 +Swish Mul_194 1 1 1006 707 +Convolution Conv_195 1 1 703 1009 0=64 1=1 5=1 6=4096 +Swish Mul_197 1 1 1009 711 +Convolution Conv_198 1 1 711 1012 0=64 1=3 4=1 5=1 6=36864 +Swish Mul_200 1 1 1012 715 +Concat Concat_201 2 1 715 707 716 +Convolution Conv_202 1 1 716 1015 0=128 1=1 5=1 6=16384 +Swish Mul_204 1 1 1015 720 +Split splitncnn_18 1 2 720 720_splitncnn_0 720_splitncnn_1 +Convolution Conv_205 1 1 720_splitncnn_1 1018 0=128 1=3 3=2 4=1 5=1 6=147456 +Swish Mul_207 1 1 1018 724 +Concat Concat_208 2 1 724 693_splitncnn_0 725 +Split splitncnn_19 1 2 725 725_splitncnn_0 725_splitncnn_1 +Convolution Conv_209 1 1 725_splitncnn_1 1021 0=128 1=1 5=1 6=32768 +Swish Mul_211 1 1 1021 729 +Convolution Conv_212 1 1 725_splitncnn_0 1024 0=128 1=1 5=1 6=32768 +Swish Mul_214 1 1 1024 733 +Convolution Conv_215 1 1 729 1027 0=128 1=1 5=1 6=16384 +Swish Mul_217 1 1 1027 737 +Convolution Conv_218 1 1 737 1030 0=128 1=3 4=1 5=1 6=147456 +Swish Mul_220 1 1 1030 741 +Concat Concat_221 2 1 741 733 742 +Convolution Conv_222 1 1 742 1033 0=256 1=1 5=1 6=65536 +Swish Mul_224 1 1 1033 746 +Split splitncnn_20 1 2 746 746_splitncnn_0 746_splitncnn_1 +Convolution Conv_225 1 1 746_splitncnn_1 1036 0=256 1=3 3=2 4=1 5=1 6=589824 +Swish Mul_227 1 1 1036 750 +Concat Concat_228 2 1 750 662_splitncnn_0 751 +Split splitncnn_21 1 2 751 751_splitncnn_0 751_splitncnn_1 +Convolution Conv_229 1 1 751_splitncnn_1 1039 0=256 1=1 5=1 6=131072 +Swish Mul_231 1 1 1039 755 +Convolution Conv_232 1 1 751_splitncnn_0 1042 0=256 1=1 5=1 6=131072 +Swish Mul_234 1 1 1042 759 +Convolution Conv_235 1 1 755 1045 0=256 1=1 
5=1 6=65536 +Swish Mul_237 1 1 1045 763 +Convolution Conv_238 1 1 763 1048 0=256 1=3 4=1 5=1 6=589824 +Swish Mul_240 1 1 1048 767 +Concat Concat_241 2 1 767 759 768 +Convolution Conv_242 1 1 768 1051 0=512 1=1 5=1 6=262144 +Swish Mul_244 1 1 1051 772 +Convolution Conv_245 1 1 720_splitncnn_0 1054 0=128 1=1 5=1 6=16384 +Swish Mul_247 1 1 1054 776 +Split splitncnn_22 1 2 776 776_splitncnn_0 776_splitncnn_1 +Convolution Conv_248 1 1 776_splitncnn_1 1057 0=128 1=3 4=1 5=1 6=147456 +Swish Mul_250 1 1 1057 780 +Convolution Conv_251 1 1 780 1060 0=128 1=3 4=1 5=1 6=147456 +Swish Mul_253 1 1 1060 784 +Convolution Conv_254 1 1 784 797 0=80 1=1 5=1 6=10240 9=4 +Convolution Conv_255 1 1 776_splitncnn_0 1063 0=128 1=3 4=1 5=1 6=147456 +Swish Mul_257 1 1 1063 789 +Convolution Conv_258 1 1 789 1066 0=128 1=3 4=1 5=1 6=147456 +Swish Mul_260 1 1 1066 793 +Split splitncnn_23 1 2 793 793_splitncnn_0 793_splitncnn_1 +Convolution Conv_261 1 1 793_splitncnn_1 794 0=4 1=1 5=1 6=512 +Convolution Conv_262 1 1 793_splitncnn_0 796 0=1 1=1 5=1 6=128 9=4 +Concat Concat_265 3 1 794 796 797 798 +Convolution Conv_266 1 1 746_splitncnn_0 1069 0=128 1=1 5=1 6=32768 +Swish Mul_268 1 1 1069 802 +Split splitncnn_24 1 2 802 802_splitncnn_0 802_splitncnn_1 +Convolution Conv_269 1 1 802_splitncnn_1 1072 0=128 1=3 4=1 5=1 6=147456 +Swish Mul_271 1 1 1072 806 +Convolution Conv_272 1 1 806 1075 0=128 1=3 4=1 5=1 6=147456 +Swish Mul_274 1 1 1075 810 +Convolution Conv_275 1 1 810 823 0=80 1=1 5=1 6=10240 9=4 +Convolution Conv_276 1 1 802_splitncnn_0 1078 0=128 1=3 4=1 5=1 6=147456 +Swish Mul_278 1 1 1078 815 +Convolution Conv_279 1 1 815 1081 0=128 1=3 4=1 5=1 6=147456 +Swish Mul_281 1 1 1081 819 +Split splitncnn_25 1 2 819 819_splitncnn_0 819_splitncnn_1 +Convolution Conv_282 1 1 819_splitncnn_1 820 0=4 1=1 5=1 6=512 +Convolution Conv_283 1 1 819_splitncnn_0 822 0=1 1=1 5=1 6=128 9=4 +Concat Concat_286 3 1 820 822 823 824 +Convolution Conv_287 1 1 772 1084 0=128 1=1 5=1 6=65536 +Swish Mul_289 1 1 1084 828 +Split splitncnn_26 1 2 828 828_splitncnn_0 828_splitncnn_1 +Convolution Conv_290 1 1 828_splitncnn_1 1087 0=128 1=3 4=1 5=1 6=147456 +Swish Mul_292 1 1 1087 832 +Convolution Conv_293 1 1 832 1090 0=128 1=3 4=1 5=1 6=147456 +Swish Mul_295 1 1 1090 836 +Convolution Conv_296 1 1 836 849 0=80 1=1 5=1 6=10240 9=4 +Convolution Conv_297 1 1 828_splitncnn_0 1093 0=128 1=3 4=1 5=1 6=147456 +Swish Mul_299 1 1 1093 841 +Convolution Conv_300 1 1 841 1096 0=128 1=3 4=1 5=1 6=147456 +Swish Mul_302 1 1 1096 845 +Split splitncnn_27 1 2 845 845_splitncnn_0 845_splitncnn_1 +Convolution Conv_303 1 1 845_splitncnn_1 846 0=4 1=1 5=1 6=512 +Convolution Conv_304 1 1 845_splitncnn_0 848 0=1 1=1 5=1 6=128 9=4 +Concat Concat_307 3 1 846 848 849 850 +Reshape Reshape_315 1 1 798 858 0=-1 1=85 +Reshape Reshape_323 1 1 824 866 0=-1 1=85 +Reshape Reshape_331 1 1 850 874 0=-1 1=85 +Concat Concat_332 3 1 858 866 874 875 0=1 +Permute Transpose_333 1 1 875 output 0=1 diff --git a/multimodal/YOLOX/demo/ncnn/android/app/src/main/java/com/megvii/yoloXncnn/MainActivity.java b/multimodal/YOLOX/demo/ncnn/android/app/src/main/java/com/megvii/yoloXncnn/MainActivity.java new file mode 100644 index 0000000000000000000000000000000000000000..0f57e4f1297e3d4787d9e859bccc386bd3bbab06 --- /dev/null +++ b/multimodal/YOLOX/demo/ncnn/android/app/src/main/java/com/megvii/yoloXncnn/MainActivity.java @@ -0,0 +1,247 @@ +// Some code in this file is based on: +// https://github.com/nihui/ncnn-android-yolov5/blob/master/app/src/main/java/com/tencent/yolov5ncnn/MainActivity.java +// 
Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved. +// Copyright (C) Megvii, Inc. and its affiliates. All rights reserved. + +package com.megvii.yoloXncnn; + +import android.app.Activity; +import android.content.Intent; +import android.graphics.Bitmap; +import android.graphics.BitmapFactory; +import android.graphics.Canvas; +import android.graphics.Color; +import android.graphics.Paint; +import android.media.ExifInterface; +import android.graphics.Matrix; +import android.net.Uri; +import android.os.Bundle; +import android.util.Log; +import android.view.View; +import android.widget.Button; +import android.widget.ImageView; + +import java.io.FileNotFoundException; +import java.io.InputStream; +import java.io.IOException; + +public class MainActivity extends Activity +{ + private static final int SELECT_IMAGE = 1; + + private ImageView imageView; + private Bitmap bitmap = null; + private Bitmap yourSelectedImage = null; + + private YOLOXncnn yoloX = new YOLOXncnn(); + + /** Called when the activity is first created. */ + @Override + public void onCreate(Bundle savedInstanceState) + { + super.onCreate(savedInstanceState); + setContentView(R.layout.main); + + boolean ret_init = yoloX.Init(getAssets()); + if (!ret_init) + { + Log.e("MainActivity", "yoloXncnn Init failed"); + } + + imageView = (ImageView) findViewById(R.id.imageView); + + Button buttonImage = (Button) findViewById(R.id.buttonImage); + buttonImage.setOnClickListener(new View.OnClickListener() { + @Override + public void onClick(View arg0) { + Intent i = new Intent(Intent.ACTION_PICK); + i.setType("image/*"); + startActivityForResult(i, SELECT_IMAGE); + } + }); + + Button buttonDetect = (Button) findViewById(R.id.buttonDetect); + buttonDetect.setOnClickListener(new View.OnClickListener() { + @Override + public void onClick(View arg0) { + if (yourSelectedImage == null) + return; + YOLOXncnn.Obj[] objects = yoloX.Detect(yourSelectedImage, false); + + showObjects(objects); + } + }); + + Button buttonDetectGPU = (Button) findViewById(R.id.buttonDetectGPU); + buttonDetectGPU.setOnClickListener(new View.OnClickListener() { + @Override + public void onClick(View arg0) { + if (yourSelectedImage == null) + return; + + YOLOXncnn.Obj[] objects = yoloX.Detect(yourSelectedImage, true); + + showObjects(objects); + } + }); + } + + private void showObjects(YOLOXncnn.Obj[] objects) + { + if (objects == null) + { + imageView.setImageBitmap(bitmap); + return; + } + + // draw objects on bitmap + Bitmap rgba = bitmap.copy(Bitmap.Config.ARGB_8888, true); + + final int[] colors = new int[] { + Color.rgb( 54, 67, 244), + Color.rgb( 99, 30, 233), + Color.rgb(176, 39, 156), + Color.rgb(183, 58, 103), + Color.rgb(181, 81, 63), + Color.rgb(243, 150, 33), + Color.rgb(244, 169, 3), + Color.rgb(212, 188, 0), + Color.rgb(136, 150, 0), + Color.rgb( 80, 175, 76), + Color.rgb( 74, 195, 139), + Color.rgb( 57, 220, 205), + Color.rgb( 59, 235, 255), + Color.rgb( 7, 193, 255), + Color.rgb( 0, 152, 255), + Color.rgb( 34, 87, 255), + Color.rgb( 72, 85, 121), + Color.rgb(158, 158, 158), + Color.rgb(139, 125, 96) + }; + + Canvas canvas = new Canvas(rgba); + + Paint paint = new Paint(); + paint.setStyle(Paint.Style.STROKE); + paint.setStrokeWidth(4); + + Paint textbgpaint = new Paint(); + textbgpaint.setColor(Color.WHITE); + textbgpaint.setStyle(Paint.Style.FILL); + + Paint textpaint = new Paint(); + textpaint.setColor(Color.BLACK); + textpaint.setTextSize(26); + textpaint.setTextAlign(Paint.Align.LEFT); + + for (int i = 0; i < objects.length; i++) + 
{ + paint.setColor(colors[i % 19]); + + canvas.drawRect(objects[i].x, objects[i].y, objects[i].x + objects[i].w, objects[i].y + objects[i].h, paint); + + // draw filled text inside image + { + String text = objects[i].label + " = " + String.format("%.1f", objects[i].prob * 100) + "%"; + + float text_width = textpaint.measureText(text); + float text_height = - textpaint.ascent() + textpaint.descent(); + + float x = objects[i].x; + float y = objects[i].y - text_height; + if (y < 0) + y = 0; + if (x + text_width > rgba.getWidth()) + x = rgba.getWidth() - text_width; + + canvas.drawRect(x, y, x + text_width, y + text_height, textbgpaint); + + canvas.drawText(text, x, y - textpaint.ascent(), textpaint); + } + } + + imageView.setImageBitmap(rgba); + } + + @Override + protected void onActivityResult(int requestCode, int resultCode, Intent data) + { + super.onActivityResult(requestCode, resultCode, data); + + if (resultCode == RESULT_OK && null != data) { + Uri selectedImage = data.getData(); + + try + { + if (requestCode == SELECT_IMAGE) { + bitmap = decodeUri(selectedImage); + + yourSelectedImage = bitmap.copy(Bitmap.Config.ARGB_8888, true); + + imageView.setImageBitmap(bitmap); + } + } + catch (FileNotFoundException e) + { + Log.e("MainActivity", "FileNotFoundException"); + return; + } + } + } + + private Bitmap decodeUri(Uri selectedImage) throws FileNotFoundException + { + // Decode image size + BitmapFactory.Options o = new BitmapFactory.Options(); + o.inJustDecodeBounds = true; + BitmapFactory.decodeStream(getContentResolver().openInputStream(selectedImage), null, o); + + // The new size we want to scale to + final int REQUIRED_SIZE = 640; + + // Find the correct scale value. It should be the power of 2. + int width_tmp = o.outWidth, height_tmp = o.outHeight; + int scale = 1; + while (true) { + if (width_tmp / 2 < REQUIRED_SIZE || height_tmp / 2 < REQUIRED_SIZE) { + break; + } + width_tmp /= 2; + height_tmp /= 2; + scale *= 2; + } + + // Decode with inSampleSize + BitmapFactory.Options o2 = new BitmapFactory.Options(); + o2.inSampleSize = scale; + Bitmap bitmap = BitmapFactory.decodeStream(getContentResolver().openInputStream(selectedImage), null, o2); + + // Rotate according to EXIF + int rotate = 0; + try + { + ExifInterface exif = new ExifInterface(getContentResolver().openInputStream(selectedImage)); + int orientation = exif.getAttributeInt(ExifInterface.TAG_ORIENTATION, ExifInterface.ORIENTATION_NORMAL); + switch (orientation) { + case ExifInterface.ORIENTATION_ROTATE_270: + rotate = 270; + break; + case ExifInterface.ORIENTATION_ROTATE_180: + rotate = 180; + break; + case ExifInterface.ORIENTATION_ROTATE_90: + rotate = 90; + break; + } + } + catch (IOException e) + { + Log.e("MainActivity", "ExifInterface IOException"); + } + + Matrix matrix = new Matrix(); + matrix.postRotate(rotate); + return Bitmap.createBitmap(bitmap, 0, 0, bitmap.getWidth(), bitmap.getHeight(), matrix, true); + } + +} diff --git a/multimodal/YOLOX/demo/ncnn/android/app/src/main/java/com/megvii/yoloXncnn/YOLOXncnn.java b/multimodal/YOLOX/demo/ncnn/android/app/src/main/java/com/megvii/yoloXncnn/YOLOXncnn.java new file mode 100644 index 0000000000000000000000000000000000000000..212e1c2b881b89c69f27211160df0d2c61a098d8 --- /dev/null +++ b/multimodal/YOLOX/demo/ncnn/android/app/src/main/java/com/megvii/yoloXncnn/YOLOXncnn.java @@ -0,0 +1,27 @@ +// Copyright (C) Megvii, Inc. and its affiliates. All rights reserved. 
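+//
+// Thin Java wrapper around the native ncnn detector (libyoloXncnn.so):
+// Init() loads the model from the app assets, Detect() runs inference on a
+// Bitmap and returns the detections as Obj[] (x, y, w, h, label, prob).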
+ +package com.megvii.yoloXncnn; + +import android.content.res.AssetManager; +import android.graphics.Bitmap; + +public class YOLOXncnn +{ + public native boolean Init(AssetManager mgr); + + public class Obj + { + public float x; + public float y; + public float w; + public float h; + public String label; + public float prob; + } + + public native Obj[] Detect(Bitmap bitmap, boolean use_gpu); + + static { + System.loadLibrary("yoloXncnn"); + } +} diff --git a/multimodal/YOLOX/demo/ncnn/android/app/src/main/java/com/megvii/yoloXncnn/yoloXncnn.java b/multimodal/YOLOX/demo/ncnn/android/app/src/main/java/com/megvii/yoloXncnn/yoloXncnn.java new file mode 100644 index 0000000000000000000000000000000000000000..212e1c2b881b89c69f27211160df0d2c61a098d8 --- /dev/null +++ b/multimodal/YOLOX/demo/ncnn/android/app/src/main/java/com/megvii/yoloXncnn/yoloXncnn.java @@ -0,0 +1,27 @@ +// Copyright (C) Megvii, Inc. and its affiliates. All rights reserved. + +package com.megvii.yoloXncnn; + +import android.content.res.AssetManager; +import android.graphics.Bitmap; + +public class YOLOXncnn +{ + public native boolean Init(AssetManager mgr); + + public class Obj + { + public float x; + public float y; + public float w; + public float h; + public String label; + public float prob; + } + + public native Obj[] Detect(Bitmap bitmap, boolean use_gpu); + + static { + System.loadLibrary("yoloXncnn"); + } +} diff --git a/multimodal/YOLOX/demo/ncnn/android/app/src/main/jni/CMakeLists.txt b/multimodal/YOLOX/demo/ncnn/android/app/src/main/jni/CMakeLists.txt new file mode 100644 index 0000000000000000000000000000000000000000..d4b8cd476bc66ce7f4a381dd4299cf391ad67260 --- /dev/null +++ b/multimodal/YOLOX/demo/ncnn/android/app/src/main/jni/CMakeLists.txt @@ -0,0 +1,14 @@ +project(yoloXncnn) + +cmake_minimum_required(VERSION 3.4.1) + +set(ncnn_DIR ${CMAKE_SOURCE_DIR}/ncnn-20210525-android-vulkan/${ANDROID_ABI}/lib/cmake/ncnn) +find_package(ncnn REQUIRED) + +add_library(yoloXncnn SHARED yoloXncnn_jni.cpp) + +target_link_libraries(yoloXncnn + ncnn + + jnigraphics +) diff --git a/multimodal/YOLOX/demo/ncnn/android/app/src/main/jni/yoloXncnn_jni.cpp b/multimodal/YOLOX/demo/ncnn/android/app/src/main/jni/yoloXncnn_jni.cpp new file mode 100644 index 0000000000000000000000000000000000000000..c27867d2be73cd51a02033f6e7a50b7721954db8 --- /dev/null +++ b/multimodal/YOLOX/demo/ncnn/android/app/src/main/jni/yoloXncnn_jni.cpp @@ -0,0 +1,474 @@ +// Some code in this file is based on: +// https://github.com/nihui/ncnn-android-yolov5/blob/master/app/src/main/jni/yolov5ncnn_jni.cpp +// Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved. +// Copyright (C) Megvii, Inc. and its affiliates. All rights reserved. 
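+//
+// JNI glue for the Android demo: Init() loads yolox.param / yolox.bin from the
+// app assets and registers the custom YoloV5Focus layer; Detect() letterboxes
+// the incoming Bitmap to 640x640, runs ncnn inference (optionally via Vulkan),
+// decodes and NMS-filters the predictions, and returns them as Java Obj[].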
+ +#include +#include +#include + +#include + +#include +#include + +// ncnn +#include "layer.h" +#include "net.h" +#include "benchmark.h" + +static ncnn::UnlockedPoolAllocator g_blob_pool_allocator; +static ncnn::PoolAllocator g_workspace_pool_allocator; + +static ncnn::Net yoloX; + +class YoloV5Focus : public ncnn::Layer +{ +public: + YoloV5Focus() + { + one_blob_only = true; + } + + virtual int forward(const ncnn::Mat& bottom_blob, ncnn::Mat& top_blob, const ncnn::Option& opt) const + { + int w = bottom_blob.w; + int h = bottom_blob.h; + int channels = bottom_blob.c; + + int outw = w / 2; + int outh = h / 2; + int outc = channels * 4; + + top_blob.create(outw, outh, outc, 4u, 1, opt.blob_allocator); + if (top_blob.empty()) + return -100; + + #pragma omp parallel for num_threads(opt.num_threads) + for (int p = 0; p < outc; p++) + { + const float* ptr = bottom_blob.channel(p % channels).row((p / channels) % 2) + ((p / channels) / 2); + float* outptr = top_blob.channel(p); + + for (int i = 0; i < outh; i++) + { + for (int j = 0; j < outw; j++) + { + *outptr = *ptr; + + outptr += 1; + ptr += 2; + } + + ptr += w; + } + } + + return 0; + } +}; + +DEFINE_LAYER_CREATOR(YoloV5Focus) + +struct Object +{ + float x; + float y; + float w; + float h; + int label; + float prob; +}; + +struct GridAndStride +{ + int grid0; + int grid1; + int stride; +}; + +static inline float intersection_area(const Object& a, const Object& b) +{ + if (a.x > b.x + b.w || a.x + a.w < b.x || a.y > b.y + b.h || a.y + a.h < b.y) + { + // no intersection + return 0.f; + } + + float inter_width = std::min(a.x + a.w, b.x + b.w) - std::max(a.x, b.x); + float inter_height = std::min(a.y + a.h, b.y + b.h) - std::max(a.y, b.y); + + return inter_width * inter_height; +} + +static void qsort_descent_inplace(std::vector& faceobjects, int left, int right) +{ + int i = left; + int j = right; + float p = faceobjects[(left + right) / 2].prob; + + while (i <= j) + { + while (faceobjects[i].prob > p) + i++; + + while (faceobjects[j].prob < p) + j--; + + if (i <= j) + { + // swap + std::swap(faceobjects[i], faceobjects[j]); + + i++; + j--; + } + } + + #pragma omp parallel sections + { + #pragma omp section + { + if (left < j) qsort_descent_inplace(faceobjects, left, j); + } + #pragma omp section + { + if (i < right) qsort_descent_inplace(faceobjects, i, right); + } + } +} + +static void qsort_descent_inplace(std::vector& faceobjects) +{ + if (faceobjects.empty()) + return; + + qsort_descent_inplace(faceobjects, 0, faceobjects.size() - 1); +} + +static void nms_sorted_bboxes(const std::vector& faceobjects, std::vector& picked, float nms_threshold) +{ + picked.clear(); + + const int n = faceobjects.size(); + + std::vector areas(n); + for (int i = 0; i < n; i++) + { + areas[i] = faceobjects[i].w * faceobjects[i].h; + } + + for (int i = 0; i < n; i++) + { + const Object& a = faceobjects[i]; + + int keep = 1; + for (int j = 0; j < (int)picked.size(); j++) + { + const Object& b = faceobjects[picked[j]]; + + // intersection over union + float inter_area = intersection_area(a, b); + float union_area = areas[i] + areas[picked[j]] - inter_area; + // float IoU = inter_area / union_area + if (inter_area / union_area > nms_threshold) + keep = 0; + } + + if (keep) + picked.push_back(i); + } +} + +static void generate_grids_and_stride(const int target_size, std::vector& strides, std::vector& grid_strides) +{ + for (auto stride : strides) + { + int num_grid = target_size / stride; + for (int g1 = 0; g1 < num_grid; g1++) + { + for (int g0 = 0; g0 < 
num_grid; g0++) + { + grid_strides.push_back((GridAndStride){g0, g1, stride}); + } + } + } +} + +static void generate_yolox_proposals(std::vector grid_strides, const ncnn::Mat& feat_blob, float prob_threshold, std::vector& objects) +{ + const int num_grid = feat_blob.h; + fprintf(stderr, "output height: %d, width: %d, channels: %d, dims:%d\n", feat_blob.h, feat_blob.w, feat_blob.c, feat_blob.dims); + + const int num_class = feat_blob.w - 5; + + const int num_anchors = grid_strides.size(); + + const float* feat_ptr = feat_blob.channel(0); + for (int anchor_idx = 0; anchor_idx < num_anchors; anchor_idx++) + { + const int grid0 = grid_strides[anchor_idx].grid0; + const int grid1 = grid_strides[anchor_idx].grid1; + const int stride = grid_strides[anchor_idx].stride; + + // yolox/models/yolo_head.py decode logic + // outputs[..., :2] = (outputs[..., :2] + grids) * strides + // outputs[..., 2:4] = torch.exp(outputs[..., 2:4]) * strides + float x_center = (feat_ptr[0] + grid0) * stride; + float y_center = (feat_ptr[1] + grid1) * stride; + float w = exp(feat_ptr[2]) * stride; + float h = exp(feat_ptr[3]) * stride; + float x0 = x_center - w * 0.5f; + float y0 = y_center - h * 0.5f; + + float box_objectness = feat_ptr[4]; + for (int class_idx = 0; class_idx < num_class; class_idx++) + { + float box_cls_score = feat_ptr[5 + class_idx]; + float box_prob = box_objectness * box_cls_score; + if (box_prob > prob_threshold) + { + Object obj; + obj.x = x0; + obj.y = y0; + obj.w = w; + obj.h = h; + obj.label = class_idx; + obj.prob = box_prob; + + objects.push_back(obj); + } + + } // class loop + feat_ptr += feat_blob.w; + + } // point anchor loop +} + + +extern "C" { + +// FIXME DeleteGlobalRef is missing for objCls +static jclass objCls = NULL; +static jmethodID constructortorId; +static jfieldID xId; +static jfieldID yId; +static jfieldID wId; +static jfieldID hId; +static jfieldID labelId; +static jfieldID probId; + +JNIEXPORT jint JNI_OnLoad(JavaVM* vm, void* reserved) +{ + __android_log_print(ANDROID_LOG_DEBUG, "YOLOXncnn", "JNI_OnLoad"); + + ncnn::create_gpu_instance(); + + return JNI_VERSION_1_4; +} + +JNIEXPORT void JNI_OnUnload(JavaVM* vm, void* reserved) +{ + __android_log_print(ANDROID_LOG_DEBUG, "YOLOXncnn", "JNI_OnUnload"); + + ncnn::destroy_gpu_instance(); +} + +// public native boolean Init(AssetManager mgr); +JNIEXPORT jboolean JNICALL Java_com_megvii_yoloXncnn_YOLOXncnn_Init(JNIEnv* env, jobject thiz, jobject assetManager) +{ + ncnn::Option opt; + opt.lightmode = true; + opt.num_threads = 4; + opt.blob_allocator = &g_blob_pool_allocator; + opt.workspace_allocator = &g_workspace_pool_allocator; + opt.use_packing_layout = true; + + // use vulkan compute + if (ncnn::get_gpu_count() != 0) + opt.use_vulkan_compute = true; + + AAssetManager* mgr = AAssetManager_fromJava(env, assetManager); + + yoloX.opt = opt; + + yoloX.register_custom_layer("YoloV5Focus", YoloV5Focus_layer_creator); + + // init param + { + int ret = yoloX.load_param(mgr, "yolox.param"); + if (ret != 0) + { + __android_log_print(ANDROID_LOG_DEBUG, "YOLOXncnn", "load_param failed"); + return JNI_FALSE; + } + } + + // init bin + { + int ret = yoloX.load_model(mgr, "yolox.bin"); + if (ret != 0) + { + __android_log_print(ANDROID_LOG_DEBUG, "YOLOXncnn", "load_model failed"); + return JNI_FALSE; + } + } + + // init jni glue + jclass localObjCls = env->FindClass("com/megvii/yoloXncnn/YOLOXncnn$Obj"); + objCls = reinterpret_cast(env->NewGlobalRef(localObjCls)); + + constructortorId = env->GetMethodID(objCls, "", 
"(Lcom/megvii/yoloXncnn/YOLOXncnn;)V"); + + xId = env->GetFieldID(objCls, "x", "F"); + yId = env->GetFieldID(objCls, "y", "F"); + wId = env->GetFieldID(objCls, "w", "F"); + hId = env->GetFieldID(objCls, "h", "F"); + labelId = env->GetFieldID(objCls, "label", "Ljava/lang/String;"); + probId = env->GetFieldID(objCls, "prob", "F"); + + return JNI_TRUE; +} + +// public native Obj[] Detect(Bitmap bitmap, boolean use_gpu); +JNIEXPORT jobjectArray JNICALL Java_com_megvii_yoloXncnn_YOLOXncnn_Detect(JNIEnv* env, jobject thiz, jobject bitmap, jboolean use_gpu) +{ + if (use_gpu == JNI_TRUE && ncnn::get_gpu_count() == 0) + { + return NULL; + //return env->NewStringUTF("no vulkan capable gpu"); + } + + double start_time = ncnn::get_current_time(); + + AndroidBitmapInfo info; + AndroidBitmap_getInfo(env, bitmap, &info); + const int width = info.width; + const int height = info.height; + if (info.format != ANDROID_BITMAP_FORMAT_RGBA_8888) + return NULL; + + // parameters which might change for different model + const int target_size = 640; + const float prob_threshold = 0.3f; + const float nms_threshold = 0.65f; + std::vector strides = {8, 16, 32}; // might have stride=64 + + int w = width; + int h = height; + float scale = 1.f; + if (w > h) + { + scale = (float)target_size / w; + w = target_size; + h = h * scale; + } + else + { + scale = (float)target_size / h; + h = target_size; + w = w * scale; + } + + ncnn::Mat in = ncnn::Mat::from_android_bitmap_resize(env, bitmap, ncnn::Mat::PIXEL_RGB2BGR, w, h); + + // pad to target_size rectangle + int wpad = target_size - w; + int hpad = target_size - h; + ncnn::Mat in_pad; + // different from yolov5, yolox only pad on bottom and right side, + // which means users don't need to extra padding info to decode boxes coordinate. 
+ ncnn::copy_make_border(in, in_pad, 0, hpad, 0, wpad, ncnn::BORDER_CONSTANT, 114.f); + + // yolox + std::vector objects; + { + + ncnn::Extractor ex = yoloX.create_extractor(); + + ex.set_vulkan_compute(use_gpu); + + ex.input("images", in_pad); + + std::vector proposals; + + // yolox decode and generate proposal logic + { + ncnn::Mat out; + ex.extract("output", out); + + std::vector grid_strides; + generate_grids_and_stride(target_size, strides, grid_strides); + generate_yolox_proposals(grid_strides, out, prob_threshold, proposals); + + } + + // sort all proposals by score from highest to lowest + qsort_descent_inplace(proposals); + + // apply nms with nms_threshold + std::vector picked; + nms_sorted_bboxes(proposals, picked, nms_threshold); + + int count = picked.size(); + + objects.resize(count); + for (int i = 0; i < count; i++) + { + objects[i] = proposals[picked[i]]; + + // adjust offset to original unpadded + float x0 = (objects[i].x) / scale; + float y0 = (objects[i].y) / scale; + float x1 = (objects[i].x + objects[i].w) / scale; + float y1 = (objects[i].y + objects[i].h) / scale; + + // clip + x0 = std::max(std::min(x0, (float)(width - 1)), 0.f); + y0 = std::max(std::min(y0, (float)(height - 1)), 0.f); + x1 = std::max(std::min(x1, (float)(width - 1)), 0.f); + y1 = std::max(std::min(y1, (float)(height - 1)), 0.f); + + objects[i].x = x0; + objects[i].y = y0; + objects[i].w = x1 - x0; + objects[i].h = y1 - y0; + } + } + + // objects to Obj[] + static const char* class_names[] = { + "person", "bicycle", "car", "motorcycle", "airplane", "bus", "train", "truck", "boat", "traffic light", + "fire hydrant", "stop sign", "parking meter", "bench", "bird", "cat", "dog", "horse", "sheep", "cow", + "elephant", "bear", "zebra", "giraffe", "backpack", "umbrella", "handbag", "tie", "suitcase", "frisbee", + "skis", "snowboard", "sports ball", "kite", "baseball bat", "baseball glove", "skateboard", "surfboard", + "tennis racket", "bottle", "wine glass", "cup", "fork", "knife", "spoon", "bowl", "banana", "apple", + "sandwich", "orange", "broccoli", "carrot", "hot dog", "pizza", "donut", "cake", "chair", "couch", + "potted plant", "bed", "dining table", "toilet", "tv", "laptop", "mouse", "remote", "keyboard", "cell phone", + "microwave", "oven", "toaster", "sink", "refrigerator", "book", "clock", "vase", "scissors", "teddy bear", + "hair drier", "toothbrush" + }; + + jobjectArray jObjArray = env->NewObjectArray(objects.size(), objCls, NULL); + + for (size_t i=0; iNewObject(objCls, constructortorId, thiz); + + env->SetFloatField(jObj, xId, objects[i].x); + env->SetFloatField(jObj, yId, objects[i].y); + env->SetFloatField(jObj, wId, objects[i].w); + env->SetFloatField(jObj, hId, objects[i].h); + env->SetObjectField(jObj, labelId, env->NewStringUTF(class_names[objects[i].label])); + env->SetFloatField(jObj, probId, objects[i].prob); + + env->SetObjectArrayElement(jObjArray, i, jObj); + } + + double elasped = ncnn::get_current_time() - start_time; + __android_log_print(ANDROID_LOG_DEBUG, "YOLOXncnn", "%.2fms detect", elasped); + + return jObjArray; +} + +} diff --git a/multimodal/YOLOX/demo/ncnn/android/app/src/main/res/layout/main.xml b/multimodal/YOLOX/demo/ncnn/android/app/src/main/res/layout/main.xml new file mode 100644 index 0000000000000000000000000000000000000000..9440a1fdf222f7be484cb6511c200ea1c0eaae9b --- /dev/null +++ b/multimodal/YOLOX/demo/ncnn/android/app/src/main/res/layout/main.xml @@ -0,0 +1,35 @@ + + + + + +