{
"cells": [
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import gradio as gr\n",
"import numpy as np\n",
"from os import environ\n",
"from PIL import Image as PImage\n",
"from torchvision import transforms as T\n",
"from transformers import MaskFormerForInstanceSegmentation, MaskFormerImageProcessor"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"ade_mean=[0.485, 0.456, 0.406]\n",
"ade_std=[0.229, 0.224, 0.225]\n",
"\n",
"model_id = f\"thiagohersan/maskformer-satellite-trees\""
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# preprocessor = MaskFormerImageProcessor.from_pretrained(model_id)\n",
"preprocessor = MaskFormerImageProcessor(\n",
" do_resize=False,\n",
" do_normalize=False,\n",
" do_rescale=False,\n",
" ignore_index=255,\n",
" reduce_labels=False\n",
")\n",
"\n",
"hf_token = environ.get('HFTOKEN') or True\n",
"model = MaskFormerForInstanceSegmentation.from_pretrained(model_id, use_auth_token=hf_token)\n",
"\n",
"test_transform = T.Compose([\n",
" T.ToTensor(),\n",
" T.Normalize(mean=ade_mean, std=ade_std)\n",
"])\n",
"\n",
"with PImage.open(\"../color-filter-calculator/assets/Artshack_screen.jpg\") as img:\n",
" img_size = (img.height, img.width)\n",
" norm_image = test_transform(np.array(img))\n",
" inputs = preprocessor(images=norm_image, return_tensors=\"pt\")\n",
" "
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"outputs = model(**inputs)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"results = preprocessor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[img_size])[0]\n",
"results = results.numpy()\n",
"\n",
"labels = np.unique(results)"
]
},
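{
"cell_type": "markdown",
"metadata": {},
"source": [
"A small sketch beyond the original steps: since `results` holds one class id per pixel, counting pixels gives a rough estimate of how much of the image each class covers. It uses only names already defined above."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# rough coverage estimate: fraction of pixels assigned to each class\n",
"for label_id in labels:\n",
"    fraction = float((results == label_id).sum()) / results.size\n",
"    print(f\"{model.config.id2label[int(label_id)]}: {fraction:.1%}\")"
]
},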
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"for label_id in labels:\n",
" print(model.config.id2label[label_id])"
]
}
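,
{
"cell_type": "markdown",
"metadata": {},
"source": [
"The `gradio` import at the top is otherwise unused; below is a minimal sketch of how the pipeline might be wrapped in a demo. The function name and the `Interface` settings are assumptions, not part of the original notebook."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# hypothetical gradio wrapper: takes a PIL image, returns the detected class names\n",
"def list_classes(img):\n",
"    norm = test_transform(np.array(img))\n",
"    inp = preprocessor(images=norm, return_tensors=\"pt\")\n",
"    with torch.no_grad():\n",
"        out = model(**inp)\n",
"    seg = preprocessor.post_process_semantic_segmentation(\n",
"        outputs=out, target_sizes=[(img.height, img.width)])[0].numpy()\n",
"    return \", \".join(model.config.id2label[int(l)] for l in np.unique(seg))\n",
"\n",
"# demo = gr.Interface(fn=list_classes, inputs=gr.Image(type=\"pil\"), outputs=\"text\")\n",
"# demo.launch()"
]
}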
],
"metadata": {
"kernelspec": {
"display_name": "Python 3.8.15 ('hf-gradio')",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.8.15"
},
"orig_nbformat": 4,
"vscode": {
"interpreter": {
"hash": "4888b226c77b860705e4be316b14a092026f41c3585ee0ddb38f3008c0cb495e"
}
}
},
"nbformat": 4,
"nbformat_minor": 2
}