{ "cells": [ { "cell_type": "code", "execution_count": 81, "metadata": { "id": "4xDkP0IKzLgW" }, "outputs": [], "source": [ "from os import listdir\n", "from numpy import array\n", "from keras.models import Model\n", "from pickle import dump\n", "from keras.applications.vgg16 import VGG16\n", "from tensorflow.keras.preprocessing.image import load_img\n", "from tensorflow.keras.preprocessing.image import img_to_array\n", "from tensorflow.keras.preprocessing.sequence import pad_sequences\n", "from keras.preprocessing.text import Tokenizer\n", "from keras.utils import to_categorical\n", "from keras.utils import plot_model\n", "from keras.models import Model\n", "from keras.layers import Input\n", "from keras.layers import Dense\n", "from keras.layers import LSTM\n", "from keras.layers import Embedding\n", "from keras.layers import Dropout\n", "from tensorflow.keras.layers import Add\n", "from keras.callbacks import ModelCheckpoint" ] }, { "cell_type": "code", "source": [ "from keras.applications.vgg16 import VGG16, preprocess_input\n", "model = VGG16()\n", "# re-structure the model\n", "model.layers.pop()\n", "model = Model(inputs=model.inputs, outputs=model.layers[-2].output)\n", "# summarize\n", "print(model.summary())" ], "metadata": { "colab": { "base_uri": "https://localhost:8080/" }, "id": "UuRJY0-d9yux", "outputId": "22a58ba4-0f1f-473f-ca65-b7422a453f22" }, "execution_count": 82, "outputs": [ { "output_type": "stream", "name": "stdout", "text": [ "Model: \"model_7\"\n", "_________________________________________________________________\n", " Layer (type) Output Shape Param # \n", "=================================================================\n", " input_8 (InputLayer) [(None, 224, 224, 3)] 0 \n", " \n", " block1_conv1 (Conv2D) (None, 224, 224, 64) 1792 \n", " \n", " block1_conv2 (Conv2D) (None, 224, 224, 64) 36928 \n", " \n", " block1_pool (MaxPooling2D) (None, 112, 112, 64) 0 \n", " \n", " block2_conv1 (Conv2D) (None, 112, 112, 128) 73856 \n", " \n", " block2_conv2 (Conv2D) (None, 112, 112, 128) 147584 \n", " \n", " block2_pool (MaxPooling2D) (None, 56, 56, 128) 0 \n", " \n", " block3_conv1 (Conv2D) (None, 56, 56, 256) 295168 \n", " \n", " block3_conv2 (Conv2D) (None, 56, 56, 256) 590080 \n", " \n", " block3_conv3 (Conv2D) (None, 56, 56, 256) 590080 \n", " \n", " block3_pool (MaxPooling2D) (None, 28, 28, 256) 0 \n", " \n", " block4_conv1 (Conv2D) (None, 28, 28, 512) 1180160 \n", " \n", " block4_conv2 (Conv2D) (None, 28, 28, 512) 2359808 \n", " \n", " block4_conv3 (Conv2D) (None, 28, 28, 512) 2359808 \n", " \n", " block4_pool (MaxPooling2D) (None, 14, 14, 512) 0 \n", " \n", " block5_conv1 (Conv2D) (None, 14, 14, 512) 2359808 \n", " \n", " block5_conv2 (Conv2D) (None, 14, 14, 512) 2359808 \n", " \n", " block5_conv3 (Conv2D) (None, 14, 14, 512) 2359808 \n", " \n", " block5_pool (MaxPooling2D) (None, 7, 7, 512) 0 \n", " \n", " flatten (Flatten) (None, 25088) 0 \n", " \n", " fc1 (Dense) (None, 4096) 102764544 \n", " \n", " fc2 (Dense) (None, 4096) 16781312 \n", " \n", "=================================================================\n", "Total params: 134,260,544\n", "Trainable params: 134,260,544\n", "Non-trainable params: 0\n", "_________________________________________________________________\n", "None\n" ] } ] }, { "cell_type": "code", "execution_count": 83, "metadata": { "id": "tR-lfP51bmp5" }, "outputs": [], "source": [ "from os import listdir\n", "from pickle import dump\n", "from tensorflow.keras.preprocessing.image import img_to_array, load_img\n", "from keras.models import 
Model\n", "\n", "# extract feature from each photo in directory\n", "def extract_features(directory):\n", "\t# extract features from each photo\n", "\tfeatures = dict()\n", "\tfor name in listdir(directory):\n", "\t\t# load an image from file\n", "\t\tfilename = directory + '/' + name\n", "\t\timage = load_img(filename, target_size=(224, 224))\n", "\t\t# convert the image pixels to a numpy array\n", "\t\timage = img_to_array(image)\n", "\t\t# reshape data for the model\n", "\t\timage = image.reshape((1, image.shape[0], image.shape[1], image.shape[2]))\n", "\t\t# prepare the image for the VGG model\n", "\t\timage = preprocess_input(image)\n", "\t\t# get features\n", "\t\tfeature = model.predict(image, verbose=0)\n", "\t\t# get image id\n", "\t\timage_id = name.split('.')[0]\n", "\t\t# store feature\n", "\t\tfeatures[image_id] = feature\n", "\t\tprint('>%s' % name)\n", "\treturn features" ] }, { "cell_type": "code", "execution_count": 84, "metadata": { "id": "nc7ks02Gjwgh" }, "outputs": [], "source": [ "# directory = \"/content/drive/MyDrive/Image_Captioning_Project/Images\"\n", "# features = extract_features(directory)\n", "# dump(features, open('features1.pkl', 'wb'))\n", "# print(\"Extracted Features: %d\" %len(features))" ] }, { "cell_type": "code", "source": [ "!ls" ], "metadata": { "colab": { "base_uri": "https://localhost:8080/" }, "id": "9fp0SwYv0VK6", "outputId": "4387bde5-1136-41b9-f03b-52404d2ee015" }, "execution_count": 85, "outputs": [ { "output_type": "stream", "name": "stdout", "text": [ "drive sample_data\n" ] } ] }, { "cell_type": "code", "execution_count": 86, "metadata": { "id": "-nIfYC9Yj7-b" }, "outputs": [], "source": [ "import string\n", "from nltk.tokenize import word_tokenize\n", "\n", "def load_doc(filename):\n", " # open the file as read only\n", " file = open(filename, 'r')\n", " # read all text\n", " text = file.read()\n", " # close the file\n", " file.close()\n", " return text" ] }, { "cell_type": "code", "execution_count": 87, "metadata": { "id": "n9FUCV49RnnS" }, "outputs": [], "source": [ "def load_descriptions(doc):\n", "\tmapping = dict()\n", "\t# process lines\n", "\tfor line in doc.split('\\n'):\n", "\t\t# split line by white space\n", "\t\ttokens = line.split()\n", "\t\tif len(line) < 2:\n", "\t\t\tcontinue\n", "\t\t# take the first token as the image id, the rest as the description\n", "\t\timage_id, image_desc = tokens[0], tokens[1:]\n", "\t\t# remove filename from image id\n", "\t\timage_id = image_id.split('.')[0]\n", "\t\t# convert description tokens back to string\n", "\t\timage_desc = ' '.join(image_desc)\n", "\t\t# create the list if needed\n", "\t\tif image_id not in mapping:\n", "\t\t\tmapping[image_id] = list()\n", "\t\t# store description\n", "\t\tmapping[image_id].append(image_desc)\n", "\treturn mapping" ] }, { "cell_type": "markdown", "source": [ "## Preprocessing of Text\n", "\n", "1. Convert all words to lowercase.\n", "2. Remove all punctuation.\n", "3. Remove all words that are one character or less in length (e.g. ‘a’).\n", "4. Remove all words with numbers in them." 
], "metadata": { "id": "l31beBIf2E3h" } }, { "cell_type": "code", "execution_count": 88, "metadata": { "id": "e6Dd0Ugej8sf" }, "outputs": [], "source": [ "def clean_descriptions(descriptions):\n", " # prepare translation table for removing punctuation\n", " table = str.maketrans('', '', string.punctuation)\n", " for key, desc_list in descriptions.items():\n", " for i in range(len(desc_list)):\n", " desc = desc_list[i]\n", " # tokenize\n", " desc = desc.split()\n", " # convert to lower case\n", " desc = [word.lower() for word in desc]\n", " # remove punctuation from each token\n", " desc = [w.translate(table) for w in desc]\n", " # remove hanging 's' and 'a'\n", " desc = [word for word in desc if len(word)>1]\n", " # remove tokens with numbers in them\n", " desc = [word for word in desc if word.isalpha()]\n", " # store as string\n", " desc_list[i] = ' '.join(desc)\n", "def to_vocabulary(descriptions):\n", " # build a list of all description strings\n", " all_desc = set()\n", " for key in descriptions.keys():\n", " [all_desc.update(d.split()) for d in descriptions[key]]\n", " return all_desc" ] }, { "cell_type": "code", "execution_count": 89, "metadata": { "id": "o7lzD3qgkCDs" }, "outputs": [], "source": [ "def save_descriptions(descriptions, filename):\n", " lines = list()\n", " for key, desc_list in descriptions.items():\n", " for desc in desc_list:\n", " lines.append(key + \" \" + desc)\n", " data = '\\n'.join(lines)\n", " file = open(filename, 'w')\n", " file.write(data)\n", " file.close()" ] }, { "cell_type": "code", "execution_count": 90, "metadata": { "colab": { "base_uri": "https://localhost:8080/" }, "id": "_kjcu6Ug3Lqc", "outputId": "4139ef68-a4a0-43f4-f86f-01f52cf73138" }, "outputs": [ { "output_type": "stream", "name": "stderr", "text": [ "[nltk_data] Downloading package punkt to /root/nltk_data...\n", "[nltk_data] Package punkt is already up-to-date!\n" ] }, { "output_type": "execute_result", "data": { "text/plain": [ "True" ] }, "metadata": {}, "execution_count": 90 } ], "source": [ "import nltk\n", "nltk.download('punkt')" ] }, { "cell_type": "code", "execution_count": 91, "metadata": { "colab": { "base_uri": "https://localhost:8080/" }, "id": "RancEv97kENF", "outputId": "8d8214e4-fe45-4e25-a602-74c143971471" }, "outputs": [ { "output_type": "stream", "name": "stdout", "text": [ "Loaded: 8092\n" ] } ], "source": [ "filename = \"/content/drive/MyDrive/Image_Captioning_Project/Flickr8k.token.txt\"\n", "doc = load_doc(filename)\n", "descriptions = load_descriptions(doc)\n", "print(\"Loaded: %d\" %len(descriptions))" ] }, { "cell_type": "code", "execution_count": 92, "metadata": { "colab": { "base_uri": "https://localhost:8080/" }, "id": "BD4eqsEukFqE", "outputId": "e9e98894-99c3-4ca4-d9bd-94fc4ba22e0e" }, "outputs": [ { "output_type": "stream", "name": "stdout", "text": [ "Vocab size: 8763\n" ] } ], "source": [ "#clean desc\n", "clean_descriptions(descriptions)\n", "vocab = to_vocabulary(descriptions)\n", "print(\"Vocab size: %d\" %len(vocab))" ] }, { "cell_type": "code", "execution_count": 93, "metadata": { "id": "rVZ9RHhbkGIo" }, "outputs": [], "source": [ "# save_descriptions(descriptions, \"descriptions2.txt\")" ] }, { "cell_type": "markdown", "source": [ "### Developing Deep Learning Model\n", "\n", "#### This section is divided into the following parts:\n", "\n", "Loading Data.\n", "Defining the Model.\n", "Fitting the Model." 
], "metadata": { "id": "PnV7xrpPCPrs" } }, { "cell_type": "code", "execution_count": 94, "metadata": { "id": "dZ8tvUjckG1F" }, "outputs": [], "source": [ "from pickle import dump\n", "\n", "#load into memory\n", "def load_doc(filename):\n", "\t# open the file as read only\n", "\tfile = open(filename, 'r')\n", "\t# read all text\n", "\ttext = file.read()\n", "\t# close the file\n", "\tfile.close()\n", "\treturn text\n", "\n", "#pre-defined list of photo identifier\n", "def load_set(filename):\n", " doc = load_doc(filename)\n", " dataset = list()\n", " for line in doc.split(\"\\n\"):\n", " if len(line) < 1:\n", " continue\n", " identifier = line.split('.')[0]\n", " dataset.append(identifier)\n", " return set(dataset)" ] }, { "cell_type": "markdown", "source": [ "load_clean_descriptions() that loads the cleaned text descriptions from ‘descriptions.txt‘ for a given set of identifiers and returns a dictionary of identifiers to lists of text descriptions.\n", "\n", "The model we will develop will generate a caption given a photo, and the caption will be generated one word at a time. The sequence of previously generated words will be provided as input. Therefore, we will need a ‘first word’ to kick-off the generation process and a ‘last word‘ to signal the end of the caption.\n", "\n", "We will use the strings ‘startseq‘ and ‘endseq‘ for this purpose." ], "metadata": { "id": "NxRnPZr4CZh4" } }, { "cell_type": "code", "execution_count": 95, "metadata": { "id": "9hF4PUCskHF_" }, "outputs": [], "source": [ "def load_photo_features(features, dataset):\n", " all_features = load(open(features, 'rb'))\n", " features = {k: all_features[k] for k in dataset}\n", " return features" ] }, { "cell_type": "code", "execution_count": 96, "metadata": { "id": "kPQVXbKqSp6m" }, "outputs": [], "source": [ "def load_clean_descriptions(filename, dataset):\n", "\t# load document\n", "\tdoc = load_doc(filename)\n", "\tdescriptions = dict()\n", "\tfor line in doc.split('\\n'):\n", "\t\t# split line by white space\n", "\t\ttokens = line.split()\n", "\t\t# split id from description\n", "\t\timage_id, image_desc = tokens[0], tokens[1:]\n", "\t\t# skip images not in the set\n", "\t\tif image_id in dataset:\n", "\t\t\t# create list\n", "\t\t\tif image_id not in descriptions:\n", "\t\t\t\tdescriptions[image_id] = list()\n", "\t\t\t# wrap description in tokens\n", "\t\t\tdesc = 'startseq ' + ' '.join(image_desc) + ' endseq'\n", "\t\t\t# store\n", "\t\t\tdescriptions[image_id].append(desc)\n", "\treturn descriptions" ] }, { "cell_type": "code", "source": [ "from pickle import load\n", "\n", "# load training dataset (6K)\n", "filename = '/content/drive/MyDrive/Image_Captioning_Project/Flickr_8k.trainImages.txt'\n", "train = load_set(filename)\n", "print('Dataset: %d' % len(train))\n", "# descriptions\n", "train_descriptions = load_clean_descriptions('/content/drive/MyDrive/Image_Captioning_Project/descriptions1.txt', train)\n", "print('Descriptions: train=%d' % len(train_descriptions))\n", "# photo features\n", "train_features = load_photo_features('/content/drive/MyDrive/Image_Captioning_Project/features.pkl', train)\n", "print('Photos: train=%d' % len(train_features))" ], "metadata": { "colab": { "base_uri": "https://localhost:8080/" }, "id": "M8Y0VIWrxRdr", "outputId": "baef9b0d-885f-4ba6-b792-4d9b0ffcb52b" }, "execution_count": 97, "outputs": [ { "output_type": "stream", "name": "stdout", "text": [ "Dataset: 6000\n", "Descriptions: train=6000\n", "Photos: train=6000\n" ] } ] }, { "cell_type": "code", "execution_count": 98, 
"metadata": { "id": "CxCpUVLlZ1M6" }, "outputs": [], "source": [ "def load_doc(filename):\n", "\t# open the file as read only\n", "\tfile = open(filename, 'r')\n", "\t# read all text\n", "\ttext = file.read()\n", "\t# close the file\n", "\tfile.close\n", "\treturn text" ] }, { "cell_type": "code", "execution_count": 99, "metadata": { "id": "pqOcDdqUTHDW" }, "outputs": [], "source": [ "def load_set(filename):\n", " doc = load_doc(filename)\n", " dataset = list()\n", " for line in doc.split(\"\\n\"):\n", " if len(line) < 1:\n", " continue\n", " identifier = line.split('.')[0]\n", " dataset.append(identifier)\n", " return set(dataset)" ] }, { "cell_type": "code", "execution_count": 100, "metadata": { "id": "_l3IMXzPZ7mN" }, "outputs": [], "source": [ "def load_clean_descriptions(filename, dataset):\n", "\t# load document\n", "\tdoc = load_doc(filename)\n", "\tdescriptions = dict()\n", "\tfor line in doc.split('\\n'):\n", "\t\t# split line by white space\n", "\t\ttokens = line.split()\n", "\t\t# split id from description\n", "\t\timage_id, image_desc = tokens[0], tokens[1:]\n", "\t\t# skip images not in the set\n", "\t\tif image_id in dataset:\n", "\t\t\t# create list\n", "\t\t\tif image_id not in descriptions:\n", "\t\t\t\tdescriptions[image_id] = list()\n", "\t\t\t# wrap description in tokens\n", "\t\t\tdesc = 'startseq ' + ' '.join(image_desc) + ' endseq'\n", "\t\t\t# store\n", "\t\t\tdescriptions[image_id].append(desc)\n", "\treturn descriptions " ] }, { "cell_type": "code", "execution_count": 101, "metadata": { "id": "Cf_rfRQpTtu_" }, "outputs": [], "source": [ "def load_photo_features(filename, dataset):\n", "\t# load all features\n", "\tall_features = load(open(filename, 'rb'))\n", "\t# filter features\n", "\tfeatures = {k: all_features[k] for k in dataset}\n", "\treturn features" ] }, { "cell_type": "code", "execution_count": 102, "metadata": { "id": "0HHS8lQFkHR-" }, "outputs": [], "source": [ "# dict to clean list\n", "def to_lines(descriptions):\n", " all_desc = list()\n", " for key in descriptions.keys():\n", " [all_desc.append(d) for d in descriptions[key]]\n", " return all_desc\n", "\n", "def create_tokenizer(descriptions):\n", " lines = to_lines(descriptions)\n", " tokenizer = Tokenizer()\n", " tokenizer.fit_on_texts(lines)\n", " return tokenizer" ] }, { "cell_type": "code", "execution_count": 103, "metadata": { "id": "FjSt5rpvkXk3" }, "outputs": [], "source": [ "#len of description\n", "def max_length(description):\n", " lines = to_lines(description)\n", " return max(len(d.split()) for d in lines)" ] }, { "cell_type": "code", "execution_count": 104, "metadata": { "id": "cNko1BPwkX21" }, "outputs": [], "source": [ "# create input and output sequence\n", "def create_sequences(tokenizer, max_length, desc_list, photo):\n", " X1, X2, y = list(), list(), list()\n", " # walk through each description for the image\n", " for desc in desc_list:\n", " # encode the sequence\n", " seq = tokenizer.texts_to_sequences([desc])[0]\n", " # split one sequence into multiple X,y pairs\n", " for i in range(1, len(seq)):\n", " # split into input and output pair\n", " in_seq, out_seq = seq[:i], seq[i]\n", " # pad input sequence\n", " in_seq = pad_sequences([in_seq], maxlen=max_length)[0]\n", " # encode output sequence\n", " out_seq = to_categorical([out_seq], num_classes=vocab_size)[0]\n", " # store\n", " X1.append(photo)\n", " X2.append(in_seq)\n", " y.append(out_seq)\n", " return array(X1), array(X2), array(y)" ] }, { "cell_type": "markdown", "metadata": { "id": "q5LhU8zQkdqu" }, "source": [ "## 
Model building" ] }, { "cell_type": "code", "execution_count": 105, "metadata": { "id": "zT7KqC6bkYEj" }, "outputs": [], "source": [ "from tensorflow.keras.layers import add\n", "def define_model(vocab_size, max_length):\n", " # feature extractor model\n", " inputs1 = Input(shape=(1000,))\n", " fe1 = Dropout(0.5)(inputs1)\n", " fe2 = Dense(256, activation='relu')(fe1)\n", " # sequence model\n", " inputs2 = Input(shape=(max_length,))\n", " se1 = Embedding(vocab_size,output_dim=256, mask_zero=True)(inputs2)\n", " se2 = Dropout(0.5)(se1)\n", " se3 = LSTM(256)(se2)\n", " # decoder model\n", " decoder1 = add([fe2, se3])\n", " decoder2 = Dense(256, activation='relu')(decoder1)\n", " outputs = Dense(vocab_size, activation='softmax')(decoder2)\n", " # tie it together [image, seq] [word]\n", " model = Model(inputs=[inputs1, inputs2], outputs=outputs)\n", " model.compile(loss='categorical_crossentropy', optimizer='adam')\n", " # summarize model\n", " print(model.summary())\n", " return model" ] }, { "cell_type": "code", "execution_count": 106, "metadata": { "id": "fR-dFhrYkYJv" }, "outputs": [], "source": [ "# load batch of data\n", "def data_generator(descriptions, photos, tokenizer, max_length):\n", " # loop for ever over images\n", " while 1:\n", " for key, desc_list in descriptions.items():\n", " # retrieve the photo feature\n", " photo = photos[key][0]\n", " in_img, in_seq, out_word = create_sequences(tokenizer, max_length, desc_list, photo)\n", " yield [[in_img, in_seq], out_word]" ] }, { "cell_type": "code", "execution_count": 107, "metadata": { "colab": { "base_uri": "https://localhost:8080/" }, "id": "eJ6n7_dEkkis", "outputId": "4f0e3daa-18c7-4797-a7a6-5d193b02cf04" }, "outputs": [ { "output_type": "stream", "name": "stdout", "text": [ "Dataset: 6000\n", "train_descriptions= 6000\n", "photos: train= 6000\n", "Vocab size: 7579\n", "Description Length: 34\n" ] } ], "source": [ "#load train dataset\n", "import tensorflow as tf\n", "filename = \"/content/drive/MyDrive/Image_Captioning_Project/Flickr_8k.trainImages.txt\"\n", "train = load_set(filename)\n", "print(\"Dataset: %d\" %len(train))\n", "\n", "train_descriptions = load_clean_descriptions(\"/content/drive/MyDrive/Image_Captioning_Project/descriptions1.txt\", train)\n", "print(\"train_descriptions= %d\" %len(train_descriptions))\n", "\n", "train_feature = load_photo_features(\"/content/drive/MyDrive/Image_Captioning_Project/features.pkl\", train)\n", "print(\"photos: train= %d\" %len(train_feature))\n", "\n", "tokenizer = create_tokenizer(train_descriptions)\n", "vocab_size = len(tokenizer.word_index)+1\n", "print(\"Vocab size: %d\" %vocab_size)\n", "\n", "max_length = max_length(train_descriptions)\n", "print('Description Length: %d' % max_length)" ] }, { "cell_type": "code", "execution_count": 108, "metadata": { "id": "yUnKHod1BIup" }, "outputs": [], "source": [ "import pickle\n", "\n", "# Dump the tokenizer using pickle\n", "with open('tokenizer1.pkl', 'wb') as f:\n", " pickle.dump(tokenizer, f)" ] }, { "cell_type": "code", "execution_count": 109, "metadata": { "id": "-x7HX3aLkk2x" }, "outputs": [], "source": [ "#train model\n", "# model = define_model(vocab_size, max_length)\n", "# filename = \"/content/drive/MyDrive/Image_Captioning_Project/model_18.h5\"\n", "# model = load_model(filename)\n", "# epochs = 4\n", "# steps = len(train_descriptions)\n", "# model.summary()" ] }, { "cell_type": "code", "execution_count": 110, "metadata": { "id": "JyBqM_6ikk8Y" }, "outputs": [], "source": [ "# for i in range(epochs):\n", "# #create data 
generator\n", "# generator = data_generator(train_descriptions, train_feature, tokenizer, max_len)\n", "# model.fit(generator, epochs=1, steps_per_epoch = steps, verbose=1)\n", "# model.save(\"model_\" + str(i) + \".h5\")" ] }, { "cell_type": "code", "execution_count": 111, "metadata": { "id": "U4-B7smiaqCH" }, "outputs": [], "source": [ "def load_doc(filename):\n", "\t# open the file as read only\n", "\tfile = open(filename, 'r')\n", "\t# read all text\n", "\ttext = file.read()\n", "\t# close the file\n", "\tfile.close()\n", "\treturn text\n", "\n", "# load a pre-defined list of photo identifiers\n", "def load_set(filename):\n", "\tdoc = load_doc(filename)\n", "\tdataset = list()\n", "\t# process line by line\n", "\tfor line in doc.split('\\n'):\n", "\t\t# skip empty lines\n", "\t\tif len(line) < 1:\n", "\t\t\tcontinue\n", "\t\t# get the image identifier\n", "\t\tidentifier = line.split('.')[0]\n", "\t\tdataset.append(identifier)\n", "\treturn set(dataset)" ] }, { "cell_type": "code", "execution_count": 112, "metadata": { "id": "y42JWG04a1e2" }, "outputs": [], "source": [ "def load_photo_features(filename, dataset):\n", "\t# load all features\n", "\tall_features = load(open(filename, 'rb'))\n", "\t# filter features\n", "\tfeatures = {k: all_features[k] for k in dataset}\n", "\treturn features\n", "\n", "# covert a dictionary of clean descriptions to a list of descriptions\n", "def to_lines(descriptions):\n", "\tall_desc = list()\n", "\tfor key in descriptions.keys():\n", "\t\t[all_desc.append(d) for d in descriptions[key]]\n", "\treturn all_desc\n", "\n", "# fit a tokenizer given caption descriptions\n", "def create_tokenizer(descriptions):\n", "\tlines = to_lines(descriptions)\n", "\ttokenizer = Tokenizer()\n", "\ttokenizer.fit_on_texts(lines)\n", "\treturn tokenizer\n", "\n", "# calculate the length of the description with the most words\n", "def max_length(descriptions):\n", "\tlines = to_lines(descriptions)\n", "\treturn max(len(d.split()) for d in lines)\n", "\n", "# map an integer to a word\n", "def word_for_id(integer, tokenizer):\n", "\tfor word, index in tokenizer.word_index.items():\n", "\t\tif index == integer:\n", "\t\t\treturn word\n", "\treturn None " ] }, { "cell_type": "code", "source": [ "from tensorflow.keras.preprocessing.sequence import pad_sequences\n", "import numpy as np\n", "def generate_desc(model, tokenizer, photo, max_length):\n", "\t# seed the generation process\n", "\tin_text = 'startseq'\n", "\t# iterate over the whole length of the sequence\n", "\tfor i in range(max_length):\n", "\t\t# integer encode input sequence\n", "\t\tsequence = tokenizer.texts_to_sequences([in_text])[0]\n", "\t\t# pad input\n", "\t\tsequence = pad_sequences([sequence], maxlen=max_length)\n", "\t\t# predict next word\n", "\t\tyhat = model.predict([photo,sequence], verbose=0)\n", "\t\t# convert probability to integer\n", "\t\tyhat = np.argmax(yhat)\n", "\t\t# map integer to word\n", "\t\tword = word_for_id(yhat, tokenizer)\n", "\t\t# stop if we cannot map the word\n", "\t\tif word is None:\n", "\t\t\tbreak\n", "\t\t# append as input for generating the next word\n", "\t\tin_text += ' ' + word\n", "\t\t# stop if we predict the end of the sequence\n", "\t\tif word == 'endseq':\n", "\t\t\tbreak\n", "\treturn in_text" ], "metadata": { "id": "Needd4uQm2gZ" }, "execution_count": 113, "outputs": [] }, { "cell_type": "code", "execution_count": 114, "metadata": { "id": "PrCXWKP8E1pt" }, "outputs": [], "source": [ "# evaluated the skill of model\n", "from nltk.translate.bleu_score import 
corpus_bleu\n", "def evaluate_model(model, descriptions, photos, tokenizer, max_length):\n", "\tactual, predicted = list(), list()\n", "\t# step over the whole set\n", "\tfor key, desc_list in descriptions.items():\n", "\t\t# generate description\n", "\t\tyhat = generate_desc(model, tokenizer, photos[key], max_length)\n", "\t\t# store actual and predicted\n", "\t\treferences = [d.split() for d in desc_list]\n", "\t\tactual.append(references)\n", "\t\tpredicted.append(yhat.split())\n", "\t# calculate BLEU score\n", "\tprint('BLEU-1: %f' % corpus_bleu(actual, predicted, weights=(1.0, 0, 0, 0)))\n", "\tprint('BLEU-2: %f' % corpus_bleu(actual, predicted, weights=(0.5, 0.5, 0, 0)))\n", "\tprint('BLEU-3: %f' % corpus_bleu(actual, predicted, weights=(0.3, 0.3, 0.3, 0)))\n", "\tprint('BLEU-4: %f' % corpus_bleu(actual, predicted, weights=(0.25, 0.25, 0.25, 0.25)))" ] }, { "cell_type": "code", "execution_count": 115, "metadata": { "colab": { "base_uri": "https://localhost:8080/" }, "id": "PwYhBAiYbOFN", "outputId": "a253af6f-79d6-408b-ade8-ebbe4f8d2426" }, "outputs": [ { "output_type": "stream", "name": "stdout", "text": [ "Dataset: 6000\n", "train_descriptions= 6000\n", "photos: train= 6000\n", "Vocab size: 7268\n", "Description Length: 33\n" ] } ], "source": [ "#load train dataset\n", "import tensorflow as tf\n", "filename = \"/content/drive/MyDrive/Image_Captioning_Project/Flickr_8k.trainImages.txt\"\n", "train = load_set(filename)\n", "print(\"Dataset: %d\" %len(train))\n", "\n", "train_descriptions = load_clean_descriptions(\"/content/drive/MyDrive/Image_Captioning_Project/descriptions.txt\", train)\n", "print(\"train_descriptions= %d\" %len(train_descriptions))\n", "\n", "train_feature = load_photo_features(\"/content/drive/MyDrive/Image_Captioning_Project/features.pkl\", train)\n", "print(\"photos: train= %d\" %len(train_feature))\n", "\n", "tokenizer = create_tokenizer(train_descriptions)\n", "vocab_size = len(tokenizer.word_index)+1\n", "print(\"Vocab size: %d\" %vocab_size)\n", "\n", "max_length = max_length(train_descriptions)\n", "print('Description Length: %d' % max_length)" ] }, { "cell_type": "code", "execution_count": 116, "metadata": { "colab": { "base_uri": "https://localhost:8080/" }, "id": "S9p7vYFZ7Y_J", "outputId": "48124f24-c348-4cd7-a8bb-6e30e7c26060" }, "outputs": [ { "output_type": "stream", "name": "stdout", "text": [ "Dataset: 1000\n", "Description= 1000\n", "photos: test=1000" ] }, { "output_type": "stream", "name": "stderr", "text": [ "WARNING:tensorflow:Layer lstm_1 will not use cuDNN kernels since it doesn't meet the criteria. 
It will use a generic GPU kernel as fallback when running on GPU.\n" ] }, { "output_type": "stream", "name": "stdout", "text": [ "\n" ] } ], "source": [ "filename = \"/content/drive/MyDrive/Image_Captioning_Project/Flickr_8k.testImages.txt\"\n", "test = load_set(filename)\n", "print(\"Dataset: %d\" %len(test))\n", "test_description = load_clean_descriptions(\"/content/drive/MyDrive/Image_Captioning_Project/descriptions.txt\", test)\n", "print(\"Description= %d\" %len(test_description))\n", "test_features = load_photo_features(\"/content/drive/MyDrive/Image_Captioning_Project/features.pkl\", test)\n", "print(\"photos: test=%d\" % len(test_features))\n", "\n", "from keras.models import load_model\n", "filename = \"/content/drive/MyDrive/Image_Captioning_Project/model_18.h5\"\n", "model = load_model(filename)" ] }, { "cell_type": "code", "source": [ "# evaluate_model(model, test_description, test_features, tokenizer, max_length)" ], "metadata": { "id": "URns1LFyOaPZ" }, "execution_count": 117, "outputs": [] }, { "cell_type": "code", "execution_count": 118, "metadata": { "id": "9dVz3rRrKvAi" }, "outputs": [], "source": [ "from pickle import load\n", "from numpy import argmax\n", "from tensorflow.keras.preprocessing.sequence import pad_sequences\n", "from keras.applications.vgg16 import VGG16\n", "from tensorflow.keras.preprocessing.image import load_img\n", "from tensorflow.keras.preprocessing.image import img_to_array\n", "from keras.applications.vgg16 import preprocess_input\n", "from keras.models import Model\n", "from keras.models import load_model\n", "# from keras.preprocessing.text import Tokenizer" ] }, { "cell_type": "code", "execution_count": 119, "metadata": { "id": "qvlspE-eitvS" }, "outputs": [], "source": [ "def extract_features(filename):\n", "\t# load the model\n", "\tmodel = VGG16()\n", "\t# re-structure the model\n", "\tmodel.layers.pop()\n", "\tmodel = Model(inputs=model.inputs, outputs=model.layers[-2].output)\n", "\t# load the photo\n", "\timage = load_img(filename, target_size=(224, 224))\n", "\t# convert the image pixels to a numpy array\n", "\timage = img_to_array(image)\n", "\t# reshape data for the model\n", "\timage = image.reshape((1, image.shape[0], image.shape[1], image.shape[2]))\n", "\t# prepare the image for the VGG model\n", "\timage = preprocess_input(image)\n", "\t# get features\n", "\tfeature = model.predict(image, verbose=0)\n", "\treturn feature" ] }, { "cell_type": "code", "execution_count": 124, "metadata": { "colab": { "base_uri": "https://localhost:8080/" }, "id": "ao4swPUfj1Ql", "outputId": "b6c92906-91c1-44c2-c958-409ca4a30d8b" }, "outputs": [ { "output_type": "stream", "name": "stderr", "text": [ "WARNING:tensorflow:Layer lstm_1 will not use cuDNN kernels since it doesn't meet the criteria. 
It will use a generic GPU kernel as fallback when running on GPU.\n" ] }, { "output_type": "stream", "name": "stdout", "text": [ "startseq dog is running through the snow endseq\n" ] } ], "source": [ "from pickle import load\n", "from tensorflow.keras.preprocessing.text import Tokenizer\n", "\n", "tokenizer = load(open('/content/tokenizer1.pkl', 'rb'))\n", "max_len = 34\n", "model = load_model('/content/drive/MyDrive/Image_Captioning_Project/model_18.h5')\n", "photo = extract_features(\"/content/drive/MyDrive/Image_Captioning_Project/Images/101654506_8eb26cfb60.jpg\")\n", "tokenizer.analyzer = None\n", "description = generate_desc(model, tokenizer, photo, max_len)\n", "print(description)" ] }, { "cell_type": "code", "execution_count": 121, "metadata": { "colab": { "base_uri": "https://localhost:8080/" }, "id": "T1IsisClmnJI", "outputId": "cb6f632f-9792-4dce-aa58-174f60a94ecf" }, "outputs": [ { "output_type": "stream", "name": "stdout", "text": [ "man in red shirt is standing on rock\n" ] } ], "source": [ "query = description\n", "stopwords = ['startseq','endseq']\n", "querywords = query.split()\n", "\n", "resultwords = [word for word in querywords if word.lower() not in stopwords]\n", "result = ' '.join(resultwords)\n", "\n", "print(result)" ] }, { "cell_type": "code", "source": [], "metadata": { "id": "vO4XsnKBEUeg" }, "execution_count": 121, "outputs": [] } ], "metadata": { "accelerator": "GPU", "colab": { "provenance": [], "gpuType": "T4" }, "kernelspec": { "display_name": "Python 3", "name": "python3" }, "language_info": { "name": "python" } }, "nbformat": 4, "nbformat_minor": 0 }