Trent committed
Commit 587ab22
1 Parent(s): 59d1d42

Model list improvement

app.py CHANGED
@@ -4,6 +4,7 @@ import image2text
 import intro
 import text2image
 import text2patch
+from config import MODEL_LIST
 
 PAGES = {
     "Introduction": intro,
@@ -13,7 +14,7 @@ PAGES = {
 }
 
 st.sidebar.title("Navigation")
-model = st.sidebar.selectbox("Choose a model", ["koclip-base", "koclip-large"])
+model = st.sidebar.selectbox("Choose a model", MODEL_LIST)
 page = st.sidebar.selectbox("Navigate to...", list(PAGES.keys()))
 
 PAGES[page].app(model)
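For context: app.py dispatches to whichever page module is selected, so every entry in PAGES must expose an app(model) entry point. A minimal sketch of a hypothetical page module, assuming only the interface visible above (this module is not part of the commit):

import streamlit as st


def app(model: str):
    # `model` is one of the names from MODEL_LIST, e.g. "koclip-base".
    st.title("Example page")
    st.write(f"Using checkpoint koclip/{model}")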
config.py ADDED
@@ -0,0 +1 @@
+MODEL_LIST = ["koclip-base", "koclip-large"]
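Centralizing the names here means a future checkpoint needs only a one-line change; the third entry below is purely illustrative, not a real checkpoint:

MODEL_LIST = ["koclip-base", "koclip-large", "koclip-xlarge"]  # "koclip-xlarge" is hypothetical

The selectbox in app.py, both embedding scripts, and the assert in utils.load_model() would all pick the new name up automatically.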
executables/embed_captions.py CHANGED
@@ -1,15 +1,18 @@
 import argparse
 import csv
+import numpy as np
+import os
 
+from config import MODEL_LIST
 from utils import load_model
 
 
 def main(args):
     caption_txt_path = args.text_path
     f = open(caption_txt_path)
-    captions = [sent.strip() for sent in f.readlines()
+    captions = [sent.strip() for sent in f.readlines()]
 
-    for model_name in ["koclip-base", "koclip-large"]:
+    for model_name in MODEL_LIST:
         model, processor = load_model(f"koclip/{model_name}")
         captions_processed = [processor(sent,images=None,return_tensors='jax') for sent in captions]
         vec = [np.asarray(model.get_text_features(**c)) for c in captions_processed]
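The hunk ends before main() finishes, so how the per-model features are persisted is not shown. A hedged sketch of one way the vec list computed above could be written out, given the csv and os imports; the helper name, output path, and TSV layout are all assumptions, not the repo's actual code:

import csv
import os

import numpy as np


def save_text_features(model_name, captions, vec, out_dir="embeddings"):
    # Each entry of `vec` has shape (1, hidden_dim); stack into (N, hidden_dim).
    features = np.concatenate(vec, axis=0)
    os.makedirs(out_dir, exist_ok=True)  # hypothetical output location
    out_path = os.path.join(out_dir, f"{model_name}_captions.tsv")
    with open(out_path, "w", newline="") as out:
        writer = csv.writer(out, delimiter="\t")
        for caption, row in zip(captions, features):
            writer.writerow([caption] + row.tolist())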
executables/embed_images.py CHANGED
@@ -7,6 +7,7 @@ from jax import jit
 from PIL import Image
 from tqdm import tqdm
 
+from config import MODEL_LIST
 from utils import load_model
 
 
@@ -15,7 +16,7 @@ def main(args):
     files = list(os.listdir(root))
     for f in files:
         assert f[-4:] == ".jpg"
-    for model_name in ["koclip-base", "koclip-large"]:
+    for model_name in MODEL_LIST:
         model, processor = load_model(f"koclip/{model_name}")
         with tqdm(total=len(files)) as pbar:
             for counter in range(0, len(files), args.batch_size):
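The loop body that consumes each batch falls outside this hunk. A hedged sketch of what one step might look like, assuming FlaxHybridCLIP mirrors the Flax CLIP API (a get_image_features method over pixel_values); the helper itself is hypothetical:

import os

import numpy as np
from PIL import Image


def embed_batch(model, processor, root, batch_files):
    # Load one batch of .jpg files and return their image features.
    images = [Image.open(os.path.join(root, name)) for name in batch_files]
    inputs = processor(images=images, return_tensors="jax")
    return np.asarray(model.get_image_features(inputs["pixel_values"]))

Inside the loop above, batch_files would presumably be files[counter : counter + args.batch_size], followed by pbar.update(len(batch_files)).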
utils.py CHANGED
@@ -3,6 +3,7 @@ import numpy as np
 import streamlit as st
 from transformers import AutoTokenizer, CLIPProcessor, ViTFeatureExtractor
 
+from config import MODEL_LIST
 from koclip import FlaxHybridCLIP
 
 
@@ -25,7 +26,7 @@ def load_index(img_file):
 
 @st.cache(allow_output_mutation=True)
 def load_model(model_name="koclip/koclip-base"):
-    assert model_name in {"koclip/koclip-base", "koclip/koclip-large"}
+    assert model_name in {f"koclip/{model}" for model in MODEL_LIST}
     model = FlaxHybridCLIP.from_pretrained(model_name)
    processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
    processor.tokenizer = AutoTokenizer.from_pretrained("klue/roberta-large")
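load_model() pairs the stock CLIP image pipeline with a Korean tokenizer (klue/roberta-large), so text is tokenized for Korean while images keep the CLIP preprocessing. A usage sketch, mirroring the call pattern in embed_captions.py; the example caption is arbitrary:

from utils import load_model

model, processor = load_model("koclip/koclip-base")  # must be "koclip/<name>" for a name in MODEL_LIST
inputs = processor("고양이 두 마리가 있다", images=None, return_tensors="jax")  # Korean for "there are two cats"
text_features = model.get_text_features(**inputs)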