import argparse
import torch
from llava_llama3.constants import IMAGE_TOKEN_INDEX, DEFAULT_IMAGE_TOKEN, DEFAULT_IM_START_TOKEN, DEFAULT_IM_END_TOKEN
from llava_llama3.conversation import conv_templates, SeparatorStyle
from llava_llama3.model.builder import load_pretrained_model
from llava_llama3.utils import disable_torch_init
from llava_llama3.mm_utils import process_images, tokenizer_image_token, get_model_name_from_path
from PIL import Image
import requests
from io import BytesIO
from transformers import TextStreamer


def load_image(image_file):
    # Accept a URL, raw image bytes, or a local file path and return an RGB PIL image.
    if isinstance(image_file, str) and (image_file.startswith('http://') or image_file.startswith('https://')):
        response = requests.get(image_file)
        image = Image.open(BytesIO(response.content)).convert('RGB')
    elif isinstance(image_file, bytes):
        image = Image.open(BytesIO(image_file)).convert('RGB')
    else:
        image = Image.open(image_file).convert('RGB')
    return image
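
# Illustrative calls for load_image (the URL and file names below are
# placeholders, not part of the original module):
#   load_image('https://example.com/view.jpg')    # fetched over HTTP
#   load_image(open('view.jpg', 'rb').read())     # raw bytes
#   load_image('view.jpg')                        # local path
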
def chat_llava(args, image_file, text, tokenizer, model, image_processor, context_len, streamer=None):
    # Run a single chat turn against the model, optionally conditioned on an image.
    disable_torch_init()
    conv = conv_templates[args.conv_mode].copy()
    inp = text
    if image_file is not None:
        image = load_image(image_file)
        image_size = image.size
        image_tensor = process_images([image], image_processor, model.config)
        # Move the image tensor(s) onto the model's device in half precision.
        if isinstance(image_tensor, list):
            image_tensor = [img.to(model.device, dtype=torch.float16) for img in image_tensor]
        else:
            image_tensor = image_tensor.to(model.device, dtype=torch.float16)
        # Prepend the image placeholder token(s) so the prompt marks where the
        # visual features belong.
        if model.config.mm_use_im_start_end:
            inp = DEFAULT_IM_START_TOKEN + DEFAULT_IMAGE_TOKEN + DEFAULT_IM_END_TOKEN + '\n' + inp
        else:
            inp = DEFAULT_IMAGE_TOKEN + '\n' + inp
        conv.append_message(conv.roles[0], inp)
        conv.append_message(conv.roles[1], None)
        prompt = conv.get_prompt()
        # tokenizer_image_token splices IMAGE_TOKEN_INDEX into the token ids
        # wherever the image placeholder appears in the prompt.
        input_ids = tokenizer_image_token(prompt, tokenizer, IMAGE_TOKEN_INDEX, return_tensors='pt').unsqueeze(0).to(model.device)
        # Greedy decoding when temperature is 0, sampling otherwise.
        with torch.inference_mode():
            output_ids = model.generate(
                input_ids,
                images=image_tensor,
                image_sizes=[image_size],
                do_sample=args.temperature > 0,
                temperature=args.temperature,
                max_new_tokens=args.max_new_tokens,
                streamer=streamer,
                use_cache=True)
    else:
        # Text-only turn: no image tokens, plain tokenization.
        conv.append_message(conv.roles[0], inp)
        conv.append_message(conv.roles[1], None)
        prompt = conv.get_prompt()
        input_ids = tokenizer(prompt, return_tensors='pt').input_ids.to(model.device)
        with torch.inference_mode():
            output_ids = model.generate(
                input_ids,
                do_sample=args.temperature > 0,
                temperature=args.temperature,
                max_new_tokens=args.max_new_tokens,
                use_cache=True)
    outputs = tokenizer.decode(output_ids[0]).strip()
    conv.messages[-1][-1] = outputs
    # Strip the Llama 3 end-of-text marker and return the reply as a plain string.
    return outputs.replace('<|end_of_text|>', '\n').lstrip()
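

# --- Minimal sketch of a command-line entry point. This block is an
# --- assumption, not part of the original file: chat_llava only requires that
# --- `args` expose conv_mode, temperature, and max_new_tokens, so the flag
# --- names and defaults below (including the conversation template name) are
# --- hypothetical, and load_pretrained_model is assumed to follow the upstream
# --- LLaVA builder signature, returning
# --- (tokenizer, model, image_processor, context_len).
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--model-path', type=str, required=True)
    parser.add_argument('--image-file', type=str, default=None)
    parser.add_argument('--conv-mode', type=str, default='llama_3')
    parser.add_argument('--temperature', type=float, default=0.2)
    parser.add_argument('--max-new-tokens', type=int, default=512)
    args = parser.parse_args()

    tokenizer, model, image_processor, context_len = load_pretrained_model(
        args.model_path, None, get_model_name_from_path(args.model_path))

    # Stream tokens to stdout as they are generated.
    streamer = TextStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)
    reply = chat_llava(args, args.image_file, input('USER: '), tokenizer,
                       model, image_processor, context_len, streamer=streamer)
    print(reply)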