coderchen01 committed on
Commit
af78608
1 Parent(s): dc9aedf

Update README.md

Browse files
Files changed (1) hide show
  1. README.md +11 -9
README.md CHANGED
@@ -116,19 +116,21 @@ This is a copy of the dataset uploaded on Hugging Face for easy access. The orig
116
  ```python
117
  # usage
118
  from datasets import load_dataset
119
- from transformers import CLIPImageProcessor, CLIPTokenizer
120
  from torch.utils.data import DataLoader
121
 
122
- image_processor = CLIPImageProcessor.from_pretrained(clip_path)
123
- tokenizer = CLIPTokenizer.from_pretrained(clip_path)
124
 
125
  def tokenization(example):
126
- text_inputs = tokenizer(example["text"], truncation=True, padding=True, return_tensors="pt")
127
- image_inputs = image_processor(example["image"], return_tensors="pt")
128
- return {'pixel_values': image_inputs['pixel_values'],
129
- 'input_ids': text_inputs['input_ids'],
130
- 'attention_mask': text_inputs['attention_mask'],
131
- "label": example["label"]}
 
 
 
132
 
133
  dataset = load_dataset('coderchen01/MMSD2.0', 'mmsd-v2')
134
  dataset.set_transform(tokenization)
 
116
  ```python
117
  # usage
118
  from datasets import load_dataset
119
+ from transformers import CLIPProcessor
120
  from torch.utils.data import DataLoader
121
 
122
+ processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
 
123
 
124
  def tokenization(example):
125
+ inputs = processor(
126
+ text=example["text"], images=example["image"], return_tensors="pt"
127
+ )
128
+ return {
129
+ "pixel_values": inputs["pixel_values"],
130
+ "input_ids": inputs["input_ids"],
131
+ "attention_mask": inputs["attention_mask"],
132
+ "label": example["label"],
133
+ }
134
 
135
  dataset = load_dataset('coderchen01/MMSD2.0', 'mmsd-v2')
136
  dataset.set_transform(tokenization)