# VisualQueryTemplate.py — ComfyUI custom node (64 lines, 1.81 KB)
import re
from PIL import Image
import numpy as np
import time
from transformers import pipeline
import torch
def tensor2pil(image):
    """Convert a [0, 1]-ranged image tensor into an 8-bit PIL image.

    The tensor is moved to the CPU, singleton dimensions are dropped
    (e.g. a leading batch axis), values are scaled to 0-255 and clamped,
    and the result is handed to PIL.
    """
    arr = image.cpu().numpy().squeeze()
    arr = np.clip(255.0 * arr, 0, 255).astype(np.uint8)
    return Image.fromarray(arr)
class VisualQueryTemplateNode:
    """ComfyUI node that fills a templated prompt via visual question answering.

    The ``question`` string may contain ``{placeholder}`` segments. For each
    input image, every placeholder is posed as a question to a VQA model and
    replaced with the model's top answer, yielding one filled-in string per
    image.
    """

    # Loaded pipelines keyed by model name, shared across instances so a
    # repeated execution does not reload the (large) model weights each time.
    _PIPELINES = {}

    def __init__(self):
        pass

    @classmethod
    def INPUT_TYPES(cls):
        return {
            "required": {
                "images": ("IMAGE",),
                "model": (["Salesforce/blip-vqa-base", "Salesforce/blip-vqa-capfilt-large", "dandelin/vilt-b32-finetuned-vqa", "microsoft/git-large-vqav2"], ),
                "question": ("STRING", {"default": "{eye color} eyes, {hair style} {hair color} hair, {ethnicity} {gender}, {age number} years old, {facialhair}", "multiline": True, "dynamicPrompts": False}),
            }
        }

    RETURN_TYPES = ("STRING",)
    OUTPUT_IS_LIST = (True,)
    FUNCTION = "vqa_image"
    CATEGORY = "image"

    @classmethod
    def _get_pipeline(cls, model):
        """Return a cached VQA pipeline for *model*, loading it on first use."""
        if model not in cls._PIPELINES:
            # Use the first CUDA device when available, otherwise CPU (-1).
            device = 0 if torch.cuda.is_available() else -1
            cls._PIPELINES[model] = pipeline(model=model, device=device)
        return cls._PIPELINES[model]

    def vqa_image(self, images, model, question):
        """Answer each ``{placeholder}`` in *question* for every image.

        :param images: batch of image tensors (ComfyUI ``IMAGE`` input).
        :param model: name of the VQA model to use.
        :param question: template string containing ``{placeholder}`` queries.
        :returns: one-tuple holding a list of filled-in strings, one per image.
        """
        start_time = time.time()
        vqa = self._get_pipeline(model)
        # The template is loop-invariant: extract placeholders once, and
        # dedupe (order-preserving) so a placeholder that appears several
        # times is only sent to the model once — str.replace below already
        # substitutes every occurrence.
        placeholders = list(dict.fromkeys(re.findall(r'\{([^}]*)\}', question)))
        answers = []
        for image in images:
            pil_image = tensor2pil(image).convert("RGB")
            final_answer = question
            for placeholder in placeholders:
                placeholder_answers = vqa(question=placeholder, image=pil_image)
                print(placeholder, placeholder_answers)
                answer = placeholder_answers[0]["answer"]
                final_answer = final_answer.replace("{" + placeholder + "}", answer)
            answers.append(final_answer)
        end_time = time.time()
        execution_time = end_time - start_time
        print(f"Execution time: {execution_time} seconds")
        return (answers,)