# Setup (run once in the environment, not inside this script):
#   sudo apt-get install -y poppler-utils   # poppler is needed for PDF handling via pdf2image

import tempfile

import streamlit as st
import torch
from byaldi import RAGMultiModalModel
from PIL import Image
from transformers import AutoProcessor, Qwen2VLForConditionalGeneration
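
# Python dependencies (an assumption based on the imports above; install with pip):
#   pip install streamlit pillow byaldi torch transformers
# The GPTQ-quantized Qwen2-VL checkpoint typically also requires optimum and auto-gptq.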

# Function to upload an image, run inference, and display the output
def upload_image_and_infer():
    # Step 1: Allow the user to upload an image file
    uploaded_file = st.file_uploader("Upload an image file", type=["jpg", "png", "jpeg"])

    if uploaded_file is not None:
        # Step 2: Save the uploaded image to a temporary file
        with tempfile.NamedTemporaryFile(delete=False, suffix=".jpg") as temp_file:
            temp_file.write(uploaded_file.read())
            temp_path = temp_file.name

        # Step 3: Display the uploaded image
        image = Image.open(temp_path)
        st.image(image, caption="Uploaded Image", use_column_width=True)

        # Step 4: Load the ColPali retriever, the Qwen2-VL model, and its processor
        RAG = RAGMultiModalModel.from_pretrained("vidore/colpali")
        model = Qwen2VLForConditionalGeneration.from_pretrained(
            "Qwen/Qwen2-VL-2B-Instruct-GPTQ-Int8", torch_dtype="auto", device_map="auto"
        )
        processor = AutoProcessor.from_pretrained("Qwen/Qwen2-VL-2B-Instruct-GPTQ-Int8")
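
        # Note: the loading above runs again on every upload. In a production app
        # you would typically move it into functions decorated with
        # @st.cache_resource so the weights are loaded only once per session.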

        # Step 4b: Index the uploaded image with byaldi, then retrieve the most
        # relevant page for the query (`results` carries the page number information)
        text_query = "Extract the details from this image."
        RAG.index(
            input_path=temp_path,  # the uploaded image's temporary path
            index_name="image_index",
            store_collection_with_index=False,
            overwrite=True,
        )
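        # byaldi builds a ColPali embedding index over the document and, by
        # default, persists it to a local .byaldi/<index_name> directory.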
        results = RAG.search(text_query, k=1)
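        # Each search result exposes fields such as doc_id, page_num, and score.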

        # Step 5: Prepare messages for inference
        # Page numbers are 1-based; with a single uploaded image this is always 0,
        # so the index is kept only for reference
        image_index = results[0]["page_num"] - 1
        messages = [
            {
                "role": "user",
                "content": [
                    {
                        "type": "image",
                        "image": image,  # use the uploaded image
                    },
                    {"type": "text", "text": text_query},
                ],
            }
        ]

        # Step 6: Prepare inputs for the model
        text = processor.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
        image_inputs, video_inputs = process_vision_info(messages)  # helper defined below

        # Tokenize and batch the text and image inputs
        inputs = processor(
            text=[text],
            images=image_inputs,
            videos=video_inputs,
            padding=True,
            return_tensors="pt",
        )
        inputs = inputs.to(model.device)  # follow the model's device (set by device_map="auto")

        # Step 7: Run inference and generate output
        generated_ids = model.generate(**inputs, max_new_tokens=128)
        # generate() returns the prompt tokens followed by the new tokens,
        # so slice off the echoed prompt before decoding
        generated_ids_trimmed = [
            out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids)
        ]

        # Decode the generated output
        output_text = processor.batch_decode(
            generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False
        )

        # Step 8: Display the output in Streamlit (batch_decode returns a list;
        # we decoded a single sequence, so show its first element)
        st.write("Generated Output:", output_text[0])
    else:
        st.write("Please upload an image.")


# Minimal stand-in for qwen_vl_utils.process_vision_info: collect image inputs
# from every content item in the chat messages (this app has no video inputs)
def process_vision_info(messages):
    image_inputs = [
        item["image"]
        for msg in messages
        for item in msg["content"]
        if item.get("type") == "image"
    ]
    video_inputs = None  # the processor accepts None when there are no videos
    return image_inputs, video_inputs


# Run the function inside the Streamlit app
upload_image_and_infer()
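
# To launch locally (assuming this file is saved as app.py):
#   streamlit run app.py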