import logging

import gradio as gr
from transformers import pipeline

# Set up logging
logging.basicConfig(
    level=logging.INFO,
    format="%(asctime)s - %(levelname)s - %(message)s",
)
logger = logging.getLogger(__name__)

# Load the TAPAS table-QA model once at import time (heavyweight download/load).
qa_pipeline = pipeline(
    "table-question-answering",
    model="google/tapas-large-finetuned-wtq",
)


def ask_table(data, question):
    """Answer a natural-language question about a user-supplied table.

    Args:
        data: 2-D list from the Gradio Dataframe component. The first row is
            treated as column headers; the remaining rows are table data.
        question: Free-text question about the table.

    Returns:
        The model's answer string, or a human-readable error message.
    """
    try:
        logger.info("Received table: %s, question: %s", data, question)
        if not data or not question:
            return "Please provide both a table and a question."

        headers = [str(h) for h in data[0]]  # first row holds the headers
        rows = [row for row in data[1:] if any(row)]  # drop fully empty rows

        if not headers or not rows:
            return "The table is empty or invalid."

        # BUG FIX: the table-question-answering pipeline expects a mapping of
        # column name -> list of cell values (or a pandas DataFrame), NOT a
        # list of per-row dicts. TAPAS additionally requires every cell to be
        # a string. Pad ragged rows with "" so all columns have equal length.
        # NOTE(review): duplicate header names would collapse into one column
        # here — assumed not to occur; confirm against expected inputs.
        table = {
            header: [str(row[i]) if i < len(row) else "" for row in rows]
            for i, header in enumerate(headers)
        }

        # Query the TAPAS model
        answers = qa_pipeline(table=table, query=question)
        logger.info("Answer: %s", answers)
        return answers.get("answer", "No answer found.")
    except Exception as e:  # top-level UI boundary: report, don't crash the app
        logger.exception("Error processing request")
        return f"Error processing your request: {str(e)}"


# Define Gradio interface
iface = gr.Interface(
    fn=ask_table,
    inputs=[
        gr.Dataframe(
            headers=None,  # user supplies headers in the first row
            row_count=(2, "dynamic"),
            # CONSISTENCY FIX: the description promises up to 20 columns,
            # but this was 19.
            col_count=(20, "dynamic"),
            type="array",
            label="Input Table",
        ),
        gr.Textbox(
            lines=2,
            placeholder="Enter your question about the table here...",
            label="Ask a Question",
        ),
    ],
    outputs="text",
    title="Table Question Answering",
    description=(
        "Provide a table with headers in the first row and ask questions. "
        "Supports up to 50 rows and 20 columns."
    ),
)

# Launch the Gradio app only when run as a script, not on import.
if __name__ == "__main__":
    iface.launch(share=True)