update prompt

This commit is contained in:
heroding77
2024-01-16 21:30:27 +08:00
parent 1cc1bd5bb3
commit f4e05abf4c
4 changed files with 8 additions and 8 deletions

View File

@@ -10,11 +10,11 @@ class GAIALoader:
assert os.path.exists(cache_dir), f"Cache directory {cache_dir} does not exist."
self.cache_dir = cache_dir
try:
self.dataset = load_dataset("gaia-benchmark/GAIA", "2023_level1", cache_dir=self.cache_dir)
self.dataset = load_dataset("gaia-benchmark/GAIA", "2023_level2", cache_dir=self.cache_dir)
except Exception as e:
raise Exception(f"Failed to load GAIA dataset: {e}")
else:
self.dataset = load_dataset("gaia-benchmark/GAIA", "2023_level1")
self.dataset = load_dataset("gaia-benchmark/GAIA", "2023_level2")
def get_data_by_task_id(self, task_id, type):
@@ -33,9 +33,9 @@ def main():
parser.add_argument('--config_path', type=str, default='config.json', help='openAI config file path')
parser.add_argument('--query', type=str, default=None, help='user query')
parser.add_argument('--query_file_path', type=str, default='', help='user query file path')
parser.add_argument('--task_id', type=str, default="9318445f-fe6a-4e1b-acbf-c68228c9906a", help='GAIA dataset task_id')
parser.add_argument('--task_id', type=str, default="b2c257e0-3ad7-4f05-b8e3-d9da973be36e", help='GAIA dataset task_id')
parser.add_argument('--cache_dir', type=str, default=None, help='GAIA dataset cache dir path')
parser.add_argument('--logging_filedir', type=str, default='log/val_level1', help='GAIA dataset cache dir path')
parser.add_argument('--logging_filedir', type=str, default='log/val_level2', help='GAIA dataset cache dir path')
args = parser.parse_args()
task_id = args.task_id

View File

@@ -378,7 +378,7 @@ prompt = {
22. Once the task involves obtaining knowledge such as books, articles, character information, etc., you need to plan API tasks to obtain this knowledge from the Internet.
23. When decomposing an API subtask which uses the Bing Search API or the Bing Load Page API, you need to proceed to plan a QA subtask for analyzing and summarizing the information returned by that API subtask. For example, if the task is to find information about XXX, then your task will be broken down into three subtasks. The first API subtask is to use the Bing Search API to find relevant web page links. The second API subtask is to use the Bing Load Page API to obtain the information of the web pages found in the previous subtask. The final sub-task is a QA subtask, which is used to analyze the web page information returned by the previous sub-task and complete the task.
24. When the task involves retrieving certain detailed content, then after decomposing the API subtask using '/tools/bing/searchv2', you also need to decompose an API subtask using '/tools/bing/load_pagev2' to obtain more detailed content.
25. If the attached file is a picture file(png or jpg), the task must be broken down into two sub-tasks. The first is a API subtask, which uses image caption API to analyze image and solve problem. The second is a QA subtask, which analyzes and completes task based on the return from API subtask.
25. If the attached file is a png or jpg file, the task must first be decomposed into an API subtask, which uses the image caption API to analyze the image and solve the problem. If it is necessary to obtain information from the Internet, then another API subtask should be decomposed. Otherwise, proceed with a QA subtask, which analyzes and completes the task based on the return from the API subtask.
26. Please note that all available APIs are only in the API List. You should not make up APIs that are not in the API List.
27. If the attached file is a csv file, you must first decompose a Code subtask, which involves extracting all information from the csv file, and the description of this subtask only needs to say "extract data from the csv file". Then proceed with a QA subtask, which involves analyzing the file's content and completing the task based on this analysis.
''',

View File

@@ -286,7 +286,7 @@
},
"/tools/image_caption": {
"post": {
"summary": "When the task is to question and answer based on local picture, you have to use the Image Caption tool, who can directly analyze picture to answer question and complete task. For local images you want to understand, you need to only give the image_file without url. And you must give the 'query' paramter, its content is the task itself.",
"summary": "When the task is to question and answer based on local picture, you have to use the Image Caption tool, who can directly analyze picture to answer question and complete task. For local images you want to understand, you need to only give the image_file without url. It is crucial to provide the 'query' parameter, and its value must be the full content of the task itself.",
"operationId": "image_search_tools_image_caption_post",
"parameters": [
{

View File

@@ -12,10 +12,10 @@ api_path = '/tools/image_caption'
method = 'post'
# Define the query parameter that specifies the task for the image caption API
query_params = {'query': 'How many cats are in the attached photo, including those that are partially obscured or not fully in frame?'}
query_params = {'query': 'As a comma separated list with no whitespace, using the provided image provide all the fractions that use / as the fraction line and the answers to the sample problems. Order the list by the order in which the fractions appear.'}
# Define the file to be uploaded
file_path = '/home/heroding/.cache/huggingface/datasets/downloads/28242018ceba2e5429c7fa9fe177fc248eed4d3e90b266190c0175a97166f20b.jpg'
file_path = '/home/heroding/.cache/huggingface/datasets/downloads/2105d7660150b62c9b52b778082c3ba8bd69ecc463e61343dbf0f6e79c96294a.png'
files = {'image_file': open(file_path, 'rb')}
# Make the API call using the ToolRequestUtil