The full dataset viewer is not available; only a preview of the rows is shown.
The dataset generation failed because of a cast error
Error code: `DatasetGenerationCastError`

Message: All the data files must have the same columns, but at some point there are 1 new columns ({'failed_status'}). This happened while the json dataset builder was generating data using hf://datasets/open-cn-llm-leaderboard/vlm_requests/deepseek-ai/Janus-Pro-7B_eval_request_False_float16_Original.json (at revision 2c97255d5e1290206947af2c60751aaf51b5ee96). Please either edit the data files to have matching columns, or separate them into different configurations (see the docs at https://hf.co/docs/hub/datasets-manual-configuration#multiple-configurations).

Traceback:

```
Traceback (most recent call last):
  File "/src/services/worker/.venv/lib/python3.9/site-packages/datasets/builder.py", line 1870, in _prepare_split_single
    writer.write_table(table)
  File "/src/services/worker/.venv/lib/python3.9/site-packages/datasets/arrow_writer.py", line 622, in write_table
    pa_table = table_cast(pa_table, self._schema)
  File "/src/services/worker/.venv/lib/python3.9/site-packages/datasets/table.py", line 2292, in table_cast
    return cast_table_to_schema(table, schema)
  File "/src/services/worker/.venv/lib/python3.9/site-packages/datasets/table.py", line 2240, in cast_table_to_schema
    raise CastError(
datasets.table.CastError: Couldn't cast
model: string
model_api_url: string
model_api_key: string
model_api_name: string
base_model: string
revision: string
precision: string
private: bool
weight_type: string
status: string
submitted_time: timestamp[s]
model_type: string
params: int64
runsh: string
adapter: string
eval_id: int64
flageval_id: int64
failed_status: int64
to
{'model': Value(dtype='string', id=None), 'model_api_url': Value(dtype='string', id=None), 'model_api_key': Value(dtype='string', id=None), 'model_api_name': Value(dtype='string', id=None), 'base_model': Value(dtype='string', id=None), 'revision': Value(dtype='string', id=None), 'precision': Value(dtype='string', id=None), 'private': Value(dtype='bool', id=None), 'weight_type': Value(dtype='string', id=None), 'status': Value(dtype='string', id=None), 'submitted_time': Value(dtype='timestamp[s]', id=None), 'model_type': Value(dtype='string', id=None), 'params': Value(dtype='float64', id=None), 'runsh': Value(dtype='string', id=None), 'adapter': Value(dtype='string', id=None), 'eval_id': Value(dtype='int64', id=None), 'flageval_id': Value(dtype='int64', id=None)}
because column names don't match

During handling of the above exception, another exception occurred:

Traceback (most recent call last):
  File "/src/services/worker/src/worker/job_runners/config/parquet_and_info.py", line 1420, in compute_config_parquet_and_info_response
    parquet_operations = convert_to_parquet(builder)
  File "/src/services/worker/src/worker/job_runners/config/parquet_and_info.py", line 1052, in convert_to_parquet
    builder.download_and_prepare(
  File "/src/services/worker/.venv/lib/python3.9/site-packages/datasets/builder.py", line 924, in download_and_prepare
    self._download_and_prepare(
  File "/src/services/worker/.venv/lib/python3.9/site-packages/datasets/builder.py", line 1000, in _download_and_prepare
    self._prepare_split(split_generator, **prepare_split_kwargs)
  File "/src/services/worker/.venv/lib/python3.9/site-packages/datasets/builder.py", line 1741, in _prepare_split
    for job_id, done, content in self._prepare_split_single(
  File "/src/services/worker/.venv/lib/python3.9/site-packages/datasets/builder.py", line 1872, in _prepare_split_single
    raise DatasetGenerationCastError.from_cast_error(
datasets.exceptions.DatasetGenerationCastError: An error occurred while generating the dataset
All the data files must have the same columns, but at some point there are 1 new columns ({'failed_status'})
This happened while the json dataset builder was generating data using
hf://datasets/open-cn-llm-leaderboard/vlm_requests/deepseek-ai/Janus-Pro-7B_eval_request_False_float16_Original.json (at revision 2c97255d5e1290206947af2c60751aaf51b5ee96)
Please either edit the data files to have matching columns, or separate them into different configurations (see docs at https://hf.co/docs/hub/datasets-manual-configuration#multiple-configurations)
```
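The root cause is a schema mismatch across the JSON request files: the Janus-Pro-7B file carries an extra `failed_status` column (and an integer `params`) that the schema inferred from the other files lacks. Below is a minimal sketch of one possible fix, normalizing each request file so all files share the same columns; the `hf_hub_download` fetch and the output filename are illustrative assumptions, not part of the leaderboard's tooling.

```python
import json

from huggingface_hub import hf_hub_download

# Fetch one request file from the repo (illustrative; any local copy works).
path = hf_hub_download(
    repo_id="open-cn-llm-leaderboard/vlm_requests",
    filename="deepseek-ai/Janus-Pro-7B_eval_request_False_float16_Original.json",
    repo_type="dataset",
)
with open(path) as f:
    record = json.load(f)

# Run this over every file so they share one schema: files without
# `failed_status` get an explicit null, and `params` is coerced to float
# to match the inferred float64 column.
record.setdefault("failed_status", None)
record["params"] = float(record["params"])

with open("normalized.json", "w") as f:  # hypothetical output path
    json.dump(record, f, indent=2)
```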
Preview schema (column and dtype):

| column | dtype |
|---|---|
| model | string |
| model_api_url | string |
| model_api_key | string |
| model_api_name | string |
| base_model | string |
| revision | string |
| precision | string |
| private | bool |
| weight_type | string |
| status | string |
| submitted_time | timestamp[us] |
| model_type | string |
| params | float64 |
| runsh | string |
| adapter | string |
| eval_id | int64 |
| flageval_id | int64 |
| failed_status | int64 |
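For reference, this header can be written out as an explicit `datasets` feature spec. The sketch below is an assumption built from the table above, using the `timestamp[us]` precision shown here (the traceback reports `timestamp[s]` for the cached schema, so use whichever matches your data):

```python
from datasets import Features, Value

# Sketch of the preview schema as datasets.Features, one entry per column.
features = Features({
    "model": Value("string"),
    "model_api_url": Value("string"),
    "model_api_key": Value("string"),
    "model_api_name": Value("string"),
    "base_model": Value("string"),
    "revision": Value("string"),
    "precision": Value("string"),
    "private": Value("bool"),
    "weight_type": Value("string"),
    "status": Value("string"),
    "submitted_time": Value("timestamp[us]"),
    "model_type": Value("string"),
    "params": Value("float64"),
    "runsh": Value("string"),
    "adapter": Value("string"),
    "eval_id": Value("int64"),
    "flageval_id": Value("int64"),
    "failed_status": Value("int64"),
})
```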
Row 1 (model: Qwen/Qwen2-VL-2B-Instruct):

| column | value |
|---|---|
| model | Qwen/Qwen2-VL-2B-Instruct |
| revision | main |
| precision | float16 |
| private | false |
| weight_type | Original |
| status | FINISHED |
| submitted_time | 2025-01-24T02:46:12 |
| model_type | 🟢 : pretrained |
| params | 2.209 |
| eval_id | 26,049 |
| flageval_id | 1,054 |
| failed_status | null |

runsh:

```bash
#!/bin/bash
current_file="$0"
current_dir="$(dirname "$current_file")"
SERVER_IP=$1
SERVER_PORT=$2
PYTHONPATH=$current_dir:$PYTHONPATH accelerate launch $current_dir/model_adapter.py --server_ip $SERVER_IP --server_port $SERVER_PORT "${@:3}" --cfg $current_dir/meta.json
```

adapter:

```python
import time
from typing import Any, Dict

import torch
from qwen_vl_utils import process_vision_info
from transformers import AutoProcessor, AutoTokenizer, Qwen2VLForConditionalGeneration

from flagevalmm.models.base_model_adapter import BaseModelAdapter
from flagevalmm.server import ServerDataset
from flagevalmm.server.utils import parse_args, process_images_symbol


class CustomDataset(ServerDataset):
    def __getitem__(self, index):
        data = self.get_data(index)
        question_id = data["question_id"]
        img_path = data["img_path"]
        qs = data["question"]
        # Rewrite inline image markers and collect the referenced image indices.
        qs, idx = process_images_symbol(qs)
        idx = set(idx)
        img_path_idx = []
        for i in idx:
            if i < len(img_path):
                img_path_idx.append(img_path[i])
            else:
                print("[warning] image index out of range")
        return question_id, img_path_idx, qs


class ModelAdapter(BaseModelAdapter):
    def model_init(self, task_info: Dict):
        ckpt_path = task_info["model_path"]
        torch.set_grad_enabled(False)
        with self.accelerator.main_process_first():
            tokenizer = AutoTokenizer.from_pretrained(ckpt_path, trust_remote_code=True)
            model = Qwen2VLForConditionalGeneration.from_pretrained(
                ckpt_path,
                device_map="auto",
                torch_dtype=torch.bfloat16,
                attn_implementation="flash_attention_2",
            )
        model = self.accelerator.prepare_model(model, evaluation_mode=True)
        self.tokenizer = tokenizer
        if hasattr(model, "module"):
            model = model.module
        self.model = model
        self.processor = AutoProcessor.from_pretrained(ckpt_path)

    def build_message(self, query: str, image_paths=None) -> list:
        # Build a single user turn: all images first, then the text question.
        image_paths = image_paths or []
        messages = [{"role": "user", "content": []}]
        for img_path in image_paths:
            messages[-1]["content"].append({"type": "image", "image": img_path})
        messages[-1]["content"].append({"type": "text", "text": query})
        return messages

    def run_one_task(self, task_name: str, meta_info: Dict[str, Any]):
        results = []
        cnt = 0
        data_loader = self.create_data_loader(
            CustomDataset, task_name, batch_size=1, num_workers=0
        )
        for question_id, img_path, qs in data_loader:
            # Timing starts at the second sample so the warm-up pass is excluded.
            if cnt == 1:
                start_time = time.perf_counter()
            cnt += 1
            question_id = question_id[0]
            img_path_flatten = [p[0] for p in img_path]
            qs = qs[0]
            messages = self.build_message(qs, image_paths=img_path_flatten)
            text = self.processor.apply_chat_template(
                messages, tokenize=False, add_generation_prompt=True
            )
            image_inputs, video_inputs = process_vision_info(messages)
            inputs = self.processor(
                text=[text],
                images=image_inputs,
                videos=video_inputs,
                padding=True,
                return_tensors="pt",
            )
            inputs = inputs.to("cuda")
            # Inference: generate, then strip the prompt tokens from each output.
            generated_ids = self.model.generate(**inputs, max_new_tokens=1024)
            generated_ids_trimmed = [
                out_ids[len(in_ids):]
                for in_ids, out_ids in zip(inputs.input_ids, generated_ids)
            ]
            response = self.processor.batch_decode(
                generated_ids_trimmed,
                skip_special_tokens=True,
                clean_up_tokenization_spaces=False,
            )[0]
            self.accelerator.print(f"{qs}\n{response}\n\n")
            results.append(
                {"question_id": question_id, "answer": response.strip(), "prompt": qs}
            )
        rank = self.accelerator.state.local_process_index
        # Save this rank's results, then merge them on the main process.
        self.save_result(results, meta_info, rank=rank)
        self.accelerator.wait_for_everyone()
        if self.accelerator.is_main_process:
            correct_num = self.collect_results_and_save(meta_info)
            total_time = time.perf_counter() - start_time
            print(
                f"Total time: {total_time}\n"
                f"Average time: {total_time / cnt}\n"
                f"Results_collect number: {correct_num}"
            )
        print("rank", rank, "finished")


if __name__ == "__main__":
    args = parse_args()
    model_adapter = ModelAdapter(
        server_ip=args.server_ip,
        server_port=args.server_port,
        timeout=args.timeout,
        extra_cfg=args.cfg,
    )
    model_adapter.run()
```
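The message structure this adapter builds is the interleaved image/text chat format that Qwen2-VL's processor expects. A standalone sketch, with a hypothetical image path, of what `apply_chat_template` and `process_vision_info` consume above:

```python
from qwen_vl_utils import process_vision_info

# Hypothetical single-turn message in the format produced by build_message() above.
messages = [
    {
        "role": "user",
        "content": [
            {"type": "image", "image": "demo.jpg"},  # hypothetical local image path
            {"type": "text", "text": "Describe the image."},
        ],
    }
]
# process_vision_info() resolves the image entries into PIL images (and videos).
image_inputs, video_inputs = process_vision_info(messages)
```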
Row 2 (model: deepseek-ai/Janus-Pro-7B):

| column | value |
|---|---|
| model | deepseek-ai/Janus-Pro-7B |
| revision | main |
| precision | float16 |
| private | false |
| weight_type | Original |
| status | PENDING |
| submitted_time | 2025-02-14T06:58:30 |
| model_type | 🟢 : pretrained |
| params | 0 |
| eval_id | 26,176 |
| flageval_id | 1,060 |
| failed_status | 8 |

runsh:

```bash
#!/bin/bash
current_file="$0"
current_dir="$(dirname "$current_file")"
SERVER_IP=$1
SERVER_PORT=$2
# Install the Janus package from a local checkout via an internal PyPI mirror.
cd /share/project/daiteng01/deepseek/Janus-main
pip install -e . -i http://10.1.1.16/repository/pypi-group/simple --trusted-host 10.1.1.16
cd -
PYTHONPATH=$current_dir:$PYTHONPATH accelerate launch $current_dir/model_adapter.py --server_ip $SERVER_IP --server_port $SERVER_PORT "${@:3}" --cfg $current_dir/meta.json
```

adapter:

```python
import sys
import time
from typing import Any, Dict

import torch
from transformers import AutoModelForCausalLM

from flagevalmm.models.base_model_adapter import BaseModelAdapter
from flagevalmm.server import ServerDataset
from flagevalmm.server.utils import (
    default_collate_fn,
    parse_args,
    process_images_symbol,
)
from janus.models import MultiModalityCausalLM, VLChatProcessor
from janus.utils.io import load_pil_images


class CustomDataset(ServerDataset):
    def __getitem__(self, index):
        data = self.get_data(index)
        # Rewrite inline image markers to Janus's <image_placeholder> token.
        qs, idx = process_images_symbol(
            data["question"], dst_pattern="<image_placeholder>"
        )
        question_id = data["question_id"]
        img_path = data["img_path"]
        return question_id, qs, img_path


class ModelAdapter(BaseModelAdapter):
    def model_init(self, task_info: Dict):
        ckpt_path = task_info["model_path"]
        torch.set_grad_enabled(False)
        with self.accelerator.main_process_first():
            self.vl_chat_processor: VLChatProcessor = VLChatProcessor.from_pretrained(
                ckpt_path
            )
            self.tokenizer = self.vl_chat_processor.tokenizer
            vl_gpt: MultiModalityCausalLM = AutoModelForCausalLM.from_pretrained(
                ckpt_path, trust_remote_code=True
            )
            vl_gpt = vl_gpt.to(torch.bfloat16).cuda().eval()
        model = self.accelerator.prepare_model(vl_gpt, evaluation_mode=True)
        if hasattr(model, "module"):
            model = model.module
        self.model = model

    def build_message(self, query: str, image_paths=None) -> list:
        # One <image_placeholder> per image, followed by the question text.
        image_paths = image_paths or []
        content = "<image_placeholder>\n" * len(image_paths) + query
        messages = [
            {
                "role": "<|User|>",
                "content": content,
                "images": image_paths,
            },
            {"role": "<|Assistant|>", "content": ""},
        ]
        print("= = result", messages, file=sys.stderr)  # debug output
        return messages

    def run_one_task(self, task_name: str, meta_info: Dict[str, Any]):
        results = []
        cnt = 0
        data_loader = self.create_data_loader(
            CustomDataset,
            task_name,
            collate_fn=default_collate_fn,
            batch_size=1,
            num_workers=2,
        )
        for question_id, question, images in data_loader:
            # Timing starts at the second sample so the warm-up pass is excluded.
            if cnt == 1:
                start_time = time.perf_counter()
            cnt += 1
            messages = self.build_message(question[0], images[0])
            pil_images = load_pil_images(messages)
            prepare_inputs = self.vl_chat_processor(
                conversations=messages, images=pil_images, force_batchify=True
            ).to(self.model.device)
            # Fuse text and image embeddings, then decode with the language model.
            inputs_embeds = self.model.prepare_inputs_embeds(**prepare_inputs)
            outputs = self.model.language_model.generate(
                inputs_embeds=inputs_embeds,
                attention_mask=prepare_inputs.attention_mask,
                pad_token_id=self.tokenizer.eos_token_id,
                bos_token_id=self.tokenizer.bos_token_id,
                eos_token_id=self.tokenizer.eos_token_id,
                max_new_tokens=4096,
                do_sample=False,
                use_cache=True,
            )
            response = self.tokenizer.decode(
                outputs[0].cpu().tolist(), skip_special_tokens=True
            )
            self.accelerator.print(f"{question[0]}\n{response}\n\n")
            results.append(
                {
                    "question_id": question_id[0],
                    "answer": response.strip(),
                    "prompt": question[0],
                }
            )
        rank = self.accelerator.state.local_process_index
        # Save this rank's results, then merge them on the main process.
        self.save_result(results, meta_info, rank=rank)
        self.accelerator.wait_for_everyone()
        if self.accelerator.is_main_process:
            correct_num = self.collect_results_and_save(meta_info)
            total_time = time.perf_counter() - start_time
            print(
                f"Total time: {total_time}\n"
                f"Average time: {total_time / cnt}\n"
                f"Results_collect number: {correct_num}"
            )
        print("rank", rank, "finished")


if __name__ == "__main__":
    args = parse_args()
    model_adapter = ModelAdapter(
        server_ip=args.server_ip,
        server_port=args.server_port,
        timeout=args.timeout,
        extra_cfg=args.cfg,
    )
    model_adapter.run()
```
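Note how the Janus conversation format differs from Qwen's: roles are `<|User|>` / `<|Assistant|>`, images are passed in a separate `images` list, and `<image_placeholder>` marks their position in the text. A hypothetical single-turn example:

```python
# Hypothetical conversation in the format produced by build_message() above.
conversation = [
    {
        "role": "<|User|>",
        "content": "<image_placeholder>\nWhat is in the image?",
        "images": ["demo.jpg"],  # hypothetical local image path
    },
    {"role": "<|Assistant|>", "content": ""},  # empty slot for the model's reply
]
```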
Row 3 (model: yi.daiteng01, evaluated via API):

| column | value |
|---|---|
| model | yi.daiteng01 |
| model_api_url | https://api.lingyiwanwu.com/v1/chat/completions |
| model_api_key | 876995f3b3ce41aca60b637fb51d752e |
| model_api_name | yi-vision |
| revision | main |
| precision | float16 |
| private | false |
| weight_type | Original |
| status | FINISHED |
| submitted_time | 2025-01-24T07:22:04 |
| model_type | 🟢 : pretrained |
| params | 0 |
| eval_id | 26,055 |
| flageval_id | 1,055 |
| failed_status | null |
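Unlike the two rows above, this entry is evaluated through a hosted endpoint rather than local weights. The URL path suggests an OpenAI-compatible chat completions API; the sketch below is an assumption about the request shape, with a placeholder standing in for the row's `model_api_key` value.

```python
import requests

# Assumed OpenAI-compatible request against the endpoint from the row above.
resp = requests.post(
    "https://api.lingyiwanwu.com/v1/chat/completions",
    headers={"Authorization": "Bearer <model_api_key>"},  # placeholder key
    json={
        "model": "yi-vision",
        "messages": [{"role": "user", "content": "Hello"}],
    },
    timeout=60,
)
print(resp.json())
```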
README.md exists but its content is empty.

Downloads last month: 1,706