[email protected] committed on
Commit 426fbbd · 2 Parent(s): 744cc62 5b55c03

Merge branch 'main' of hf.co:kirp/kosmos2_5 into main

Files changed (1)
  1. README.md +85 -43
README.md CHANGED
@@ -1,50 +1,62 @@
---
- license: apache-2.0
---
- # Test vesion
```bash
pip install git+https://github.com/tic-top/transformers.git
```
-
```python
- from transformers import AutoModelForVision2Seq, AutoProcessor
from PIL import Image
import torch
- device = "cuda:2"
repo = "kirp/kosmos2_5"
- dtype = torch.float16
- # dtype = torch.bfloat16
- model = AutoModelForVision2Seq.from_pretrained(repo, device_map = device, torch_dtype=dtype)
- # print(model)
- # exit(0)
processor = AutoProcessor.from_pretrained(repo)

- path = "receipt_00008.png"
- image = Image.open(path)
- prompt = "<ocr>"
- # prompt = "<md>"
- inputs = processor(text=prompt, images=image, return_tensors="pt", max_patches=4096)

- raw_width, raw_height = image.size
height, width = inputs.pop("height"), inputs.pop("width")
scale_height = raw_height / height
scale_width = raw_width / width

inputs = {k: v.to(device) if v is not None else None for k, v in inputs.items()}
inputs["flattened_patches"] = inputs["flattened_patches"].to(dtype)
- with torch.no_grad():
-     generated_text = model.generate(**inputs, max_length=4096)
-     generated_text = processor.batch_decode(generated_text)
- import re, os
- def postprocess(y, scale_height, scale_width, result_path=None):
-     y = (
-         y.replace("<s>", "")
-         .replace("</s>", "")
-         .replace("<image>", "")
-         .replace("</image>", "")
-         .replace(prompt, "")
-     )
-     print(y)
    pattern = r"<bbox><x_\d+><y_\d+><x_\d+><y_\d+></bbox>"
    bboxs_raw = re.findall(pattern, y)
    lines = re.split(pattern, y)[1:]
@@ -53,24 +65,54 @@ def postprocess(y, scale_height, scale_width, result_path=None):
    info = ""
    for i in range(len(lines)):
        box = bboxs[i]
-         # do we need to convert the size of the box?
-         # maybe yes
        x0, y0, x1, y1 = box
-         # maybe modify the order
        if not (x0 >= x1 or y0 >= y1):
            x0 = int(x0 * scale_width)
            y0 = int(y0 * scale_height)
            x1 = int(x1 * scale_width)
            y1 = int(y1 * scale_height)
            info += f"{x0},{y0},{x1},{y0},{x1},{y1},{x0},{y1},{lines[i]}"
-
-     if result_path is not None:
-         os.makedirs(os.path.dirname(result_path), exist_ok=True)
-         # create and write in utf-8
-         with open(result_path, "w", encoding="utf-8") as f:
-             f.write(info)
-     else:
-         print(info)
-
- postprocess(generated_text[0], scale_height, scale_width)
- ```
---
+ language: en
+ license: mit
---
+ # Under testing
+
+ # Kosmos-2.5
+
+ [Microsoft Document AI](https://www.microsoft.com/en-us/research/project/document-ai/) | [GitHub](https://github.com/microsoft/unilm/tree/master/kosmos-2.5)
+
+ ## Model description
+
+ Kosmos-2.5 is a multimodal literate model for machine reading of text-intensive images. Pre-trained on large-scale text-intensive images, Kosmos-2.5 excels in two distinct yet cooperative transcription tasks: (1) generating spatially-aware text blocks, where each block of text is assigned its spatial coordinates within the image, and (2) producing structured text output that captures styles and structures into the markdown format. This unified multimodal literate capability is achieved through a shared decoder-only auto-regressive Transformer architecture, task-specific prompts, and flexible text representations. We evaluate Kosmos-2.5 on end-to-end document-level text recognition and image-to-markdown text generation. Furthermore, the model can be readily adapted for any text-intensive image understanding task with different prompts through supervised fine-tuning, making it a general-purpose tool for real-world applications involving text-rich images. This work also paves the way for the future scaling of multimodal large language models.
+
+ [Kosmos-2.5: A Multimodal Literate Model](https://arxiv.org/abs/2309.11419)
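Both capabilities are selected purely by the task prompt: `<ocr>` produces text lines with bounding boxes, while `<md>` produces a markdown transcription. A minimal sketch of the switch (the full, runnable example is in the "Use with transformers" section below, where `processor` and `image` are defined):

```python
# The task prompt alone selects the transcription mode (see the full example below).
prompt = "<ocr>"   # spatially-aware text blocks with <bbox> coordinates
# prompt = "<md>"  # markdown-formatted structured output
inputs = processor(text=prompt, images=image, return_tensors="pt")
```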
+
+ ## NOTE:
+ Since this is a generative model, there is a risk of **hallucination** during the generation process, and it **CANNOT** guarantee the accuracy of all OCR/Markdown results in the images.
+
+ ## Use with transformers:
```bash
pip install git+https://github.com/tic-top/transformers.git
```
```python
from PIL import Image
+ import requests
import torch
+ from transformers import AutoProcessor, AutoModelForVision2Seq
+ import re
+
repo = "kirp/kosmos2_5"
+ device = "cuda:0"
+ dtype = torch.bfloat16
+ model = AutoModelForVision2Seq.from_pretrained(repo, device_map=device, torch_dtype=dtype)
processor = AutoProcessor.from_pretrained(repo)

+ url = "https://huggingface.co/kirp/kosmos2_5/resolve/main/receipt_00008.png"
+ image = Image.open(requests.get(url, stream=True).raw)
+ prompt = "<ocr>" # <md>

+ inputs = processor(text=prompt, images=image, return_tensors="pt")
height, width = inputs.pop("height"), inputs.pop("width")
+ raw_width, raw_height = image.size
scale_height = raw_height / height
scale_width = raw_width / width

inputs = {k: v.to(device) if v is not None else None for k, v in inputs.items()}
inputs["flattened_patches"] = inputs["flattened_patches"].to(dtype)
+
+ generated_ids = model.generate(
+     **inputs,
+     max_new_tokens=1024,
+ )
+ generated_text = processor.batch_decode(generated_ids, skip_special_tokens=True)
+
+ def postprocess(y, scale_height, scale_width):
+     y = y.replace(prompt, "")
+     if "<md>" in prompt:
+         return y
    pattern = r"<bbox><x_\d+><y_\d+><x_\d+><y_\d+></bbox>"
    bboxs_raw = re.findall(pattern, y)
    lines = re.split(pattern, y)[1:]

    info = ""
    for i in range(len(lines)):
        box = bboxs[i]
        x0, y0, x1, y1 = box
        if not (x0 >= x1 or y0 >= y1):
            x0 = int(x0 * scale_width)
            y0 = int(y0 * scale_height)
            x1 = int(x1 * scale_width)
            y1 = int(y1 * scale_height)
            info += f"{x0},{y0},{x1},{y0},{x1},{y1},{x0},{y1},{lines[i]}"
+     return info
+
+ output_text = postprocess(generated_text[0], scale_height, scale_width)
+ print(output_text)
+ ```
+ ```text
+ 55,595,71,595,71,629,55,629,1
+ 82,595,481,595,481,635,82,635,[REG] BLACK SAKURA
+ 716,590,841,590,841,629,716,629,45,455
+ 55,637,71,637,71,672,55,672,1
+ 82,637,486,637,486,675,82,675,COOKIE DOH SAUCES
+ 818,632,843,632,843,668,818,668,0
+ 51,683,71,683,71,719,51,719,1
+ 82,683,371,683,371,719,82,719,NATA DE COCO
+ 820,677,845,677,845,713,820,713,0
+ 32,770,851,770,851,811,32,811,Sub Total 45,455
+ 28,811,853,811,853,858,28,858,PB1 (10%) 4,545
+ 28,857,855,857,855,905,28,905,Rounding 0
+ 24,905,858,905,858,956,24,956,Total 50,000
+ 17,1096,868,1096,868,1150,17,1150,Card Payment 50,000
+ ```
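Each line of the `<ocr>` output above consists of eight comma-separated integers, the four corners of the text box in original-image pixels (`x0,y0,x1,y0,x1,y1,x0,y1`: top-left, top-right, bottom-right, bottom-left), followed by the recognized text, which may itself contain commas. Below is a minimal sketch for parsing these lines and drawing the boxes with PIL, reusing `image` and `output_text` from the example above; the helper name and output filename are illustrative, not part of the model card:

```python
from PIL import ImageDraw

def parse_ocr_lines(text):
    # Split each line into 8 coordinates plus the recognized string; maxsplit keeps
    # commas that belong to the text itself, e.g. "Sub Total 45,455".
    results = []
    for line in text.strip().splitlines():
        parts = line.split(",", 8)
        if len(parts) < 9:
            continue
        coords = list(map(int, parts[:8]))
        quad = [(coords[i], coords[i + 1]) for i in range(0, 8, 2)]
        results.append((quad, parts[8]))
    return results

# Draw each quadrilateral on the original receipt image.
draw = ImageDraw.Draw(image)
for quad, _ in parse_ocr_lines(output_text):
    draw.polygon(quad, outline="red")
image.save("receipt_00008_boxes.png")
```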
+
+ ## Citation
+
+ If you find Kosmos-2.5 useful in your research, please cite the following paper:
+
+ ```
+ @article{lv2023kosmos,
+   title={Kosmos-2.5: A multimodal literate model},
+   author={Lv, Tengchao and Huang, Yupan and Chen, Jingye and Cui, Lei and Ma, Shuming and Chang, Yaoyao and Huang, Shaohan and Wang, Wenhui and Dong, Li and Luo, Weiyao and others},
+   journal={arXiv preprint arXiv:2309.11419},
+   year={2023}
+ }
+ ```
+
+ ## License
+ The content of this project itself is licensed under the [MIT License](https://github.com/microsoft/unilm/blob/master/kosmos-2.5/LICENSE).
+
+ [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct)