akhil2808 committed on
Commit
0f393e6
1 Parent(s): edd2a62

Upload 3 files

Files changed (3)
  1. app (2).py +202 -0
  2. gitattributes +35 -0
  3. requirements (1).txt +11 -0
app (2).py ADDED
@@ -0,0 +1,202 @@
import gradio as gr
import spaces
from transformers import AutoModel, AutoTokenizer
from PIL import Image
import numpy as np
import os
import base64
import io
import uuid
import tempfile
import time
import shutil
from pathlib import Path

tokenizer = AutoTokenizer.from_pretrained('ucaslcl/GOT-OCR2_0', trust_remote_code=True)
model = AutoModel.from_pretrained('ucaslcl/GOT-OCR2_0', trust_remote_code=True, low_cpu_mem_usage=True, device_map='cuda', use_safetensors=True)
model = model.eval().cuda()

UPLOAD_FOLDER = "./uploads"
RESULTS_FOLDER = "./results"

for folder in [UPLOAD_FOLDER, RESULTS_FOLDER]:
    if not os.path.exists(folder):
        os.makedirs(folder)

def image_to_base64(image):
    buffered = io.BytesIO()
    image.save(buffered, format="PNG")
    return base64.b64encode(buffered.getvalue()).decode()

@spaces.GPU
def run_GOT(image, got_mode, fine_grained_mode="", ocr_color="", ocr_box=""):
    unique_id = str(uuid.uuid4())
    image_path = os.path.join(UPLOAD_FOLDER, f"{unique_id}.png")
    result_path = os.path.join(RESULTS_FOLDER, f"{unique_id}.html")

    shutil.copy(image, image_path)

    try:
        if got_mode == "plain texts OCR":
            res = model.chat(tokenizer, image_path, ocr_type='ocr')
            return res, None
        elif got_mode == "format texts OCR":
            res = model.chat(tokenizer, image_path, ocr_type='format', render=True, save_render_file=result_path)
        elif got_mode == "plain multi-crop OCR":
            res = model.chat_crop(tokenizer, image_path, ocr_type='ocr')
            return res, None
        elif got_mode == "format multi-crop OCR":
            res = model.chat_crop(tokenizer, image_path, ocr_type='format', render=True, save_render_file=result_path)
        elif got_mode == "plain fine-grained OCR":
            res = model.chat(tokenizer, image_path, ocr_type='ocr', ocr_box=ocr_box, ocr_color=ocr_color)
            return res, None
        elif got_mode == "format fine-grained OCR":
            res = model.chat(tokenizer, image_path, ocr_type='format', ocr_box=ocr_box, ocr_color=ocr_color, render=True, save_render_file=result_path)

        # res_markdown = f"$$ {res} $$"
        res_markdown = res

        if "format" in got_mode and os.path.exists(result_path):
            with open(result_path, 'r') as f:
                html_content = f.read()
            encoded_html = base64.b64encode(html_content.encode('utf-8')).decode('utf-8')
            iframe_src = f"data:text/html;base64,{encoded_html}"
            iframe = f'<iframe src="{iframe_src}" width="100%" height="600px"></iframe>'
            download_link = f'<a href="data:text/html;base64,{encoded_html}" download="result_{unique_id}.html">Download Full Result</a>'
            return res_markdown, f"{download_link}<br>{iframe}"
        else:
            return res_markdown, None
    except Exception as e:
        return f"Error: {str(e)}", None
    finally:
        if os.path.exists(image_path):
            os.remove(image_path)

def task_update(task):
    if "fine-grained" in task:
        return [
            gr.update(visible=True),
            gr.update(visible=False),
            gr.update(visible=False),
        ]
    else:
        return [
            gr.update(visible=False),
            gr.update(visible=False),
            gr.update(visible=False),
        ]

def fine_grained_update(task):
    if task == "box":
        return [
            gr.update(visible=False, value=""),
            gr.update(visible=True),
        ]
    elif task == 'color':
        return [
            gr.update(visible=True),
            gr.update(visible=False, value=""),
        ]

def cleanup_old_files():
    current_time = time.time()
    for folder in [UPLOAD_FOLDER, RESULTS_FOLDER]:
        for file_path in Path(folder).glob('*'):
            if current_time - file_path.stat().st_mtime > 3600:  # 1 hour
                file_path.unlink()

title_html = """
<h2> <span class="gradient-text" id="text">General OCR Theory</span><span class="plain-text">: Towards OCR-2.0 via a Unified End-to-end Model</span></h2>
<a href="https://huggingface.co/ucaslcl/GOT-OCR2_0">[😊 Hugging Face]</a>
<a href="https://arxiv.org/abs/2409.01704">[📜 Paper]</a>
<a href="https://github.com/Ucas-HaoranWei/GOT-OCR2.0/">[🌟 GitHub]</a>
"""

with gr.Blocks() as demo:
    gr.HTML(title_html)
    gr.Markdown("""
    🔥🔥🔥 This is the official online demo of the GOT-OCR-2.0 model!

    ### Demo Guidelines
    Upload your image below, choose one GOT mode, and click "Submit" to run the model. Images with more characters take longer to process.
    - **plain texts OCR & format texts OCR**: these two modes run OCR over the whole image.
    - **plain multi-crop OCR & format multi-crop OCR**: for images with more complex content, these modes usually give higher-quality results.
    - **plain fine-grained OCR & format fine-grained OCR**: these modes restrict OCR to a fine-grained region of the input image, specified either by box coordinates or by a red, green, or blue marker color.
    """)
    with gr.Row():
        with gr.Column():
            image_input = gr.Image(type="filepath", label="upload your image")
            task_dropdown = gr.Dropdown(
                choices=[
                    "plain texts OCR",
                    "format texts OCR",
                    "plain multi-crop OCR",
                    "format multi-crop OCR",
                    "plain fine-grained OCR",
                    "format fine-grained OCR",
                ],
                label="Choose one mode of GOT",
                value="plain texts OCR"
            )
            fine_grained_dropdown = gr.Dropdown(
                choices=["box", "color"],
                label="fine-grained type",
                visible=False
            )
            color_dropdown = gr.Dropdown(
                choices=["red", "green", "blue"],
                label="color list",
                visible=False
            )
            box_input = gr.Textbox(
                label="input box: [x1,y1,x2,y2]",
                placeholder="e.g., [0,0,100,100]",
                visible=False
            )
            submit_button = gr.Button("Submit")

        with gr.Column():
            ocr_result = gr.Textbox(label="GOT output")

    with gr.Column():
        gr.Markdown("**If you choose a format mode, the mathpix result will be rendered automatically as follows:**")
        html_result = gr.HTML(label="rendered html", show_label=True)

    gr.Examples(
        examples=[
            ["assets/coco.jpg", "plain texts OCR", "", "", ""],
            ["assets/en_30.png", "plain texts OCR", "", "", ""],
            ["assets/table.jpg", "format texts OCR", "", "", ""],
            ["assets/eq.jpg", "format texts OCR", "", "", ""],
            ["assets/exam.jpg", "format texts OCR", "", "", ""],
            ["assets/giga.jpg", "format multi-crop OCR", "", "", ""],
            ["assets/aff2.png", "plain fine-grained OCR", "box", "", "[409,763,756,891]"],
            ["assets/color.png", "plain fine-grained OCR", "color", "red", ""],
        ],
        inputs=[image_input, task_dropdown, fine_grained_dropdown, color_dropdown, box_input],
        outputs=[ocr_result, html_result],
        fn=run_GOT,
        label="examples",
    )

    task_dropdown.change(
        task_update,
        inputs=[task_dropdown],
        outputs=[fine_grained_dropdown, color_dropdown, box_input]
    )
    fine_grained_dropdown.change(
        fine_grained_update,
        inputs=[fine_grained_dropdown],
        outputs=[color_dropdown, box_input]
    )

    submit_button.click(
        run_GOT,
        inputs=[image_input, task_dropdown, fine_grained_dropdown, color_dropdown, box_input],
        outputs=[ocr_result, html_result]
    )

if __name__ == "__main__":
    cleanup_old_files()
    demo.launch()
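
For reference, the model calls that run_GOT wraps can be exercised without the Gradio UI. The following is a minimal standalone sketch, assuming a CUDA GPU, the same 'ucaslcl/GOT-OCR2_0' checkpoint, and illustrative image paths taken from the examples above; it only repeats the chat/chat_crop usage already present in the file.

# Standalone sketch of the GOT calls used by run_GOT (paths are illustrative).
from transformers import AutoModel, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained('ucaslcl/GOT-OCR2_0', trust_remote_code=True)
model = AutoModel.from_pretrained('ucaslcl/GOT-OCR2_0', trust_remote_code=True,
                                  low_cpu_mem_usage=True, device_map='cuda',
                                  use_safetensors=True).eval().cuda()

# "plain texts OCR": OCR over the whole image, plain-text output.
print(model.chat(tokenizer, 'assets/coco.jpg', ocr_type='ocr'))

# "format fine-grained OCR": restrict OCR to a box and render the formatted
# result to an HTML file, mirroring the render/save_render_file path in the app.
model.chat(tokenizer, 'assets/aff2.png', ocr_type='format',
           ocr_box='[409,763,756,891]', ocr_color='',
           render=True, save_render_file='./results/demo.html')
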
gitattributes ADDED
@@ -0,0 +1,35 @@
*.7z filter=lfs diff=lfs merge=lfs -text
*.arrow filter=lfs diff=lfs merge=lfs -text
*.bin filter=lfs diff=lfs merge=lfs -text
*.bz2 filter=lfs diff=lfs merge=lfs -text
*.ckpt filter=lfs diff=lfs merge=lfs -text
*.ftz filter=lfs diff=lfs merge=lfs -text
*.gz filter=lfs diff=lfs merge=lfs -text
*.h5 filter=lfs diff=lfs merge=lfs -text
*.joblib filter=lfs diff=lfs merge=lfs -text
*.lfs.* filter=lfs diff=lfs merge=lfs -text
*.mlmodel filter=lfs diff=lfs merge=lfs -text
*.model filter=lfs diff=lfs merge=lfs -text
*.msgpack filter=lfs diff=lfs merge=lfs -text
*.npy filter=lfs diff=lfs merge=lfs -text
*.npz filter=lfs diff=lfs merge=lfs -text
*.onnx filter=lfs diff=lfs merge=lfs -text
*.ot filter=lfs diff=lfs merge=lfs -text
*.parquet filter=lfs diff=lfs merge=lfs -text
*.pb filter=lfs diff=lfs merge=lfs -text
*.pickle filter=lfs diff=lfs merge=lfs -text
*.pkl filter=lfs diff=lfs merge=lfs -text
*.pt filter=lfs diff=lfs merge=lfs -text
*.pth filter=lfs diff=lfs merge=lfs -text
*.rar filter=lfs diff=lfs merge=lfs -text
*.safetensors filter=lfs diff=lfs merge=lfs -text
saved_model/**/* filter=lfs diff=lfs merge=lfs -text
*.tar.* filter=lfs diff=lfs merge=lfs -text
*.tar filter=lfs diff=lfs merge=lfs -text
*.tflite filter=lfs diff=lfs merge=lfs -text
*.tgz filter=lfs diff=lfs merge=lfs -text
*.wasm filter=lfs diff=lfs merge=lfs -text
*.xz filter=lfs diff=lfs merge=lfs -text
*.zip filter=lfs diff=lfs merge=lfs -text
*.zst filter=lfs diff=lfs merge=lfs -text
*tfevents* filter=lfs diff=lfs merge=lfs -text
requirements (1).txt ADDED
@@ -0,0 +1,11 @@
torch==2.0.1
torchvision==0.15.2
transformers==4.37.2
megfile==3.1.2
tiktoken
verovio
opencv-python
accelerate
numpy==1.26.4
shutils  # note: the app imports the standard-library shutil, so this PyPI package is likely unnecessary
pillow
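
With these dependencies installed and the app running, the demo can also be queried programmatically. Below is a hypothetical client-side sketch using gradio_client; the local URL, the endpoint name "/run_GOT" (derived from the function name), and gradio_client itself are assumptions not pinned by this commit.

# Hypothetical client sketch (gradio_client is not listed in the requirements above;
# the URL and api_name are assumptions based on the run_GOT click handler).
from gradio_client import Client, handle_file

client = Client("http://127.0.0.1:7860")  # or the Hugging Face Space URL
text, rendered_html = client.predict(
    handle_file("assets/coco.jpg"),  # image
    "plain texts OCR",               # got_mode
    "",                              # fine_grained_mode
    "",                              # ocr_color
    "",                              # ocr_box
    api_name="/run_GOT",
)
print(text)
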