<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<!-- Meta tags for social media banners; these should be filled in appropriately, as they are your "business card" -->
<!-- Replace the content tag with appropriate information -->
<meta name="description" content="DESCRIPTION META TAG">
<meta property="og:title" content="SOCIAL MEDIA TITLE TAG" />
<meta property="og:description" content="SOCIAL MEDIA DESCRIPTION TAG TAG" />
<meta property="og:url" content="URL OF THE WEBSITE" />
<!-- Path to banner image, should be in the path listed below. Optimal dimensions are 1200x630 -->
<meta property="og:image" content="static/images/your_banner_image.png" />
<meta property="og:image:width" content="1200" />
<meta property="og:image:height" content="630" />
<meta name="twitter:title" content="TWITTER BANNER TITLE META TAG">
<meta name="twitter:description" content="TWITTER BANNER DESCRIPTION META TAG">
<!-- Path to banner image, should be in the path listed below. Optimal dimensions are 1200x600 -->
<meta name="twitter:image" content="static/images/your_twitter_banner_image.png">
<meta name="twitter:card" content="summary_large_image">
<!-- Keywords for your paper to be indexed by-->
<meta name="keywords" content="KEYWORDS SHOULD BE PLACED HERE">
<meta name="viewport" content="width=device-width, initial-scale=1">
<title>OmniParser</title>
<!-- <link rel="icon" type="image/x-icon" href="static/images/mammoth_icon.png"> -->
<link href="https://fonts.googleapis.com/css?family=Google+Sans|Noto+Sans|Castoro" rel="stylesheet">
<link rel="stylesheet" href="static/css/bulma.min.css">
<link rel="stylesheet" href="static/css/bulma-carousel.min.css">
<link rel="stylesheet" href="static/css/bulma-slider.min.css">
<link rel="stylesheet" href="static/css/fontawesome.all.min.css">
<link rel="stylesheet" href="https://cdn.jsdelivr.net/gh/jpswalsh/academicons@1/css/academicons.min.css">
<link rel="stylesheet" href="static/css/index.css">
<script src="static/js/jquery.min.js"></script>
<script src="static/js/main.js"></script>
<script defer src="static/js/fontawesome.all.min.js"></script>
<script src="static/js/bulma-carousel.min.js"></script>
<script src="static/js/bulma-slider.min.js"></script>
<script src="static/js/index.js"></script>
<link rel="stylesheet" type="text/css" href="static/css/jquery.dataTables.css">
<script type="text/javascript" charset="utf8" src="static/js/jquery-3.5.1.js"></script>
<script type="text/javascript" charset="utf8" src="static/js/jquery.dataTables.js"></script>
</head>
<body>
<section class="hero">
<div class="hero-body">
<div class="container is-max-desktop">
<div class="columns is-centered">
<div class="column has-text-centered">
<h1 class="title is-1 publication-title">OmniParser for Pure Vision Based GUI Agent</h1>
<div class="is-size-5 publication-authors">
<!-- Paper authors -->
<span class="author-block">
<a href="https://adamlu123.github.io/" target="_blank">Yadong Lu</a><sup>1</sup>,
</span>
<span class="author-block">
<a href="https://jwyang.github.io/" target="_blank">Jianwei Yang</a><sup>1</sup>,
</span>
<span class="author-block">
<a href="https://scholar.google.com/citations?user=S6OFEFEAAAAJ&hl=en" target="_blank">Yelong Shen</a><sup>2</sup>,
</span>
<span class="author-block">
<a href="https://www.microsoft.com/en-us/research/people/hassanam/?from=https://research.microsoft.com/en-us/um/people/hassanam/&type=exact" target="_blank">Ahmed Awadallah</a><sup>1</sup>,
</span>
</div>
<div class="is-size-5 publication-authors">
<span class="author-block">
<sup>1</sup>Microsoft Research,
<sup>2</sup>Microsoft Gen AI<br>
</span>
</div>
<div class="column has-text-centered">
<div class="publication-links">
<!-- Github link -->
<span class="link-block">
<a href="https://github.com/microsoft/OmniParser" target="_blank"
class="external-link button is-normal is-rounded is-dark">
<span class="icon">
<i class="fab fa-github"></i>
</span>
<span>Code</span>
</a>
</span>
<!-- ArXiv abstract Link -->
<span class="link-block">
<a href="https://arxiv.org/pdf/2408.00203" target="_blank"
class="external-link button is-normal is-rounded is-dark">
<span class="icon">
<i class="ai ai-arxiv"></i>
</span>
<span>arXiv</span>
</a>
</span>
<span class="link-block">
<a href="https://www.microsoft.com/en-us/research/articles/omniparser-for-pure-vision-based-gui-agent/"
class="external-link button is-normal is-rounded is-dark">
<span class="icon">
<i class="fas fa-book"></i>
</span>
<span>Blog post</span>
</a>
</span>
<!-- Demo link -->
<!-- <span class="link-block"> -->
<!-- <a href="https://46ea00cff2fc968db7.gradio.live" target="_blank" -->
<!-- class="external-link button is-normal is-rounded is-dark"> -->
<!-- <span class="icon"> -->
<!-- <i class="fab fa-github"></i> -->
<!-- </span> -->
<!-- <span>Demo</span>
</a>
</span>
-->
</div>
</div>
</div>
</div>
</div>
</div>
</section>
<!-- Paper abstract -->
<section class="hero">
<div class="hero-body">
<div class="container is-max-desktop">
<div class="columns is-centered">
<div class="column has-text-centered">
<h2 class="title is-3">Abstract</h2>
<div class="content has-text-justified">
<p>
The recent success of large vision-language models shows great potential in driving agent systems that operate on user interfaces. However, we argue that the power of multimodal models like GPT-4V as a general agent across multiple operating systems and applications is largely underestimated due to the lack of a robust screen parsing technique capable of: 1. reliably identifying interactable icons within the user interface, and 2. understanding the semantics of various elements in a screenshot and accurately associating the intended action with the corresponding region on the screen. To fill these gaps, we introduce OMNIPARSER, a comprehensive method for parsing user interface screenshots into structured elements, which significantly enhances the ability of GPT-4V to generate actions that can be accurately grounded in the corresponding regions of the interface. We first curated an interactable icon detection dataset using popular webpages, along with an icon description dataset. These datasets were used to fine-tune two specialized models: a detection model that parses interactable regions on the screen and a caption model that extracts the functional semantics of the detected elements. OMNIPARSER significantly improves GPT-4V's performance on the ScreenSpot benchmark, and on the Mind2Web and AITW benchmarks, OMNIPARSER with screenshot-only input outperforms GPT-4V baselines that require additional information beyond the screenshot.
</p>
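<p>
To make the pipeline concrete, the sketch below (illustrative only, not the released implementation) shows how the two fine-tuned components could be combined: a detector proposes interactable regions, an OCR module reads any text they contain, and the caption model describes the detected icons; the resulting structured element list, together with the annotated screenshot, is what gets handed to GPT-4V. The callables <code>detector</code>, <code>ocr</code>, and <code>captioner</code> are hypothetical placeholders for the models described above.
</p>
<pre><code>from dataclasses import dataclass

@dataclass
class ParsedElement:
    element_id: int      # numeric ID overlaid on the screenshot
    box: tuple           # (x, y, width, height) in screenshot pixels
    text: str            # OCR text inside the region, if any
    description: str     # functional description from the caption model

def parse_screenshot(screenshot, detector, ocr, captioner):
    """Sketch of OmniParser-style parsing. detector, ocr, and captioner are
    placeholders for the fine-tuned detection model, an OCR module, and the
    fine-tuned caption model, passed in as callables."""
    elements = []
    for i, box in enumerate(detector(screenshot)):       # interactable region proposals
        elements.append(ParsedElement(
            element_id=i,
            box=box,
            text=ocr(screenshot, box),                   # text inside the region
            description=captioner(screenshot, box),      # e.g. "settings gear icon"
        ))
    return elements  # structured elements + annotated screenshot go to GPT-4V</code></pre>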
</div>
</div>
</div>
</div>
</div>
</section>
<!-- End paper abstract -->
<!-- Image carousel -->
<section class="hero">
<div class="hero-body">
<div class="container is-max-desktop">
<div class="columns is-centered">
<!-- <div class="column is-four-fifths"> -->
<div class="item">
<!-- Your image here -->
<img src="static/images/flow_merged0.png" alt="Result mobile" />
<img src="static/images/flow_merged1.png" alt="Result mobile" />
<img src="static/images/flow_merged2.png" alt="Result mobile" />
<h2 class="subtitle">
Examples of parsed screenshot images and local semantics produced by OmniParser. The inputs to OmniParser are the user task and a UI screenshot, from which it produces: 1. a parsed screenshot image with bounding boxes and numeric IDs overlaid, and 2. local semantics containing both the extracted text and icon descriptions.
</h2>
</div>
<!-- </div> -->
</div>
</div>
</div>
</section>
<!-- End image carousel -->
<section class="hero is-small">
<div class="hero-body">
<div class="container is-max-desktop">
<div class="columns is-centered has-text-centered">
<div class="column is-four-fifths">
<h2 class="title is-3">Curated Dataset for Interactable Region Detection and Icon Functionality Description</h2>
<br>
We curate an interactable icon detection dataset containing 67k unique screenshot images, each labeled with bounding boxes of interactable icons derived from the DOM tree. We first took a uniform sample of 100k popular, publicly available URLs from the ClueWeb dataset and collected bounding boxes of the interactable regions of each webpage from its DOM tree. We also collected 7k icon-description pairs for fine-tuning the caption model.
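<p>
A minimal sketch of this DOM-based labeling (assuming a headless browser via Playwright, which is our illustration rather than necessarily the authors' tooling): render a page, collect bounding boxes of common interactable elements from the DOM, and save the matching screenshot. The selector list is illustrative.
</p>
<pre><code># Illustrative sketch: derive interactable-region boxes from the DOM of a rendered page.
from playwright.sync_api import sync_playwright

# Hypothetical selector list for "interactable" elements; the paper's exact criteria may differ.
INTERACTABLE = "a, button, input, select, textarea, [onclick], [role='button']"

def collect_boxes(url, out_png="page.png"):
    with sync_playwright() as p:
        browser = p.chromium.launch()
        page = browser.new_page(viewport={"width": 1280, "height": 800})
        page.goto(url)
        boxes = []
        for el in page.query_selector_all(INTERACTABLE):
            box = el.bounding_box()          # None for hidden or detached elements
            if box and box["width"] and box["height"]:
                boxes.append((box["x"], box["y"], box["width"], box["height"]))
        page.screenshot(path=out_png)        # paired screenshot for the detection label
        browser.close()
    return boxes</code></pre>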
<div class="item">
<img src="static/images/curated_data.png" alt="Species Classification results on iWildCam2020-WILDS (OOD) dataset" />
<p>
<b>Examples from the Interactable Region Detection dataset.</b> The bounding boxes are based on the interactable regions extracted from the DOM tree of the webpage.
</p>
</div>
</div>
</div>
</div>
</div>
</section>
<!--
<section class="hero is-light is-small">
<div class="hero-body">
<div class="container">
<div id="results-carousel" class="carousel results-carousel">
<div class="item item-steve">
<video poster="" id="steve" autoplay controls muted loop playsinline height="100%">
<source src="./static/videos/web_5_demo_nocap.mp4"
type="video/mp4">
</video>
</div>
<div class="item item-chair-tp">
<video poster="" id="chair-tp" autoplay controls muted loop playsinline height="100%">
<source src="./static/videos/web_22_demo_nocap.mp4"
type="video/mp4">
</video>
</div>
</div>
</div>
</div>
</section> -->
<section class="hero is-small">
<div class="hero-body">
<div class="container is-max-desktop">
<div class="columns is-centered has-text-centered">
<div class="column is-four-fifths">
<h2 class="title is-3">Results</h2>
<br>
We evaluate our model on the SeeClick, Mind2Web, and AITW benchmarks. Our model outperforms the GPT-4V baseline on all benchmarks, and with screenshot-only input it outperforms GPT-4V baselines that require additional information beyond the screenshot.
<div class="item">
<img src="static/images/seeclick.png" alt="seeclick" />
<img src="static/images/m2w.png" alt="mind2web" />
<img src="static/images/aitw.png" alt="aitw" />
</div>
</div>
</div>
</div>
</div>
</section>
<section class="hero is-small"></section>
<div class="hero-body">
<div class="container is-max-desktop">
<div class="columns is-centered has-text-centered">
<div class="column is-four-fifths">
<h2 class="title is-3">Plugin-ready for Other Vision Language Models</h2>
<br>
To further demonstrate that OmniParser is a plug-in choice for off-the-shelf vision-language models, we show the performance of OmniParser combined with recently released vision-language models: Phi-3.5-V and Llama-3.2-V. As seen in the table, our fine-tuned interactable region detection (ID) model significantly improves task performance compared to the Grounding DINO model (w.o. ID) with local semantics, across all sub-categories for GPT-4V, Phi-3.5-V, and Llama-3.2-V. In addition, the local semantics of icon functionality significantly helps the performance of every vision-language model.
In the table, LS is short for the local semantics of icon functionality, and ID is short for the interactable region detection model we fine-tune. The setting w.o. ID means we replace the ID model with the original Grounding DINO model (not fine-tuned on our data) while keeping local semantics. The setting w.o. ID and w.o. LS means we use the Grounding DINO model and additionally drop the icon descriptions from the text prompt.
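<p>
As a rough sketch of how the parsed output plugs into an off-the-shelf vision-language model (the exact prompt format used in the paper may differ), the function below serializes the parsed elements and their local semantics into a text prompt; the annotated screenshot is sent alongside it as the image input. Element fields follow the hypothetical <code>ParsedElement</code> structure sketched earlier.
</p>
<pre><code># Illustrative only: assemble the text prompt for GPT-4V / Phi-3.5-V / Llama-3.2-V.
def build_prompt(task, elements):
    lines = [f"Task: {task}", "Interactable elements on screen:"]
    for e in elements:
        # Local semantics (LS): OCR text and icon description keyed by the element ID
        # that is also drawn on the annotated screenshot.
        lines.append(f"  [{e.element_id}] text={e.text!r} description={e.description!r}")
    lines.append("Reply with the ID of the element to act on and the action to take.")
    return "\n".join(lines)

# Dropping the description field corresponds to the "w.o. LS" ablation above;
# swapping the fine-tuned detector for Grounding DINO corresponds to "w.o. ID".</code></pre>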
<div class="item">
<img src="static/images/ablation.png" alt="seeclick" />
</div>
</div>
</div>
</div>
</div>
</section>
<section class="section">
<div class="container is-max-desktop">
<h2 class="title is-3">Demo of Mind2Web Tasks </h2>
<div class="columns is-centered">
<!-- Visual Effects. -->
<div class="column">
<div class="content">
<!-- <h2 class="title is-3">Visual Effects</h2> -->
<!-- <p>
Using <i>nerfies</i> you can create fun visual effects. This Dolly zoom effect
would be impossible without nerfies since it would require going through a wall.
</p> -->
<video id="dollyzoom" autoplay controls muted loop playsinline height="100%">
<source src="./static/videos/web_5_demo_nocap.mp4"
type="video/mp4">
</video>
</div>
</div>
<!--/ Visual Effects. -->
<!-- Matting. -->
<div class="column">
<!-- <h2 class="title is-3">Matting</h2> -->
<div class="columns is-centered">
<div class="column content">
<!-- <p>
As a byproduct of our method, we can also solve the matting problem by ignoring
samples that fall outside of a bounding box during rendering.
</p> -->
<video id="matting-video" controls playsinline height="100%">
<source src="./static/videos/web_22_demo_nocap.mp4"
type="video/mp4">
</video>
</div>
</div>
</div>
</div>
</section>
<br>
<!-- BibTex citation -->
<section class="section" id="BibTeX">
<div class="container is-max-desktop content">
<h2 class="title">Citation</h2>
<!-- Please cite our paper if you use our code, data, model or results: -->
<!-- <br> -->
<pre><code>@misc{lu2024omniparserpurevisionbased,
title={OmniParser for Pure Vision Based GUI Agent},
author={Yadong Lu and Jianwei Yang and Yelong Shen and Ahmed Awadallah},
year={2024},
eprint={2408.00203},
archivePrefix={arXiv},
primaryClass={cs.CV},
url={https://arxiv.org/abs/2408.00203},
}
</code></pre>
</div>
</section>
<footer class="footer">
<div class="container">
<div class="columns is-centered">
<div class="column is-8">
<div class="content">
<p>
This page was built using the <a href="https://github.com/eliahuhorwitz/Academic-project-page-template" target="_blank">Academic Project Page Template</a>, which was adapted from the <a href="https://nerfies.github.io" target="_blank">Nerfies</a> project page. You are free to borrow the source code of this website; we just ask that you link back to this page in the footer. <br> This website is licensed under a <a rel="license" href="http://creativecommons.org/licenses/by-sa/4.0/"
target="_blank">Creative
Commons Attribution-ShareAlike 4.0 International License</a>.
</p>
</div>
</div>
</div>
</div>
</footer>
</body>
<style>
.buttonGroup {
text-align: center;
}
.buttonGroup>button {
padding: 15px;
color: white;
background-color: #363636;
border-radius: 5px;
}
.buttonGroup>button:hover {
/* box-shadow requires offset values; assumed intent is a subtle hover shadow */
box-shadow: 0 0 5px rgba(0, 0, 0, 0.5);
}
</style>
</html>