<!DOCTYPE html>
<html lang="en">
<head>
<title>LID</title>
<meta charset="utf-8">
<meta content="width=device-width, initial-scale=1, shrink-to-fit=no" name="viewport">
<link href="https://fonts.googleapis.com/css?family=B612+Mono|Cabin:400,700&display=swap" rel="stylesheet">
<link href="fonts/icomoon/style.css" rel="stylesheet">
<link crossorigin="anonymous" href="https://stackpath.bootstrapcdn.com/bootstrap/4.4.1/css/bootstrap.min.css"
integrity="sha384-Vkoo8x4CGsO3+Hhxv8T/Q5PaXtkKtu6ug5TOeNV6gBiFeWPGFN9MuhOf23Q9Ifjh" rel="stylesheet">
<link href="css/jquery-ui.css" rel="stylesheet">
<link href="css/owl.carousel.min.css" rel="stylesheet">
<link href="css/owl.theme.default.min.css" rel="stylesheet">
<link href="css/owl.theme.default.min.css" rel="stylesheet">
<link href="css/jquery.fancybox.min.css" rel="stylesheet">
<link href="fonts/flaticon/font/flaticon.css" rel="stylesheet">
<link href="css/aos.css" rel="stylesheet">
<link href="css/jquery.mb.YTPlayer.min.css" media="all" rel="stylesheet" type="text/css">
<link href="css/style.css" rel="stylesheet">
<!-- HTML5 shim and Respond.js for IE8 support of HTML5 elements and media queries -->
<!--[if lt IE 9]>
<script src="https://oss.maxcdn.com/html5shiv/3.7.2/html5shiv.min.js"></script>
<script src="https://oss.maxcdn.com/respond/1.4.2/respond.min.js"></script>
<![endif]-->
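<!-- Google Analytics tracking snippet (analytics.js) -->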
<script>
(function (i, s, o, g, r, a, m) {
i['GoogleAnalyticsObject'] = r;
i[r] = i[r] || function () {
(i[r].q = i[r].q || []).push(arguments)
}, i[r].l = 1 * new Date();
a = s.createElement(o),
m = s.getElementsByTagName(o)[0];
a.async = 1;
a.src = g;
m.parentNode.insertBefore(a, m)
})(window, document, 'script', 'https://www.google-analytics.com/analytics.js', 'ga');
ga('create', 'UA-88572407-1', 'auto');
ga('send', 'pageview');
</script>
</head>
<body data-offset="300" data-spy="scroll" data-target=".site-navbar-target">
<div class="site-wrap">
<div class="site-mobile-menu site-navbar-target">
<div class="site-mobile-menu-header">
<div class="site-mobile-menu-close mt-3">
<span class="icon-close2 js-menu-toggle"></span>
</div>
</div>
<div class="site-mobile-menu-body"></div>
</div>
<div class="header-top">
<div class="container">
<div class="row align-items-center">
<div class="col-12 col-lg-6 d-flex">
<a class="site-logo" href="index.html">
Learning from Imperfect Data (LID)
</a>
<a class="ml-auto d-inline-block d-lg-none site-menu-toggle js-menu-toggle text-black"
href="#"><span
class="icon-menu h3"></span></a>
</div>
<div class="col-12 col-lg-6 ml-auto d-flex">
<div class="ml-md-auto top-social d-none d-lg-inline-block">
<a class="d-inline-block p-3" href="#"> </a>
<a class="d-inline-block p-3" href="#"> </a>
<a class="d-inline-block p-3" href="#"> </a>
</div>
</div>
<!-- <div class="col-6 d-block d-lg-none text-right">-->
</div>
</div>
</div>
<div class="site-navbar py-2 js-sticky-header site-navbar-target d-none pl-0 d-lg-block" role="banner">
<div class="container">
<div class="d-flex align-items-center">
<div class="mr-auto">
<nav class="site-navigation position-relative text-right" role="navigation">
<ul class="site-menu main-menu js-clone-nav mr-auto d-none pl-0 d-lg-block">
<li class="active">
<a class="nav-link text-left" href="index.html">Home</a>
</li>
<li>
<a class="nav-link text-left" href="index.html#dates">Important dates</a>
</li>
<li>
<a href="index.html#schedule" class="nav-link text-left">Schedule</a>
</li>
<li>
<a href="index.html#papers" class="nav-link text-left">Papers</a>
</li>
<li>
<a class="nav-link text-left" href="index.html#people">Organizers</a>
</li>
<!-- <li class="nav-item dropdown">
<a class="nav-link dropdown-toggle" href="challenge.html" id="navbarDropdown" role="button" data-toggle="dropdown" aria-haspopup="true" aria-expanded="false">
Challenge
</a>
<div class="dropdown-menu" aria-labelledby="navbarDropdown">
<a class="dropdown-item" href="challenge.html#challenge1">Object semantic segmentation with image-level supervision</a>
<a class="dropdown-item" href="challenge.html#challenge2">Scene parsing with point-based supervision</a>
</div>
</li> -->
<li class="active">
<a class="nav-link text-left" href="challenge.html">Challenge</a>
</li>
<li>
<a class="nav-link text-left" href="challenge.html#awards">Awards</a>
</li>
<li class="nav-item dropdown">
<a aria-expanded="false" aria-haspopup="true" class="nav-link dropdown-toggle"
data-toggle="dropdown" href="challenge.html" id="navbarDropdown" role="button">
Previous
</a>
<div aria-labelledby="navbarDropdown" class="dropdown-menu">
<a class="dropdown-item" href="LID2019">LID 2019</a>
</div>
</li>
</ul>
</nav>
</div>
</div>
</div>
</div>
</div>
<div class="site-section">
<div class="container">
<div class="row">
<!-- Challenge 1-->
<div class="col-lg-12" id="challenge1">
<!-- <p style="text-align: justify">We will organize the second Learning from Imperfect Data (LID) challenge on object semantic segmentation and scene parsing, which includes two competition tracks. </p> (<strong>challenge deadline: June 8, 2019</strong>) -->
<p>In conjunction with this workshop, we will hold three challenges this
year.</p>
<div class="section-title">
<h1>Track 1</h1>
<h4>Weakly-supervised Semantic Segmentation</h4>
</div>
<div class="trend-entry d-flex">
<div class="trend-contents">
<p>
This track targets learning to perform object semantic segmentation using image-level
annotations as supervision [1, 2, 3]. The dataset is built upon the object detection track of
the ImageNet Large Scale Visual Recognition Challenge (ILSVRC) [4], which includes
456,567 training images from 200 categories in total. We provide pixel-level annotations of 15K
images (validation/testing: 5,000/10,000) for evaluation.
</p>
<ul>
<li><strong>Evaluation:</strong> Mean Intersection-Over-Union (IoU) score over 200 categories; a computation sketch follows this list.</li>
<li><strong>Download: </strong> The training dataset is available at <a
href="http://image-net.org/image/ILSVRC2017/ILSVRC2017_DET.tar.gz">ImageNet DET</a>;
the validation and test datasets are available at <a
href="https://pan.baidu.com/s/1_rzQNkTEFmTJdiYYbhySSQ">Baidu Drive</a> and <a
href="https://drive.google.com/open?id=1B0enLzxyIULbRZWUi0XpNnXCu7nwFI-f">
Google Drive</a>. <br/>
<strong class="text-danger">Note: </strong> The image-level labels can be extracted using these <a href="https://drive.google.com/open?id=1ajioybXZYPIXUyQl7G4MRykr7AvelAL6">scripts</a>.</li>
<li><strong>Submission: </strong> <a
href="https://evalai.cloudcv.org/web/challenges/challenge-page/556/overview">https://evalai.cloudcv.org/web/challenges/challenge-page/556/overview</a>
</li>
</ul>
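<p>For reference, the sketch below shows one common way to accumulate the mean IoU
metric over a whole dataset via a per-class confusion matrix. It is an illustration only,
not the official evaluation code; the helper names and the ignore label of 255 are our
assumptions.</p>
<pre>
import numpy as np

NUM_CLASSES = 200  # Track 1; Track 2 uses 150 categories

def update_confusion(conf, pred, gt, ignore_index=255):
    # conf: (C, C) int array; pred, gt: integer label maps of equal shape.
    mask = gt != ignore_index
    idx = gt[mask].astype(np.int64) * NUM_CLASSES + pred[mask].astype(np.int64)
    return conf + np.bincount(idx, minlength=NUM_CLASSES ** 2).reshape(NUM_CLASSES, NUM_CLASSES)

def mean_iou(conf):
    # Per-class IoU = TP / (TP + FP + FN), averaged over classes that occur.
    tp = np.diag(conf)
    union = conf.sum(axis=0) + conf.sum(axis=1) - tp
    return (tp[union > 0] / union[union > 0]).mean()
</pre>
<p>Every image pair is folded into <code>conf</code> first; <code>mean_iou</code> is then read once at the end.</p>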
</div>
</div>
</div>
<!-- Challenge 2-->
<div class="col-lg-12" id="challenge2">
<div class="section-title">
<h1>Track 2</h1>
<h4>Weakly-supervised Scene Parsing</h4>
</div>
<div class="trend-entry d-flex">
<div class="trend-contents">
<p> This track targets learning to perform scene parsing using point-based annotations as
supervision. The dataset is built upon the ADE20K dataset [5]. There are 20,210 images in
the training set, 2,000 images in the validation set, and 3,000 images in the testing set.
We provide additional point-based annotations on the training set [6]; a training-loss
sketch follows the list below.
</p>
<ul>
<li><strong>Evaluation:</strong> Mean Intersection-Over-Union (IoU) score over 150 categories.</li>
<li><strong>Download: </strong> Please download the data from <a
href="http://sceneparsing.csail.mit.edu/data/LID2019"> LID Challenge Track2 data</a>
</li>
<li><strong>Submission: </strong> <a
href="https://evalai.cloudcv.org/web/challenges/challenge-page/574/overview">https://evalai.cloudcv.org/web/challenges/challenge-page/574/overview</a>
</li>
</ul>
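<p>As noted above, training supervision comes only from sparse labeled points. One common
recipe (an illustrative sketch, not necessarily the organizers' baseline [6]) is to compute
the cross-entropy loss only at the annotated pixels; the fill value 255 for unlabeled pixels
is our assumption:</p>
<pre>
import torch.nn.functional as F

IGNORE = 255  # assumed fill value for pixels without a point annotation

def point_supervised_loss(logits, point_labels):
    # logits:       (N, 150, H, W) raw network outputs
    # point_labels: (N, H, W) class index at annotated points, IGNORE elsewhere
    return F.cross_entropy(logits, point_labels, ignore_index=IGNORE)
</pre>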
</div>
</div>
</div>
<!-- Challenge 3-->
<div class="col-lg-12" id="challenge3">
<div class="section-title">
<h1>Track 3</h1>
<h4>Weakly-supervised Object Localization</h4>
</div>
<div class="trend-entry d-flex">
<div class="trend-contents">
<p> This track targets equipping classification networks with the ability to localize
objects [7, 8, 9]. The dataset is built upon the image
classification/localization track of the ImageNet Large Scale Visual Recognition Challenge
(ILSVRC), which includes 1.2 million training images from 1,000 categories in total. We
provide pixel-level annotations of 44,271 images (validation/testing: 23,151/21,120) for
evaluation.
</p>
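<p>A classic way to obtain such localization maps is the class activation map (CAM) of [7].
The sketch below illustrates the idea under our naming assumptions; it is not the required
method:</p>
<pre>
import torch
import torch.nn.functional as F

def class_activation_map(features, fc_weight, class_idx):
    # features:  (C, H, W) activations from the last convolutional layer
    # fc_weight: (num_classes, C) weights of the final classification layer
    cam = torch.einsum('c,chw->hw', fc_weight[class_idx], features)
    cam = F.relu(cam)  # keep positive evidence only
    return cam / cam.max().clamp(min=1e-8)  # scale to [0, 1] before 0-255 quantization
</pre>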
<ul>
<li><strong>Evaluation:</strong> IoU curve. Given the predicted object localization map, we calculate
the IoU scores between the foreground pixels and the ground-truth masks under different
binarization thresholds (a sketch follows this list). In the ideal curve, the highest IoU score is
expected to be close to 1.0, and the threshold at which it occurs is expected to be near 255,
since a higher threshold reflects a higher contrast between the target object and the
background.
</li>
<li><strong>Download: </strong> The validation dataset, test list, and evaluation scripts are available at <a
href="https://pan.baidu.com/s/1Ob7bzJcvirpkqZ-gQL-MjA">Baidu Drive (pwd: z5yp)</a> and <a
href="https://drive.google.com/drive/folders/1rd3iV9Xif2tRgofQWrL3qH1_lIFkicdI?usp=sharing">
Google Drive</a></li>
<li><strong>Submission: </strong> <a href="https://evalai.cloudcv.org/web/challenges/challenge-page/557/overview">https://evalai.cloudcv.org/web/challenges/challenge-page/557/overview</a></li>
</ul>
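<p>The evaluation scripts linked above are authoritative; the following only illustrates the
threshold sweep for a single image, assuming an 8-bit localization map and a binary
ground-truth mask:</p>
<pre>
import numpy as np

def iou_curve(loc_map, gt_mask, thresholds=range(256)):
    # loc_map: (H, W) localization map with values in 0..255
    # gt_mask: (H, W) boolean ground-truth foreground mask
    ious = []
    for t in thresholds:
        fg = loc_map > t
        inter = np.logical_and(fg, gt_mask).sum()
        union = np.logical_or(fg, gt_mask).sum()
        ious.append(inter / union if union else 0.0)
    return list(thresholds), ious  # plot ious against thresholds; report the peak
</pre>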
</div>
</div>
</div>
<!-- Rules -->
<div class="col-lg-12" id="rules">
<div class="section-title">
<h1>Rules</h1>
</div>
<div class="trend-entry d-flex">
<div class="trend-contents">
<p> This year, we have two strict rules for all competitors.
</p>
<ol>
<li> For training, only the images provided in the training set are permitted.
Competitors may use classification models pre-trained on the training set of
ILSVRC CLS-LOC to initialize the parameters but <strong
class="text-danger">CANNOT</strong> leverage any datasets with
pixel-level annotations.
In particular, for Track 1 and Track 3, only the image-level annotations of the
training images may be leveraged for supervision; bounding-box annotations
are <strong class="text-danger">NOT</strong> permitted.
</li>
<li>We encourage competitors to design elegant and effective models for all
the tracks rather than ensembling multiple models.
Therefore, we require that the total parameter count of the inference model(s) be <strong
class="text-danger">LESS than 150M</strong> (slightly more than two DeepLab v3+ [10] models
using ResNet-101 as the backbone); a self-check is sketched after this list.
Competitors ranked in the top 3 are required to submit their inference code for
verification.
</li>
</ol>
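<p>A minimal sketch of how a submission might self-check the size limit
(PyTorch shown purely for illustration; PaddlePaddle models expose an analogous
<code>parameters()</code> iterator, and the container name <code>combined</code> below is hypothetical):</p>
<pre>
import torch.nn as nn

def num_params(model: nn.Module) -> int:
    # Total parameter count across every module used at inference time.
    return sum(p.numel() for p in model.parameters())

# Wrap all inference-time models in one container before checking, e.g.:
# combined = nn.ModuleList([segmenter, refiner])
# assert num_params(combined) < 150_000_000
</pre>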
</div>
</div>
</div>
<!-- Awards -->
<div class="col-lg-12" id="awards" style="padding-top:80px;margin-top:-80px;">
<div class="section-title">
<h1>Awards</h1>
</div>
<div class="trend-entry d-flex">
<div class="trend-contents">
<p>
This year, Baidu Inc. will provide cash awards to the winners of each track.
Participants are encouraged to submit inference code based on the deep learning platform
<a href="https://github.com/PaddlePaddle/Paddle">PaddlePaddle</a>,
especially its semantic segmentation toolkit <a href="https://github.com/PaddlePaddle/PaddleSeg">PaddleSeg</a>.
<strong class="text-danger">Winners will receive a cash award of USD 2,000 if they use the
PaddlePaddle platform, or USD 500 if another deep learning platform is used.</strong>
</p>
</div>
</div>
</div>
<!-- References -->
<div class="col-lg-12" id="reference">
<div class="section-title">
<h2>References</h2>
</div>
<div class="trend-entry d-flex">
<div class="trend-contents">
<p>[1] George Papandreou, Liang-Chieh Chen, Kevin Murphy, and Alan L. Yuille. Weakly- and semi-supervised learning of a DCNN for semantic image segmentation. In ICCV, 2015.</p>
<p>[2] Yunchao Wei, Huaxin Xiao, Honghui Shi, Zequn Jie, Jiashi Feng, and Thomas S. Huang. Revisiting dilated convolution: A simple approach for weakly- and semi-supervised semantic segmentation. In CVPR, 2018.</p>
<p>[3] Peng-Tao Jiang, Qibin Hou, Yang Cao, Ming-Ming Cheng, Yunchao Wei, and Hong-Kai Xiong. Integral object mining via online attention accumulation. In ICCV, 2019.</p>
<p>[4] Jia Deng, Wei Dong, Richard Socher, Li-Jia Li, Kai Li, and Li Fei-Fei. ImageNet: A large-scale hierarchical image database. In CVPR, 2009.</p>
<p>[5] Bolei Zhou, Hang Zhao, Xavier Puig, Sanja Fidler, Adela Barriuso, and Antonio Torralba. Scene parsing through ADE20K dataset. In CVPR, 2017.</p>
<p>[6] Rui Qian, Yunchao Wei, Honghui Shi, Jiachen Li, Jiaying Liu, and Thomas Huang. Weakly supervised scene parsing with point-based distance metric learning. In AAAI, 2019.</p>
<p>[7] Bolei Zhou, Aditya Khosla, Agata Lapedriza, Aude Oliva, and Antonio Torralba. Learning deep features for discriminative localization. In CVPR, 2016.</p>
<p>[8] Xiaolin Zhang, Yunchao Wei, Jiashi Feng, Yi Yang, and Thomas Huang. Adversarial complementary learning for weakly supervised object localization. In CVPR, 2018.</p>
<p>[9] Xiaolin Zhang, Yunchao Wei, Guoliang Kang, Yi Yang, and Thomas Huang. Self-produced guidance for weakly-supervised object localization. In ECCV, 2018.</p>
<p>[10] Liang-Chieh Chen, Yukun Zhu, George Papandreou, Florian Schroff, and Hartwig Adam. Encoder-decoder with atrous separable convolution for semantic image segmentation. In ECCV, 2018.</p>
</div>
</div>
</div>
</div>
</div>
<div class="col-lg-12" id="challenge2">
<div style="display:inline-block;width:500px;">
<script async="async" src="//rc.rev
olvermaps.com/0/0/7.js?i=2hlmeh3dic1&m=0&c=ff0000&cr1=ffffff&br=19&sx=0"
type="text/javascript"></script>
</div>
</div>
</div>
</div>
<!-- END section -->
<div class="footer">
<div class="container">
<div class="row">
<div class="col-12">
<div class="copyright">
<p>
<!-- Link back to Colorlib can't be removed. Template is licensed under CC BY 3.0. -->
Copyright ©<script>document.write(new Date().getFullYear());</script>
All rights reserved | This template is made with <i aria-hidden="true"
class="icon-heart text-danger"></i> by
<a href="https://colorlib.com" target="_blank">Colorlib</a>
<!-- Link back to Colorlib can't be removed. Template is licensed under CC BY 3.0. -->
</p>
</div>
</div>
</div>
</div>
</div>
</div>
<!-- .site-wrap -->
<!-- loader -->
<div class="show fullscreen" id="loader">
<svg class="circular" height="48px" width="48px">
<circle class="path-bg" cx="24" cy="24" fill="none" r="22" stroke="#eeeeee" stroke-width="4"/>
<circle class="path" cx="24" cy="24" fill="none" r="22" stroke="#ff5e15" stroke-miterlimit="10"
stroke-width="4"/>
</svg>
</div>
<script src="js/jquery-3.3.1.min.js"></script>
<script src="js/jquery-migrate-3.0.1.min.js"></script>
<script src="js/jquery-ui.js"></script>
<script src="js/popper.min.js"></script>
<script src="js/bootstrap.min.js"></script>
<script src="js/owl.carousel.min.js"></script>
<script src="js/jquery.stellar.min.js"></script>
<script src="js/jquery.countdown.min.js"></script>
<script src="js/bootstrap-datepicker.min.js"></script>
<script src="js/jquery.easing.1.3.js"></script>
<script src="js/aos.js"></script>
<script src="js/jquery.fancybox.min.js"></script>
<script src="js/jquery.sticky.js"></script>
<script src="js/jquery.mb.YTPlayer.min.js"></script>
<script src="js/main.js"></script>
</body>
</html>