% SFM.bib
@book{Hartley:2003:MVG:861369,
author = {Hartley, Richard and Zisserman, Andrew},
title = {Multiple View Geometry in Computer Vision},
year = {2003},
isbn = {0521540518},
edition = {2nd},
publisher = {Cambridge University Press},
address = {New York, NY, USA},
}
@book{Szeliski:2010:CVA:1941882,
author = {Szeliski, Richard},
title = {Computer Vision: Algorithms and Applications},
year = {2010},
isbn = {1848829345, 9781848829343},
edition = {1st},
publisher = {Springer-Verlag},
address = {Berlin, Heidelberg},
}
@article{lu_survey_2004,
title = {A survey of motion-parallax-based 3-{D} reconstruction algorithms},
volume = {34},
issn = {1094-6977},
doi = {10.1109/TSMCC.2004.829300},
abstract = {The task of recovering three-dimensional (3-D) geometry from two-dimensional views of a scene is called 3-D reconstruction. It is an extremely active research area in computer vision. There is a large body of 3-D reconstruction algorithms available in the literature. These algorithms are often designed to provide different tradeoffs between speed, accuracy, and practicality. In addition, even the output of various algorithms can be quite different. For example, some algorithms only produce a sparse 3-D reconstruction while others are able to output a dense reconstruction. The selection of the appropriate 3-D reconstruction algorithm relies heavily on the intended application as well as the available resources. The goal of this paper is to review some of the commonly used motion-parallax-based 3-D reconstruction techniques and make clear the assumptions under which they are designed. To do so efficiently, we classify the reviewed reconstruction algorithms into two large categories depending on whether a prior calibration of the camera is required. Under each category, related algorithms are further grouped according to the common properties they share.},
number = {4},
journal = {IEEE Transactions on Systems, Man, and Cybernetics, Part C (Applications and Reviews)},
author = {Lu, Ye and Zhang, J. Z. and Wu, Q. M. J. and Li, Ze-Nian},
month = nov,
year = {2004},
keywords = {Algorithm design and analysis, calibration, Cameras, camera self calibration, computational geometry, Computer vision, Eyes, Geometry, Humans, image classification, Image motion analysis, image reconstruction, motion-parallax-based 3D reconstruction algorithm, Reconstruction algorithms, stereo image processing, stereo vision, Three dimensional displays, triangulation, visual perception, Visual system},
pages = {532--548}
}
@article{10.2307/35316,
ISSN = {00804649},
URL = {http://www.jstor.org/stable/35316},
abstract = {It is shown that from a monocular view of a rigid, textured, curved surface it is possible, in principle, to determine the gradient of the surface at any point, and the motion of the eye relative to it, from the velocity field of the changing retinal image, and its first and second spatial derivatives. The relevant equations are redundant, thus providing a test of the rigidity assumption. They involve, among other observable quantities, the components of shear of the retinal velocity field, suggesting that the visual system may possess specialized channels for computing these components.},
author = {H. C. Longuet-Higgins and K. Prazdny},
journal = {Proceedings of the Royal Society of London. Series B, Biological Sciences},
number = {1173},
pages = {385--397},
publisher = {The Royal Society},
title = {The Interpretation of a Moving Retinal Image},
volume = {208},
year = {1980}
}
@inproceedings{leibe_dynamic_2007,
title = {Dynamic 3D {Scene} {Analysis} from a {Moving} {Vehicle}},
doi = {10.1109/CVPR.2007.383146},
abstract = {In this paper, we present a system that integrates fully automatic scene geometry estimation, 2D object detection, 3D localization, trajectory estimation, and tracking for dynamic scene interpretation from a moving vehicle. Our sole input are two video streams from a calibrated stereo rig on top of a car. From these streams, we estimate structure-from-motion (SfM) and scene geometry in real-time. In parallel, we perform multi-view/multi-category object recognition to detect cars and pedestrians in both camera images. Using the SfM self-localization, 2D object detections are converted to 3D observations, which are accumulated in a world coordinate frame. A subsequent tracking module analyzes the resulting 3D observations to find physically plausible spacetime trajectories. Finally, a global optimization criterion takes object-object interactions into account to arrive at accurate 3D localization and trajectory estimates for both cars and pedestrians. We demonstrate the performance of our integrated system on challenging real-world data showing car passages through crowded city areas.},
booktitle = {2007 {IEEE} {Conference} on {Computer} {Vision} and {Pattern} {Recognition}},
author = {Leibe, B. and Cornelis, N. and Cornelis, K. and {Van Gool}, L.},
month = jun,
year = {2007},
keywords = {2D object detection, 3D localization, 3D scene analysis, automatic scene geometry estimation, Cameras, Geometry, image analysis, Layout, motion estimation, moving vehicle, Object detection, object recognition, Streaming media, structure-from-motion, traffic engineering computing, Trajectory, trajectory estimation, vehicle dynamics, Vehicles},
pages = {1--8},
}
@article{fermuller_geometry_1997,
title = {On the {Geometry} of {Visual} {Correspondence}},
volume = {21},
issn = {0920-5691, 1573-1405},
url = {http://link.springer.com/article/10.1023/A:1007951901001},
doi = {10.1023/A:1007951901001},
abstract = {Image displacement fields—optical flow fields, stereo disparity fields, normal flow fields—due to rigid motion possess a global geometric structure which is independent of the scene in view. Motion vectors of certain lengths and directions are constrained to lie on the imaging surface at particular loci whose location and form depends solely on the 3D motion parameters. If optical flow fields or stereo disparity fields are considered, then equal vectors are shown to lie on conic sections. Similarly, for normal motion fields, equal vectors lie within regions whose boundaries also constitute conics. By studying various properties of these curves and regions and their relationships, a characterization of the structure of rigid motion fields is given. The goal of this paper is to introduce a concept underlying the global structure of image displacement fields. This concept gives rise to various constraints that could form the basis of algorithms for the recovery of visual information from multiple views.},
language = {en},
number = {3},
urldate = {2017-02-17},
journal = {International Journal of Computer Vision},
author = {Fermüller, Cornelia and Aloimonos, Yiannis},
month = feb,
year = {1997},
pages = {223--247},
}
@inproceedings{labayrade_real_2002,
title = {Real time obstacle detection in stereovision on non flat road geometry through ``v-disparity'' representation},
volume = {2},
doi = {10.1109/IVS.2002.1188024},
abstract = {Presents a road obstacle detection method able to cope with uphill and downhill gradients and dynamic pitching of the vehicle. Our approach is based on the construction and investigation of the "v-disparity" image which provides a good representation of the geometric content of the road scene. The advantage of this image is that it provides semi-global matching and is able to perform robust obstacle detection even in the case of partial occlusion or errors committed during the matching process. Furthermore, this detection is performed without any explicit extraction of coherent structures. This paper explains the construction of the "v-disparity" image, its main properties, and the obstacle detection method. The longitudinal profile of the road is estimated and the objects located above the road surface are then extracted as potential obstacles; subsequently, the accurate detection of road obstacles, in particular the position of tyre-road contact points is computed in a precise manner. The whole process is performed at frame rate with a current-day PC. Our experimental findings and comparisons with the results obtained using a flat geometry hypothesis show the benefits of our approach.},
booktitle = {{IEEE} {Intelligent} {Vehicle} {Symposium}, 2002},
author = {Labayrade, R. and Aubert, D. and Tarel, J. P.},
month = jun,
year = {2002},
keywords = {automotive electronics, coherent structures, Computer vision, Data mining, downhill gradients, dynamic vehicle pitching, fast robust method, Geometry, Image edge detection, image representation, lane-markings, Layout, microcomputer applications, nonflat road geometry, Object detection, object extraction, parameter estimation, partial occlusion, real-time obstacle detection, real-time systems, road edges, Road vehicles, Robustness, robust obstacle detection, semi-global matching, stereo image pair, stereo image processing, stereovision, Suspensions, tyre-road contact points, uphill gradients, v-disparity image, v-disparity representation, Vehicle detection, vehicle dynamics},
pages = {646--651 vol.2}
}
@inproceedings{soquet_road_2007,
title = {Road {Segmentation} {Supervised} by an {Extended} {V}-{Disparity} {Algorithm} for {Autonomous} {Navigation}},
doi = {10.1109/IVS.2007.4290108},
abstract = {This paper presents an original approach of road segmentation supervised by stereovision. It deals with free space estimation by stereovision and road detection by color segmentation. The v-disparity algorithm is extended to provide a reliable and precise road profile on all types of roads. The free space is estimated by classifying the pixels of the disparity map. This classification is performed by using the road profile and the u-disparity image. Then a color segmentation is performed on the free space. Here is the supervision. Each stage of the algorithm is presented and experimental results are shown.},
booktitle = {2007 {IEEE} {Intelligent} {Vehicles} {Symposium}},
author = {Soquet, N. and Aubert, D. and Hautiere, N.},
month = jun,
year = {2007},
keywords = {autonomous navigation, Cameras, color segmentation, disparity map, extended v-disparity algorithm, free space estimation, Geometry, image classification, image colour analysis, image segmentation, Laser radar, navigation, Radar detection, Remotely operated vehicles, road profile, roads, road segmentation, Sensor systems, Space vehicles, stereo image processing, stereovision, supervision, u-disparity image},
pages = {160--165}
}
@article{qu_free_2016,
title = {Free {Space} {Estimation} on {Nonflat} {Plane} {Based} on {V}-{Disparity}},
volume = {23},
issn = {1070-9908},
doi = {10.1109/LSP.2016.2609203},
abstract = {Recent progress in free space estimation provides precise segmentation between the ground and obstacles on flat plane. However, it remains challenging on nonflat plane, especially with the varying latitudinal and longitudinal slope or in the case of multiground plane. This letter presents a well-integrated framework for free space estimation in this challenge. Our approach couples an improved V-disparity with a proposed confidence map in a probabilistic fashion. The improved V-disparity representation adopts the sliding windows paradigm and a new V-disparity filter is designed. The proposed confidence map represents the ownership of pixels like occupancy grids, but is built only using the information of the disparity map. The free space estimation is implemented in the confidence map with the global optimization paradigm of dynamic programming. We demonstrate our superior performance compared to two other methods from the literature on a manually labeled dataset from KITTI's object detection benchmark.},
number = {11},
journal = {IEEE Signal Processing Letters},
author = {Qu, L. and Wang, K. and Chen, L. and Gu, Y. and Zhang, X.},
month = nov,
year = {2016},
keywords = {Correlation, dynamic programming, Estimation, free space estimation, global optimization paradigm, ground estimation, image representation, image segmentation, improved V-disparity representation, Object detection, object detection benchmark, obstacle detection, precise segmentation, roads, sliding windows paradigm, stereo vision, Three-dimensional displays, Two dimensional displays, V-disparity, V-disparity filter},
pages = {1617--1621}
}
@article{bouchafa_c-velocity:_2012,
title = {c-{Velocity}: {A} {Flow}-{Cumulating} {Uncalibrated} {Approach} for 3D {Plane} {Detection}},
volume = {97},
issn = {0920-5691, 1573-1405},
shorttitle = {c-{Velocity}},
url = {http://link.springer.com/article/10.1007/s11263-011-0475-6},
doi = {10.1007/s11263-011-0475-6},
abstract = {This paper deals with plane detection from a monocular image sequence without camera calibration or a priori knowledge about the egomotion. Within a framework of driver assistance applications, it is assumed that the 3D scene is a set of 3D planes. In this paper, the vision process considers obstacles, roads and buildings as planar structures. These planes are detected by exploiting iso-velocity curves after optical flow estimation. A Hough Transform-like frame called c-velocity was designed. This paper explains how this c-velocity, defined by analogy to the v-disparity in stereovision, can represent planes, regardless of their orientation and how this representation facilitates plane extraction. Under a translational camera motion, planar surfaces are transformed into specific parabolas of the c-velocity space. The error and robustness analysis of the proposed technique confirms that this cumulative approach is very efficient for making the detection more robust and coping with optical flow imprecision. Moreover, the results suggest that the concept could be generalized to the detection of other parameterized surfaces than planes.},
language = {en},
number = {2},
urldate = {2017-02-17},
journal = {International Journal of Computer Vision},
author = {Bouchafa, Samia and Zavidovique, Bertrand},
month = apr,
year = {2012},
pages = {148--166},
}
@inproceedings{7780814,
author = {J. L. Schönberger and J. M. Frahm},
booktitle = {2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)},
title = {Structure-from-Motion Revisited},
year = {2016},
month = jun,
pages = {4104--4113},
doi = {10.1109/CVPR.2016.445},
keywords = {computer vision;image reconstruction;public domain software;stereo image processing;3D image reconstruction;SfM;incremental reconstruction system;open-source implementation;structure-from-motion;Cameras;Image reconstruction;Image registration;Internet;Pipelines;Robustness;Transmission line matrix methods}
}
@article{Ozyesil_voroninski_basri_singer_2017,
title = {A survey of structure from motion},
volume = {26},
doi = {10.1017/S096249291700006X},
journal = {Acta Numerica},
publisher = {Cambridge University Press},
author = {Özyeşil, Onur and Voroninski, Vladislav and Basri, Ronen and Singer, Amit},
year = {2017},
pages = {305--364}
}
%------------------------------- Point matching
@article{luong_camera_1997,
title = {Camera {Calibration}, {Scene} {Motion} and {Structure} recovery from point correspondences and fundamental matrices},
volume = {22},
abstract = {We address the problem of estimating three-dimensional motion, and structure from motion with an uncalibrated moving camera. We show that point correspondences between three images, and the fundamental matrices computed from these point correspondences, are sufficient to recover the internal orientation of the camera (its calibration), the motion parameters, and to compute coherent perspective projection matrices which enable us to reconstruct 3-D structure up to a similarity. In contrast with other methods, no calibration object with a known 3-D shape is needed, and no limitations are put upon the unknown motions to be performed or the parameters to be recovered, as long as they define a projective camera. The theory of the method, which is based on the constraint that the observed points are part of a static scene, thus allowing us to link the intrinsic parameters and the fundamental matrix via the absolute conic, is first detailed. Several algorithms are then presented, and their perfo...},
journal = {International Journal of Computer Vision},
author = {Luong, Q.-T. and Faugeras, O. D.},
year = {1997},
pages = {261--289},
}
@inproceedings{1232642,
author = {T. Sato and M. Kanbara and N. Yokoya},
booktitle = {Proceedings of the IEEE International Conference on Multisensor Fusion and Integration for Intelligent Systems (MFI2003)},
title = {Outdoor scene reconstruction from multiple image sequences captured by a hand-held video camera},
year = {2003},
month = jul,
pages = {113--118},
doi = {10.1109/MFI-2003.2003.1232642},
keywords = {augmented reality;image recognition;image reconstruction;image sequences;stereo image processing;video cameras;video signal processing;3D model;automatic 3D reconstruction;extended multi-baseline stereo;extrinsic camera parameter estimation;hand-held video camera;mixed reality;multiple image sequences;navigation;object recognition;outdoor scene reconstruction;scenic simulation;stereo method;three-dimensional model;voxel voting;Cameras;Costs;Image reconstruction;Image sequences;Layout;Navigation;Object recognition;Parameter estimation;Reconstruction algorithms;Virtual reality}
}
%---------------------------------- Ego-motion estimation
@inproceedings{tian_comparison_1996,
address = {Washington, DC, USA},
series = {{CVPR} '96},
title = {Comparison of {Approaches} to {Egomotion} {Computation}},
isbn = {978-0-8186-7258-3},
url = {http://dl.acm.org/citation.cfm?id=794190.794594},
urldate = {2017-02-17},
booktitle = {Proceedings of the 1996 {Conference} on {Computer} {Vision} and {Pattern} {Recognition} ({CVPR} '96)},
publisher = {IEEE Computer Society},
author = {Tian, Tina Y. and Tomasi, Carlo and Heeger, David J.},
year = {1996},
pages = {315--}
}
@inproceedings{898370,
author = {G. P. Stein and O. Mano and A. Shashua},
booktitle = {Proceedings of the IEEE Intelligent Vehicles Symposium 2000 (Cat. No.00TH8511)},
title = {A robust method for computing vehicle ego-motion},
year = {2000},
pages = {362--368},
doi = {10.1109/IVS.2000.898370},
keywords = {automated highways;computer vision;image motion analysis;image sequences;matrix algebra;parameter estimation;probability;road vehicles;cluttered scenes;direct methods;glare;global probability function;moving objects;probability distribution matrices;rain;rear view mirror;robust method;vehicle ego-motion;Cameras;Computer vision;Layout;Mechanical sensors;Mirrors;Motion measurement;Rain;Road vehicles;Robustness;Testing}
}
@inproceedings{azuma_egomotion_2010,
title = {Egomotion estimation using planar and non-planar constraints},
doi = {10.1109/IVS.2010.5548117},
abstract = {There are two major approaches for estimating camera motion (egomotion) given an image sequence. Each approach has its own strengths and weaknesses. One approach is the feature based methods. In this approach the point feature correspondences are taken as the input. Since initially the depths of point features are unknown, the egomotion is estimated by the depth independent epipolar constraints on the point feature correspondences. This approach is robust in practice, but is relatively limited in accuracy since it exploits no structure assumption, such as planarity. The other approach, termed the direct method, has the advantage in its accuracy. In this method, the egomotion is estimated as the parameters of a homography by directly aligning the planar portion of two images. The direct method may be preferable in the cases with known planes that are persistent in the view. The on-board camera system for ground vehicles is a representative example. Despite the potential accuracy, the direct method fails when the plane lacks proper texture. We propose an egomotion estimation method that is based on both the homographic constraint on a planar region, and on the epipolar constraint on generally non-planar regions, so that both kinds of visual cues contribute to the estimation. We observe that the method improves the egomotion estimation in robustness while retaining the comparable accuracy to the direct method.},
booktitle = {2010 {IEEE} {Intelligent} {Vehicles} {Symposium}},
author = {Azuma, T. and Sugimoto, S. and Okutomi, M.},
month = jun,
year = {2010},
keywords = {camera motion estimation, depth independent epipolar constraints, egomotion estimation, ground vehicles, homographic constraint, image sensors, image sequences, Intelligent vehicles, Land vehicles, Layout, motion estimation, nonplanar constraints, on-board camera system, parameter estimation, planar constraints, Robustness, Simultaneous localization and mapping, Smart cameras, traffic engineering computing, USA Councils},
pages = {855--862},
}
@misc{noauthor_determining_nodate,
title = {Determining the {Ego}-{Motion} of an {Uncalibrated} {Camera} from {Instantaneous} {Optical} {Flow}},
url = {https://www.scribd.com/document/32924542/DETERMINING-THE-EGO-MOTION-OF-AN-UNCALIBRATED-CAMERA-FROM-INSTANTANEOUS-OPTICAL-FLOW},
urldate = {2017-02-17}
}
%--------------------------------------------- Unclassified
@article{cesic_radar_2016,
title = {Radar and stereo vision fusion for multitarget tracking on the special {Euclidean} group},
volume = {83},
issn = {0921-8890},
url = {http://www.sciencedirect.com/science/article/pii/S0921889015303286},
doi = {10.1016/j.robot.2016.05.001},
abstract = {Reliable scene analysis, under varying conditions, is an essential task in nearly any assistance or autonomous system application, and advanced driver assistance systems (ADAS) are no exception. ADAS commonly involve adaptive cruise control, collision avoidance, lane change assistance, traffic sign recognition, and parking assistance—with the ultimate goal of producing a fully autonomous vehicle. The present paper addresses detection and tracking of moving objects within the context of ADAS. We use a multisensor setup consisting of a radar and a stereo camera mounted on top of a vehicle. We propose to model the sensors uncertainty in polar coordinates on Lie Groups and perform the objects state filtering on Lie groups, specifically, on the product of two special Euclidean groups, i.e., $SE(2)^2$. To this end, we derive the designed filter within the framework of the extended Kalman filter on Lie groups. We assert that the proposed approach results with more accurate uncertainty modeling, since used sensors exhibit contrasting measurement uncertainty characteristics and the predicted target motions result with banana-shaped uncertainty contours. We believe that accurate uncertainty modeling is an important ADAS topic, especially when safety applications are concerned. To solve the multitarget tracking problem, we use the joint integrated probabilistic data association filter and present necessary modifications in order to use it on Lie groups. The proposed approach is tested on a real-world dataset collected with the described multisensor setup in urban traffic scenarios.},
urldate = {2017-04-26},
journal = {Robotics and Autonomous Systems},
author = {Ćesić, Josip and Marković, Ivan and Cvišić, Igor and Petrović, Ivan},
month = sep,
year = {2016},
keywords = {advanced driver assistance systems, Detection and tracking of moving objects, Joint integrated probabilistic data association, Radar, Stereo camera},
pages = {338--348},
}
@misc{noauthor_mpi_nodate,
title = {{MPI} {Sintel} {Dataset}},
url = {http://sintel.is.tue.mpg.de/},
urldate = {2017-01-11},
}
@misc{noauthor_deep_nodate,
title = {Deep {Discrete} {Flow}},
howpublished = {Perceiving Systems, Max Planck Institute for Intelligent Systems},
url = {https://ps.is.tuebingen.mpg.de/publications/guney2016accv},
urldate = {2017-01-11}
}
@article{Faugeras95stratificationof,
author = {Olivier Faugeras},
title = {Stratification of 3-D vision: projective, affine, and metric representations},
journal = {Journal of the Optical Society of America A},
year = {1995},
volume = {12},
pages = {465--484}
}
@inproceedings{icinco17,
author = {Tan Khoa Mai and Michèle Gouiffès and Samia Bouchafa},
title = {Exploiting Optical Flow Field Properties for 3D Structure Identification},
booktitle = {Proceedings of the 14th International Conference on Informatics in Control, Automation and Robotics - Volume 2: ICINCO},
year = {2017},
pages = {459--464},
publisher = {SciTePress},
organization = {INSTICC},
doi = {10.5220/0006474504590464},
isbn = {978-989-758-264-6}
}
@article{DBLP:journals/corr/OzyesilVBS17,
author = {Onur {\"{O}}zyesil and
Vladislav Voroninski and
Ronen Basri and
Amit Singer},
title = {A Survey on Structure from Motion},
journal = {CoRR},
volume = {abs/1701.08493},
year = {2017},
url = {http://arxiv.org/abs/1701.08493},
archivePrefix = {arXiv},
eprint = {1701.08493},
timestamp = {Wed, 07 Jun 2017 14:40:49 +0200},
biburl = {https://dblp.org/rec/bib/journals/corr/OzyesilVBS17},
bibsource = {dblp computer science bibliography, https://dblp.org}
}