@inproceedings{ea600973d2b64a9f9241c9af46fd0ea3,
title = "Bubblenets: Learning to select the guidance frame in video object segmentation by deep sorting frames",
abstract = "Semi-supervised video object segmentation has made significant progress on real and challenging videos in recent years. The current paradigm for segmentation methods and benchmark datasets is to segment objects in video provided a single annotation in the first frame. However, we find that segmentation performance across the entire video varies dramatically when selecting an alternative frame for annotation. This paper addresses the problem of learning to suggest the single best frame across the video for user annotation-this is, in fact, never the first frame of video. We achieve this by introducing BubbleNets, a novel deep sorting network that learns to select frames using a performance-based loss function that enables the conversion of expansive amounts of training examples from already existing datasets. Using BubbleNets, we are able to achieve an 11% relative improvement in segmentation performance on the DAVIS benchmark without any changes to the underlying method of segmentation.",
keywords = "Datasets and Evaluation, Deep Learning, Grouping and Shape, Motion and Tracking, Segmentation",
author = "Griffin, {Brent A.} and Corso, {Jason J.}",
note = "Publisher Copyright: {\textcopyright} 2019 IEEE.; 32nd IEEE/CVF Conference on Computer Vision and Pattern Recognition, CVPR 2019 ; Conference date: 16-06-2019 Through 20-06-2019",
year = "2019",
month = jun,
doi = "10.1109/CVPR.2019.00912",
language = "English",
series = "Proceedings of the IEEE Computer Society Conference on Computer Vision and Pattern Recognition",
pages = "8906--8915",
booktitle = "Proceedings - 2019 IEEE/CVF Conference on Computer Vision and Pattern Recognition, CVPR 2019",
}
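
As a reading of the abstract's method, the frame-selection step can be sketched as a learned pairwise comparison inside a bubble-sort-style pass: a network predicts which of two candidate frames would, if annotated, yield better downstream segmentation, and one sweep over the video bubbles the predicted-best frame to the front. The sketch below is a minimal illustration assuming PyTorch; the names PairwiseComparator and select_annotation_frame are hypothetical, and this is not the authors' implementation (the published network also conditions its comparisons on additional reference frames from the video).

import torch
import torch.nn as nn

class PairwiseComparator(nn.Module):
    # Hypothetical stand-in for the deep comparison network: maps the
    # features of two candidate frames to a signed score whose sign says
    # which frame is predicted to yield better segmentation if annotated.
    def __init__(self, feat_dim: int = 128):
        super().__init__()
        self.mlp = nn.Sequential(
            nn.Linear(2 * feat_dim, 64),
            nn.ReLU(),
            nn.Linear(64, 1),  # > 0 means frame_a predicted better than frame_b
        )

    def forward(self, feat_a: torch.Tensor, feat_b: torch.Tensor) -> torch.Tensor:
        return self.mlp(torch.cat([feat_a, feat_b], dim=-1))

def select_annotation_frame(features: torch.Tensor, net: PairwiseComparator) -> int:
    # One bubble-sort pass from the last frame toward the first: whenever the
    # lower frame is predicted better, swap it upward, so the predicted-best
    # frame ends up at position 0.
    # features: (num_frames, feat_dim) tensor of per-frame descriptors.
    order = list(range(features.shape[0]))
    with torch.no_grad():
        for i in range(len(order) - 1, 0, -1):
            if net(features[order[i]], features[order[i - 1]]).item() > 0:
                order[i], order[i - 1] = order[i - 1], order[i]
    return order[0]  # index of the frame to suggest for user annotation

# Example: pick a frame from a 60-frame video using placeholder features.
feats = torch.randn(60, 128)
best = select_annotation_frame(feats, PairwiseComparator())

One reading of the abstract's "expansive amounts of training examples": if the annotation-induced segmentation performance of each of n frames is measured once, every unordered pair of frames yields a labeled comparison, so n frames produce n(n-1)/2 training pairs for the performance-based loss.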