% Beecks, Christian
% Encoding: utf-8

@Article{Schoeffmann2019f,
  author   = {Rossetto, Luca and Berns, Fabian and Schöffmann, Klaus and Awad, George M. and Beecks, Christian},
  journal  = {ACM SIGMM Records},
  title    = {The {V3C1} Dataset: Advancing the State of the Art in Video Retrieval},
  year     = {2019},
  month    = jun,
  number   = {2},
  volume   = {11},
  abstract = {Standardized datasets are of vital importance in multimedia research, as they form the basis for reproducible experiments and evaluations. In the area of video retrieval, widely used datasets such as the IACC [5], which has formed the basis for the TRECVID Ad-Hoc Video Search Task and other retrieval-related challenges, have started to show their age. For example, IACC is no longer representative of video content as it is found in the wild [7]. This is illustrated by the figures below, showing the distribution of video age and duration across various datasets in comparison with a sample drawn from Vimeo and Youtube.},
  url      = {https://records.sigmm.org/2019/07/06/the-v3c1-dataset-advancing-the-state-of-the-art-in-video-retrieval/},
}

@InProceedings{Schoeffmann2019c,
  author    = {Berns, Fabian and Rossetto, Luca and Schöffmann, Klaus and Beecks, Christian and Awad, George M.},
  booktitle = {Proceedings of the ACM International Conference on Multimedia Retrieval},
  title     = {{V3C1} Dataset: An Evaluation of Content Characteristics},
  year      = {2019},
  address   = {New York, NY},
  month     = jun,
  pages     = {334--338},
  publisher = {ACM},
  doi       = {10.1145/3323873.3325051},
  url       = {https://dl.acm.org/doi/10.1145/3323873.3325051},
}

@InProceedings{Leibetseder2019b,
  author    = {Leibetseder, Andreas and Münzer, Bernd and Primus, Manfred Jürgen and Kletz, Sabrina and Schöffmann, Klaus and Berns, Fabian and Beecks, Christian},
  booktitle = {Proceedings of the ACM Workshop on Lifelog Search Challenge (LSC 19)},
  title     = {{lifeXplore} at the Lifelog Search Challenge 2019},
  year      = {2019},
  address   = {New York, NY},
  month     = jun,
  pages     = {13--17},
  publisher = {ACM},
  doi       = {10.1145/3326460.3329157},
  url       = {https://www.researchgate.net/publication/333690590_lifeXplore_at_the_Lifelog_Search_Challenge_2019},
}

@Article{Schoeffmann2017MTAPHusslein,
  author    = {Schoeffmann, Klaus and Husslein, Heinrich and Kletz, Sabrina and Petscharnig, Stefan and Münzer, Bernd and Beecks, Christian},
  journal   = {Multimedia Tools and Applications},
  title     = {Video Retrieval in Laparoscopic Video Recordings with Dynamic Content Descriptors},
  year      = {2017},
  month     = nov,
  pages     = {18},
  address   = {USA},
  language  = {EN},
  publisher = {Springer US},
}

@InProceedings{Muenzer2017a,
  author    = {Münzer, Bernd and Primus, Manfred Jürgen and Hudelist, Marco and Beecks, Christian and Hürst, Wolfgang and Schoeffmann, Klaus},
  booktitle = {2017 IEEE International Conference on Multimedia \& Expo Workshops (ICMEW)},
  title     = {When Content-Based Video Retrieval and Human Computation Unite: Towards Effective Collaborative Video Search},
  year      = {2017},
  address   = {Hongkong, China},
  editor    = {Chan, Yui-Lam and Rahardja, Susanto},
  month     = jul,
  pages     = {214--219},
  publisher = {IEEE},
  abstract  = {Although content-based retrieval methods achieved very good results for large-scale video collections in recent years, they still suffer from various deficiencies. On the other hand, plain human perception is a very powerful ability that still outperforms automatic methods in appropriate settings, but is very limited when it comes to large-scale data collections. In this paper, we propose to take the best from both worlds by combining an advanced content-based retrieval system featuring various query modalities with a straightforward mobile tool that is optimized for fast human perception in a sequential manner. In this collaborative system with multiple users, both subsystems benefit from each other: The results of issued queries are used to re-rank the video list on the tablet tool, which in turn notifies the retrieval tool about parts of the dataset that have already been inspected in detail and can be omitted in subsequent queries. The preliminary experiments show promising results in terms of search performance.},
  doi       = {10.1109/ICMEW.2017.8026262},
  language  = {EN},
  location  = {Hongkong},
  talkdate  = {2017.07.10},
  talktype  = {registered},
}

@InProceedings{Beecks2017,
  author    = {Beecks, Christian and Kletz, Sabrina and Schoeffmann, Klaus},
  booktitle = {Proceedings of the Third IEEE International Conference on Multimedia Big Data (BigMM 2017)},
  title     = {Large-Scale Endoscopic Image and Video Linking with Gradient-Based Signatures},
  year      = {2017},
  address   = {Laguna Hills, California, USA},
  editor    = {Chen, Shu-Ching and Sheu, Philip Chen-Yu},
  month     = apr,
  pages     = {5},
  publisher = {IEEE},
  series    = {BigMM},
  abstract  = {Given a large-scale video archive of surgical interventions and a medical image showing a specific moment of an operation, how to find the most image-related videos efficiently without the utilization of additional semantic characteristics? In this paper, we investigate a novel content-based approach of linking medical images with relevant video segments arising from endoscopic procedures. We propose to approximate the video segments' content-based features by gradient-based signatures and to index these signatures with the Minkowski distance in order to determine the most query-like video segments efficiently. We benchmark our approach on a large endoscopic image and video archive and show that our approach achieves a significant improvement in efficiency in comparison to the state-of-the-art while maintaining high accuracy.},
  doi       = {10.1109/BigMM.2017.44},
  keywords  = {feature signatures, laparoscopic video, medical endoscopy, motion analysis, similarity search, video retrieval},
  language  = {EN},
  location  = {Laguna Hills, California, USA},
  talkdate  = {2017.04.19},
  talktype  = {registered},
  url       = {http://ieeexplore.ieee.org/document/7966709/},
}

@Comment{NOTE(review): hudelist2016collaborative and HudelistMMM2016 describe the same MMM 2016 paper under two keys. Both keys are kept so existing citations do not break; consolidate once all documents cite a single key.}

@InProceedings{hudelist2016collaborative,
  author       = {Hudelist, Marco A. and Cobârzan, Claudiu and Beecks, Christian and van de Werken, Rob and Kletz, Sabrina and Hürst, Wolfgang and Schoeffmann, Klaus},
  booktitle    = {International Conference on Multimedia Modeling},
  title        = {Collaborative Video Search Combining Video Retrieval with Human-Based Visual Inspection},
  year         = {2016},
  address      = {Cham, Switzerland},
  editor       = {Tian, Qi and Sebe, Nicu and Qi, Guo-Jun and Huet, Benoit and Hong, Richang and Liu, Xueliang},
  month        = jan,
  organization = {Springer},
  pages        = {400--405},
  publisher    = {Springer International Publishing},
  language     = {EN},
  location     = {Miami, FL, USA},
  talkdate     = {2016.01.05},
  talktype     = {registered},
}

@InProceedings{SchoeffmannSPIE2016,
  author    = {Schoeffmann, Klaus and Beecks, Christian and Lux, Mathias and Uysal, Merih Seran and Seidl, Thomas},
  booktitle = {Proceedings of SPIE 9786, Medical Imaging 2016: Image-Guided Procedures, Robotic Interventions, and Modeling},
  title     = {Content-Based Retrieval in Videos from Laparoscopic Surgery},
  year      = {2016},
  address   = {Bellingham, WA, USA},
  editor    = {Webster, Robert and Yaniv, Ziv},
  month     = feb,
  pages     = {97861V--97861V10},
  publisher = {SPIE},
  language  = {EN},
  location  = {San Diego, CA, USA},
  talkdate  = {2016.02.27},
  talktype  = {registered},
}

@InProceedings{Hurst:2016:NTC:2964284.2973824,
  author    = {Hürst, Wolfgang and Ip Vai Ching, Algernon and Hudelist, Marco and Primus, Manfred and Schoeffmann, Klaus and Beecks, Christian},
  booktitle = {Proceedings of the 2016 ACM on Multimedia Conference},
  title     = {A New Tool for Collaborative Video Search via Content-Based Retrieval and Visual Inspection},
  year      = {2016},
  address   = {New York, NY, USA},
  editor    = {Hanjalic, Alan and Snoek, Cees and Worring, Marcel},
  month     = oct,
  pages     = {731--732},
  publisher = {ACM},
  series    = {MM '16},
  doi       = {10.1145/2964284.2973824},
  keywords  = {collaborative search, feature signatures, human-computer interaction, video retrieval},
  language  = {EN},
  location  = {Amsterdam, The Netherlands},
  talkdate  = {2016.10.16},
  talktype  = {registered},
  url       = {http://doi.acm.org/10.1145/2964284.2973824},
}

@InProceedings{HudelistMMM2016,
  author    = {Hudelist, Marco Andrea and Cobârzan, Claudiu and Beecks, Christian and van de Werken, Rob and Kletz, Sabrina and Hürst, Wolfgang and Schoeffmann, Klaus},
  booktitle = {Multimedia Modeling},
  title     = {Collaborative Video Search Combining Video Retrieval with Human-Based Visual Inspection},
  year      = {2016},
  address   = {Cham, Switzerland},
  editor    = {Tian, Qi and Sebe, Nicu and Qi, Guo-Jun and Huet, Benoit and Hong, Richang and Liu, Xueliang},
  month     = jan,
  pages     = {400--405},
  publisher = {Springer International Publishing},
  series    = {Lecture Notes in Computer Science},
  abstract  = {We propose a novel video browsing approach that aims at optimally integrating traditional, machine-based retrieval methods with an interface design optimized for human browsing performance. Advanced video retrieval and filtering (e.g., via color and motion signatures, and visual concepts) on a desktop is combined with a storyboard-based interface design on a tablet optimized for quick, brute-force visual inspection. Both modules run independently but exchange information to significantly minimize the data for visual inspection and compensate mistakes made by the search algorithms.},
  isbn      = {978-3-319-27673-1},
  language  = {EN},
  location  = {Miami, Florida, USA},
  subtitle  = {22nd International Conference, MMM 2016, Miami, FL, USA, January 4-6, 2016, Proceedings, Part II},
  talkdate  = {2016.01.05},
  talktype  = {poster},
  url       = {http://link.springer.com/chapter/10.1007/978-3-319-27674-8_40},
}

@InProceedings{BeecksEndoRetrieval2015,
  author    = {Beecks, Christian and Schoeffmann, Klaus and Lux, Mathias and Uysal, Merih Seran and Seidl, Thomas},
  booktitle = {Proceedings of the IEEE International Symposium on Multimedia 2015 (ISM 2015)},
  title     = {Endoscopic Video Retrieval: A Signature-Based Approach for Linking Endoscopic Images with Video Segments},
  year      = {2015},
  address   = {Los Alamitos, CA},
  editor    = {Del Bimbo, Alberto and Chen, Shu-Ching and Wang, Haohong and Yu, Heather and Zimmermann, Roger},
  month     = dec,
  pages     = {1--6},
  publisher = {IEEE},
  abstract  = {In the field of medical endoscopy more and more surgeons are changing over to record and store videos of their endoscopic procedures, such as surgeries and examinations, in long-term video archives. In order to support surgeons in accessing these endoscopic video archives in a content-based way, we propose a simple yet effective signature-based approach: the Signature Matching Distance based on adaptive-binning feature signatures. The proposed distance-based similarity model facilitates an adaptive representation of the visual properties of endoscopic images and allows for matching these properties efficiently. We conduct an extensive performance analysis with respect to the task of linking specific endoscopic images with video segments and show the high efficacy of our approach. We are able to link more than 88% of the endoscopic images to their corresponding correct video segments, which improves the current state of the art by one order of magnitude.},
  language  = {EN},
  location  = {Miami, Florida, USA},
  talkdate  = {2015.12.14},
  talktype  = {registered},
}

@InProceedings{Beecks2011,
  author    = {Beecks, Christian and Skopal, Thomas and Schoeffmann, Klaus and Seidl, Thomas},
  booktitle = {Proceedings of the 5th International Workshop on Ranking in Databases (DBRank 2011)},
  title     = {Towards Large-Scale Multimedia Exploration},
  year      = {2011},
  address   = {Seattle, WA, USA},
  editor    = {Das, Gautam and Hristidis, Vagelis and Ilyas, Ihab},
  month     = aug,
  pages     = {31--33},
  publisher = {VLDB},
  abstract  = {With the advent of the information age and the increasing size and complexity of multimedia databases, the question of how to support users in getting access and insight into those large databases has become immensely important. While traditional content-based retrieval approaches provide query-driven access under the assumption that the users' information needs are clearly specified, modern content-based exploration approaches support users in browsing and navigating through multimedia databases in the case of imprecise or even unknown information needs. By means of interactive graphical user interfaces, exploration approaches offer a convenient and intuitive access to unknown multimedia databases which becomes even more important with the arrival of powerful mobile devices. In this paper, we formulate challenges of user-centric multimedia exploration with a particular focus on large-scale multimedia databases. We claim that adaptability and scalability should be researched on both conceptual as well as technical level in order to model multimedia exploration approaches which are able to cope with millions of multimedia objects in near-realtime.},
  language  = {EN},
  location  = {Seattle, WA, USA},
  pdf       = {https://www.itec.aau.at/bib/files/Towards%20LargeScale_2011_KSch.pdf},
  talkdate  = {2011.08.29},
  talktype  = {registered},
}