% Year: 2012 % Encoding: utf-8 @InProceedings{lux2012did, author = {Lux, Mathias and Huber, Jochen}, booktitle = {Image Analysis for Multimedia Interactive Services (WIAMIS), 2012 13th International Workshop on}, title = {Why did you record this video? An exploratory study on user intentions for video production}, year = {2012}, address = {Los Alamitos, CA, USA}, editor = {O'Connor, Noel and Daras, Petros and Pereira, Fernando}, month = may, organization = {IEEE}, pages = {1--4}, publisher = {IEEE}, abstract = {Why do people record videos and share them? While the question seems to be simple, user intentions have not yet been investigated for video production and sharing. A general taxonomy would lead to adapted information systems and multimedia interfaces tailored to the users' intentions. We contribute (1) an exploratory user study with 20 participants, examining the various facets of user intentions for video production and sharing in detail and (2) a novel set of user intention clusters for video production, grounded empirically in our study results. We further reflect existing work in specialized domains (i.e. video blogging and mobile phone cameras) and show that prevailing models used in other multimedia fields (e.g. 
photography) cannot be used as-is to reason about video recording and sharing intentions.}, doi = {10.1109/WIAMIS.2012.6226758}, isbn10 = {978-1-4673-0789-5}, isbn13 = {978-1-4673-0791-8}, issn = {2158-5873}, keywords = {Communication, Networking \& Broadcasting ; Components, Circuits, Devices \& Systems ; Computing \& Processing (Hardware/Software) ; Signal Processing \& Analysis}, language = {EN}, location = {Dublin, Ireland}, talkdate = {2012.05.25}, talktype = {registered} } @Article{chatzichristofis2012image, author = {Chatzichristofis, Savvas and Marques, Oge and Lux, Mathias and Boutalis, Yiannis}, journal = {Cellular Automata}, title = {Image Encryption Using the Recursive Attributes of the {eXclusive-OR} Filter on Cellular Automata}, year = {2012}, month = jan, pages = {340--350}, abstract = {A novel visual multimedia content encryption method based on cellular automata (CA) is presented in this paper. The proposed algorithm is based on an attribute of the eXclusive-OR (XOR) logic gate, according to which, its application to a square-sized CA has the ability to reconstruct the original content of a CA after a preset number of iterations. The resulted encrypted image is a lossless representation of the original/plaintext image, i.e. there is no loss of either resolution or contrast. Experimental results indicate that the encrypted image does not contain any statistical information able to reveal the original image.}, address = {Berlin, Heidelberg}, doi = {10.1007/978-3-642-33350-7_35}, isbn10 = {978-3-642-33349-1}, isbn13 = {978-3-642-33350-7}, language = {EN}, publisher = {Springer}, subtitle = {10th International Conference on Cellular Automata for Research and Industry, ACRI 2012, Santorini Island, Greece, September 24-27, 2012. 
Proceedings} } @InCollection{Waltl2012_UserCentricraey, author = {Waltl, Markus and Raffelsberger, Christian and Timmerer, Christian and Hellwagner, Hermann}, booktitle = {User Centric Media}, publisher = {Springer Verlag}, title = {Metadata-Based Content Management and Sharing System for Improved User Experience}, year = {2012}, address = {Berlin, Heidelberg, New York}, editor = {Alvarez, Federico and Costa, Cristina}, month = dec, pages = {132--140}, series = {Lecture Notes of the Institute for Computer Sciences, Social Informatics and Telecommunications Engineering}, volume = {60}, doi = {10.1007/978-3-642-35145-7_17}, keywords = {Metadata; UPnP A/V; Content Management; Content Sharing; MPEG-V; Sensory Effects; User Experience}, language = {EN}, url = {http://dx.doi.org/10.1007/978-3-642-35145-7_17} } @InProceedings{Waltl2012_QoMEX2012_SensoryEffects, author = {Waltl, Markus and Timmerer, Christian and Rainer, Benjamin and Hellwagner, Hermann}, booktitle = {Proceedings of the 4th International Workshop on Quality of Multimedia Experience (QoMEX'12)}, title = {Sensory Effect Dataset and Test Setups}, year = {2012}, address = {Yarra Valley, Australia}, editor = {Burnett, Ian and Wu, Henry}, month = jul, pages = {115--120}, publisher = {IEEE}, abstract = {Additional constituents for the representation of multimedia content gained more and more attention. For example, the amount of cinemas equipped with additional devices (e.g., ambient light, vibrating seats, wind generators, water sprayers, heater/coolers) that stimulate senses going beyond audition and vision increases. On the content side the MPEG-V standard specifies – among others – Sensory Effect Metadata (SEM) which provides means to describe sensory effects such as wind, vibration, light, etc. to be attached to audio-visual content and, thus, offering an enhanced and immersive experience for the user. 
However, there is a lack of a common set of test content allowing for various subjective user studies and verification across different test sites. In this paper we provide our dataset comprising a number of videos from different genres enriched with MPEG-V compliant Sensory Effect Metadata descriptions. Furthermore, we describe possible test setups using off-the-shelf hardware for conducting subjective quality assessments.}, isbn13 = {-}, keywords = {Sensory Experience, Sensory Effects, MPEG-V, Dataset, Test Environment}, language = {EN}, location = {Yarra Valley, Australia}, pdf = {https://www.itec.aau.at/bib/files/QoMEX2012_Dataset.pdf}, talkdate = {2012.07.05}, talktype = {registered}, url = {http://www.qomex2012.org} } @Article{Waltl2012_MTAP, author = {Waltl, Markus and Timmerer, Christian and Rainer, Benjamin and Hellwagner, Hermann}, journal = {Multimedia Tools and Applications}, title = {Sensory Effects for Ambient Experiences in the World Wide Web}, year = {2012}, month = {may}, number = {-}, pages = {1--20}, volume = {-}, abstract = {More and more content in various formats becomes available via the WorldWideWeb (WWW). Currently availableWeb browsers are able to access and interpret these contents (i.e., Web videos, text, image, and audio). These contents stimulate only senses like audition or vision. Recently, it has been proposed to stimulate also other senses while consuming multimedia content, through so-called sensory effects. These sensory effects aim to enhance the ambient experience by providing effects such as light, wind, vibration, etc. The effects are represented as Sensory Effect Metadata (SEM) description which is associated to multimedia content and is rendered on devices like fans, vibration chairs, or lamps. In this paper we present two subjective quality assessments which comprise sensory effects, such as light, in the area of the WWW and their results achieved. 
The first assessment evaluates the influence of light effects on the Quality of Experience (QoE). The second assessment measures the impact of different settings for the color calculation on the viewing experience. Furthermore, we describe a Web browser plug-in for Mozilla Firefox which is able to render such sensory effects that are provided via the WWW.}, address = {Berlin, Heidelberg, New York}, doi = {10.1007/s11042-012-1099-8}, keywords = {World Wide Web, MPEG-V, Subjective quality assessment, Sensory effects, Quality of multimedia experience}, language = {EN}, publisher = {Springer Verlag}, url = {http://dx.doi.org/10.1007/s11042-012-1099-8} } @InProceedings{Waltl2012_MMM, author = {Waltl, Markus and Rainer, Benjamin and Timmerer, Christian and Hellwagner, Hermann}, booktitle = {Advances in Multimedia Modeling}, title = {Enhancing the User Experience with the Sensory Effect Media Player and AmbientLib}, year = {2012}, address = {Berlin, Heidelberg, New York}, editor = {Schoeffmann, Klaus and Merialdo, Bernard and Hauptmann, Alexander and Ngo, Chong-Wah and Andreopoulos, Yiannis and Breiteneder, Christian}, month = {jan}, pages = {624--626}, publisher = {Springer}, series = {LNCS 7131}, abstract = {Multimedia content is increasingly used in every area of our life. Still, each type of content only stimulates the visual and/or the hearing system. Thus, the user experience depends only on those two stimuli. In this paper we introduce a standard which offers the possibility to add additional effects to multimedia content. 
Furthermore, we present a multimedia player and a Web browser plug-in which uses this standard to stimulate further senses by using additional sensory effects (i.e., wind, vibration, and light) to enhance the user experience resulting in a unique, worthwhile sensory experience.}, keywords = {MPEG-V, User Experience, Sensory Experience, Media Player, Ambient, World Wide Web}, language = {EN}, location = {Klagenfurt, Austria}, pdf = {https://www.itec.aau.at/bib/files/mwbrcthh_mmm2012.pdf}, talkdate = {2012.01.05}, talktype = {poster} } @InProceedings{Waltl2012_ACMMM_OSSC, author = {Waltl, Markus and Rainer, Benjamin and Timmerer, Christian and Hellwagner, Hermann}, booktitle = {Proceedings of the 20th ACM Multimedia (MM'12)}, title = {A Toolset for the Authoring, Simulation, and Rendering of Sensory Experiences}, year = {2012}, address = {Nara, Japan}, editor = {Babaguchi, Noboru and Aizawa, Kiyoharu and Smith, John}, month = {oct}, pages = {1469-1472}, publisher = {ACM}, abstract = {This paper describes a toolset for the authoring, simulating, and rendering of multimedia content annotated with Sensory Effect Metadata (SEM) descriptions as specified in Part 3 of the MPEG V standard. This part of MPEG-V standardizes the description of sensory effects (e.g., light, wind) in order to be rendered on sensory devices (e.g., fans, vibration chairs) aiming at generating a sensory experience stimulating possibly all human senses. Our implementation comprises a toolset to author sensory effects associated with multimedia content and the simulation thereof. Furthermore, it includes a library, a standalone player, and a Web browser plug-in which enables the playback and rendering of sensory effects on off-the-shelf rendering devices and in various contexts. 
All software modules are available under the GNU General Public License (GPL) v3 and the GNU Lesser General Public License (LGPL) v3 respectively.}, isbn13 = {-}, keywords = {MPEG-V, Annotation Tool, Simulator, Media Player, Web Browser Plug-in, Sensory Effects, Sensory Experience}, language = {EN}, location = {Nara, Japan}, pdf = {https://www.itec.aau.at/bib/files/acmmm2012_ossc_mwbrcthh.pdf}, talkdate = {2012.10.31}, talktype = {registered}, url = {http://www.acmm2012.org} } @Article{Timmerer2012_MPEGColumnSep, author = {Timmerer, Christian}, journal = {ACM SIGMultimedia Records}, title = {MPEG column: 101st MPEG meeting}, year = {2012}, issn = {1947-4598}, month = sep, number = {3}, pages = {9--11}, volume = {4}, address = {New York, NY, USA}, language = {EN}, publisher = {ACM}, url = {http://records.sigmm.ndlab.net/2012/11/mpeg-column-101st-mpeg-meeting/} } @Article{Timmerer2012_MPEGColumnJun, author = {Timmerer, Christian}, journal = {ACM SIGMultimedia Records}, title = {MPEG column: 100th MPEG meeting}, year = {2012}, issn = {1947-4598}, month = jun, number = {2}, pages = {2--3}, volume = {4}, address = {New York, NY, USA}, language = {EN}, publisher = {ACM}, url = {http://records.sigmm.ndlab.net/2012/06/mpeg-column-100th-mpeg-meeting/} } @Article{Timmerer2012_MPEGColumnDec, author = {Timmerer, Christian}, journal = {ACM SIGMultimedia Records}, title = {MPEG column: 102nd MPEG meeting}, year = {2012}, issn = {1947-4598}, month = dec, number = {4}, pages = {1--2}, volume = {4}, address = {New York, NY, USA}, language = {EN}, publisher = {ACM}, url = {http://records.sigmm.ndlab.net/2012/12/mpeg-column-102nd-mpeg-meeting/} } @InProceedings{Timmerer2012_ACMMM, title = {Dynamic adaptive streaming over HTTP: from content creation to consumption}, author = {Timmerer, Christian and Griwodz, Carsten}, booktitle = {Proceedings of the 20th ACM international conference on Multimedia}, year = {2012}, address = {New York, NY, USA}, editor = {Babaguchi, Noboru and 
Aizawa, Kiyoharu and Smith, John}, month = {oct}, pages = {1533--1534}, publisher = {ACM}, series = {MM '12}, abstract = {In this tutorial we present dynamic adaptive streaming over HTTP ranging from content creation to consumption. It particular, it provides an overview of the recently ratified MPEG-DASH standard, how to create content to be delivered using DASH, its consumption, and the evaluation thereof with respect to competing industry solutions. The tutorial can be roughly clustered into three parts. In part I we will provide an introduction to DASH, part II covers content creation, delivery, and consumption, and, finally, part III deals with the evaluation of existing (open source) MPEG-DASH implementations compared to state-of-art deployed industry solutions.}, doi = {10.1145/2393347.2396553}, keywords = {MPEG, adaptation, dash, dynamic adaptive http streaming, streaming}, language = {EN}, location = {Nara, Japan}, pdf = {https://www.itec.aau.at/bib/files/mtu008-timmerer.pdf}, slides = {https://www.itec.aau.at/bib/files/mtu008-timmerer-slides.pdf}, talkdate = {2012.10.29}, talktype = {registered}, url = {http://doi.acm.org/10.1145/2393347.2396553} } @Article{Timmerer2012909, author = {Timmerer, Christian and Waltl, Markus and Rainer, Benjamin and Hellwagner, Hermann}, journal = {Signal Processing: Image Communication}, title = {Assessing the quality of sensory experience for multimedia presentations}, year = {2012}, month = {sep}, number = {8}, pages = {909--916}, volume = {27}, abstract = {This paper introduces the concept of sensory experience by utilizing sensory effects such as wind or lighting as another dimension which contributes to the quality of the user experience. In particular, we utilize a representation format for sensory effects that are attached to traditional multimedia resources such as audio, video, and image contents. 
Sensory effects (e.g., wind, lighting, explosion, heat, cold) are rendered on special devices (e.g., fans, ambient lights, motion chair, air condition) in synchronization with the traditional multimedia resources and shall stimulate other senses than audition and vision (e.g., mechanoreception, equilibrioception, thermoreception), with the intention to increase the users Quality of Experience (QoE). In particular, the paper provides a comprehensive introduction into the concept of sensory experience, its assessment in terms of the QoE, and related standardization and implementation efforts. Finally, we will highlight open issues and research challenges including future work.}, address = {Amsterdam, Netherlands}, doi = {10.1016/j.image.2012.01.016}, keywords = {Quality of Experience, Sensory experience, Subjective quality assessment, Experimental results, MPEG-V}, language = {EN}, pdf = {https://www.itec.aau.at/bib/files/1-s2.0-S0923596512000252-main.pdf}, publisher = {Elsevier}, url = {http://dx.doi.org/10.1016/j.image.2012.01.016} } @InProceedings{Taschwer2012, author = {Taschwer, Mario}, booktitle = {Advances in Multimedia Modeling}, title = {A Key-Frame-Oriented Video Browser}, year = {2012}, address = {Berlin / Heidelberg}, editor = {Schoeffmann, Klaus and Merialdo, Bernard and Hauptmann, Alexander and Ngo, Chong-Wah and Andreopoulos, Yiannis and Breiteneder, Christian}, month = {jan}, pages = {655-657}, publisher = {Springer}, series = {Lecture Notes in Computer Science}, volume = {7131}, abstract = {We propose a video browser facilitating known-item search in a single video. Key frames are presented as four images at a time and can be navigated quickly in both forward and backward directions using a slider. Alternatively, key frames can be displayed automatically at different frame rates. 
The user may choose between three mappings of key frames to the four key frame widgets based on video time stamps and color similarity.}, doi = {10.1007/978-3-642-27355-1_68}, isbn13 = {978-3-642-27354-4}, language = {EN}, location = {Klagenfurt}, talkdate = {2012.01.06}, talktype = {poster}, url = {http://dx.doi.org/10.1007/978-3-642-27355-1_68} } @InProceedings{SobeEMCS2012, author = {Sobe, Anita and Elmenreich, Wilfried and Del Fabro, Manfred}, booktitle = {European Meeting on Cybernetics and Systems Research Book of Abstracts}, title = {Self-organizing content sharing at social events}, year = {2012}, address = {Vienna}, editor = {Bichler, Robert and Blachfellner, Stefan and Hofkirchner, Wolfgang}, month = {apr}, pages = {197--200}, publisher = {EMCSR}, language = {EN}, location = {Vienna, Austria}, talkdate = {2012.04.11}, talktype = {registered}, url = {http://www.emcsr.net/wp-content/uploads/2012/EMCSR_Book_of_Abstracts_V2.pdf} } @InProceedings{SchoeffmannWIAMIS2012, author = {Schoeffmann, Klaus and Ahlström, David}, booktitle = {Proceedings of The 13th International Workshop on Image Analysis for Multimedia Interactive Services ({WIAMIS} 2012)}, title = {Using a Cylindrical Interface for Image Browsing to Improve Visual Search Performance}, year = {2012}, address = {Los Alamitos, CA, USA}, editor = {O'Connor, Noel and Daras, Petros and Pereira, Fernando}, month = {may}, pages = {1-4}, publisher = {IEEE}, abstract = {In this paper we evaluate a 3D cylindrical interface that arranges image thumbnails by visual similarity for the purpose of image browsing. Through a user study we compare the performance of this interface to the performance of a common scrollable 2D list of thumbnails in a grid arrangement. 
Our evaluation shows that the 3D Cylinder interface enables significantly faster visual search and is the preferred search interface for the majority of tested users.}, language = {EN}, location = {Dublin, Ireland}, talkdate = {2012.05.23}, talktype = {registered} } @Article{SchoeffmannSIGMMR2012, author = {Schoeffmann, Klaus and Bailer, Werner}, journal = {ACM SIGMultimedia Records}, title = {Video Browser Showdown}, year = {2012}, month = jun, number = {2}, pages = {2}, volume = {4}, address = {N/A}, language = {EN}, publisher = {ACM} } @InProceedings{SchoeffmannMMM12, author = {Schoeffmann, Klaus and Ahlström, David and Böszörmenyi, Laszlo}, booktitle = {Advances in Multimedia Modeling}, title = {Video Browsing with a 3D Thumbnail Ring Arranged by Color Similarity}, year = {2012}, address = {Berlin, Heidelberg, New York}, editor = {Schoeffmann, Klaus and Merialdo, Bernard and Hauptmann, Alexander and Ngo, Chong-Wah and Andreopoulos, Yiannis and Breiteneder, Christian}, month = jan, pages = {660--662}, publisher = {Springer}, language = {EN}, location = {Klagenfurt, Austria}, talkdate = {2012.01.06}, talktype = {poster} } @Article{SchoeffmannIJMDEM12, author = {Schoeffmann, Klaus and Ahlström, David}, journal = {International Journal of Multimedia Data Engineering and Management}, title = {An Evaluation of Color Sorting for Image Browsing}, year = {2012}, month = mar, number = {1}, pages = {49--62}, volume = {3}, address = {701 E. 
Chocolate Ave., Hershey, PA 17033, USA}, language = {EN}, publisher = {IGI Publishing} } @InProceedings{SchoeffmannICMR2012, author = {Schoeffmann, Klaus and Hudelist, Marco Andrea and Schaefer, Gerald and Del Fabro, Manfred}, booktitle = {Proceedings of the 2nd ACM International Conference on Multimedia Retrieval}, title = {Mobile Image Browsing on a 3D Globe}, year = {2012}, address = {New York, NY, USA}, editor = {Horace, H S Ip and Rui, Yong}, month = {jun}, pages = {61:1--61:2}, publisher = {ACM}, abstract = {With users increasingly using their mobile devices such as smartphones as digital photo albums, effective methods for managing these collections are becoming increasingly important. Standard solutions provide only limited facilities for organising, browsing and searching image collections on mobile devices, making it challenging and time-consuming to locate images of interest. In this demo paper, we present an intuitive interface for organising and browsing image collections on mobile devices. Images are arranged on a 3D globe according to colour similarity. To avoid image overlap image thumbnails are placed on a regular grid structure while large image collections are organised using a hierarchical data structure. 
Through multi-touch user interaction image browsing can be performed in an intuitive and effective manner.}, doi = {10.1145/2324796.2324866}, isbn10 = {978-1-4503-1329-2}, language = {EN}, location = {Hong Kong, China}, talkdate = {2012.06.08}, talktype = {poster}, url = {http://dl.acm.org/citation.cfm?id=2324866} } @InProceedings{SchoeffmannICME2012_1, author = {Schoeffmann, Klaus and Ahlström, David and Böszörmenyi, Laszlo}, booktitle = {Proceedings of the {IEEE} International Conference on Multimedia and Expo ({ICME} 2012)}, title = {3D Storyboards for Interactive Visual Search}, year = {2012}, address = {Los Alamitos, CA, USA}, editor = {Zhang, Jian and Schonfeld, Dan and Feng, David Dagan and Nanyang, Jianfei Cai and Hanjalic, Alan and Magli, Enrico and Pickering, Mark and Friedland, Gerald and Hua, Xian-Sheng}, month = {July}, pages = {848-853}, publisher = {IEEE Computer Society}, abstract = {Interactive image and video search tools typically use a grid-like arrangement of thumbnails for preview purpose. Such a display, which is commonly known as storyboard, provides limited flexibility at interactive search and it does not optimally exploit the available screen estate. In this paper we design and evaluate alternatives to the common two-dimensional storyboard. We take advantage of 3D graphics in order to present image thumbnails in cylindrical arrangements. 
Through a user study we evaluate the performance of these interfaces in terms of visual search time and subjective performance.}, language = {EN}, location = {Melbourne, Australia}, talkdate = {2012.07.12}, talktype = {registered} } @InProceedings{Reiners2012, author = {Reiners, René and Halvorsrud, Ragnhild and Eide, Aslak Wegner and Pohl, Daniela}, booktitle = {Proceedings of the 19th Conference on Pattern Languages of Programs (PLoP)}, title = {An Approach to Evolutionary Design Pattern Engineering}, year = {2012}, address = {New York, NY, USA}, editor = {Guerra, Eduardo}, month = {oct}, pages = {1-14}, publisher = {ACM}, abstract = {The design of interactive systems, especially in distributed research projects, is a challenging process in which many concepts are developed with successful outcomes but also with dissatisfying results. In order to structure and relay knowledge about good or bad approaches, design patterns are a well-known instrument in research and development. Due to the condition that a design pattern must be easy to read, different stakeholders in the system engineering and design process are able to understand the described concepts without the need of specific expert knowledge . In distributed projects, application design knowledge may be scattered and documented in different manners. This means, before we can start formulating patterns, we need to discover and gather the available and partially concealed design knowledge. Since these fragments of knowledge may not always be accurately formulated for being used as design patterns, we seek for a collaborative method for collecting and formulating early findings together with established design knowledge. In this paper we present a concept of an evolutionary process for capturing, formulating, refining and validating design patterns. 
Our approach aims at involving as many stakeholders as possible in order to shape a pattern language over a project’s lifetime in a collaborative process allowing facile participation. We implement our approach in the scope of the EU research project BRIDGE that aims at supporting inter-agency collaboration during emergency response. We close with a discussion of the current state and envisioned next steps in order to foster our considerations.}, keywords = {Design Patterns, Pattern Languages, Pattern Evolution Process, Collaboration}, language = {EN}, location = {Tucson, Arizona, USA}, pdf = {https://www.itec.aau.at/bib/files/An Approach to Evolutionary Design Pattern Engineering.pdf}, talkdate = {2012.10.20}, talktype = {registered}, url = {http://www.hillside.net/plop/2012/index.php} } @InProceedings{Rainer2012_QoMEX2012_SensoryEffects, author = {Rainer, Benjamin and Waltl, Markus and Cheng, Eva and Shujau, Muawiyath and Timmerer, Christian and Davis, Stephen and Burnett, Ian and Hellwagner, Hermann}, booktitle = {Proceedings of the 4th International Workshop on Quality of Multimedia Experience (QoMEX'12)}, title = {Investigating the Impact of Sensory Effects on the Quality of Experience and Emotional Response in Web Videos}, year = {2012}, address = {Yarra Valley, Australia}, editor = {Burnett, Ian and Wu, Henry}, month = {jul}, pages = {278--283}, publisher = {IEEE}, abstract = {Multimedia is ubiquitously available online with large amounts of video increasingly consumed through Web sites such as YouTube or Google Video. However, online multimedia typically limits users to visual/auditory stimulus, with onscreen visual media accompanied by audio. The recent introduction of MPEG-V proposed multi-sensory user experiences in multimedia environments, such as enriching video content with so-called sensory effects like wind, vibration, light, etc. 
In MPEG-V, these sensory effects are represented as Sensory Effect Metadata (SEM), which is additionally associated to the multimedia content. This paper presents three user studies that utilize the sensory effects framework of MPEG-V, investigating the emotional response of users and enhancement of Quality of Experience (QoE) of Web video sequences from a range of genres with and without sensory effects. In particular, the user studies were conducted in Austria and Australia to investigate whether geography and cultural differences affect users’ elicited emotional responses and QoE.}, isbn13 = {-}, keywords = {Quality of Multimedia Experience, Sensory Effects, MPEG-V, Subjective Quality Assessment, World Wide Web, Sensory Experience}, language = {EN}, location = {Yarra Valley, Australia}, pdf = {https://www.itec.aau.at/bib/files/QoMEX2012_UserStudy.pdf}, talkdate = {2012.07.07}, talktype = {registered}, url = {http://www.qomex2012.org} } @InProceedings{Rainer2012_EUSIPCO2012, author = {Rainer, Benjamin and Lederer, Stefan and Mueller, Christopher and Timmerer, Christian}, booktitle = {Proceedings of the 20th European Signal Processing Conference (EUSIPCO)}, title = {A Seamless Web Integration of Adaptive HTTP streaming}, year = {2012}, address = {Bucharest, Romania}, editor = {Pesquet-Popescu, Béatrice and Burileanu, Corneliu}, month = {aug}, pages = {1519-1523}, publisher = {European Signal Processing (EURASIP) Society}, abstract = {Nowadays video is an important part of the Web and Web sites like YouTube, Hulu, etc. count millions of users consuming their content every day. However, these Web sites mainly use media players based on proprietary browser plug-ins (i.e., Adobe Flash) and do not leverage adaptive streaming systems. This paper presents a seamless integration of the recent MPEG standard on Dynamic Adaptive Streaming over HTTP (DASH) in the Web using the HTML5 video element. 
Therefore, we present DASHJS, a JavaScript-based MPEG-DASH client which adopts the Media Source API of Google’s Chrome browser to present a flexible and potentially browser independent DASH client. Furthermore, we present the integration of WebM based media segments in DASH giving a detailed description of the used container format structure and a corresponding Media Presentation Description (MPD). Our preliminary evaluation demonstrates the bandwidth adaption capabilities to show the effectiveness of the system.}, language = {EN}, location = {Bucharest, Romania}, pdf = {https://www.itec.aau.at/bib/files/DASHJS-Eusipco.pdf}, talkdate = {2012.08.30}, talktype = {registered} } @InProceedings{Raffelsberger2012, author = {Raffelsberger, Christian and Hellwagner, Hermann}, booktitle = {Proceedings of the Tenth Workshop on Intelligent Solutions in Embedded Systems ({WISES '12})}, title = {Evaluation of MANET Routing Protocols in a Realistic Emergency Response Scenario}, year = {2012}, address = {Los Alamitos, CA, USA}, editor = {Quaritsch, Markus and Fehervari, Istvan}, month = {jul}, pages = {88-92}, publisher = {IEEE}, abstract = {We evaluate the performance of several routing protocols for mobile ad-hoc networks (MANETs) in an emergency response scenario. The simulated scenario uses a disaster area mobility model and a wireless shadowing model to represent realistic first responder movements in a hybrid indoor/outdoor environment. The resulting scenario imposes some challenges on the MANET routing protocols such as intermittent connectivity and network partitions. 
The simulation results show that nodes have diverse connectivity characteristics which are challenging for state-of-the-art MANET routing protocols.}, language = {EN}, location = {Klagenfurt, Austria}, pdf = {https://www.itec.aau.at/bib/files/WISES2012-cr.pdf}, talkdate = {2012.07.05}, talktype = {poster} } @InProceedings{RT_ICME_1, author = {Tusch, Roland and Pletzer, Felix and Kraetschmer, Armin and Böszörmenyi, Laszlo and Rinner, Bernhard and Mariacher, Thomas and Harrer, Manfred}, booktitle = {ICME '12 Proceedings of the 2012 IEEE International Conference on Multimedia and Expo Workshops}, title = {Efficient Level of Service Classification for Traffic Monitoring in the Compressed Video Domain}, year = {2012}, address = {Piscataway (NJ)}, editor = {Zhang, Jian and Schonfeld, Dan and Deagan, David Feng}, month = {jul}, pages = {967-972}, publisher = {IEEE}, abstract = {This paper presents a new method for estimating the level of service (LOS) on motorways in the compressed video domain. The method performs statistical computations on motion vectors of MPEG4 encoded video streams within a predefined region of interest to determine a set of four motion features describing the speed and density of the traffic stream. These features are fed into a Gaussian radial basis function network to classify the corresponding LOS. To improve the classification results, vectors of moving objects are clustered and outliers are eliminated. The proposed method is designed to be executed on a server system, where a large number of camera live streams can be analyzed in parallel in real-time. Evaluations with a comprehensive set of real-world training and test data from an Austrian motorway have shown an average accuracy of 86.7% on the test data set for classifying all four LOS levels. 
With a mean execution time of 48 microseconds per frame on a common server, hundreds of video streams can be analyzed in real-time.}, doi = {10.1109/ICME.2012.101}, isbn13 = {978-1-4673-1659-0}, language = {EN}, location = {Melbourne, Australia}, talkdate = {2012.07.12}, talktype = {registered} } @InProceedings{RT_ICME, author = {Tusch, Roland and Pletzer, Felix and Mudunuri, Vijay and Kraetschmer, Armin and Sabbavarapu, Karuna and Kogler, Marian and Böszörmenyi, Laszlo and Rinner, Bernhard and Harrer, Manfred and Mariacher, Thomas and Hrassnig, P.}, booktitle = {ICMEW '12 Proceedings of the 2012 IEEE International Conference on Multimedia and Expo Workshops}, title = {LOOK2 - A Video-based System for Real-time Notification of Relevant Traffic Events.}, year = {2012}, address = {Piscataway (NJ)}, editor = {Zhang, Jian and Schonfeld, Dan and Feng, David Dagan}, month = jul, pages = {670}, publisher = {IEEE}, abstract = {We demonstrate our novel video-based real-time traffic event notification and verification system LOOK2. It generates fast and reliable traffic information about relevant traffic state and road conditions changes on observed roads. It utilizes installed road-side sensors providing low-level traffic and environmental data, as well as video sensors which gain high-level traffic information from live video analysis. Spatio-temporal data fusion is applied on all available traffic and environmental data to gain reliable traffic information. This traffic information is published by a DATEXII compliant web service to a web-based traffic desk application. Road network and traffic channel operators receive real-time and relevant traffic event notifications by using this application. 
The system also enables a visual verification of the notified situations.}, doi = {10.1109/ICMEW.2012.126}, isbn10 = {978-1-4673-2027-6}, language = {EN}, location = {Melbourne, Australia}, talkdate = {2012.07.10}, talktype = {poster} } @InProceedings{RT_AVSS_2012, author = {Pletzer, Felix and Tusch, Roland and Böszörmenyi, Laszlo and Rinner, Bernhard}, booktitle = {Proceedings of the IEEE Conference on Advanced Vision and Signal-based Surveillance}, title = {Robust traffic state estimation on smart cameras.}, year = {2012}, address = {Piscataway (NJ)}, editor = {Porikli, Fatih and Wang, Liang and Maybank, Steve}, month = {sep}, pages = {434-439}, publisher = {IEEE}, abstract = {This paper presents a novel method for video-based traffic state detection on motorways performed on smart cameras. Camera calibration parameters are obtained from the known length of lane markings. Mean traffic speed is estimated from Kanade-Lucas-Tomasi (KLT) optical flow method using a robust outlier detection. Traffic density is estimated using a robust statistical counting method. Our method has been implemented on an embedded smart camera and evaluated under different road and illumination conditions. 
It achieves a detection rate of more than 95% for stationary traffic.}, language = {EN}, talktype = {none}, url = {http://doi.ieeecomputersociety.org/10.1109/AVSS.2012.63} } @InProceedings{Pohl2012c, author = {Pohl, Daniela and Bouchachia, Abdelhamid and Hellwagner, Hermann}, booktitle = {11th International Conference on Machine Learning and Applications}, title = {Automatic Identification of Crisis-Related Sub-Events using Clustering}, year = {2012}, address = {Los Alamitos, CA, USA}, editor = {Han, Jiawei and Khoshgoftaar, Taghi M and Zhu, Xingquan}, month = {dec}, pages = {333-338}, publisher = {IEEE}, abstract = {Social media are becoming an important instrument for supporting crisis management, due to their broad acceptance and the intensive usage of mobile devices for accessing them. Social platforms facilitate collaboration among the public during a crisis and also support after-the-fact analysis. Thus, social media are useful for the processes of understanding, learning, and decision making. In particular, having information from social networks in a suitable, ideally summarized, form can speed up such processes. The present study relies on Flickr and YouTube as social media and aims at automatically identifying individual sub-events within a crisis situation. The study applies a two-phase clustering approach to detect those sub-events. The first phase uses geo-referenced data to locate a sub-event, while the second phase uses the natural language descriptions of pictures and videos to further identify the ”what-about” of those sub-events. 
The results show high potential of this social media-based clustering approach for detecting crisis-related sub-events.}, keywords = {Clustering, Sub-Event Detection, Crisis Management}, language = {EN}, location = {Boca Raton, Florida, USA}, pdf = {https://www.itec.aau.at/bib/files/06406815.pdf}, talkdate = {2012.12.12}, talktype = {registered}, url = {http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6406815} } @InProceedings{Pohl2012b, author = {Pohl, Daniela and Bouchachia, Abdelhamid and Hellwagner, Hermann}, booktitle = {IEEE 21st International Workshop on Enabling Technologies: Infrastructure for Collaborative Enterprises (WETICE)}, title = {Supporting Crisis Management via Sub-Event Detection in Social Networks}, year = {2012}, address = {Toulouse, France}, editor = {Diaz, Michel and Senac, Patrick}, month = {jun}, pages = {373--378}, publisher = {IEEE}, abstract = {Social networks give the opportunity to gather and share knowledge about a situation of relevance. This so called user-generated content is getting increasingly important during crisis management. It facilitates the collaboration with citizens or parties involved from the very beginning of the crisis. The information captured in form of images, text or videos is a valuable source of identifying sub-events of a crisis. In this study, we use metadata of images and videos collected from Flickr and YouTube to extract sub-events in crisis situations. We investigate the suitability of clustering techniques to detect sub-events. In particular two algorithms are evaluated on several data sets related to crisis situations. 
The results show the high potential of the approach proposed.}, doi = {10.1109/WETICE.2012.58}, issn = {1524-4547}, keywords = {Crisis Management, Information Retrieval, Clustering, Sub-Event Detection}, language = {EN}, location = {Toulouse, France}, talkdate = {2012.06.26}, talktype = {registered} } @InProceedings{Pohl2012a, author = {Pohl, Daniela and Bouchachia, Abdelhamid and Hellwagner, Hermann}, booktitle = {Proceedings of the 21st International Conference Companion on World Wide Web}, title = {Automatic Sub-Event Detection in Emergency Management using Social Media}, year = {2012}, address = {New York, NY, USA}, editor = {Mille, Alain and Gandon, Fabien and Misselis, Jacques}, month = {apr}, pages = {683--686}, publisher = {ACM}, series = {WWW '12 Companion}, abstract = {Emergency management is about assessing critical situations, followed by decision making as a key step. Clearly, information is crucial in this two-step process. The technology of social (multi)media turns out to be an interesting source for collecting information about an emergency situation. In particular, situational information can be captured in form of pictures, videos, or text messages. The present paper investigates the application of multimedia metadata to identify the set of sub-events related to an emergency situation. The used metadata is compiled from Flickr and YouTube during an emergency situation, where the identification of the events relies on clustering. 
Initial results presented in this paper show how social media data can be used to detect different sub-events in a critical situation.}, keywords = {Emergency Management, Social Media, Clustering}, language = {EN}, location = {Lyon, France}, pdf = {https://www.itec.aau.at/bib/files/p683.pdf}, talkdate = {2012.04.17}, talktype = {registered} } @InProceedings{Muller2012, author = {Müller, Alexander and Lux, Mathias and Böszörmenyi, Laszlo}, booktitle = {Proceedings of the 12th International Conference on Knowledge Management and Knowledge Technologies}, title = {The video summary GWAP: summarization of videos based on a social game}, year = {2012}, address = {New York, NY, USA}, editor = {Lindstaedt, Stefanie and Granitzer, Michael}, month = {jan}, pages = {15:1--15:7}, publisher = {ACM}, series = {i-KNOW '12}, doi = {10.1145/2362456.2362476}, keywords = {games with a purpose, human computation, video retrieval, video summarization}, language = {EN}, talktype = {none}, url = {http://doi.acm.org/10.1145/2362456.2362476} } @TechReport{Muenzer2012, author = {Münzer, Bernd and Schoeffmann, Klaus and Böszörmenyi, Laszlo}, institution = {Institute of Information Technology ({ITEC}), Klagenfurt University}, title = {Detection of Circular Content Area in Endoscopic Videos for Efficient Encoding and Improved Content Analysis}, year = {2012}, address = {Klagenfurt, Austria}, month = {nov}, number = {TR/ITEC/12/2.03}, abstract = {The actual content of endoscopic videos is typically limited to a circular area in the center of the image due to the inherent characteristics of the camera. This area is surrounded by a dark border that fills up the remainder of the rectangular image and is subject to noise. The position and size of the circle is not standardized and usually varies over time. In this paper a robust algorithm is presented that (1) classifies which parts of an endoscopic video feature a circular content area and (2) determines its exact position and size, if present. 
This information is useful for improving video encoding efficiency, limiting further analysis steps to the relevant area and saving ink when printing still images on findings. Our evaluation shows that the proposed method is very fast, reliable and robust. Moreover, it indicates that by exploiting this information for video encoding a considerable bitrate reduction is possible with the same visual quality.}, language = {EN}, pages = {20}, pdf = {https://www.itec.aau.at/bib/files/CircleDetection.pdf} } @InProceedings{MuellerICME2012, author = {Mueller, Christopher and Smole, Martin and Schoeffmann, Klaus}, booktitle = {Proceedings of the IEEE International Conference on Multimedia and Expo (ICME 2012)}, title = {A Demonstration of A Hierarchical Multi-Layout 3D Video Browser}, year = {2012}, address = {Los Alamitos, CA, USA}, editor = {Zhang, Jian and Schonfeld, Dan and Feng, David Dagan and Nanyang, Jianfei Cai and Hanjalic, Alan and Magli, Enrico and Pickering, Mark and Friedland, Gerald and Hua, Xian-Sheng}, month = {jul}, pages = {665}, publisher = {IEEE Computer Society}, abstract = {This paper demonstrates a novel 3D Video Browser (3VB) that enables interactive search within a single video as well as video collections by utilizing 3D projection and an intuitive interaction. The browsing approach is based on hierarchical search, which means that the user can split a video into several segments. The 3VB disposes a convenient interface that allows flexible arrangement of video segments in the 3D space. 
It allows for concurrent playback of video segments and flexible inspection of these segments at any desired level of detail through convenient user interaction.}, language = {EN}, location = {Melbourne, Australia}, pdf = {https://www.itec.aau.at/bib/files/A_Demo_of_a_Hierarchical_Multi-Layout_3D_Video_Browser.pdf}, talkdate = {2012.07.10}, talktype = {registered} } @InProceedings{Mueller2012b, author = {Mueller, Christopher and Renzi, Daniele and Lederer, Stefan and Battista, Stefano and Timmerer, Christian}, booktitle = {Proceedings of the 20th European Signal Processing Conference (EUSIPCO12)}, title = {Using Scalable Video Coding for Dynamic Adaptive Streaming over HTTP in Mobile Environments}, year = {2012}, address = {Bucharest, Romania}, editor = {Burileanu, Corneliu and Pesquet-Popescu, Béatrice}, month = {aug}, pages = {2208-2212}, publisher = {European Signal Processing (EURASIP) Society}, abstract = {Dynamic Adaptive Streaming over HTTP (DASH) is a convenient approach to transfer videos in an adaptive and dynamic way to the user. As a consequence, this system provides high bandwidth flexibility and is especially suitable for mobile use cases where the bandwidth variations are tremendous. In this paper we have integrated the Scalable Video Coding (SVC) extensions of the Advanced Video Coding (AVC) standard into the recently ratified MPEG-DASH standard. 
Furthermore, we have evaluated our solution under restricted conditions using bandwidth traces from mobile environments and compared it with an improved version of our MPEG-DASH implementation using AVC as well as major industry solutions.}, keywords = {Dynamic Adaptive Streaming over {HTTP}, {MPEG-DASH}, Scalable Video Coding, Evaluation, Mobile Networks, Vehicular Mobility}, language = {EN}, location = {Bucharest, Romania}, pdf = {https://www.itec.aau.at/bib/files/mueller_svc-dash.pdf}, talkdate = {2012.08.31}, talktype = {registered} } @InProceedings{Mueller2012a, author = {Mueller, Christopher and Lederer, Stefan and Timmerer, Christian}, booktitle = {Proceedings of the Fourth Annual ACM SIGMM Workshop on Mobile Video (MoVid12)}, title = {An Evaluation of Dynamic Adaptive Streaming over HTTP in Vehicular Environments}, year = {2012}, address = {New York, NY, USA}, editor = {Hefeeda, Mohamed and Hsu, Cheng-Hsin and Chatterjee, Mainak and Venkatasubramanian, Nalini and Ganguly, Samrat}, month = {feb}, pages = {37--42}, publisher = {ACM}, abstract = {MPEG's Dynamic Adaptive Streaming over HTTP (MPEG-DASH) is an emerging standard designed for media delivery over the top of existing infrastructures and able to handle varying bandwidth conditions during a streaming session. This requirement is very important, specifically within mobile environments and, thus, DASH could potentially become a major driver for mobile multimedia streaming. Hence, this paper provides a detailed evaluation of our implementation of MPEG DASH compared to the most popular proprietary systems, i.e., Microsoft Smooth Streaming, Adobe HTTP Dynamic Streaming, and Apple HTTP Live Streaming. In particular, these systems will be evaluated under restricted conditions which are due to vehicular mobility. 
In anticipation of the results, our prototype implementation of MPEG-DASH can very well compete with state-of-the-art solutions and, thus, can be regarded as a mature standard ready for industry adaption.}, keywords = {Dynamic Adaptive Streaming over HTTP, MPEG-DASH, Microsoft Smooth Streaming, Adobe HTTP Dynamic Streaming, Evaluation, Apple HTTP Live Streaming, Mobile Networks, Vehicular Mobility}, language = {EN}, location = {Chapel Hill, North Carolina, USA}, pdf = {https://www.itec.aau.at/bib/files/p37-mueller.pdf}, talkdate = {2012.02.24}, talktype = {registered} } @InProceedings{Mueller2012VCIP, author = {Mueller, Christopher and Lederer, Stefan and Timmerer, Christian}, booktitle = {Proceedings of the IEEE Conference on Visual Communications and Image Processing Conference (VCIP 2012)}, title = {A Proxy Effect Analysis and Fair Adaptation Algorithm for Multiple Competing Dynamic Adaptive Streaming over HTTP Clients}, year = {2012}, address = {San Diego, CA, USA}, editor = {Aizawa, Kiyoharu and Kuo, Jay and Liu, Zicheng}, month = {nov}, pages = {6}, publisher = {IEEE}, abstract = {Multimedia streaming technologies based on the Hypertext Transfer Protocol (HTTP) are very popular and used by many content providers such as Netflix, Hulu, and Vudu. Recently, ISO/IEC MPEG has ratified Dynamic Adaptive Streaming over HTTP (DASH) which extends the traditional HTTP streaming with an adaptive component addressing the issue of varying bandwidth conditions that users are facing in networks based on the Internet Protocol (IP). Additionally, industry has already deployed several solutions based on such an approach which simplifies large scale deployment because the whole streaming logic is located at the client. However, these features may introduce drawbacks when multiple clients compete for a network bottleneck due to the fact that the clients are not aware of the network infrastructure such as proxies or other clients. 
This paper identifies these negative effects and the evaluation thereof using MPEG-DASH and Microsoft Smooth Streaming. Furthermore, we propose a novel adaptation algorithm introducing the concept of fairness regarding a cluster of clients. In anticipation of the results we can conclude that we achieve more efficient bottleneck bandwidth utilization and less quality switches.}, language = {EN}, location = {San Diego, USA}, pdf = {https://www.itec.aau.at/bib/files/PID2500949.pdf}, talkdate = {2012.11.29}, talktype = {registered}, url = {http://www.vcip2012.org} } @InProceedings{Mueller2012, author = {Lederer, Stefan and Mueller, Christopher and Timmerer, Christian}, booktitle = {Proceedings of the Third Annual {ACM SIGMM} Conference on Multimedia Systems ({MMSys12})}, title = {Dynamic Adaptive Streaming over {HTTP} Dataset}, year = {2012}, address = {New York, NY, USA}, editor = {Claypool, Mark and Griwodz, Carsten and Mayer-Patel, Ketan}, month = {feb}, pages = {89-94}, publisher = {ACM}, abstract = {Adaptive HTTP streaming got lot of attention in recent years and with dynamic adaptive streaming over HTTP (DASH) a standard is available. Many papers cover this topic and present their research results, but unfortunately all of them use their own private dataset which – in most cases – is not publicly available. Hence, it is difficult to compare, e.g., adaptation algorithms in an objective way due to the lack of a common dataset which shall be used as basis for such experiments. In this paper, we present our DASH dataset featuring our DASHEncoder, an open source DASH content generation tool. 
We also provide basic evaluations of the different segment lengths, the influence of HTTP server settings, and, in this context, we show some of the advantages as well as problems of shorter segment lengths.}, keywords = {Dynamic Adaptive Streaming over HTTP, DASH, Dataset, Encoder, Content Generation Tool}, language = {EN}, location = {Chapel Hill, North Carolina, USA}, pdf = {https://www.itec.aau.at/bib/files/p89-lederer.pdf}, talkdate = {2012.02.22}, talktype = {registered} } @Article{Milojicic2012, author = {Milojicic, Dejan and Arlitt, Martin and Seligmann, Doree Duncan and Thiruvathukal, George and Timmerer, Christian}, journal = {Computer}, title = {Innovation Mashups: Academic Rigor Meets Social Networking Buzz}, year = {2012}, issn = {0018-9162}, month = {sep}, number = {9}, pages = {101--105}, volume = {45}, abstract = {Exploring new options for publishing and content delivery offers an enormous opportunity to improve the state of the art and further modernize academic and professional publications.}, address = {Los Alamitos, CA, USA}, doi = {10.1109/MC.2012.313}, keywords = {STCs, IEEE Computer Society, mashups, Computing Now}, language = {EN}, pdf = {https://www.itec.aau.at/bib/files/mco2012090101.pdf}, publisher = {IEEE Computer Society} } @InProceedings{Marques2012, author = {Marques, Oge and Lux, Mathias}, booktitle = {Proceedings of the 35th international ACM SIGIR conference on Research and development in information retrieval}, title = {Visual information retrieval using Java and LIRE}, year = {2012}, address = {New York, NY, USA}, editor = {Hersh, William and Callan, Jamie and Maarek, Yoelle and Sanderson, Mark}, month = {jan}, pages = {1193}, publisher = {ACM}, series = {SIGIR '12}, doi = {10.1145/2348283.2348538}, keywords = {content-based image retrieval, image search, java, visual information retrieval}, language = {EN}, location = {Portland, Oregon, USA}, talkdate = {2012.08.12}, talktype = 
{registered}, url = {http://doi.acm.org/10.1145/2348283.2348538} } @InProceedings{Lux2012b, author = {Lux, Mathias and Taschwer, Mario and Marques, Oge}, booktitle = {Proceedings of the 20th ACM international conference on Multimedia}, title = {Classification of photos based on good feelings: ACM MM 2012 multimedia grand challenge submission}, year = {2012}, address = {New York, NY, USA}, editor = {Aizawa, Kiyoharu and Babaguchi, Noboru and Smith, John}, month = {jan}, pages = {1367--1368}, publisher = {ACM}, series = {MM '12}, doi = {10.1145/2393347.2396488}, keywords = {affection, image classification, image search, user intentions}, language = {EN}, location = {Nara, Japan}, talkdate = {2012.11.01}, talktype = {registered}, url = {http://doi.acm.org/10.1145/2393347.2396488} } @InProceedings{Lux2012a, author = {Lux, Mathias and Taschwer, Mario and Marques, Oge}, booktitle = {Proceedings of the ACM multimedia 2012 workshop on Crowdsourcing for multimedia}, title = {A closer look at photographers' intentions: a test dataset}, year = {2012}, address = {New York, NY, USA}, editor = {Aizawa, Kiyoharu and Babaguchi, Noboru and Smith, John}, month = {jan}, pages = {17--18}, publisher = {ACM}, series = {CrowdMM '12}, doi = {10.1145/2390803.2390811}, keywords = {digital photos, user intentions}, language = {EN}, location = {Nara, Japan}, talkdate = {2012.10.29}, talktype = {registered}, url = {http://doi.acm.org/10.1145/2390803.2390811} } @InProceedings{Lux2012, author = {Lux, Mathias and Guggenberger, Mario and Müller, Alexander}, booktitle = {Proceedings of the Eighth Artificial Intelligence and Interactive Digital Entertainment International Conference (AIIDE 2012)}, title = {Finding Image Regions with Human Computation and Games with a Purpose}, year = {2012}, address = {Palo Alto, California, USA}, editor = {Riedl, Mark and Sukthankar, Gita}, month = {jan}, pages = {220}, publisher = {Association for the Advancement of Artificial Intelligence (AAAI Press)}, abstract 
= {Manual image annotation is a tedious and time-consuming task, while automated methods are error prone and limited in their results. Human computation, and especially games with a purpose, have shown potential to create high quality annotations by "hiding the complexity" of the actual annotation task and employing the "wisdom of the crowds". In this demo paper we present two games with a single purpose: finding regions in images that correspond to given terms. We discuss approach, implementation, and preliminary results of our work and give an outlook to immediate future work.}, isbn10 = {978-1-57735-582-3}, keywords = {Games with a Purpose; Human Computation}, language = {EN}, talktype = {none}, url = {http://www.aaai.org/ocs/index.php/AIIDE/AIIDE12/paper/view/5474} } @InProceedings{Lederer2012c, author = {Liu, Yaning and Geurts, Joost and Rainer, Benjamin and Lederer, Stefan and Mueller, Christopher and Timmerer, Christian}, booktitle = {CCNx Community Meeting (CCNxConn 2012)}, title = {DASH over CCN: A CCN use-case for a Social Media based collaborative project}, year = {2012}, address = {Sophia Antipolis}, editor = {Carofiglio, Giovanna}, month = {sep}, pages = {1-1}, publisher = {Parc}, keywords = {CCN, DASH}, language = {EN}, location = {Sophia Antipolis, France}, pdf = {https://www.itec.aau.at/bib/files/DashOverCCN.pdf}, talkdate = {2012.09.12}, talktype = {registered}, url = {http://www.ccnx.org/ccnxcon2012/} } @InProceedings{Lederer2012b, author = {Lederer, Stefan and Mueller, Christopher and Rainer, Benjamin and Waltl, Markus and Timmerer, Christian}, booktitle = {Proceedings of the IEEE Conference on Visual Communications and Image Processing Conference (VCIP 2012)}, title = {An open source MPEG DASH evaluation suite}, year = {2012}, address = {San Diego, CA, USA}, editor = {Izquierdo, Ebroul and Wang, Xin}, month = {nov}, pages = {1-1}, publisher = {IEEE}, abstract = {In this paper we demonstrate our MPEG-DASH evaluation suite, which comprises several 
components on the client side as well as on the server side. The major client components are the VLC DASH plugin, libDASH, and DASH-JS, a JavaScript-based DASH client. These tools enable performance tests on various platforms, e.g., Windows and Linux as well as mobile platforms such as Android. Moreover, due to their flexible structure it is possible to integrate adaptation logics and evaluate them under consistent conditions. On the server side we provide the content generation tool DASHEncoder, our MPEG-DASH datasets as well as the MPEG-DASH conformance validator.}, keywords = {MPEG DASH, Open Source, Demo}, language = {EN}, location = {San Diego, USA}, pdf = {https://www.itec.aau.at/bib/files/open-source_MPEG-DASH_evaluation_suite.pdf}, talkdate = {2012.11.29}, talktype = {registered}, url = {http://www.vcip2012.org} } @InProceedings{Lederer2012, author = {Lederer, Stefan and Mueller, Christopher and Timmerer, Christian}, booktitle = {Proceedings of the 19th International Packet Video Workshop ({PV} 2012)}, title = {Towards Peer-Assisted Dynamic Adaptive Streaming over HTTP}, year = {2012}, address = {Munich, Germany}, editor = {Guillemot, Christine and Chakareski, Jacob and Steinbach, Eckehard}, month = {may}, pages = {1--6}, publisher = {IEEE}, abstract = {This paper presents our peer-assisted Dynamic Adaptive Streaming over HTTP (pDASH) proposal as well as an evaluation based on our DASH simulation environment in comparison to conventional approaches, i.e., non-peer-assisted DASH. Our approach maintains the standard conformance to MPEG-DASH enabling an easy and straightforward way of enhancing a streaming system with peer assistance to reduce the bandwidth and infrastructure requirements of the content/service provider. 
In anticipation of the results our system achieves a bandwidth reduction of Content Distribution Networks (CDN) and as a consequence the corresponding infrastructure costs of the content/service providers by up to 25% by leveraging the upstream capacity of neighboring peers. Furthermore, the cost savings have been evaluated using a cost model that is based on the current Amazon CloudFront pricing scheme. Furthermore, we have also evaluated the performance impact that various combinations of quality levels of the content could have in a peer-assisted streaming system as well as the client behavior in such an environment.}, keywords = {Peer-Assisted Streaming, MPEG-DASH, Dynamic Adaptive Streaming over HTTP, CDN Bandwidth Reduction, Peer-to-Peer Streaming.}, language = {EN}, location = {Munich, Germany}, pdf = {https://www.itec.aau.at/bib/files/Paper53.pdf}, talkdate = {2012.05.10}, talktype = {registered} } @InProceedings{Kuschnig2012a, author = {Kuschnig, Robert and Yanmaz, Evsen and Kofler, Ingo and Rinner, Bernhard and Hellwagner, Hermann}, booktitle = {Proceedings of the Austrian Robotics Workshop (ARW-12)}, title = {Profiling {IEEE} 802.11 Performance on {Linux}-based {UAVs}}, year = {2012}, address = {Graz, Austria}, editor = {Steinbauer, Gerald and Uran, Suzana}, month = {may}, pages = {6}, publisher = {Graz University of Technology}, language = {EN}, location = {Graz, Austria}, pdf = {https://www.itec.aau.at/bib/files/Kuschnig_ARW2012.pdf}, talktype = {none} } @PhdThesis{Kuschnig2012, author = {Kuschnig, Robert}, school = {Klagenfurt University}, title = {Congestion-Aware Quality-Adaptive Streaming of Scalable Video}, year = {2012}, month = {jul}, abstract = {Internet video streaming is a hot topic in multimedia systems. A large variety of devices (computers, mobile phones, TVs, etc.) are connected to the Internet via wired or wireless networks and are capable of receiving and decoding HD video content. 
To enable new services like HD video streaming (e.g., online video rental), the Internet’s infrastructure was enhanced. But the Internet is still a best-effort network, which does not implement quality-of-service or admission control, resulting in time-varying bandwidth and packet delay, packet loss and network congestion. Because video streaming accounts for a considerable amount of the Internet’s traffic, video streaming needs additionally to be congestion-aware, to avoid a congestion collapse of the Internet. The Transmission Control Protocol (TCP) can adapt to changing network conditions and is currently the de facto standard protocol for congestion-aware and reliable data transmission in the Internet. This fact gave TCP-based video streaming a huge momentum. Consequently, this thesis investigates TCP-based adaptive video streaming for the Internet. The main goal is to provide a solution for congestion-aware video streaming, while still being able to achieve a reasonable performance in error-prone networks. To complement existing work on congestion-aware adaptive streaming, this thesis makes six contributions. (1) The baseline performance of TCP-based adaptive streaming is identified by means of an evaluation of different adaptive streaming approaches. The results represent a reference for further investigations. (2) An investigation on the influence of TCP’s behavior in presence of packet loss on the video streaming performance. (3) To overcome the shortcomings of TCP-based video streaming (single TCP connections fail to deliver a good performance in case of packet loss), a new approach to video streaming based on multiple request-response streams was introduced. The novelty of this system is that it is able to make use of multiple HTTP-based request-response streams while still providing TCP-friendliness. 
(4) A performance model of the HTTP-based request-response streams was developed, to estimate the influence of the system parameters and the network characteristics on the throughput performance. (5) A comprehensive evaluation of the HTTP-based request-response streams under diverse network conditions was conducted, to validate the model’s estimations. Additionally, the TCP-friendliness was evaluated, showing that request-response streaming systems can be configured to achieve TCP-friendliness. (6) A cellular network with high bandwidth fluctuations and RTTs was used to investigate the performance of the request-response streaming system in a mobile video streaming scenario. The results indicate that the streaming system can make good use of the available bandwidth, while the number of quality switches is kept low. While aggregating multiple TCP connections to improve the TCP streaming performance is quite common, usually the improvement comes at the cost of high deployment effort. By placing the streaming logic at the client, request-response streams can avoid this complexity. Additionally, this client-driven approach responds faster to changing network conditions and enables easy recovery from connection stalls or aborts, because the control loop is at the client. 
To improve the network efficiency and the scalability in terms of number of clients served, HTTP-based request-response streams can utilize HTTP proxies and caches.},
  language = {EN},
  pages = {186},
  pdf = {https://www.itec.aau.at/bib/files/PhD_thesis_Robert_Kuschnig_2012.pdf}
}

@InProceedings{Kogler2012,
  author    = {Kogler, Marian and Lux, Mathias},
  booktitle = {i-KNOW '12 Proceedings of the 12th International Conference on Knowledge Management and Knowledge Technologies},
  title     = {Robust image retrieval using bag of visual words with fuzzy codebooks and fuzzy assignment},
  year      = {2012},
  address   = {New York, NY, USA},
  editor    = {Lindstaedt, Stefanie},
  month     = jan,
  pages     = {34.1--34.4},
  publisher = {ACM},
  series    = {i-KNOW '12},
  abstract  = {Content-based retrieval systems leverage low level features such as color, texture or local information of images to find similar images to a respective query image. In recent years the Bag of Visual Words (BoVW) approach, which relies on quantized visual information around local image patches, has gained importance in image retrieval. In this paper we focus on fuzzy algorithms, in order to improve the descriptiveness of image descriptors. We extend the BoVW approach by applying fuzzy clustering and fuzzy assignment to take a step towards more effective visual descriptors, which are matched against each other in content-based similarity searches.},
  doi       = {10.1145/2362456.2362498},
  keywords  = {bag of visual words, content based image retrieval, fuzzy, visual information retrieval},
  language  = {EN},
  talktype  = {none},
  url       = {http://doi.acm.org/10.1145/2362456.2362498}
}

@InProceedings{Kofler2012,
  author    = {Kofler, Ingo and Kuschnig, Robert and Hellwagner, Hermann},
  booktitle = {Proceedings of the 9th {IEEE} Consumer Communications and Networking Conference ({CCNC})},
  title     = {Implications of the {ISO Base Media File Format} on Adaptive {HTTP} Streaming of {H.264/SVC}},
  year      = {2012},
  address   = {Los Alamitos, CA, USA},
  editor    = {Shirazi, Behrooz},
  month     = jan,
  pages     = {5},
  publisher = {IEEE},
  abstract  = {HTTP streaming has gained significant attraction in the last few years. Currently many commercial as well as standardized streaming systems are already offering adaptive streaming. In most cases, the adaptation is achieved by switching between separately encoded video streams in different qualities. In contrast to that, this paper focuses on the applicability of scalable video coding based on the H.264/SVC standard for adaptive HTTP streaming. Recent work has already highlighted the conceptual advantages like better cache utilization, fine-grained bit rate scalability, and lower storage requirements. This paper discusses the actual realization and design options for implementing priority streaming using the ISO~Base Media File Format (BMFF). We propose three different strategies for organizing the scalable video bit stream that consider both the possibilities as well as limitations of the ISO BMFF. The proposed strategies are discussed and evaluated both conceptually and quantitatively. For that purpose, we provide a detailed analysis based on modeling both the overhead of the file format and the HTTP encapsulation. The results for all three priority streaming strategies show that the limitations of the ISO BMFF result in a high relative overhead in the case of low bit rate content. However, when applied to high quality content, priority streaming of H.264/SVC can be implemented at a very low cost. Depending on the number of layers and the offered scalability dimensions, different strategies should be chosen to minimize the overhead. Based on the analytical model and the discussion, this paper provides guidance for selecting the most efficient strategy.},
  language  = {EN},
  location  = {Las Vegas, NV, USA},
  pdf       = {https://www.itec.aau.at/bib/files/Kofler_PriorityStreamingISOBMFF_preprint.pdf},
  talkdate  = {2012.01.14},
  talktype  = {registered},
  url       = {http://www.ieee-ccnc.org/2012/}
}

@Article{Hossfeld2012,
  author    = {Ho{\ss}feld, Tobias and Schatz, Raimund and Varela, Martin and Timmerer, Christian},
  journal   = {Communications Magazine, IEEE},
  title     = {Challenges of QoE Management for Cloud Applications},
  year      = {2012},
  month     = apr,
  number    = {4},
  pages     = {28--36},
  volume    = {50},
  abstract  = {Cloud computing is currently gaining enormous momentum due to a number of promised benefits: ease of use in terms of deployment, administration, and maintenance, along with high scalability and flexibility to create new services. However, as more personal and business applications migrate to the cloud, service quality will become an important differentiator between providers. In particular, quality of experience as perceived by users has the potential to become the guiding paradigm for managing quality in the cloud. In this article, we discuss technical challenges emerging from shifting services to the cloud, as well as how this shift impacts QoE and QoE management. Thereby, a particular focus is on multimedia cloud applications. Together with a novel QoE-based classification scheme of cloud applications, these challenges drive the research agenda on QoE management for cloud applications.},
  address   = {New York, NY, USA},
  doi       = {10.1109/MCOM.2012.6178831},
  keywords  = {cloud computing, multimedia computing, software quality, QoE management, QoE-based classification scheme, multimedia cloud applications, quality management, quality of experience, service quality, Cloud computing, Multimedia communication, Quality of service, Streaming media},
  language  = {EN},
  pdf       = {https://www.itec.aau.at/bib/files/06178831.pdf},
  publisher = {IEEE Communications Society}
}

@InCollection{Hellwagner2012,
  author    = {Hellwagner, Hermann},
  booktitle = {Encyclopedia of Parallel Computing},
  publisher = {Springer},
  title     = {Scalable Coherent Interface (SCI)},
  year      = {2012},
  address   = {Berlin, Heidelberg, New York},
  editor    = {Padua, David},
  month     = jan,
  pages     = {9},
  abstract  = {Scalable Coherent Interface (SCI) is the specification (standardized by ISO/IEC and the IEEE) of a high-speed, flexible, scalable, point-to-point-based interconnect technology that was implemented in various ways to couple multiple processing nodes. SCI supports both the message-passing and shared-memory communication models, the latter in either the cache-coherent or non-coherent variants. SCI can be deployed as a system area network for compute clusters, as a memory interconnect for large-scale, cache-coherent, distributed-shared-memory multiprocessors, or as an I/O subsystem interconnect.},
  language  = {EN}
}

@InProceedings{Grafl2012_TEMU,
  author    = {Grafl, Michael and Timmerer, Christian and Waltl, Markus and Xilouris, George and Zotos, Nikolaos and Renzi, Daniele and Battista, Stefano and Chernilov, Alex},
  booktitle = {Proceedings of the 2012 {IEEE} International Conference on Telecommunications and Multimedia ({TEMU} 2012)},
  title     = {Distributed Adaptation Decision-Taking Framework and Scalable Video Coding Tunneling for Edge and In-Network Media Adaptation},
  year      = {2012},
  address   = {Los Alamitos, CA, USA},
  editor    = {Pallis, Evangelos and Zacharopoulos, Vassilios and Kourtis, Anastasios},
  month     = jul,
  pages     = {6},
  publisher = {IEEE},
  series    = {TEMU},
  abstract  = {Existing and future media ecosystems need to cope with the ever-increasing heterogeneity of networks, devices, and user characteristics collectively referred to as (usage) context. The key to address this problem is media adaptation to various and dynamically changing contexts in order to provide a service quality that is regarded as satisfactory by the end user. The adaptation can be performed in many ways and at different locations, e.g., at the edge and within the network resulting in a substantial number of issues to be integrated within a media ecosystem. This paper describes research challenges, key innovations, target research outcomes, and achievements so far for edge and in-network media adaptation by introducing the concept of Scalable Video Coding (SVC) tunneling.},
  doi       = {10.1109/TEMU.2012.6294710},
  keywords  = {distributed adaptation decision-taking; SVC tunneling; research challenges; in-network adaptation; content-aware networking},
  language  = {EN},
  location  = {Heraklion, Greece},
  pdf       = {https://www.itec.aau.at/bib/files/TEMU2012_mgrafl.pdf},
  talkdate  = {2012.07.31},
  talktype  = {registered}
}

@InProceedings{Eberhard2012b,
  author    = {Eberhard, Michael and Kumar, Amit and Mapelli, Licio and Palo, Andi and Petrocco, Riccardo and Uitto, Mikko},
  booktitle = {Proceedings of the ACM Multimedia Systems Conference},
  title     = {NextSharePC: An Open-Source BitTorrent-based P2P Client Supporting SVC},
  year      = {2012},
  address   = {New York, U.S.A.},
  editor    = {Griwodz, Carsten},
  month     = feb,
  pages     = {1--6},
  publisher = {ACM},
  language  = {EN},
  location  = {Chapel Hill, North Carolina, U.S.A.},
  pdf       = {https://www.itec.aau.at/bib/files/next_share_pc.pdf},
  talkdate  = {2012.02.22},
  talktype  = {registered}
}

@InProceedings{Eberhard2012a,
  author    = {Eberhard, Michael and Petrocco, Riccardo and Hellwagner, Hermann and Timmerer, Christian},
  booktitle = {Proceedings of the Consumer Communication \& Networking Conference 2012},
  title     = {Comparison of Piece-Picking Algorithms for Layered Video Content in Peer-to-Peer Networks},
  year      = {2012},
  address   = {Los Alamitos, CA, U.S.A.},
  editor    = {Shirazi, Behrooz},
  month     = jan,
  pages     = {1--5},
  publisher = {IEEE},
  language  = {EN},
  location  = {Las Vegas, U.S.A.},
  pdf       = {https://www.itec.aau.at/bib/files/ccnc_pp_evaluation.pdf},
  talkdate  = {2012.01.15},
  talktype  = {registered}
}

@InProceedings{DelFabro2012c,
  author    = {Del Fabro, Manfred and Lux, Mathias and Schoeffmann, Klaus and Taschwer, Mario},
  booktitle = {Proceedings of TRECVID 2012},
  title     = {{ITEC-UNIKLU} Known-Item Search Submission 2012},
  year      = {2012},
  address   = {Gaithersburg, USA},
  editor    = {Over, Paul and Awad, George and Michel, Martial and Fiscus, Jonathan and Sanders, Greg and Shaw, Barbara and Kraaij, Wessel and Smeaton, Alan and Quénot, Georges},
  month     = nov,
  pages     = {11},
  publisher = {National Institute of Standards and Technology (NIST)},
  abstract  = {In this report we describe our approach to the known-item search task for TRECVID~2012. We describe how we index available metadata and how we gain additional information about the videos using content-based analysis. A rule-based query expansion and query reduction method is applied to increase the number of relevant videos in automatic runs. Furthermore, we describe an approach for quick, interactive filtering of large result sets. We outline how the parameters of our system were tuned for the IACC dataset and discuss our TRECVID 2012 KIS results.},
  language  = {EN},
  location  = {Gaithersburg, USA},
  talkdate  = {2012.11.28},
  talktype  = {poster},
  url       = {http://www-nlpir.nist.gov/projects/tvpubs/tv.pubs.org.html}
}

@InProceedings{DelFabro2012b,
  author    = {Del Fabro, Manfred and Sobe, Anita and Böszörmenyi, Laszlo},
  booktitle = {Proceedings of the Fourth International Conferences on Advances in Multimedia (MMEDIA 2012)},
  title     = {Summarization of Real-Life Events Based on Community-Contributed Content},
  year      = {2012},
  address   = {France},
  editor    = {Davies, Philip and Newell, David},
  month     = apr,
  pages     = {119--126},
  publisher = {IARIA},
  abstract  = {In this paper, we investigate whether community-contributed multimedia content can be used to make video summaries of social events. We implemented an event summarization algorithm that uses photos from Flickr and videos from YouTube to compose summaries of well-known society events, which took place in the last three years. The comparison with a manually obtained ground truth shows a good coverage of the most important situations of these events. We do not claim to produce the best summaries possible, which may be compared to the work of a human director, but we analyze what can be achieved with community-contributed content by now.},
  isbn13    = {978-1-61208-195-3},
  keywords  = {video summarization. event summarization. social media. real-life events. video retrieval. image retrieval. multimedia entertainment.},
  language  = {EN},
  location  = {Chamonix Mont-Blanc, France},
  pdf       = {https://www.itec.aau.at/bib/files/mmedia_2012_6_30_40058.pdf},
  talkdate  = {2012.05.02},
  talktype  = {registered},
  url       = {http://www.thinkmind.org/download.php?articleid=mmedia_2012_6_30_40058}
}

@InProceedings{DelFabro2012a,
  author    = {Del Fabro, Manfred and Böszörmenyi, Laszlo},
  booktitle = {Advances in Multimedia Modeling},
  title     = {{AAU} Video Browser: Non-Sequential Hierarchical Video Browsing without Content Analysis},
  year      = {2012},
  address   = {Berlin, Heidelberg, New York},
  editor    = {Schoeffmann, Klaus and Merialdo, Bernard and Hauptmann, Alexander and Ngo, Chong-Wah and Andreopoulos, Yiannis and Breiteneder, Christian},
  month     = jan,
  pages     = {639--641},
  publisher = {Springer},
  abstract  = {We participate in the Video Browser Showdown with our easy-to-use video browsing tool. It can be used for getting a quick overview of videos as well as for simple Known Item Search (KIS) tasks. It offers a parallel and a tree-like browsing interface for navigating through the content of single videos or even small video collections in a hierarchical, non-sequential manner. We want to validate whether simple KIS tasks can be completed without a time consuming content analysis in advance.},
  doi       = {10.1007/978-3-642-27355-1_63},
  language  = {EN},
  location  = {Klagenfurt, Austria},
  pdf       = {https://www.itec.aau.at/bib/files/delfabro_mmm2012_VBS.pdf},
  talkdate  = {2012.01.06},
  talktype  = {poster}
}

@InProceedings{DelFabro2012,
  author    = {Del Fabro, Manfred and Böszörmenyi, Laszlo},
  booktitle = {Advances in Multimedia Modeling},
  title     = {Summarization and Presentation of Real-Life Events Using Community-Contributed Content},
  year      = {2012},
  address   = {Berlin, Heidelberg, New York},
  editor    = {Schoeffmann, Klaus and Merialdo, Bernard and Hauptmann, Alexander and Ngo, Chong-Wah and Andreopoulos, Yiannis and Breiteneder, Christian},
  month     = jan,
  pages     = {630--632},
  publisher = {Springer},
  abstract  = {We present an algorithm for the summarization of social events with community-contributed content from Flickr and YouTube. A clustering algorithm groups content related to the searched event. Date information, GPS coordinates, user ratings and visual features are used to select relevant photos and videos. The composed event summaries are presented with our video browser.},
  doi       = {10.1007/978-3-642-27355-1_60},
  language  = {EN},
  location  = {Klagenfurt, Austria},
  pdf       = {https://www.itec.aau.at/bib/files/submission_145.pdf},
  talkdate  = {2012.01.05},
  talktype  = {poster}
}

@InCollection{BailerTVCA2012,
  author    = {Bailer, Werner and Schoeffmann, Klaus and Hopfgartner, Frank},
  booktitle = {TV Content Analysis: Techniques and Applications},
  publisher = {Auerbach Publications},
  title     = {A Survey of Advanced Content Management Tools for TV Postproduction},
  year      = {2012},
  address   = {CRC Press / Taylor and Francis, Albert House, 4th Floor, 1-4 Singer Street, London, EC2A 4BQ, UK},
  editor    = {Kompatsiaris, Yiannis and Merialdo, Bernard and Lian, Shiguo},
  month     = mar,
  pages     = {674},
  isbn13    = {9781439855607},
  language  = {EN}
}

@InProceedings{AhlstroemICME2012,
  author    = {Ahlström, David and Schoeffmann, Klaus},
  booktitle = {Proceedings of the 2012 {IEEE} International Conference on Multimedia and Expo Workshops},
  title     = {A Visual Search User Study on the Influences of Aspect Ratio Distortion of Preview Thumbnails},
  year      = {2012},
  address   = {Los Alamitos, CA, USA},
  editor    = {Zhang, Jian and Schonfeld, Dan and Feng, David Dagan and Cai, Jianfei and Hanjalic, Alan and Magli, Enrico and Pickering, Mark and Friedland, Gerald and Hua, Xian-Sheng},
  month     = jul,
  pages     = {546--551},
  publisher = {IEEE Computing Society},
  language  = {EN},
  location  = {Melbourne, Australia},
  talkdate  = {2012.07.13},
  talktype  = {registered}
}

@InProceedings{ACM2012,
  author    = {Ahlström, David and Hudelist, Marco Andrea and Schoeffmann, Klaus and Schaefer, Gerald},
  booktitle = {Proceedings of the 20th ACM international conference on Multimedia},
  title     = {A User Study on Image Browsing on Touchscreens},
  year      = {2012},
  address   = {New York, USA},
  editor    = {Babaguchi, Noboru and Aizawa, Kiyoharu and Smith, John},
  month     = nov,
  pages     = {925--928},
  publisher = {ACM Digital Library},
  abstract  = {Default image browsing interfaces on touch-based mobile devices provide limited support for image search tasks. To facilitate fast and convenient searches we propose an alternative interface that takes advantage of 3D graphics and arranges images on a rotatable globe according to color similarity. In a user study we compare the new design to the iPad's image browser. Results collected from 24 participants show that for color-sorted image collections the globe can reduce search time by 23\% without causing more errors and that it is perceived as being fun to use and preferred over the standard browsing interface by 70\% of the participants.},
  isbn13    = {978-1-4503-1089-5},
  language  = {EN},
  location  = {Nara, Japan},
  talkdate  = {2012.10.31},
  talktype  = {registered},
  url       = {http://dl.acm.org/citation.cfm?id=2393347&coll=DL&dl=ACM&CFID=159013035&CFTOKEN=94655035}
}