% Year: 2009
% Encoding: utf-8

@InCollection{Zahariadis2009,
  author    = {Zahariadis, Theodore and Lamy-Bergot, Catherine and Schierl, Thomas and Grüneberg, Karsten and Celetto, Luca and Timmerer, Christian},
  booktitle = {Towards the Future Internet - A European Research Perspective},
  publisher = {IOS Press},
  title     = {Content Adaptation Issues in the Future Internet},
  year      = {2009},
  address   = {Amsterdam, Netherlands},
  editor    = {Tselentis, Georgios and Domingue, John and Galis, Alex and Gavras, Anastasius and Hausheer, David and Krco, Srdjan and Lotz, Volkmar and Zahariadis, Theodore},
  month     = may,
  pages     = {283--292},
  abstract  = {Future Media Internet is envisaged to provide the means to share and distribute (advanced) multimedia content and services with superior quality and striking flexibility, in a trusted and personalized way, improving citizens' quality of life, working conditions, edutainment and safety. Based on work that has taken place in projects ICT SEA and ICT OPTIMIX, and the Media Delivery Platforms Cluster of projects, we try to provide the challenges and the way ahead in the area of content adaptation.},
  isbn      = {9781607500070},
  keywords  = {Future Media Internet, Adaptation, Scalable Video Coding},
  language  = {EN},
  pdf       = {https://www.itec.aau.at/bib/files/STAL9781607500070-0283.pdf},
  url       = {http://www.booksonline.iospress.nl/Content/View.aspx?piid=12006}
}

@InProceedings{Waltl2009,
  author    = {Waltl, Markus and Timmerer, Christian and Hellwagner, Hermann},
  booktitle = {Proceedings of the First International Workshop on Quality of Multimedia Experience (QoMEX 2009)},
  title     = {A Test-Bed for Quality of Multimedia Experience Evaluation of Sensory Effects},
  year      = {2009},
  address   = {Los Alamitos, CA, USA},
  editor    = {Ebrahim, Touradj and El-Maleh, Khaled and Dane, Gokce and Karam, Lina},
  month     = jul,
  pages     = {145--150},
  publisher = {IEEE},
  abstract  = {This paper introduces a prototype test-bed for triggering sensory effects like light, wind, or vibration when presenting audiovisual resources, e.g., a video, to users. The ISO/IEC MPEG is currently standardizing the Sensory Effect Description Language (SEDL) for describing such effects. This language is briefly described in the paper and the testbed that is destined to evaluate the quality of the multimedia experience of users is presented. It consists of a video annotation tool for sensory effects, a corresponding simulation tool, and a real test system. Initial experiments and results on determining the color of light effects from the video content are reported.},
  doi       = {10.1109/QOMEX.2009.5246962},
  isbn13    = {978-1-4244-4370-3},
  keywords  = {Sensory Information, MPEG-V},
  language  = {EN},
  location  = {San Diego, CA},
  pdf       = {https://www.itec.aau.at/bib/files/qomex2009_mwcthh.pdf},
  talkdate  = {2009.07.31},
  talktype  = {registered},
  url       = {http://www.qomex2009.org}
}

@InProceedings{Timmerer2009a,
  author    = {Timmerer, Christian and Gelissen, Jean and Waltl, Markus and Hellwagner, Hermann},
  booktitle = {Proceedings of the 2009 NEM Summit},
  title     = {Interfacing with Virtual Worlds},
  year      = {2009},
  address   = {Heidelberg},
  editor    = {Hrasnica, Halid},
  month     = sep,
  pages     = {118--123},
  publisher = {Eurescom – the European Institute for Research and Strategic Studies in Telecommunications – GmbH},
  abstract  = {Virtual worlds (often referred to as 3D3C for 3D visualization \& navigation and the 3C’s of Community, Creation and Commerce) integrate existing and emerging (media) technologies (e.g. instant messaging, video, 3D, VR, AI, chat, voice, etc.) that allow for the support of existing and the development of new kinds of networked services. The emergence of virtual worlds as platforms for networked services is recognized by businesses as an important enabler as it offers the power to reshape the way companies interact with their environments (markets, customers, suppliers, creators, stakeholders, etc.) in a fashion comparable to the Internet and to allow for the development of new (breakthrough) business models, services, applications and devices. Each virtual world however has a different culture and audience making use of these specific worlds for a variety of reasons. These differences in existing Metaverses permit users to have unique experiences. In order to bridge these differences in existing and emerging Metaverses a standardized framework is required, i.e., MPEG-V Media Context and Control (ISO/IEC 23005), that will provide a lower entry level to (multiple) virtual worlds both for the provider of goods and services as well as the user. The aim of this paper is to provide an overview of MPEG-V and its intended standardization areas. Additionally, a review about MPEG-V’s most advanced part – Sensory Information – is given.},
  isbn      = {9783000289538},
  keywords  = {MPEG-V},
  language  = {EN},
  pdf       = {https://www.itec.aau.at/bib/files/nem2009_ctjgmwhh.pdf},
  talktype  = {none},
  url       = {http://www.nem-summit.eu}
}

@InProceedings{Timmerer2009,
  author    = {Timmerer, Christian and Jaborning, Johannes and Hellwagner, Hermann},
  booktitle = {Proceedings of the 9th Workshop on Multimedia Metadata (WMM'09)},
  title     = {A Comparison and Mapping Model},
  year      = {2009},
  address   = {Aachen, Germany},
  editor    = {Klamma, Ralf and Grigoras, Romulus and Charvillat, Vincent and Kosch, Harald},
  month     = mar,
  pages     = {18},
  publisher = {http://ceur-ws.org},
  abstract  = {Nowadays, mobile devices have implemented several transmission technologies which enable access to the Internet and increase the bit rate for data exchange. Despite modern mobile processors and high-resolution displays, mobile devices will never reach the stage of a powerful notebook or desktop system (for example, due to the fact of battery powered CPUs or just concerning the smallsized displays). Due to these limitations, the deliverable content for these devices should be adapted based on their capabilities including a variety of aspects (e.g., from terminal to network characteristics). These capabilities should be described in an interoperable way. In practice, however, there are many standards available and a common mapping model between these standards is not in place. Therefore, in this paper we describe such a mapping model and its implementation aspects. In particular, we focus on the whole delivery context (i.e., terminal capabilities, network characteristics, user preferences, etc.) and investigated the two most prominent state-of-the-art description schemes, namely User Agent Profile (UAProf) and Usage Environment Description (UED).},
  language  = {EN},
  pdf       = {https://www.itec.aau.at/bib/files/DC-paper-v.2.pdf},
  talktype  = {none},
  url       = {http://ceur-ws.org/Vol-441/pxx.pdf}
}

@InProceedings{Sobe2009b,
  author    = {Sobe, Anita and Böszörmenyi, Laszlo},
  booktitle = {2009 First International Conference on Advances in Multimedia},
  title     = {Non-sequential Multimedia Caching},
  year      = {2009},
  address   = {Los Alamitos, CA, USA},
  editor    = {Burdescu, Dan and Dini, Petre},
  month     = jul,
  pages     = {158--161},
  publisher = {IEEE},
  series    = {MMedia'2009},
  doi       = {10.1109/MMEDIA.2009.36},
  language  = {EN},
  location  = {Colmar, France},
  talkdate  = {2009.07.25},
  talktype  = {registered}
}

@Book{Sobe2009a,
  author    = {Sobe, Anita},
  publisher = {VdM},
  title     = {Single Sign-On in IMS-based IPTV Systems},
  year      = {2009},
  address   = {Saarbrücken, Germany},
  month     = oct,
  isbn      = {978363920674},
  language  = {EN},
  pages     = {80}
}

@InProceedings{Schoeffmann2009e,
  author    = {Schoeffmann, Klaus and Lux, Mathias and Taschwer, Mario and Böszörmenyi, Laszlo},
  booktitle = {ICME'09 Proceedings of the 2009 IEEE international Conference on Multimedia and Expo},
  title     = {Visualization of Video Motion in Context of Video Browsing},
  year      = {2009},
  address   = {Los Alamitos, CA, USA},
  editor    = {Lin, CY and Cox, I},
  month     = jul,
  pages     = {658--661},
  publisher = {IEEE},
  abstract  = {We present a new approach for video browsing using visualization of motion direction and motion intensity statistics by color and brightness variations. Statistics are collected from motion vectors of H.264/AVC encoded video streams, so full video decoding is not required. By interpreting visualized motion patterns of video segments, users are able to quickly identify scenes similar to a prototype scene or identify potential scenes of interest. We give some examples of motion patterns with different semantic value, including camera zooms, hill jumps of ski-jumpers, and the repeated appearance of a news speaker. In a user study we show that certain scenes of interest can be found significantly faster using our video browsing tool than using a video player with VCR-like controls.},
  isbn      = {9781424442911},
  language  = {EN},
  talktype  = {none},
  url       = {http://dl.acm.org/citation.cfm?id=1698924.1699086}
}

@InProceedings{Schoeffmann2009d,
  author    = {Schoeffmann, Klaus and Taschwer, Mario and Böszörmenyi, Laszlo},
  booktitle = {Proceedings of the International Conference on Multimedia and Expo 2009},
  title     = {Video Browsing Using Motion Visualization},
  year      = {2009},
  address   = {Los Alamitos, CA, USA},
  editor    = {Lin, CY and Cox, I},
  month     = jul,
  pages     = {1835--1836},
  publisher = {IEEE},
  abstract  = {We present a video browsing tool that uses a novel and powerful visualization technique of video motion. The tool provides an interactive navigation index that allows users to quickly and easily recognize content semantics like scenes with fast/slow motion (in general or according to a specific direction), scenes showing still/moving objects in front of a still/moving background, camera pans, or camera zooms. Moreover, the visualization facilitates identification of similar segments in a video. A first user study has shown encouraging results.},
  issn      = {1945-788X},
  language  = {EN},
  talktype  = {none}
}

@InProceedings{Schoeffmann2009c,
  author    = {Schoeffmann, Klaus and Böszörmenyi, Laszlo},
  booktitle = {Content-Based Multimedia Indexing, 2009. CBMI '09},
  title     = {Video Browsing Using Interactive Navigation Summaries},
  year      = {2009},
  address   = {Los Alamitos, CA, USA},
  editor    = {Avrithis, Yannis and Kollias, Stefanos},
  month     = jul,
  pages     = {243--248},
  publisher = {IEEE},
  abstract  = {A new approach for interactive video browsing is described. The novelty of the proposed approach is the flexible concept of interactive navigation summaries. Similar to time sliders, commonly used with standard soft video players, navigation summaries allow random access to a video. In addition, they also provide abstract visualizations of the content at a user-defined level of detail and, thus, quickly communicate content characteristics to the user. Navigation summaries can provide visual information about both low-level features but even high-level features. The concept fully integrates the user, who knows best which navigation summary at which level of detail could be most beneficial for his/her current video browsing task, and provide him/her a flexible set of navigation means. A first user study has shown that our approach can significantly outperform standard soft video players - the state-of-the art “poor man’s” video browsing tool.},
  doi       = {10.1109/CBMI.2009.40},
  isbn      = {9780769536620},
  language  = {EN},
  location  = {Chania, Crete},
  talkdate  = {2009.06.04},
  talktype  = {registered}
}

@InCollection{Schoeffmann2009b,
  author    = {Schoeffmann, Klaus and Böszörmenyi, Laszlo},
  booktitle = {Advances in Semantic Media Adaptation and Personalization},
  title     = {Interactive Video Browsing of H.264 Content Based on Just-in-Time Analysis},
  year      = {2009},
  address   = {Boca Raton, FL, USA},
  editor    = {Angelides, Marios C and Mylonas, Phivos},
  month     = feb,
  pages     = {159--179},
  publisher = {Auerbach Publications},
  isbn10    = {1420076647},
  isbn13    = {978-1420076646},
  language  = {EN},
  talktype  = {none}
}

@InProceedings{Schoeffmann2009,
  author    = {Schoeffmann, Klaus and Lux, Mathias and Böszörmenyi, Laszlo},
  booktitle = {Advances in Multimedia Modeling},
  title     = {A Novel Approach for Fast and Accurate Commercial Detection in H.264/AVC Bit Streams Based on Logo Identification},
  year      = {2009},
  address   = {Berlin, Heidelberg, New York},
  editor    = {Huet, Benoit and Smeaton, Alan and Mayer-Patel, Ketan and Avrithis, Yannis},
  month     = jan,
  pages     = {119--127},
  publisher = {Springer},
  series    = {Lecture Notes in Computer Science},
  abstract  = {Commercial blocks provide no extra value for video indexing, retrieval, archiving, or summarization of TV broadcasts. Therefore, automatic detection of commercial blocks is an important topic in the domain of multimedia information systems. We present a commercial detection approach which is based on logo detection performed in the compressed domain. The novelty of our approach is that by taking advantage of advanced features of the H.264/AVC coding, it is both significantly faster and more exact than existing approaches working directly on compressed data. Our approach enables removal of commercials in a fraction of real-time while achieving an average recall of 97.33% with an average precision of 99.31%. Moreover, due to its run-time performance, our approach can also be employed on low performance devices, for instance DVB recorders.},
  doi       = {10.1007/978-3-540-92892-8_13},
  isbn13    = {978-3-540-92891-1},
  language  = {EN},
  talktype  = {none},
  url       = {http://www.springer.com/computer/information+systems/book/978-3-540-92891-1}
}

@InProceedings{Reiterer2009b,
  author    = {Reiterer, Bernhard and Concolato, Cyril and Hellwagner, Hermann},
  booktitle = {Proceedings of 1st International ICST Conference on User Centric Media - UCMedia 2009},
  title     = {Natural-Language-based Conversion of Images to Mobile Multimedia Experiences},
  year      = {2009},
  address   = {Berlin, Heidelberg, New York},
  editor    = {Daras, Patros and Chlamtac, Imrich},
  month     = dec,
  pages     = {4 - CD},
  publisher = {Springer},
  series    = {LNICST - Lecture Notes of the Institute for Computer Sciences, Social-Informatics and Telecommunications Engineering},
  abstract  = {We describe an approach for viewing any large, detail-rich picture on a small display by generating a video from the image, as taken by a virtual camera moving across it at varying distance. Our main innovation is the ability to build the virtual camera's motion from a textual description of a picture, e.g., a museum caption, so that relevance and ordering of image regions are determined by co-analyzing image annotations and natural language text. Furthermore, our system arranges the resulting presentation such that it is synchronized with an audio track generated from the text by use of a text-to-speech system.},
  isbn      = {9789639799844},
  keywords  = {image adaptation - text analysis - image annotation - digital cultural heritage - computer animation},
  language  = {EN},
  talktype  = {none},
  url       = {http://www.usercentricmedia.org/index.shtml}
}

@InProceedings{Reiterer2009a,
  author    = {Reiterer, Bernhard and Hellwagner, Hermann},
  booktitle = {Proceedings International InterMedia Summer School 2009},
  title     = {Animated Picture Presentation Steered by Natural Language},
  year      = {2009},
  address   = {Geneva},
  editor    = {Magnenat-Thalmann, Nadia and Han, Seunghyun and Potopsaltou, Dimitris},
  month     = jun,
  pages     = {24--32},
  publisher = {MIRALab at University of Geneva},
  series    = {International InterMedia Summer School},
  abstract  = {In this paper, we present an approach for presenting large, feature-rich pictures on small displays by generating an animation and subsequently a video from the image, as it could be taken by a virtual camera moving across the image. Our main innovation is the ability to build the virtual camera's motion upon a textual description of a picture, as from a museum caption, so that relevance and ordering of image regions is determined by co-analyzing image annotations and text. Furthermore, our system can arrange the resulting presentation in a way that it is synchronized with an audio track generated from the text by use of a text-to-speech system.},
  keywords  = {image adaptation, text parsing, image annotation, digital cultural heritage, computer animation},
  language  = {EN},
  talktype  = {none},
  url       = {http://intermediaschool.miralab.unige.ch/}
}

@InCollection{Reiterer2009,
  author    = {Reiterer, Bernhard and Lachner, Janine and Lorenz, Andreas and Zimmermann, Andreas and Hellwagner, Hermann},
  booktitle = {Advances in Semantic Media Adaptation and Personalization},
  title     = {Research Directions Toward User-centric Multimedia},
  year      = {2009},
  address   = {Boca Raton (Florida)},
  editor    = {Angelides, Marios C and Mylonas, Phivos and Wallace, Manolis},
  month     = mar,
  pages     = {21--42},
  publisher = {Auerbach Publications},
  abstract  = {Currently, much research aims at coping with the shortcomings in multimedia consumption that may exist in a user's current context, e.g., due to the absence of appropriate devices at many locations, a lack of capabilities of mobile devices, restricted access to content, or non-personalized user interfaces. Recently, solutions to specific problems have been emerging, e.g., wireless access to multimedia repositories over standardized interfaces; however, due to usability restrictions the user has to spend much effort to or is even incapable of fulfilling his/her demands. The vision of user-centric multimedia places the user in the center of multimedia services to support his/her multimedia consumption intelligently, dealing with the aforementioned issues while minimizing required work. Essential features of such a vision are comprehensive context awareness, personalized user interfaces, and multimedia content adaptation. These aspects are addressed in this paper as major challenges toward a user-centric multimedia framework.},
  doi       = {10.1201/9781420076653-c2},
  edition   = {2},
  isbn10    = {1420076647},
  isbn13    = {978-1420076646},
  language  = {EN},
  talktype  = {none},
  url       = {http://www.crcpress.com/product/isbn/9781420076646}
}

@InProceedings{Lux2009b,
  author    = {Lux, Mathias},
  booktitle = {Multimedia, 2009. ISM '09. 11th IEEE International Symposium on},
  title     = {An Evaluation of Metrics for Retrieval of MPEG-7 Semantic Descriptions},
  year      = {2009},
  address   = {Los Alamitos, CA, USA},
  editor    = {Tsai, Jeffrey and Jain, Ramesh},
  month     = dec,
  pages     = {546--551},
  publisher = {IEEE},
  abstract  = {MPEG-7 is an extensive multimedia metadata standard covering a huge number of aspects of metadata. However, as with most metadata standards details of usage and application of the standards are – at least partially – open to interpretation. In case of MPEG-7 storage and transmission of high level metadata on concept level are defined but retrieval methods are not proposed. So if for instance a user annotates photos using the MPEG-7 semantic description scheme, there are no standardized ways to retrieve the photos based on the annotation. In this paper we propose metrics for retrieval based on the MPEG-7 semantic description scheme and evaluate them in a digital photo retrieval scenario.},
  doi       = {10.1109/ISM.2009.104},
  isbn13    = {978-1-4244-5231-6},
  language  = {EN},
  talktype  = {none}
}

@Article{Lux2009a,
  author    = {Lux, Mathias and Marques, Oge and Schoeffmann, Klaus and Böszörmenyi, Laszlo and Lajtai, Georg},
  journal   = {Multimedia Tools and Applications},
  title     = {A novel tool for summarization of arthroscopic videos},
  year      = {2009},
  month     = sep,
  pages     = {521--544},
  abstract  = {Arthroscopic surgery is a minimally invasive procedure that uses a small camera to generate video streams, which are recorded and subsequently archived. In this paper we present a video summarization tool and demonstrate how it can be successfully used in the domain of arthroscopic videos. The proposed tool generates a keyframe-based summary, which clusters visually similar frames based on user-selected visual features and appropriate dissimilarity metrics. We discuss how this tool can be used for arthroscopic videos, taking advantage of several domain-specific aspects, without losing its ability to work on general-purpose videos. Experimental results confirm the feasibility of the proposed approach and encourage extending it to other application domains.},
  address   = {Berlin, Heidelberg, New York},
  language  = {EN},
  publisher = {Springer},
  url       = {http://www.springerlink.com/content/u037362581245316/}
}

@InProceedings{Lux2009,
  author    = {Lux, Mathias and Schoeffmann, Klaus and Marques, Oge and Böszörmenyi, Laszlo},
  booktitle = {9th Workshop on Multimedia Metadata (WMM'09)},
  title     = {A Novel Tool for Quick Video Summarization using Keyframe Extraction Techniques},
  year      = {2009},
  address   = {Aachen, Germany},
  editor    = {Grigoras, Romulus and Charvillat, Vincent and Klamma, Ralf and Kosch, Harald},
  month     = mar,
  pages     = {62--76},
  publisher = {http://ceur-ws.org},
  abstract  = {The increasing availability of short, unstructured video clips on the Web has generated an unprecedented need to organize, index, annotate and retrieve video contents to make them useful to potential viewers. This paper presents a novel, simple, and easy-to-use tool to benchmark different low level features for video summarization based on keyframe extraction. Moreover, it shows the usefulness of the benchmarking tool by developing hypothesis for a chosen domain through an exploratory study. It discusses the results of exploratory studies involving users and their judgment of what makes the summary generated by the tool a good one.},
  language  = {EN},
  talktype  = {none},
  url       = {http://sunsite.informatik.rwth-aachen.de/Publications/CEUR-WS/Vol-441/p04.pdf}
}

@Article{Kosch2009,
  author    = {Kosch, Harald and Timmerer, Christian},
  journal   = {IEEE Computing Now},
  title     = {Multimedia Metadata and Semantic Management},
  year      = {2009},
  month     = dec,
  number    = {December 2009},
  pages     = {00},
  volume    = {Multimedia Metadata and Semantic Management},
  address   = {Los Alamitos, CA, USA},
  language  = {EN},
  publisher = {IEEE}
}

@InProceedings{Kogler2009,
  author    = {Kogler, Marian and Del Fabro, Manfred and Lux, Mathias and Schoeffmann, Klaus and Böszörmenyi, Laszlo},
  booktitle = {Proceedings of the 10th International Workshop of the Multimedia Metadata Community on Semantic Multimedia Database Technologies (SeMuDaTe'09) in conjunction with the 4th International Conference on Semantic and Digital Media Technologies (SAMT 2009)},
  title     = {Global vs. Local Feature in Video Summarization: Experimental Results},
  year      = {2009},
  address   = {Aachen, Germany},
  editor    = {Klamma, Ralf and Kosch, Harald and Lux, Mathias and Stegmaier, Florian},
  month     = dec,
  pages     = {6},
  publisher = {http://ceur-ws.org},
  language  = {EN},
  talktype  = {none},
  url       = {http://sunsite.informatik.rwth-aachen.de/Publications/CEUR-WS/Vol-539/}
}

@InProceedings{Kofler2009c,
  author    = {Kofler, Christoph and Lux, Mathias},
  booktitle = {MM '09 Proceedings of the 17th ACM international conference on Multimedia},
  title     = {Dynamic Presentation Adaptation Based on User Intent Classification},
  year      = {2009},
  address   = {New York, NY, USA},
  editor    = {Gao, Wen and Tui, Yong and Hanjalic, Alan},
  month     = oct,
  pages     = {1117--1118},
  publisher = {ACM},
  abstract  = {Results of internet searches are typically presented as lists. When searching for digital photos different search result presentations however offer different benefits. If users are primarily interested in the visual content of images a thumbnail grid may be more appropriate than a list. For people searching photos taken at a specific place image metadata in the result presentation is of interest too. In this paper we present an application which monitors a user's behavior while searching for digital photos and classifies the user's intention. Based on the intention, the result is adapted to support the user in an optimal way.},
  doi       = {10.1145/1631272.1631526},
  language  = {EN},
  talktype  = {none},
  url       = {http://dl.acm.org/citation.cfm?id=1631526}
}

@InProceedings{Kofler2009b,
  author    = {Kofler, Christoph and Lux, Mathias},
  booktitle = {Proceedings of I-KNOW ’09 and I-SEMANTICS ’09},
  title     = {An Exploratory Study on the Explicitness of User Intentions in Digital Photo Retrieval},
  year      = {2009},
  address   = {Graz, Austria},
  editor    = {Tochtermann, Klaus and Maurer, Hermann},
  month     = sep,
  pages     = {208--214},
  publisher = {TU Graz \& Know Center},
  abstract  = {Search queries are typically interpreted as specification of information need of a user. Typically the search query is either interpreted as is or based on the context of a user, being for instance a user profile, his/her previously undertaken searches or any other background information. The actual intent of the user – the goal s/he wants to achieve with information retrieval – is an important part of a user’s context. In this paper we present the results of an exploratory study on the interplay between the goals of users and their search behavior in multimedia retrieval.},
  language  = {EN},
  talktype  = {none},
  url       = {http://www.i-know.tugraz.at/2009/papers/an_exploratory_study_explicitness_user_intentions.pdf}
}

@InProceedings{Kofler2009a,
  author    = {Kofler, Ingo and Kuschnig, Robert and Hellwagner, Hermann},
  booktitle = {Proceedings of the IEEE International Symposium on Broadband Multimedia Systems and Broadcasting (BMSB)},
  title     = {Improving IPTV Services by H.264/SVC Adaptation and Traffic Control},
  year      = {2009},
  address   = {Los Alamitos, CA, USA},
  editor    = {Angueira, Pablo and Reimers, Ulrich},
  month     = may,
  pages     = {1--6},
  publisher = {IEEE},
  series    = {BMSB},
  abstract  = {This paper presents a novel approach that combines both in-network, application-layer adaptation and network-layer traffic control of scalable video streams based on the H.264/SVC standard. In the IPTV/VoD scenario considered, an intercepting RTSP/RTP proxy performs admission control of the requested video, based on the signaled scalability information, and decides whether the content can be streamed without changes or in an adapted version. The proxy configures the network layer appropriately in order to separate the video stream from besteffort traffic on the same link. Rather than performing fixed bandwidth allocation, our proxy approach uses the Hierarchical Token Bucket (HTB) queuing discipline to allow for borrowing bandwidth between traffic classes. In that setting, two different allocation policies are introduced. The Hard Reservation Policy (HRP) performs admission control and adaptation on the video streams and does not modify video bandwidth allocation after admission. In contrast, the Flexible Borrowing Policy (FBP) restricts the admission control to the base layer of the SVC stream. The packets carrying MGS enhancement layer data are marked with priorities by the proxy and are handled at the network layer by a priority-based queuing mechanism. Both a qualitative comparison and an experimental evaluation of the two policies are given.},
  doi       = {10.1109/ISBMSB.2009.5133771},
  isbn13    = {9781424425907},
  language  = {EN},
  location  = {Bilbao, Spain},
  talkdate  = {2009.05.14},
  talktype  = {registered},
  url       = {https://www.itec.aau.at/publications/mmc/BMSB09_Kofler_Improving_IPTV_Services_Preprint.pdf}
}

@InProceedings{Kofler2009,
  author    = {Kofler, Ingo and Kuschnig, Robert and Hellwagner, Hermann},
  booktitle = {Proceedings of the 6th IEEE Consumer Communications and Networking Conference (CCNC)},
  title     = {In-Network Real-Time Adaptation of Scalable Video Content on a WiFi-ne Router},
  year      = {2009},
  address   = {Los Alamitos, CA, USA},
  editor    = {Gibbs, Simon and Messer, Alan},
  month     = jan,
  pages     = {2},
  publisher = {IEEE},
  series    = {CCNC},
  abstract  = {One of the most active research topics in the field of video signal processing is scalable video coding (SVC). The recently published extension of the H.264/AVC video coding standard introduces scalability features by employing a layered encoding of the video stream. In our work we investigated the usage of this scalable extension of H.264/AVC for in-network multimedia adaptation. We developed an RTSP/RTP-based proxy which exploits the layered encoding of the video and can perform real-time video adaptation on an inexpensive off-the-shelf WiFi router. This is achieved by applying a stateful, packet-based adaptation approach that keeps the computational costs at a minimum. With that approach it is possible to simultaneously adapt multiple video streams to varying network conditions or to the capabilities of the consumers' end-devices. In our demonstration we show the streaming of two scalable video streams from a server to a client and the in-network adaptation of the video at the WiFi router. The adaptation can be controlled interactively in the temporal, spatial and SNR domains.},
  doi       = {10.1109/CCNC.2009.4785005},
  isbn13    = {9781424423088},
  language  = {EN},
  location  = {Las Vegas, NV, USA},
  pdf       = {https://www.itec.aau.at/bib/files/CCNC09_SVC_Adaptation_Router_preprint.pdf},
  talkdate  = {2009.01.11},
  talktype  = {poster}
}

@Article{Karpati2009,
  author    = {Karpati, Peter and Szkaliczki, Tibor and Böszörmenyi, Laszlo},
  journal   = {Multimedia Tools and Applications},
  title     = {Designing and scaling distributed VoD servers},
  year      = {2009},
  issn      = {1380-7501},
  month     = jan,
  pages     = {55--91},
  volume    = {41},
  number    = {1},
  abstract  = {Planning Video-on-Demand (VoD) services based on the server architecture and the available equipment is always a challenging task. We created a formal model to support the design of distributed video servers that adapt dynamically and automatically to the changing client demands, network and host parameters. The model makes giving estimations about the available throughput possible, and defines evaluation criteria for VoD services relating to utilization and load balance, video usage, client satisfaction and costs. The dynamism of the frame model originates from the possible state transitions which have to be defined in a core model. The core model is responsible for configuration recommendation which determines how clients are served depending on the properties of their requests, system configuration and system load. Furthermore, it decides on the optimal placement of the server components in the network. The usability of the model is illustrated on examples.},
  address   = {Berlin, Heidelberg, New York},
  language  = {EN},
  publisher = {Springer},
  url       = {http://www.springerlink.com/content/1153860131r36v13/?p=c2702cc6a0b347749314ae5367dc47f0&pi=0}
}

@Article{Hellwagner2009,
  author    = {Hellwagner, Hermann and Kuschnig, Robert and Stütz, Thomas and Uhl, Andreas},
  journal   = {Journal on Signal Processing: Image Communication},
  title     = {Efficient In-Network Adaptation of Encrypted {H.264/SVC} Content},
  year      = {2009},
  month     = jul,
  number    = {9},
  pages     = {740--758},
  volume    = {24},
  abstract  = {This paper addresses the efficient adaptation of encrypted scalable video content (H.264/SVC). RTP-based in-network adaptation schemes on a media aware network element (MANE) in an IPTV and VoD scenario are considered. Two basic alternatives to implement encryption and adaptation of H.264/SVC content are investigated: (i) full, format-independent encryption making use of Secure RTP (SRTP); (ii) SVC-specific encryption that leaves the metadata relevant for adaptation (NAL unit headers) unencrypted. The SRTP-based scheme (i) is straightforward to deploy, but requires the MANE to be in the security context of the delivery, i.e., to be a trusted node. For adaptation, the content needs to be decrypted, scaled, and re-encrypted. The SVC-specific approach (ii) enables both full and selective encryption, e.g., of the base layer only. SVC-specific encryption is based on own previous work, which is substantially extended and detailed in this paper. The adaptation MANE can now be an untrusted node; adaptation becomes a low-complexity process, avoiding full decryption and re-encryption of the content. This paper presents the first experimental comparison of these two approaches and evaluates whether multimedia-specific encryption can lead to performance and application benefits. Potential security threats and security properties of the two approaches in the IPTV and VoD scenario are elementarily analyzed. In terms of runtime performance on the MANE our SVC-specific encryption scheme significantly outperforms the SRTP-based approach. SVC-specific encryption is also superior in terms of induced end-to-end delays. The performance can even be improved by selective application of the SVC-specific encryption scheme. The results indicate that efficient adaptation of SVC-encrypted content on low-end, untrusted network devices is feasible.},
  address   = {Amsterdam},
  language  = {EN},
  pdf       = {https://www.itec.aau.at/bib/files/Elsevier_SPIC_Hellwagner09a.pdf},
  publisher = {Elsevier B.V.}
}

@InProceedings{Ferscha2009,
  author    = {Ferscha, Alois and Hellwagner, Hermann and Neuper, Christa and Pree, Wolfgang},
  booktitle = {Informatik macht Zukunft - Zukunft macht Informatik},
  title     = {Zukunft der Informatik},
  year      = {2009},
  address   = {Wien},
  editor    = {Chroust, Gerhard and Moessenboeck, Hans-Peter},
  month     = dec,
  pages     = {48--51},
  publisher = {Oesterreichische Computer Gesellschaft},
  isbn13    = {9783854032588},
  language  = {DE},
  talktype  = {none}
}

@Article{Eberhard2009a,
  author    = {Eberhard, Michael and Timmerer, Christian and Quacchio, Emanuele and Hellwagner, Hermann},
  journal   = {IEEE Wireless Communications},
  title     = {An Interoperable Delivery Framework for Scalable Media Resources},
  year      = {2009},
  month     = oct,
  number    = {5},
  pages     = {58--63},
  volume    = {16},
  abstract  = {In this paper, an interoperable framework for the delivery of scalable media resources, e.g., in the standardized Scalable Video Coding (SVC) format, is presented. The framework provides support for Video on Demand (VoD) as well as multicast streaming and performs an efficient, generic, and interoperable adaptation of the streamed content based on MPEG-21 Digital Item Adaptation (DIA). The server as well as the clients of the streaming framework implement the MPEG Extensible Middleware (MXM) and utilize the MPEG Query Format (MPQF) for querying the available media resources. The framework has been fully integrated into the VLC media player. The architecture for both, VoD and multicast is presented in detail. Finally, a comparison in terms of performance of the generic MPEG-21 metadata-based adaptation approach to an SVC-specific adaptation approach is provided.},
  address   = {Los Alamitos, CA, USA},
  language  = {EN},
  pdf       = {https://www.itec.aau.at/bib/files/ieeewireless_eberhard.pdf},
  publisher = {IEEE}
}

@InProceedings{Eberhard2009,
  author    = {Eberhard, Michael and Timmerer, Christian and Hellwagner, Hermann},
  booktitle = {STreaming Day ’09 Proceedings},
  title     = {A Layered Piece-Picking Algorithm for Peer-to-Peer Networks},
  year      = {2009},
  editor    = {Raggio, Marko and Rovati, Fabrizio},
  month     = sep,
  abstract  = {The streaming of multimedia content over Peer-to-Peer (P2P) networks is nowadays a well appreciated concept, as it helps distributing content to a great number of users and additionally reduces the server costs for providing the content. As the users of P2P networks often have different bandwidth connections and terminals, the same content is usually provided in different qualities. Although such a provision of the same content in different qualities helps to satisfy all users, it makes the sharing process less efficient. Users that are interested in the content in a specific quality can only exchange pieces with those users that are interested in the same content and the same quality. Thus, layered video coding, which provides different qualities within one bitstream, is especially well suited for P2P distribution. If the layered content is provided once in the best quality, all peers interested in this content can at least exchange the base layer, plus the enhancement layers they are interested in with those peers that have them available.},
  isbn      = {9781616236212},
  language  = {EN},
  location  = {Genova, Italy},
  talkdate  = {2009.09.21},
  talktype  = {registered}
}

@MastersThesis{DelFabro2009,
  author   = {Del Fabro, Manfred},
  school   = {Klagenfurt University},
  title    = {Extensible Toolkit for Non-Linear Video Exploration},
  year     = {2009},
  month    = jan,
  abstract = {In this thesis I give an overview of current research done in the fields of video abstraction, video browsing and interactive video retrieval. In these research fields, which are summarized under the term video exploration, new concepts and solutions are developed that should help the users to get more control over video content again. Furthermore, I am going to introduce "FVET", the Flexible Video Exploration Toolkit. This is a platform that has been developed in the scope of this thesis, to be able to experiment with different approaches of video exploration. As it can be extended with plug-ins, it is very flexible regarding future developments. For this purpose an own plug-in architecture has been implemented that addresses especially the needs of video exploration applications.},
  language = {EN},
  pages    = {124}
}

@InProceedings{Chatzichristofis2009a, author = {Chatzichristofis, Savvas A and Boutalis, Yiannis and Lux, Mathias}, booktitle = {SISAP '09 Proceedings of the 2009 Second International Workshop on Similarity Search and Applications}, title = {An Interactive Content Based Image Retrieval System}, year = {2009}, address = {Los Alamitos, CA, USA}, editor = {Skopal, Thomas and Zezula, Pavel}, month = aug, pages = {151-153}, publisher = {IEEE}, abstract = {This paper presents an image retrieval suite called img(Rummager) which brings into effect a number of new as well as state of the art descriptors. 
The application can execute an image search based on a query image, either from XML-based index files, or directly from a folder containing image files, extracting the comparison features in real time. In addition the img(Rummager) application can execute a hybrid search of images from the application server, combining keyword information and visual similarity. Also img(Rummager) supports easy retrieval evaluation based on the normalized modified retrieval rank (NMRR) and average precision (AP).}, isbn10 = {978-0-7695-3765-8}, language = {EN}, talktype = {none}, url = {http://www.computer.org/portal/web/csdl/doi/10.1109/SISAP.2009.16} } @InProceedings{Chatzichristofis2009, author = {Chatzichristofis, Savvas A and Boutalis, YS and Lux, Mathias}, booktitle = {Signal Processing, Pattern Recognition and Applications (SPPRA 2009)}, title = {Selection of the proper compact composite descriptor for improving content based image retrieval}, year = {2009}, address = {Calgary, Canada}, editor = {Zagar, B}, month = feb, pages = {00-00}, publisher = {ACTA Press}, abstract = {Compact Composite Descriptors (CCD) are global image features capturing both, color and texture characteristics, at the same time in a very compact representation. In this paper we propose a combination of two recently introduced CCDs (CEDD and FCTH) into a Joint Composite Descriptor (JCD). We further present a method for descriptor selection to approach the best ANMRR that would result from CEDD and FCTH. With our approach the most appropriate descriptor in terms of maximization of information content can be found on a per image basis without knowledge of the data set as a whole. 
Experiments conducted on three known benchmarking image databases demonstrate the effectiveness of the proposed technique.}, language = {EN}, talktype = {none}, url = {http://www.actapress.com/PaperInfo.aspx?PaperID=34669&reason=500} } @InProceedings{Boeszoermenyi2009a, author = {Böszörmenyi, Laszlo}, booktitle = {Informatik macht Zukunft - Zukunft macht Informatik}, title = {Das Ideal der Ausführbarkeit in der Informatik und Gesellschaft}, year = {2009}, address = {Vienna, Austria}, editor = {Chroust, Gerhard and Mössenböck, Hans-Peter}, month = dec, pages = {117-122}, publisher = {OCG}, issn = {9783854032588}, language = {DE}, talktype = {none}, url = {http://www.siabshop.com/ocgbookshop/product_info.php/info/p50_Informatik-macht-Zukunft---Zukunftsmacht-Informatik.html/XTCsid/612c1725ded1d040d0098ae0fe5612a4} } @InProceedings{Boeszoermenyi2009, author = {Böszörmenyi, Laszlo}, booktitle = {Zeit erinnern}, title = {Können sich Computer erinnern?}, year = {2009}, address = {Klagenfurt, Austria}, editor = {Antonitsch, Peter and Scherbantin, Annette and Theuermann, Anneliese and Wakounig, Vladimir}, month = oct, pages = {205-215}, publisher = {Drava Verlag}, series = {Drava Diskurs}, issn = {9783854356004}, language = {DE}, talktype = {none}, url = {http://ifeb.uni-klu.ac.at/index.php?id=120&tx_ttnews[tt_news]=34&tx_ttnews[backPid]=22&cHash=bb0520ffc9} }