% Keywords: Analysis
% Encoding: utf-8
@InProceedings{Steinkellner2021, author = {Philip Steinkellner and Klaus Schöffmann}, booktitle = {2021 International Conference on Content-Based Multimedia Indexing (CBMI)}, title = {{Evaluation of Object Detection Systems and Video Tracking in Skiing Videos}}, year = {2021}, month = {jun}, pages = {1--6}, publisher = {IEEE}, abstract = {Nowadays, modern ski resorts provide additional services to customers, such as recording videos of specific moments from their skiing experience. This and similar tasks can be achieved by using computer vision methods. In this work, we evaluate the detection performance of current object detection methods and the tracking performance of a detection-based tracking algorithm. The evaluation is based on videos of skiers and snowboarders from ski resorts. We collect videos of race tracks from different resorts and compile a public dataset of images and videos, where skiers and snowboarders are annotated with bounding boxes. Based on this data, we evaluate the performance of four state-of-the-art object detection methods. This evaluation is performed with general models trained on the MS COCO dataset as well as with custom models trained on our dataset. In addition, we review the performance of the detection-based, multi-object tracking algorithm Deep SORT, which we adapt for skier tracking. The results show promising performance and reveal that the MS COCO models already achieve high Precision, while training a custom model additionally improves the performance. Bigger models profit from custom training in terms of more accurate bounding box placement and higher Precision, while smaller models have an overall high training payoff. The modified Deep SORT tracker manages to follow a skier’s trajectory over an extended period and operates with high accuracy, which indicates that the tracker is overall well suited for tracking of skiers and snowboarders on race tracks. Even when exposed to strong camera and skier movement changes, the tracker stays latched onto the target.}, doi = {10.1109/cbmi50038.2021.9461905}, keywords = {Object Detection, Object Tracking, YOLOv4, Faster R-CNN, Deep SORT, Skiing, Sports Video Analysis}, url = {http://dx.doi.org/10.1109/cbmi50038.2021.9461905} } @Article{Rossetto2021, author = {Luca Rossetto and Ralph Gasser and Jakub Lokoc and Werner Bailer and Klaus Schoeffmann and Bernd Muenzer and Tomas Soucek and Phuong Anh Nguyen and Paolo Bolettieri and Andreas Leibetseder and Stefanos Vrochidis}, journal = {IEEE Transactions on Multimedia}, title = {{Interactive Video Retrieval in the Age of Deep Learning - Detailed Evaluation of VBS 2019}}, year = {2021}, issn = {1941-0077}, month = mar, pages = {243--256}, volume = {23}, abstract = {Despite the fact that automatic content analysis has made remarkable progress over the last decade - mainly due to significant advances in machine learning - interactive video retrieval is still a very challenging problem, with an increasing relevance in practical applications. The Video Browser Showdown (VBS) is an annual evaluation competition that pushes the limits of interactive video retrieval with state-of-the-art tools, tasks, data, and evaluation metrics. In this paper, we analyse the results and outcome of the 8th iteration of the VBS in detail. We first give an overview of the novel and considerably larger V3C1 dataset and the tasks that were performed during VBS 2019.
We then go on to describe the search systems of the six international teams in terms of features and performance. And finally, we perform an in-depth analysis of the per-team success ratio and relate this to the search strategies that were applied, the most popular features, and problems that were experienced. A large part of this analysis was conducted based on logs that were collected during the competition itself. This analysis gives further insights into the typical search behavior and differences between expert and novice users. Our evaluation shows that textual search and content browsing are the most important aspects in terms of logged user interactions. Furthermore, we observe a trend towards deep learning based features, especially in the form of labels generated by artificial neural networks. But nevertheless, for some tasks, very specific content-based search features are still being used. We expect these findings to contribute to future improvements of interactive video search systems.}, doi = {10.1109/tmm.2020.2980944}, keywords = {Interactive Video Retrieval, Video Browsing, Video Content Analysis, Content-based Retrieval, Evaluations}, publisher = {Institute of Electrical and Electronics Engineers (IEEE)}, url = {https://ieeexplore.ieee.org/document/9037125} } @InCollection{Prodan2021, author = {Shajulin Benedict and Prateek Agrawal and Radu Prodan}, booktitle = {Communications in Computer and Information Science}, publisher = {Springer Singapore}, title = {{Energy Consumption Analysis of R-Based Machine Learning Algorithms for Pandemic Predictions}}, year = {2021}, month = jun, pages = {192--204}, volume = {1393}, abstract = {The push for agile pandemic analytic solutions has attained development-stage software modules of applications instead of functioning as full-fledged production-stage applications – i.e., performance, scalability, and energy-related concerns are not optimized for the underlying computing domains. And while the research continues to support the idea that reducing the energy consumption of algorithms improves the lifetime of battery-operated machines, advisable tools in almost any developer setting, an energy analysis report for R-based analytic programs is indeed a valuable suggestion. This article proposes an energy analysis framework for R-programs that enables data analytic developers, including pandemic-related application developers, to analyze the programs. It reveals an energy analysis report for R programs written to predict the new cases of 215 countries using random forest variants. Experiments were carried out at the IoT cloud research lab and the energy efficiency aspects were discussed in the article. In the experiments, ranger-based prediction program consumed 95.8 J.}, doi = {10.1007/978-981-16-3660-8_18}, keywords = {Analysis, Energy consumption, Machine learning, R-program, Tools}, url = {https://link.springer.com/chapter/10.1007/978-981-16-3660-8_18} } @InProceedings{Matha2021, author = {Roland Matha and Dragi Kimovski and Anatoliy Zabrovskiy and Christian Timmerer and Radu Prodan}, booktitle = {2021 IEEE 17th International Conference on eScience (eScience)}, title = {{Where to Encode: A Performance Analysis of x86 and Arm-based Amazon EC2 Instances}}, year = {2021}, month = {sep}, pages = {118--127}, publisher = {IEEE}, abstract = {Video streaming became an undivided part of the Internet. To efficiently utilise the limited network bandwidth it is essential to encode the video content. 
However, encoding is a computationally intensive task, involving high-performance resources provided by private infrastructures or public clouds. Public clouds, such as Amazon EC2, provide a large portfolio of services and instances optimized for specific purposes and budgets. The majority of Amazon’s instances use x86 processors, such as Intel Xeon or AMD EPYC. However, following the recent trends in computer architecture, Amazon introduced Arm-based instances that promise up to 40% better cost-performance ratio than comparable x86 instances for specific workloads. We evaluate in this paper the video encoding performance of x86 and Arm instances of four instance families using the latest FFmpeg version and two video codecs. We examine the impact of the encoding parameters, such as different presets and bitrates, on the time and cost for encoding. Our experiments reveal that Arm instances show high time and cost saving potential of up to 33.63% for specific bitrates and presets, especially for the x264 codec. However, the x86 instances are more general and achieve low encoding times, regardless of the codec.}, doi = {10.1109/escience51609.2021.00022}, keywords = {Amazon EC2, Arm instances, AVC, Cloud computing, FFmpeg, Graviton2, HEVC, Performance analysis, Video encoding}, url = {https://www.computer.org/csdl/proceedings-article/escience/2021/036100a118/1y14GC0fb6o} } @InCollection{Leibetseder2021a, author = {Andreas Leibetseder and Klaus Schoeffmann}, booktitle = {MultiMedia Modeling}, publisher = {Springer International Publishing}, title = {{Less is More - diveXplore 5.0 at VBS 2021}}, year = {2021}, month = jan, number = {12573}, pages = {455--460}, abstract = {As a longstanding participating system in the annual Video Browser Showdown (VBS2017-VBS2020) as well as in two iterations of the more recently established Lifelog Search Challenge (LSC2018-LSC2019), diveXplore is developed as a feature-rich Deep Interactive Video Exploration system. After its initial successful employment as a competitive tool at the challenges, its performance, however, declined as new features were introduced, increasing its overall complexity. We mainly attribute this to the fact that many additions to the system needed to revolve around the system’s core element – an interactive self-organizing browseable featuremap, which, as an integral component, did not accommodate the addition of new features well. Therefore, counteracting said performance decline, the VBS 2021 version constitutes a completely rebuilt version 5.0, implemented from scratch with the aim of greatly reducing the system’s complexity as well as keeping proven useful features in a modular manner.}, doi = {10.1007/978-3-030-67835-7_44}, keywords = {Video retrieval, Interactive video search, Video analysis}, url = {https://link.springer.com/chapter/10.1007/978-3-030-67835-7_44} } @InCollection{Karisch2021, author = {Christof Karisch and Andreas Leibetseder and Klaus Schoeffmann}, booktitle = {MultiMedia Modeling}, publisher = {Springer International Publishing}, title = {{NoShot Video Browser at VBS2021}}, year = {2021}, month = jan, number = {12573}, pages = {405--409}, abstract = {We present our NoShot Video Browser, which has been successfully used at the last Video Browser Showdown competition VBS2020 at the MMM2020. NoShot is given its name due to the fact that it neither makes use of any kind of shot detection nor utilizes the VBS master shots. Instead, videos are split into frames with a time distance of one second.
The biggest strength of the system lies in its feature “time cache”, which shows results with the best confidence in a range of seconds.}, doi = {10.1007/978-3-030-67835-7_36}, keywords = {Video retrieval, Interactive video search, Video analysis}, url = {https://link.springer.com/chapter/10.1007/978-3-030-67835-7_36} } @Article{Karandikar_2021, author = {Nikita Karandikar and Rockey Abhishek and Nishant Saurabh and Zhiming Zhao and Alexander Lercher and Ninoslav Marina and Radu Prodan and Chunming Rong and Antorweep Chakravorty}, journal = {Blockchain: Research and Applications}, title = {{Blockchain-based prosumer incentivization for peak mitigation through temporal aggregation and contextual clustering}}, year = {2021}, issn = {2096-7209}, month = jun, pages = {1--35}, abstract = {Peak mitigation is of interest to power companies as peak periods may require the operator to over provision supply in order to meet the peak demand. Flattening the usage curve can result in cost savings, both for the power companies and the end users. Integration of renewable energy into the energy infrastructure presents an opportunity to use excess renewable generation to supplement supply and alleviate peaks. In addition, demand side management can shift the usage from peak to off peak times and reduce the magnitude of peaks. In this work, we present a data driven approach for incentive based peak mitigation. Understanding user energy profiles is an essential step in this process. We begin by analysing a popular energy research dataset published by the Ausgrid corporation. Extracting aggregated user energy behavior in temporal contexts and semantic linking and contextual clustering give us insight into consumption and rooftop solar generation patterns. We implement and performance-test a blockchain-based prosumer incentivization system. The smart contract logic is based on our analysis of the Ausgrid dataset. Our implementation is capable of supporting 792,540 customers with a reasonably low infrastructure footprint.}, doi = {10.1016/j.bcra.2021.100016}, keywords = {Peak shaving, aggregation analysis, contextual clustering, blockchain, incentivization}, publisher = {Elsevier (BV)}, url = {https://www.sciencedirect.com/science/article/pii/S2096720921000117?via=ihub} } @InProceedings{Moll2020, author = {Philipp Moll and Veit Frick and Natascha Rauscher and Mathias Lux}, booktitle = {Proceedings of the 12th ACM International Workshop on Immersive Mixed and Virtual Environment Systems}, title = {{How players play games}}, year = {2020}, month = {jun}, publisher = {ACM}, abstract = {The popularity of computer games is remarkably high and is still growing every year. Despite this popularity and the economic importance of gaming, research in game design, or to be more precise, of game mechanics that can be used to improve the enjoyment of a game, is still scarce. In this paper, we analyze Fortnite, one of the currently most successful games, and observe how players play the game. We investigate what makes playing the game enjoyable by analyzing video streams of experienced players from game streaming platforms and by conducting a user study with players who are new to the game. We formulate four hypotheses about how game mechanics influence the way players interact with the game and how it influences player enjoyment. We present differences in player behavior between experienced players and beginners and discuss how game mechanics could be used to improve the enjoyment for beginners.
In addition, we describe our approach to analyze games without access to game-internal data by using a toolchain which automatically extracts game information from video streams.}, doi = {10.1145/3386293.3397113}, keywords = {Online Games, Game Mechanics, Game Design, Video Analysis}, url = {https://dl.acm.org/doi/10.1145/3386293.3397113} } @InProceedings{Mazdin2020, author = {Petra Mazdin and Michal Barcis and Hermann Hellwagner and Bernhard Rinner}, booktitle = {2020 IEEE 16th International Conference on Automation Science and Engineering (CASE)}, title = {{Distributed Task Assignment in Multi-Robot Systems based on Information Utility}}, year = {2020}, month = {aug}, pages = {734--740}, publisher = {IEEE}, abstract = {Most multi-robot systems (MRS) require coordinating the assignment of tasks to individual robots for efficient missions. Due to the dynamics, incomplete knowledge and changing requirements, the robots need to distribute their local state information within the MRS continuously during the mission. Since communication resources are limited and message transfers may be erroneous, the global state estimated by each robot may become inconsistent. This inconsistency may lead to degraded task assignment and mission performance. In this paper, we explore the effect and cost of communication and exploit information utility for online distributed task assignment. In particular, we model the usefulness of the transferred state information by its information utility and use it for controlling the distribution of local state information and for updating the global state. We compare our distributed, utility-based online task assignment with well-known centralized and auction-based methods and show how substantial reduction of communication effort still leads to successful mission completion. We demonstrate our approach in a wireless communication testbed using ROS2.}, doi = {10.1109/case48305.2020.9216982}, keywords = {Task analysis, Robot kinematics, Mathematical model, Multi-robot systems, Optimization, Heuristic algorithms}, url = {https://doi.org/10.1109/CASE48305.2020.9216982} } @Article{Ghamsarian2020c, author = {Negin Ghamsarian and Klaus Schoeffmann and Morteza Khademi}, journal = {Multimedia Tools and Applications}, title = {{Blind MV-based video steganalysis based on joint inter-frame and intra-frame statistics}}, year = {2020}, issn = {1573-7721}, month = {nov}, number = {6}, pages = {1--23}, volume = {80}, abstract = {Despite all its irrefutable benefits, the development of steganography methods has sparked ever-increasing concerns over steganography abuse in recent decades. To prevent the inimical usage of steganography, steganalysis approaches have been introduced. Since motion vector manipulation leads to random and indirect changes in the statistics of videos, MV-based video steganography has been the center of attention in recent years. In this paper, we propose a 54-dimensional feature set exploiting spatio-temporal features of motion vectors to blindly detect MV-based stego videos. The idea behind the proposed features originates from two facts. First, there are strong dependencies among neighboring MVs due to utilizing rate-distortion optimization techniques and belonging to the same rigid object or static background. Accordingly, MV manipulation can leave important clues on the differences between each MV and the MVs belonging to the neighboring blocks.
Second, a majority of MVs in original videos are locally optimal after decoding concerning the Lagrangian multiplier, notwithstanding the information loss during compression. Motion vector alteration during information embedding can affect these statistics that can be utilized for steganalysis. Experimental results have shown that our features’ performance far exceeds that of state-of-the-art steganalysis methods. This outstanding performance lies in the utilization of complementary spatio-temporal statistics affected by MV manipulation as well as feature dimensionality reduction applied to prevent overfitting. Moreover, unlike other existing MV-based steganalysis methods, our proposed features can be adjusted to various settings of the state-of-the-art video codec standards such as sub-pixel motion estimation and variable-block-size motion estimation.}, doi = {10.1007/s11042-020-10001-9}, keywords = {Blind steganalysis, Video steganography, Information security, Motion vector, Video compression, H264/AVC}, publisher = {Springer Science and Business Media LLC}, url = {https://link.springer.com/article/10.1007/s11042-020-10001-9} } @InProceedings{PetscharnigMMM17, author = {Petscharnig, Stefan and Schoeffmann, Klaus}, booktitle = {International Conference on Multimedia Modeling}, title = {Deep Learning of Shot Classification in Gynecologic Surgery Videos}, year = {2017}, address = {Cham}, editor = {Amsaleg, Laurent and Guðmundsson, Gylfi Þór and Gurrin, Cathal and Jónsson, Björn Þór and Satoh, Shin’ichi}, month = {jan}, pages = {702-713}, publisher = {Springer}, abstract = {In the last decade, advances in endoscopic surgery resulted in vast amounts of video data which is used for documentation, analysis, and education purposes. In order to find video scenes relevant for aforementioned purposes, physicians manually search and annotate hours of endoscopic surgery videos. This process is tedious and time-consuming, thus motivating the (semi-)automatic annotation of such surgery videos. In this work, we want to investigate whether the single-frame model for semantic surgery shot classification is feasible and useful in practice. We approach this problem by further training of AlexNet, an already pre-trained CNN architecture. Thus, we are able to transfer knowledge gathered from the Imagenet database to the medical use case of shot classification in endoscopic surgery videos. We annotate hours of endoscopic surgery videos for training and testing data. Our results imply that the CNN-based single-frame classification approach is able to provide useful suggestions to medical experts while annotating video scenes. Hence, the annotation process is consequently improved. 
Future work shall consider the evaluation of more sophisticated classification methods incorporating the temporal video dimension, which is expected to improve on the baseline evaluation done in this work.}, edition = {LNCS 10132}, keywords = {Multimedia content analysis, Convolutional neural networks, Deep learning, Medical shot classification}, language = {EN}, location = {Klagenfurt, Austria}, talkdate = {2017.01.05}, talktype = {registered}, url = {https://link.springer.com/chapter/10.1007/978-3-319-51811-4_57} } @InProceedings{Kletz2017, author = {Kletz, Sabrina and Schoeffmann, Klaus and Münzer, Bernd and Primus, Manfred J and Husslein, Heinrich}, booktitle = {Proceedings of the First ACM Workshop on Educational and Knowledge Technologies (MultiEdTech 2017)}, title = {Surgical Action Retrieval for Assisting Video Review of Laparoscopic Skills}, year = {2017}, address = {Mountain View, California, USA}, editor = {Li, Qiong and Lienhart, Rainer and Wang, Hao Hong}, month = {oct}, pages = {9}, publisher = {ACM}, series = {MultiEdTech '17}, abstract = {An increasing number of surgeons promote video review of laparoscopic surgeries for detection of technical errors at an early stage as well as for training purposes. The reason behind is the fact that laparoscopic surgeries require specific psychomotor skills, which are difficult to learn and teach. The manual inspection of surgery video recordings is extremely cumbersome and time-consuming. Hence, there is a strong demand for automated video content analysis methods. In this work, we focus on retrieving surgical actions from video collections of gynecologic surgeries. We propose two novel dynamic content descriptors for similarity search and investigate a query-by-example approach to evaluate the descriptors on a manually annotated dataset consisting of 18 hours of video content. We compare several content descriptors including dynamic information of the segments as well as descriptors containing only spatial information of keyframes of the segments. The evaluation shows that our proposed dynamic content descriptors considering motion and spatial information from the segment achieve a better retrieval performance than static content descriptors ignoring temporal information of the segment at all. The proposed content descriptors in this work enable content-based video search for similar laparoscopic actions, which can be used to assist surgeons in evaluating laparoscopic surgical skills.}, doi = {10.1145/3132390.3132395}, keywords = {feature signatures, laparoscopic video, medical endoscopy, motion analysis, similarity search, video retrieval}, language = {EN}, location = {Mountain View, California, USA}, talkdate = {2017.10.27}, talktype = {registered}, url = {http://doi.acm.org/10.1145/3132390.3132395} } @InProceedings{Beecks2017, author = {Beecks, Christian and Kletz, Sabrina and Schoeffmann, Klaus}, booktitle = {Proceedings of the Third IEEE International Conference on Multimedia Big Data (BigMM 2017)}, title = {Large-Scale Endoscopic Image and Video Linking with Gradient-Based Signatures}, year = {2017}, address = {Laguna Hills, California, USA}, editor = {Chen, Shu-Ching and Sheu, Philip Chen-Yu}, month = {apr}, pages = {5}, publisher = {IEEE}, series = {BigMM}, abstract = {Given a large-scale video archive of surgical interventions and a medical image showing a specific moment of an operation, how to find the most image-related videos efficiently without the utilization of additional semantic characteristics? 
In this paper, we investigate a novel content-based approach of linking medical images with relevant video segments arising from endoscopic procedures. We propose to approximate the video segments' content-based features by gradient-based signatures and to index these signatures with the Minkowski distance in order to determine the most query-like video segments efficiently. We benchmark our approach on a large endoscopic image and video archive and show that our approach achieves a significant improvement in efficiency in comparison to the state-of-the-art while maintaining high accuracy.}, doi = {10.1109/BigMM.2017.44}, keywords = {feature signatures, laparoscopic video, medical endoscopy, motion analysis, similarity search, video retrieval}, language = {EN}, location = {Laguna Hills, California, USA}, talkdate = {2017.04.19}, talktype = {registered}, url = {http://ieeexplore.ieee.org/document/7966709/} } @Article{SchoeffmannTMM2014, title = {3-D Interfaces to Improve the Performance of Visual Known-Item Search}, author = {Schoeffmann, Klaus and Ahlstrom, David and Hudelist, Marco Andrea}, journal = {Multimedia, IEEE Transactions on}, year = {2014}, month = {dec}, number = {7}, pages = {10}, volume = {16}, address = {Los Alamitos, CA, USA}, doi = {10.1109/TMM.2014.2333666}, issn = {1520-9210}, keywords = {Browsers;Image color analysis;Layout;Navigation;Smart phones;Three-dimensional displays;Visualization}, language = {EN}, publisher = {IEEE} } @InProceedings{lux2012did, author = {Lux, Mathias and Huber, Jochen}, booktitle = {Image Analysis for Multimedia Interactive Services (WIAMIS), 2012 13th International Workshop on}, title = {Why did you record this video? An exploratory study on user intentions for video production}, year = {2012}, address = {Los Alamitos, CA, USA}, editor = {O'Connor, Noel and Daras, Petros and Pereira, Fernando}, month = {jan}, organization = {IEEE}, pages = {1-4}, publisher = {IEEE}, abstract = {Why do people record videos and share them? While the question seems to be simple, user intentions have not yet been investigated for video production and sharing. A general taxonomy would lead to adapted information systems and multimedia interfaces tailored to the users' intentions. We contribute (1) an exploratory user study with 20 participants, examining the various facets of user intentions for video production and sharing in detail and (2) a novel set of user intention clusters for video production, grounded empirically in our study results. We further reflect existing work in specialized domains (i.e. video blogging and mobile phone cameras) and show that prevailing models used in other multimedia fields (e.g. 
photography) cannot be used as-is to reason about video recording and sharing intentions.}, doi = {10.1109/WIAMIS.2012.6226758}, isbn10 = {978-1-4673-0789-5}, isbn13 = {978-1-4673-0791-8}, issn = {2158-5873}, keywords = {Communication, Networking & Broadcasting; Components, Circuits, Devices & Systems; Computing & Processing (Hardware/Software); Signal Processing & Analysis}, language = {EN}, location = {Dublin, Ireland}, talkdate = {2012.05.25}, talktype = {registered} } @InProceedings{Reiterer2009b, author = {Reiterer, Bernhard and Concolato, Cyril and Hellwagner, Hermann}, booktitle = {Proceedings of the 1st International ICST Conference on User Centric Media - UCMedia 2009}, title = {Natural-Language-based Conversion of Images to Mobile Multimedia Experiences}, year = {2009}, address = {Berlin, Heidelberg, New York}, editor = {Daras, Petros and Chlamtac, Imrich}, month = dec, pages = {4 - CD}, publisher = {Springer}, series = {LNICST - Lecture Notes of the Institute for Computer Sciences, Social-Informatics and Telecommunications Engineering}, abstract = {We describe an approach for viewing any large, detail-rich picture on a small display by generating a video from the image, as taken by a virtual camera moving across it at varying distance. Our main innovation is the ability to build the virtual camera's motion from a textual description of a picture, e.g., a museum caption, so that relevance and ordering of image regions are determined by co-analyzing image annotations and natural language text. Furthermore, our system arranges the resulting presentation such that it is synchronized with an audio track generated from the text by use of a text-to-speech system.}, isbn = {9789639799844}, keywords = {image adaptation, text analysis, image annotation, digital cultural heritage, computer animation}, language = {EN}, talktype = {none}, url = {http://www.usercentricmedia.org/index.shtml} } @InProceedings{Hellwagner2000b, author = {Hellwagner, Hermann and Leopold, Klaus and Schlatterbeck, Ralf and Weich, Carsten}, booktitle = {Proceedings Distributed and Parallel Systems}, title = {Performance Tuning of Parallel Real-Time Voice Communication Software}, year = {2000}, address = {Norwell, MA, USA}, editor = {Kacsuk, Peter and Kotsis, Gabriele}, month = sep, pages = {57--60}, publisher = {Kluwer Academic Publishers}, abstract = {This paper describes an unconventional way to apply a performance analysis tool for parallel programs (Vampir) to understand and tune the performance of the real-time voice and data communication software running on top of Frequentis’ V4 switch. The execution schedule of the strictly time-triggered V4 switching software is computed off-line; analyzing the schedule to identify e.g. performance bottlenecks used to be a complex and time-consuming process. We present our approach to transform the V4 software schedule’s information into Vampir trace files and use this tool’s facilities to provide a visualization of the schedule. A case study illustrates the benefits of this approach.}, language = {EN}, pdf = {https://www.itec.aau.at/bib/files/2000-0084-HHKL.pdf}, talktype = {none} }