% Keywords: Bandwidth
% Encoding: utf-8

@InProceedings{Mehran2021,
  author    = {Mehran, Narges and Kimovski, Dragi and Prodan, Radu},
  booktitle = {2021 IEEE/ACM 21st International Symposium on Cluster, Cloud and Internet Computing (CCGrid)},
  title     = {A Two-Sided Matching Model for Data Stream Processing in the {Cloud}--{Fog} Continuum},
  year      = {2021},
  month     = may,
  pages     = {514--524},
  publisher = {IEEE},
  abstract  = {Latency-sensitive and bandwidth-intensive stream processing applications are dominant traffic generators over the Internet network. A stream consists of a continuous sequence of data elements, which require processing in nearly real-time. To improve communication latency and reduce the network congestion, Fog computing complements the Cloud services by moving the computation towards the edge of the network. Unfortunately, the heterogeneity of the new Cloud – Fog continuum raises important challenges related to deploying and executing data stream applications. We explore in this work a two-sided stable matching model called Cloud – Fog to data stream application matching (CODA) for deploying a distributed application represented as a workflow of stream processing microservices on heterogeneous computing continuum resources. In CODA, the application microservices rank the continuum resources based on their microservice stream processing time, while resources rank the stream processing microservices based on their residual bandwidth. A stable many-to-one matching algorithm assigns microservices to resources based on their mutual preferences, aiming to optimize the complete stream processing time on the application side, and the total streaming traffic on the resource side. We evaluate the CODA algorithm using simulated and real-world Cloud – Fog experimental scenarios. We achieved 11-45% lower stream processing time and 1.3-20% lower streaming traffic compared to related state-of-the-art approaches.},
  doi       = {10.1109/ccgrid51090.2021.00061},
  keywords  = {Cloud - fog computing, Distributed databases, Bandwidth, Games, Streaming media, Data models, Real-time systems},
  url       = {https://ieeexplore.ieee.org/document/9499353},
}

@InProceedings{Amirpour2021a,
  author    = {Amirpour, Hadi and Barahouei Pasandi, Hannaneh and Timmerer, Christian and Ghanbari, Mohammad},
  booktitle = {2021 International Conference on Visual Communications and Image Processing (VCIP)},
  title     = {Improving Per-title Encoding for {HTTP} Adaptive Streaming by Utilizing Video Super-resolution},
  year      = {2021},
  month     = dec,
  pages     = {1--5},
  publisher = {IEEE},
  abstract  = {In per-title encoding, to optimize a bitrate ladder over spatial resolution, each video segment is downscaled to a set of spatial resolutions, and they are all encoded at a given set of bitrates. To find the highest quality resolution for each bitrate, the low-resolution encoded videos are upscaled to the original resolution, and a convex hull is formed based on the scaled qualities. Deep learning-based video super-resolution (VSR) approaches show a significant gain over traditional upscaling approaches, and they are becoming more and more efficient over time. This paper improves the per-title encoding over the upscaling methods by using deep neural network-based VSR algorithms. Utilizing a VSR algorithm by improving the quality of low-resolution encodings can improve the convex hull. As a result, it will lead to an improved bitrate ladder. To avoid bandwidth wastage at perceptually lossless bitrates, a maximum threshold for the quality is set, and encodings beyond it are eliminated from the bitrate ladder. Similarly, a minimum threshold is set to avoid low-quality video delivery. The encodings between the maximum and minimum thresholds are selected based on one Just Noticeable Difference. Our experimental results show that the proposed per-title encoding results in a 24% bitrate reduction and 53% storage reduction compared to the state-of-the-art method.},
  doi       = {10.1109/vcip53242.2021.9675403},
  keywords  = {Image coding, Visual communication, Bit rate, Superresolution, Bandwidth, Streaming media, Spatial resolution, HAS, per-title, deep learning, compression, bitrate ladder},
  url       = {https://ieeexplore.ieee.org/document/9675403},
}

@Article{Bentaleb2020,
  author    = {Bentaleb, Abdelhak and Timmerer, Christian and Begen, Ali C. and Zimmermann, Roger},
  journal   = {ACM Transactions on Multimedia Computing, Communications, and Applications},
  title     = {Performance Analysis of {ACTE}: A Bandwidth Prediction Method for Low-Latency Chunked Streaming},
  year      = {2020},
  issn      = {1551-6857},
  month     = jul,
  number    = {2s},
  pages     = {1--24},
  volume    = {16},
  abstract  = {HTTP adaptive streaming with chunked transfer encoding can offer low-latency streaming without sacrificing the coding efficiency. This allows media segments to be delivered while still being packaged. However, conventional schemes often make widely inaccurate bandwidth measurements due to the presence of idle periods between the chunks and hence this is causing sub-optimal adaptation decisions. To address this issue, we earlier proposed ACTE (ABR for Chunked Transfer Encoding), a bandwidth prediction scheme for low-latency chunked streaming. While ACTE was a significant step forward, in this study we focus on two still remaining open areas, namely (i) quantifying the impact of encoding parameters, including chunk and segment durations, bitrate levels, minimum interval between IDR-frames and frame rate on ACTE, and (ii) exploring the impact of video content complexity on ACTE. We thoroughly investigate these questions and report on our findings. We also discuss some additional issues that arise in the context of pursuing very low latency HTTP video streaming.},
  doi       = {10.1145/3387921},
  keywords  = {HAS, ABR, DASH, CMAF, low-latency, HTTP chunked transfer encoding, bandwidth measurement and prediction, RLS, encoding parameters, FFmpeg},
  publisher = {Association for Computing Machinery (ACM)},
  url       = {https://dl.acm.org/doi/abs/10.1145/3387921},
}

@Article{martina_for_HH,
  author    = {Sterca, Adrian and Hellwagner, Hermann and Boian, Florian and Vancea, Alexandru},
  journal   = {IEEE Transactions on Circuits and Systems for Video Technology},
  title     = {Media-friendly and {TCP}-friendly Rate Control Protocols for Multimedia Streaming},
  year      = {2015},
  month     = aug,
  number    = {1},
  pages     = {15},
  volume    = {1},
  abstract  = {This paper describes a design framework for TCP-friendly and media-friendly rate control algorithms for multimedia streaming applications. The idea of this framework is to start from TFRC's (TCP-Friendly Rate Control) transmission rate and then alter this transmission rate so that it tracks the media characteristics of the stream (e.g., bitrate) or other application characteristics like the client buffer fill level. In this way, the media-friendly property of the algorithm is achieved. We give three rules that guide how the TFRC throughput should track the evolution of the stream's media characteristics and remain TCP-friendly in the long term. We also present, as proof of concept, four simple media-friendly and TCP-friendly congestion control algorithms built using the aforementioned framework. These congestion control algorithms are better suited for multimedia streaming applications than traditional TCP congestion control or smooth congestion control algorithms like TFRC. We have performed evaluations of two of the four proposed media-friendly and TCP-friendly congestion control algorithms under various network conditions and validated that they represent viable transport solutions, better than TFRC, for variable bitrate video streams. More specifically, our two media-friendly and TCP-friendly congestion control algorithms maintained a TCP-friendly throughput in the long term in all experiments and avoided an empty buffer at the client side in situations when TFRC could not achieve this.},
  address   = {USA},
  doi       = {10.1109/TCSVT.2015.2469075},
  keywords  = {TCP-friendly congestion control, media-friendly, multimedia streaming, Bandwidth, Multimedia communication},
  language  = {EN},
  publisher = {IEEE},
  url       = {http://ieeexplore.ieee.org/xpl/articleDetails.jsp?reload=true&arnumber=7206573&sortType%3Dasc_p_Sequence%26filter%3DAND%28p_Publication_Number%3A76%29%26rowsPerPage%3D100},
}

@InProceedings{Lederer2012,
  author    = {Lederer, Stefan and Mueller, Christopher and Timmerer, Christian},
  booktitle = {Proceedings of the 19th International Packet Video Workshop ({PV} 2012)},
  title     = {Towards Peer-Assisted Dynamic Adaptive Streaming over {HTTP}},
  year      = {2012},
  address   = {Munich, Germany},
  editor    = {Guillemot, Christine and Chakareski, Jacob and Steinbach, Eckehard},
  month     = may,
  pages     = {1--6},
  publisher = {IEEE},
  abstract  = {This paper presents our peer-assisted Dynamic Adaptive Streaming over HTTP (pDASH) proposal as well as an evaluation based on our DASH simulation environment in comparison to conventional approaches, i.e., non-peer-assisted DASH. Our approach maintains the standard conformance to MPEG-DASH enabling an easy and straightforward way of enhancing a streaming system with peer assistance to reduce the bandwidth and infrastructure requirements of the content/service provider. In anticipation of the results our system achieves a bandwidth reduction of Content Distribution Networks (CDN) and as a consequence the corresponding infrastructure costs of the content/service providers by up to 25% by leveraging the upstream capacity of neighboring peers. Furthermore, the cost savings have been evaluated using a cost model that is based on the current Amazon CloudFront pricing scheme. Furthermore, we have also evaluated the performance impact that various combinations of quality levels of the content could have in a peer-assisted streaming system as well as the client behavior in such an environment.},
  keywords  = {Peer-Assisted Streaming, MPEG-DASH, Dynamic Adaptive Streaming over HTTP, CDN Bandwidth Reduction, Peer-to-Peer Streaming},
  language  = {EN},
  location  = {Munich, Germany},
  pdf       = {https://www.itec.aau.at/bib/files/Paper53.pdf},
  talkdate  = {2012.05.10},
  talktype  = {registered},
}