% Keywords: AVC
% Encoding: utf-8

@InProceedings{Matha2021,
  author    = {Matha, Roland and Kimovski, Dragi and Zabrovskiy, Anatoliy and Timmerer, Christian and Prodan, Radu},
  title     = {Where to Encode: A Performance Analysis of {x86} and {Arm}-based {Amazon} {EC2} Instances},
  booktitle = {2021 IEEE 17th International Conference on eScience (eScience)},
  year      = {2021},
  month     = sep,
  pages     = {118--127},
  publisher = {IEEE},
  doi       = {10.1109/escience51609.2021.00022},
  url       = {https://www.computer.org/csdl/proceedings-article/escience/2021/036100a118/1y14GC0fb6o},
  keywords  = {Amazon EC2, Arm instances, AVC, Cloud computing, FFmpeg, Graviton2, HEVC, Performance analysis, Video encoding},
  abstract  = {Video streaming became an undivided part of the Internet. To efficiently utilise the limited network bandwidth it is essential to encode the video content. However, encoding is a computationally intensive task, involving high-performance resources provided by private infrastructures or public clouds. Public clouds, such as Amazon EC2, provide a large portfolio of services and instances optimized for specific purposes and budgets. The majority of Amazon’s instances use x86 processors, such as Intel Xeon or AMD EPYC. However, following the recent trends in computer architecture, Amazon introduced Arm based instances that promise up to 40\% better cost performance ratio than comparable x86 instances for specific workloads. We evaluate in this paper the video encoding performance of x86 and Arm instances of four instance families using the latest FFmpeg version and two video codecs. We examine the impact of the encoding parameters, such as different presets and bitrates, on the time and cost for encoding. Our experiments reveal that Arm instances show high time and cost saving potential of up to 33.63\% for specific bitrates and presets, especially for the x264 codec. However, the x86 instances are more general and achieve low encoding times, regardless of the codec.},
}

@Article{Ghamsarian2020c,
  author    = {Ghamsarian, Negin and Schoeffmann, Klaus and Khademi, Morteza},
  title     = {Blind {MV}-based video steganalysis based on joint inter-frame and intra-frame statistics},
  journal   = {Multimedia Tools and Applications},
  year      = {2020},
  month     = nov,
  volume    = {80},
  number    = {6},
  pages     = {1--23},
  issn      = {1573-7721},
  publisher = {Springer Science and Business Media LLC},
  doi       = {10.1007/s11042-020-10001-9},
  url       = {https://link.springer.com/article/10.1007/s11042-020-10001-9},
  keywords  = {Blind steganalysis, Video steganography, Information security, Motion vector, Video compression, H264/AVC},
  abstract  = {Despite all its irrefutable benefits, the development of steganography methods has sparked ever-increasing concerns over steganography abuse in recent decades. To prevent the inimical usage of steganography, steganalysis approaches have been introduced. Since motion vector manipulation leads to random and indirect changes in the statistics of videos, MV-based video steganography has been the center of attention in recent years. In this paper, we propose a 54-dimentional feature set exploiting spatio-temporal features of motion vectors to blindly detect MV-based stego videos. The idea behind the proposed features originates from two facts. First, there are strong dependencies among neighboring MVs due to utilizing rate-distortion optimization techniques and belonging to the same rigid object or static background. Accordingly, MV manipulation can leave important clues on the differences between each MV and the MVs belonging to the neighboring blocks. Second, a majority of MVs in original videos are locally optimal after decoding concerning the Lagrangian multiplier, notwithstanding the information loss during compression. Motion vector alteration during information embedding can affect these statistics that can be utilized for steganalysis. Experimental results have shown that our features’ performance far exceeds that of state-of-the-art steganalysis methods. This outstanding performance lies in the utilization of complementary spatio-temporal statistics affected by MV manipulation as well as feature dimensionality reduction applied to prevent overfitting. Moreover, unlike other existing MV-based steganalysis methods, our proposed features can be adjusted to various settings of the state-of-the-art video codec standards such as sub-pixel motion estimation and variable-block-size motion estimation.},
}

@InProceedings{Sablatschan2010a,
  author    = {Sablatschan, Michael and Ortiz Murillo, Jordi and Ransburg, Michael and Hellwagner, Hermann},
  title     = {Efficient {SVC}-to-{AVC} Conversion at a Media Aware Network Element},
  booktitle = {Proceedings of the Workshop SVCVision, in conjunction with the 6th International Mobile Multimedia Communications Conference (MobiMedia 2010)},
  year      = {2010},
  month     = sep,
  editor    = {Rodriguez, Jonathan and Tafazolli, Rahim and Verikoukis, Christos},
  pages     = {7},
  publisher = {Springer},
  address   = {Berlin, Heidelberg, New York},
  keywords  = {Multimedia Adaptation, H.264/SVC, SVC-to-AVC rewriting},
  language  = {EN},
  talktype  = {none},
  abstract  = {H.264/SVC, the Scalable Video Coding extension of the H.264/AVC video coding standard, features spatial, quality and temporal scalability. Backwards compatibility with legacy decoding devices is maintained through an H.264/AVC compliant base layer, which represents the lowest quality of an H.264/SVC bit-stream. However, it is often desireable to also provide the higher quality layers to legacy H.264/AVC devices. This is achieved by a process commonly known as ``bit-stream rewriting'', which allows for an efficient H.264/SVC to H.264/AVC conversion by exploiting the similarities of the two codecs. This paper describes a demonstrator showing the advantages of including an improved version of the bit-stream rewriting tool from the existing JSVM H.264/SVC reference software in an H.264/SVC-based multimedia delivery system, by integrating it into a Media Aware Network Element.},
}