% Keywords: Cloud Computing
% Encoding: utf-8
% NOTE(review): reformatted from a whitespace-mangled export; entry data unchanged
% except for mechanical BibTeX fixes (month macros, page ranges, brace protection,
% \%-escaping in abstracts, name-form normalization). Possible author-name typos
% ("Kummar", "Benedikt") left as found -- verify against the published papers.

@inproceedings{Matha2021,
  author    = {Matha, Roland and Kimovski, Dragi and Zabrovskiy, Anatoliy and Timmerer, Christian and Prodan, Radu},
  booktitle = {2021 IEEE 17th International Conference on eScience (eScience)},
  title     = {Where to Encode: A Performance Analysis of x86 and {Arm}-based {Amazon EC2} Instances},
  year      = {2021},
  month     = sep,
  pages     = {118--127},
  publisher = {IEEE},
  abstract  = {Video streaming became an undivided part of the Internet. To efficiently utilise the limited network bandwidth it is essential to encode the video content. However, encoding is a computationally intensive task, involving high-performance resources provided by private infrastructures or public clouds. Public clouds, such as Amazon EC2, provide a large portfolio of services and instances optimized for specific purposes and budgets. The majority of Amazon's instances use x86 processors, such as Intel Xeon or AMD EPYC. However, following the recent trends in computer architecture, Amazon introduced Arm based instances that promise up to 40\% better cost performance ratio than comparable x86 instances for specific workloads. We evaluate in this paper the video encoding performance of x86 and Arm instances of four instance families using the latest FFmpeg version and two video codecs. We examine the impact of the encoding parameters, such as different presets and bitrates, on the time and cost for encoding. Our experiments reveal that Arm instances show high time and cost saving potential of up to 33.63\% for specific bitrates and presets, especially for the x264 codec. However, the x86 instances are more general and achieve low encoding times, regardless of the codec.},
  doi       = {10.1109/escience51609.2021.00022},
  keywords  = {Amazon EC2, Arm instances, AVC, Cloud computing, FFmpeg, Graviton2, HEVC, Performance analysis, Video encoding},
  url       = {https://www.computer.org/csdl/proceedings-article/escience/2021/036100a118/1y14GC0fb6o},
}

@article{Kimovski2021c,
  author    = {Kimovski, Dragi and Matha, Roland and Hammer, Josef and Mehran, Narges and Hellwagner, Hermann and Prodan, Radu},
  journal   = {IEEE Internet Computing},
  title     = {Cloud, Fog, or Edge: Where to Compute?},
  year      = {2021},
  issn      = {1941-0131},
  month     = jul,
  number    = {4},
  pages     = {30--36},
  volume    = {25},
  abstract  = {The computing continuum extends the high-performance cloud data centers with energy-efficient and low-latency devices close to the data sources located at the edge of the network. However, the heterogeneity of the computing continuum raises multiple challenges related to application management. These include where to offload an application -- from the cloud to the edge -- to meet its computation and communication requirements. To support these decisions, we provide in this article a detailed performance and carbon footprint analysis of a selection of use case applications with complementary resource requirements across the computing continuum over a real-life evaluation testbed.},
  doi       = {10.1109/mic.2021.3050613},
  keywords  = {Edge computing, Cloud computing, Benchmarking, Carbon footprint},
  publisher = {Institute of Electrical and Electronics Engineers (IEEE)},
  url       = {https://ieeexplore.ieee.org/document/9321525},
}

@article{Matha2020,
  author    = {Matha, Roland and Ristov, Sasko and Fahringer, Thomas and Prodan, Radu},
  journal   = {IEEE Transactions on Parallel and Distributed Systems},
  title     = {Simplified Workflow Simulation on Clouds based on Computation and Communication Noisiness},
  year      = {2020},
  issn      = {1045-9219},
  month     = jul,
  number    = {7},
  pages     = {1559--1574},
  volume    = {31},
  abstract  = {Many researchers rely on simulations to analyze and validate their researched methods on Cloud infrastructures. However, determining relevant simulation parameters and correctly instantiating them to match the real Cloud performance is a difficult and costly operation, as minor configuration changes can easily generate an unreliable inaccurate simulation result. Using legacy values experimentally determined by other researchers can reduce the configuration costs, but is still inaccurate as the underlying public Clouds and the number of active tenants are highly different and dynamic in time. To overcome these deficiencies, we propose a novel model that simulates the dynamic Cloud performance by introducing noise in the computation and communication tasks, determined by a small set of runtime execution data. Although the estimating method is apparently costly, a comprehensive sensitivity analysis shows that the configuration parameters determined for a certain simulation setup can be used for other simulations too, thereby reducing the tuning cost by up to 82.46\%, while declining the simulation accuracy by only 1.98\% in average. Extensive evaluation also shows that our novel model outperforms other state-of-the-art dynamic Cloud simulation models, leading up to 22\% lower makespan inaccuracy.},
  doi       = {10.1109/tpds.2020.2967662},
  keywords  = {Cloud computing, simulation, workflow applications, burstable instances, performance instability and noisiness},
  publisher = {Institute of Electrical and Electronics Engineers (IEEE)},
  url       = {https://ieeexplore.ieee.org/document/8964294/},
}

@inproceedings{Fard2020a,
  author    = {Fard, Hamid Mohammadi and Prodan, Radu and Wolf, Felix},
  booktitle = {2020 IEEE/ACM 13th International Conference on Utility and Cloud Computing (UCC)},
  title     = {Dynamic Multi-objective Scheduling of Microservices in the Cloud},
  year      = {2020},
  month     = dec,
  pages     = {386--393},
  publisher = {IEEE},
  abstract  = {For many applications, a microservices architecture promises better performance and flexibility compared to a conventional monolithic architecture. In spite of the advantages of a microservices architecture, deploying microservices poses various challenges for service developers and providers alike. One of these challenges is the efficient placement of microservices on the cluster nodes. Improper allocation of microservices can quickly waste resource capacities and cause low system throughput. In the last few years, new technologies in orchestration frameworks, such as the possibility of multiple schedulers for pods in Kubernetes, have improved scheduling solutions of microservices but using these technologies needs to involve both the service developer and the service provider in the behavior analysis of workloads. Using memory and CPU requests specified in the service manifest, we propose a general microservices scheduling mechanism that can operate efficiently in private clusters or enterprise clouds. We model the scheduling problem as a complex variant of the knapsack problem and solve it using a multi-objective optimization approach. Our experiments show that the proposed mechanism is highly scalable and simultaneously increases utilization of both memory and CPU, which in turn leads to better throughput when compared to the state-of-the-art.},
  doi       = {10.1109/ucc48980.2020.00061},
  keywords  = {scheduling microservices, cloud computing, multi-objective optimization, knapsack problem, resource management},
  url       = {https://ieeexplore.ieee.org/document/9302823},
}

@incollection{Fard2020,
  author    = {Fard, Hamid Mohammadi and Prodan, Radu and Wolf, Felix},
  booktitle = {Algorithmic Aspects of Cloud Computing},
  publisher = {Springer International Publishing},
  title     = {A Container-Driven Approach for Resource Provisioning in Edge-Fog Cloud},
  year      = {2020},
  month     = aug,
  series    = {Lecture Notes in Computer Science},
  volume    = {12041},
  pages     = {59--76},
  abstract  = {With the emerging Internet of Things (IoT), distributed systems enter a new era. While pervasive and ubiquitous computing already became reality with the use of the cloud, IoT networks present new challenges because the ever growing number of IoT devices increases the latency of transferring data to central cloud data centers. Edge and fog computing represent practical solutions to counter the huge communication needs between IoT devices and the cloud. Considering the complexity and heterogeneity of edge and fog computing, however, resource provisioning remains the Achilles heel of efficiency for IoT applications. According to the importance of operating-system virtualization (so-called containerization), we propose an application-aware container scheduler that helps to orchestrate dynamic heterogeneous resources of edge and fog architectures. By considering available computational capacity, the proximity of computational resources to data producers and consumers, and the dynamic system status, our proposed scheduling mechanism selects the most adequate host to achieve the minimum response time for a given IoT service. We show how a hybrid use of containers and serverless microservices improves the performance of running IoT applications in fog-edge clouds and lowers usage fees. Moreover, our approach outperforms the scheduling mechanisms of Docker Swarm.},
  doi       = {10.1007/978-3-030-58628-7_5},
  keywords  = {Edge computing, Fog computing, Cloud computing, Resource provisioning, Containerization, Microservice, Orchestration, Scheduling},
  url       = {https://link.springer.com/chapter/10.1007/978-3-030-58628-7_5},
}

@inproceedings{Prodan2019,
  author    = {Prodan, Radu and Torre, Ennio and Durillo, Juan J. and Aujla, Gagangeet Singh and Kummar, Neeraj and Fard, Hamid Mohammadi and Benedikt, Shajulin},
  booktitle = {2019 45th Euromicro Conference on Software Engineering and Advanced Applications (SEAA)},
  title     = {Dynamic Multi-objective Virtual Machine Placement in Cloud Data Centers},
  year      = {2019},
  month     = aug,
  pages     = {92--99},
  publisher = {IEEE},
  abstract  = {Minimizing the resource wastage reduces the energy cost of operating a data center, but may also lead to a considerably high resource overcommitment affecting the Quality of Service (QoS) of the running applications. Determining the effective tradeoff between resource wastage and overcommitment is a challenging task in virtualized Cloud data centers and depends on how Virtual Machines (VMs) are allocated to physical resources. In this paper, we propose a multi-objective framework for dynamic placement of VMs exploiting live-migration mechanisms which simultaneously optimize the resource wastage, overcommitment ratio and migration cost. The optimization algorithm is based on a novel evolutionary meta-heuristic using an island population model underneath. We implemented and validated our method based on an enhanced version of a well-known simulator. The results demonstrate that our approach outperforms other related approaches by reducing up to 57\% migrations energy consumption while achieving different energy and QoS goals.},
  doi       = {10.1109/seaa.2019.00023},
  keywords  = {Cloud computing, Energy efficiency, Multi objective optimization, Virtual machine placement},
  url       = {https://ieeexplore.ieee.org/document/8906523},
}

@article{Hossfeld2012,
  author    = {Ho{\ss}feld, Tobias and Schatz, Raimund and Varela, Martin and Timmerer, Christian},
  journal   = {IEEE Communications Magazine},
  title     = {Challenges of {QoE} Management for Cloud Applications},
  year      = {2012},
  month     = apr,
  number    = {4},
  pages     = {28--36},
  volume    = {50},
  abstract  = {Cloud computing is currently gaining enormous momentum due to a number of promised benefits: ease of use in terms of deployment, administration, and maintenance, along with high scalability and flexibility to create new services. However, as more personal and business applications migrate to the cloud, service quality will become an important differentiator between providers. In particular, quality of experience as perceived by users has the potential to become the guiding paradigm for managing quality in the cloud. In this article, we discuss technical challenges emerging from shifting services to the cloud, as well as how this shift impacts QoE and QoE management. Thereby, a particular focus is on multimedia cloud applications. Together with a novel QoE-based classification scheme of cloud applications, these challenges drive the research agenda on QoE management for cloud applications.},
  address   = {New York, NY, USA},
  doi       = {10.1109/MCOM.2012.6178831},
  keywords  = {cloud computing, multimedia computing, software quality, QoE management, QoE-based classification scheme, multimedia cloud applications, quality management, quality of experience, service quality, Cloud computing, Multimedia communication, Quality of service, Streaming media},
  language  = {EN},
  pdf       = {https://www.itec.aau.at/bib/files/06178831.pdf},
  publisher = {IEEE Communications Society},
}