% Type: Misc
% Encoding: utf-8

@Misc{Kimovski2021,
  author        = {Roman, Dumitru and Nikolov, Nikolay and Elvesater, Brian and Soylu, Ahmet and Prodan, Radu and Kimovski, Dragi and Marrella, Andrea and Leotta, Francesco and Benvenuti, Dario and Matskin, Mihhail and Ledakis, Giannis and Simonet-Boulogne, Anthony and Perales, Fernando and Kharlamov, Evgeny and Ulisses, Alexandre and Solberg, Arnor and Ceccarelli, Raffaele},
  internal-note = {NOTE(review): first author was entered as "Dumitru, Roman" (surname Dumitru); all sibling names use "Last, First" and Dumitru Roman's surname is Roman, so the order was corrected -- confirm against the paper. "Elvesater" may need the diacritic form "Elves{\ae}ter" -- verify.},
  howpublished  = {RCIS '21 Proceedings of the 15th International Conference on Research Challenges in Information Science},
  month         = may,
  title         = {{DataCloud}: Enabling the Big Data Pipelines on the Computing Continuum},
  year          = {2021},
  abstract      = {With the recent developments of Internet of Things (IoT) and cloud-based technologies, massive amounts of data are generated by heterogeneous sources and stored through dedicated cloud solutions. Often organizations generate much more data than they are able to interpret, and current Cloud Computing technologies cannot fully meet the requirements of the Big Data processing applications and their data transfer overheads. Many data are stored for compliance purposes only but not used and turned into value, thus becoming Dark Data, which are not only an untapped value, but also pose a risk for organizations. To guarantee a better exploitation of Dark Data, the DataCloud project aims to realize novel methods and tools for effective and efficient management of the Big Data Pipeline lifecycle encompassing the Computing Continuum. Big Data pipelines are composite pipelines for processing data with nontrivial properties, commonly referred to as the Vs of Big Data (e.g., volume, velocity, value, etc.). Tapping their potential is a key aspect to leverage Dark Data, although it requires to go beyond the current approaches and frameworks for Big Data processing. In this respect, the concept of Computing Continuum extends the traditional centralised Cloud Computing with Edge and Fog computing in order to ensure low latency pre-processing and filtering close to the data sources. This will prevent to overwhelm the centralised cloud data centres enabling new opportunities for supporting Big Data pipelines.},
  doi           = {10.1007/978-3-030-75018-3},
  url           = {https://link.springer.com/content/pdf/bbm:978-3-030-75018-3/1.pdf},
}

@Misc{Kashanskii2021a,
  author       = {Kashanskii, Vladislav and Radchenko, Gleb and Prodan, Radu and Zabrovskiy, Anatoliy and Agrawal, Prateek},
  howpublished = {Online Publication (Abstract)},
  month        = may,
  title        = {Automated Workflows Scheduling via Two-Phase Event-based {MILP} Heuristic for {MRCPSP} Problem},
  year         = {2021},
  abstract     = {In today’s reality massive amounts of data-intensive tasks are managed by utilizing a large number of heterogeneous computing and storage elements interconnected through high-speed communication networks. However, one issue that still requires research effort is to enable efficient workflows scheduling in such complex environments. As the scale of the system grows and the workloads become more heterogeneous in the inner structure and the arrival patterns, scheduling problem becomes exponentially harder, requiring problem-specific heuristics. Many techniques evolved to tackle this problem, including, but not limited to Heterogeneous Earliest Finish Time (HEFT), The Dynamic Scaling Consolidation Scheduling (DSCS), Partitioned Balanced Time Scheduling (PBTS), Deadline Constrained Critical Path (DCCP) and Partition Problem-based Dynamic Provisioning Scheduling (PPDPS). In this talk, we will discuss the two-phase heuristic for makespan-optimized assignment of tasks and computing machines on large-scale computing systems, consisting of matching phase with subsequent event-based MILP method for schedule generation. We evaluated the scalability of the heuristic using the Constraint Integer Programming (SCIP) solver with various configurations based on data sets, provided by the MACS framework. Preliminary results show that the model provides near-optimal assignments and schedules for workflows composed of up to 100 tasks with complex task I/O interactions and demonstrates variable sensitivity with respect to the scale of workflows and resource limitation policies imposed.},
  keywords     = {HPC Schedule Generation, MRCPSP Problem, Workflows Scheduling, Two-Phase Heuristic},
  url          = {https://ashpc21.si/booklet-of-abstracts/#dearflip-df_2168/},
}

@Misc{Vladislav2020,
  author       = {Prodan, Radu and Kashanskii, Vladislav and Kimovski, Dragi and Agrawal, Prateek},
  howpublished = {Online Publication (Abstract)},
  month        = feb,
  title        = {{ASPIDE} Project: Perspectives on the Scalable Monitoring and Auto-tuning},
  year         = {2020},
  abstract     = {Extreme Data is an incarnation of Big Data concept distinguished by the massive amounts of data that must be queried, communicated and analyzed in (near) real-time by using a very large number of memory/storage elements of both, the converging Cloud and Pre-Exascale computing systems. Notable examples are the raw high energy physics data produced at a rate of hundreds of gigabits-per-second that must be filtered, stored and analyzed in a fault-tolerant fashion, multi-scale brain imaging data analysis and simulations, complex networks data analyses, driven by the social media systems. To handle such amounts of data multi-tier architectures are introduced, including scheduling systems and distributed storage systems, ranging from in-memory databases to tape libraries. The ASPIDE project is contributing with the definition of a new programming paradigm, APIs, runtime tools and methodologies for expressing data intensive tasks on the converging large-scale systems, which can pave the way for the exploitation of parallelism policies over the various models of the system architectures, promoting high performance and efficiency, and offering powerful operations and mechanisms for processing extreme data sources at high speed and/or real-time.},
  url          = {https://research-explorer.app.ist.ac.at/record/7474},
}

@Misc{Moll2019b,
  author        = {Moll, Philipp and Frick, Veit and Rauscher, Natascha Jasmin and Lux, Mathias},
  howpublished  = {Online Publikation},
  internal-note = {NOTE(review): howpublished is German ("Publikation") while sibling entries use "Online Publication" -- left as-is; normalise deliberately if desired.},
  month         = sep,
  title         = {How Players Play Games: Observing the Influences of Game Mechanics},
  year          = {2019},
  abstract      = {The popularity of computer games is remarkably high and is still growing every year. Despite this popularity and the economical importance of gaming, research in game design, or to be more precise, of game mechanics that can be used to improve the enjoyment of a game, is still scarce. In this paper, we analyze Fortnite, one of the currently most successful games, and observe how players play the game. We investigate what makes playing the game enjoyable by analyzing video streams of experienced players from game streaming platforms and by conducting a user study with players who are new to the game. We formulate four hypotheses about how game mechanics influence the way players interact with the game and how it influences player enjoyment. We present differences in player behavior between experienced players and beginners and discuss how game mechanics could be used to improve the enjoyment for beginners. In addition, we describe our approach to analyze games without access to game-internal data by using a toolchain which automatically extracts game information from video streams.},
  url           = {https://arxiv.org/abs/1909.09738},
}

@Misc{Mueller2017,
  author       = {Mueller, Christopher and Lederer, Stefan and Timmerer, Christian},
  howpublished = {Patent},
  month        = jun,
  note         = {US 15365886},
  title        = {Adaptation Logic for Varying a Bitrate},
  year         = {2017},
  url          = {https://patents.google.com/patent/US20170188069A1/en},
}

@Misc{MuellerLT2016_2,
  author       = {Mueller, Christopher and Lederer, Stefan and Timmerer, Christian},
  howpublished = {Patent},
  month        = jun,
  note         = {US20160173551 A1},
  title        = {System and Method for Session Mobility for Adaptive Bitrate Streaming},
  year         = {2016},
  url          = {https://www.google.com/patents/US20160173551},
}

@Misc{MuellerLT2016,
  author       = {Mueller, Christopher and Lederer, Stefan and Timmerer, Christian},
  howpublished = {Patent},
  month        = may,
  note         = {US 20160134677},
  title        = {Apparatus and Method for Cloud Assisted Adaptive Streaming},
  year         = {2016},
  url          = {http://www.freepatentsonline.com/y2016/0134677.html},
}

@Misc{LedererMT2016,
  author       = {Lederer, Stefan and Mueller, Christopher and Timmerer, Christian},
  howpublished = {Patent},
  month        = aug,
  note         = {US 20160234282 A1},
  title        = {Apparatus and Method for Constant Quality Optimization for Adaptive Streaming},
  year         = {2016},
  url          = {https://www.google.com/patents/US20160234282},
}