% Taschwer, Mario
% Encoding: utf-8
%
% NOTE(review): normalized throughout — lowercase entry types, month macros
% (unquoted), `--` page ranges, bare DOIs, braced LaTeX special characters for
% accented names (safe under classic BibTeX and biblatex), editors in
% "Last, First" form, and whole-title double braces removed (only words that
% must keep their case are braced).

@inproceedings{Sokolova2019,
  author    = {Sokolova, Natalia and Sch{\"o}ffmann, Klaus and Taschwer, Mario and Putzgruber-Adamitsch, Doris and El-Shabrawi, Yosuf},
  booktitle = {Proceedings of the 26th International Conference in MultiMedia Modeling (MMM 2020) (Part II)},
  title     = {Evaluating the Generalization Performance of Instrument Classification in Cataract Surgery Videos},
  year      = {2019},
  address   = {Berlin},
  editor    = {Cheng, Wen-Huang and Kim, Junmo and Chu, Wei-Ta and Cui, Peng and Choi, Jung-Woo and Hu, Min-Chun and De Neve, Wesley},
  month     = dec,
  pages     = {626--636},
  publisher = {Springer},
  series    = {Lecture Notes in Computer Science},
  volume    = {11962},
  doi       = {10.1007/978-3-030-37734-2_51},
  url       = {https://www.researchgate.net/publication/338188982_Evaluating_the_Generalization_Performance_of_Instrument_Classification_in_Cataract_Surgery_Videos},
}

@inproceedings{Taschwer2018a,
  author    = {Taschwer, Mario and Primus, Manfred J{\"u}rgen and Schoeffmann, Klaus and Marques, Oge},
  booktitle = {Working Notes Proceedings of the MediaEval 2018 Workshop},
  title     = {Early and Late Fusion of Classifiers for the {MediaEval Medico} Task},
  year      = {2018},
  editor    = {Larson, M. and Arora, P. and Demarty, C. H. and Riegler, M. and Bischke, B. and Dellandrea, E. and Lux, M. and Porter, A. and Jones, G. J. F.},
  series    = {CEUR Workshop Proceedings},
  volume    = {2283},
  url       = {http://ceur-ws.org/Vol-2283/MediaEval_18_paper_23.pdf},
}

% NOTE(review): Taschwer2018 and Taschwer2016a describe the same article (same
% DOI 10.1007/s11042-016-4237-x); 2016 is the online-first date, 2018 the print
% issue. Both keys are kept because either may already be cited — consider
% merging into one entry. The original had `number = {77}` and no volume; 77 is
% the MTAP volume (issue 1 — TODO confirm against the publisher page).
@article{Taschwer2018,
  author   = {Taschwer, Mario and Marques, Oge},
  journal  = {Multimedia Tools and Applications},
  title    = {Automatic Separation of Compound Figures in Scientific Articles},
  year     = {2018},
  month    = jan,
  volume   = {77},
  number   = {1},
  pages    = {519--548},
  abstract = {Content-based analysis and retrieval of digital images found in scientific articles is often hindered by images consisting of multiple subfigures (compound figures). We address this problem by proposing a method (ComFig) to automatically classify and separate compound figures, which consists of two main steps: (i) a supervised compound figure classifier (ComFig classifier) discriminates between compound and non-compound figures using task-specific image features; and (ii) an image processing algorithm is applied to predicted compound images to perform compound figure separation (ComFig separation). The proposed ComFig classifier is shown to achieve state-of-the-art classification performance on a published dataset. Our ComFig separation algorithm shows superior separation accuracy on two different datasets compared to other known automatic approaches. Finally, we propose a method to evaluate the effectiveness of the ComFig chain combining classifier and separation algorithm, and use it to optimize the misclassification loss of the ComFig classifier for maximal effectiveness in the chain.},
  doi      = {10.1007/s11042-016-4237-x},
  url      = {https://link.springer.com/article/10.1007%2Fs11042-016-4237-x#enumeration},
}

@phdthesis{Taschwer2017,
  author   = {Taschwer, Mario},
  school   = {Alpen-Adria-Universit{\"a}t Klagenfurt},
  title    = {Concept-Based and Multimodal Methods for Medical Case Retrieval},
  year     = {2017},
  address  = {Austria},
  month    = mar,
  abstract = {Medical case retrieval (MCR) is defined as a multimedia retrieval problem, where the document collection consists of medical case descriptions that pertain to particular diseases, patients' histories, or other entities of biomedical knowledge. Case descriptions are multimedia documents containing textual and visual modalities (images). A query may consist of a textual description of patient's symptoms and related diagnostic images. This thesis proposes and evaluates methods that aim at improving MCR effectiveness over the baseline of fulltext retrieval. We hypothesize that this objective can be achieved by utilizing controlled vocabularies of biomedical concepts for query expansion and concept-based retrieval. The latter represents case descriptions and queries as vectors of biomedical concepts, which may be generated automatically from textual and/or visual modalities by concept mapping algorithms. We propose a multimodal retrieval framework for MCR by late fusion of text-based retrieval (including query expansion) and concept-based retrieval and show that retrieval effectiveness can be improved by 49% using linear fusion of practical component retrieval systems. The potential of further improvement is experimentally estimated as a 166% increase of effectiveness over fulltext retrieval using query-adaptive fusion of ideal component retrieval systems. Additional contributions of this thesis include the proposal and comparative evaluation of methods for concept mapping, query and document expansion, and automatic classification and separation of compound figures found in case descriptions.},
  language = {EN},
  pages    = {200},
  pdf      = {https://www.itec.aau.at/bib/files/phd-thesis-taschwer.pdf},
}

% NOTE(review): auto-generated hash citation key kept unchanged because
% existing documents may cite it; a human-readable alias such as
% Pogorelov2017nerthus would be preferable for new citations.
@inproceedings{0f4de0c76764d43901677cf74a330af9,
  author    = {Pogorelov, Konstantin and Ranheim Randel, Kristin and de Lange, Thomas and Eskeland, Sigrun L. and Griwodz, Carsten and Spampinato, Concetto and Taschwer, Mario and Lux, Mathias and Schmidt, Peter T. and Riegler, Michael and Halvorsen, P{\aa}l},
  booktitle = {Proceedings of the 8th ACM on Multimedia Systems Conference (MMSys 2017)},
  title     = {{Nerthus}: A Bowel Preparation Quality Video Dataset},
  year      = {2017},
  editor    = {Chen, Kuan-Ta and Cesar, Pablo and Hsu, Cheng-Hsin},
  month     = jun,
  pages     = {170--174},
  publisher = {Association for Computing Machinery (ACM)},
  abstract  = {Bowel preparation (cleansing) is considered to be a key precondition for successful colonoscopy (endoscopic examination of the bowel). The degree of bowel cleansing directly affects the possibility to detect diseases and may influence decisions on screening and follow-up examination intervals. An accurate assessment of bowel preparation quality is therefore important. Despite the use of reliable and validated bowel preparation scales, the grading may vary from one doctor to another. An objective and automated assessment of bowel cleansing would contribute to reduce such inequalities and optimize use of medical resources. This would also be a valuable feature for automatic endoscopy reporting in the future. In this paper, we present Nerthus, a dataset containing videos from inside the gastrointestinal (GI) tract, showing different degrees of bowel cleansing. By providing this dataset, we invite multimedia researchers to contribute in the medical field by making systems automatically evaluate the quality of bowel cleansing for colonoscopy. Such innovations would probably contribute to improve the medical field of GI endoscopy.},
  doi       = {10.1145/3083187.3083216},
  url       = {https://dl.acm.org/citation.cfm?id=3083216},
}

% NOTE(review): duplicate of Taschwer2018 (same DOI) — see note there.
@article{Taschwer2016a,
  author    = {Taschwer, Mario and Marques, Oge},
  journal   = {Multimedia Tools and Applications},
  title     = {Automatic Separation of Compound Figures in Scientific Articles},
  year      = {2016},
  issn      = {1573-7721},
  month     = dec,
  pages     = {1--30},
  abstract  = {Content-based analysis and retrieval of digital images found in scientific articles is often hindered by images consisting of multiple subfigures (compound figures). We address this problem by proposing a method (ComFig) to automatically classify and separate compound figures, which consists of two main steps: (i) a supervised compound figure classifier (ComFig classifier) discriminates between compound and non-compound figures using task-specific image features; and (ii) an image processing algorithm is applied to predicted compound images to perform compound figure separation (ComFig separation). The proposed ComFig classifier is shown to achieve state-of-the-art classification performance on a published dataset. Our ComFig separation algorithm shows superior separation accuracy on two different datasets compared to other known automatic approaches. Finally, we propose a method to evaluate the effectiveness of the ComFig chain combining classifier and separation algorithm, and use it to optimize the misclassification loss of the ComFig classifier for maximal effectiveness in the chain.},
  address   = {New York},
  doi       = {10.1007/s11042-016-4237-x},
  language  = {EN},
  pdf       = {https://www.itec.aau.at/bib/files/fig-separation-mtap.pdf},
  publisher = {Springer},
}

@inproceedings{Taschwer2016,
  author    = {Taschwer, Mario and Marques, Oge},
  booktitle = {MultiMedia Modeling},
  title     = {Compound Figure Separation Combining Edge and Band Separator Detection},
  year      = {2016},
  address   = {Cham, Switzerland},
  editor    = {Tian, Qi and Sebe, Nicu and Qi, Guo-Jun and Huet, Benoit and Hong, Richang and Liu, Xueliang},
  month     = jan,
  pages     = {162--173},
  publisher = {Springer International Publishing},
  series    = {Lecture Notes in Computer Science},
  volume    = {9516},
  abstract  = {We propose an image processing algorithm to automatically separate compound figures appearing in scientific articles. We classify compound images into two classes and apply different algorithms for detecting vertical and horizontal separators to each class: the edge-based algorithm aims at detecting visible edges between subfigures, whereas the band-based algorithm tries to detect whitespace separating subfigures (separator bands). The proposed algorithm has been evaluated on two datasets for compound figure separation (CFS) in the biomedical domain and compares well to semi-automatic or more comprehensive state-of-the-art approaches. Additional experiments investigate CFS effectiveness and classification accuracy of various classifier implementations.},
  doi       = {10.1007/978-3-319-27671-7_14},
  isbn13    = {978-3-319-27671-7},
  language  = {EN},
  location  = {Miami, FL, USA},
  pdf       = {https://www.itec.aau.at/bib/files/fig-separation-mmm.pdf},
  slides    = {https://www.itec.aau.at/bib/files/MMM-2016-Taschwer.pdf},
  subtitle  = {22nd International Conference, MMM 2016, Miami, FL, USA, January 4-6, 2016},
  talkdate  = {2016.01.05},
  talktype  = {registered},
  url       = {http://link.springer.com/chapter/10.1007/978-3-319-27671-7_14},
}

@inproceedings{Taschwer2015,
  author    = {Taschwer, Mario and Marques, Oge},
  booktitle = {{CLEF} 2015 Working Notes},
  title     = {{AAUITEC} at {ImageCLEF} 2015: Compound Figure Separation},
  year      = {2015},
  address   = {Padova, Italy},
  editor    = {Capellato, Linda and Ferro, Nicola and Jones, Gareth and Juan, Eric},
  month     = sep,
  pages     = {9},
  publisher = {CLEF Association},
  series    = {CEUR Workshop Proceedings},
  issn      = {1613-0073},
  volume    = {1391},
  abstract  = {Our approach to automatically separating compound figures appearing in biomedical articles is split into two image processing algorithms: one is based on detecting separator edges, and the other tries to identify background bands separating subfigures. Only one algorithm is applied to a given image, according to the prediction of a binary classifier trained to distinguish graphical illustrations from other images in biomedical articles. Our submission to the ImageCLEF 2015 compound figure separation task achieved an accuracy of 49% on the provided test set of about 3400 compound images. This stays clearly behind the best submission of other participants (85% accuracy), but is by an order of magnitude faster than other approaches reported in the literature.},
  language  = {EN},
  location  = {Toulouse, France},
  pdf       = {https://www.itec.aau.at/bib/files/aauitec-fig-separation.pdf},
  slides    = {https://www.itec.aau.at/bib/files/poster-aauitec-fig-separation.pdf},
  talkdate  = {2015.09.09},
  talktype  = {poster},
  url       = {http://ceur-ws.org/Vol-1391/25-CR.pdf},
}

% NOTE(review): removed junk field `editor = {n/a, n/a}` (would render as a
% bogus editor name).
@inproceedings{Taschwer2014a,
  author    = {Taschwer, Mario},
  booktitle = {Proceedings of the ACM International Conference on Multimedia},
  title     = {Medical Case Retrieval},
  year      = {2014},
  address   = {New York, NY, USA},
  month     = nov,
  pages     = {639--642},
  publisher = {ACM},
  series    = {MM '14},
  doi       = {10.1145/2647868.2654856},
  isbn13    = {978-1-4503-3063-3},
  keywords  = {biomedical information retrieval, ImageCLEF medical tasks, multimodal information retrieval},
  language  = {EN},
  location  = {Orlando, Florida, USA},
  pdf       = {https://www.itec.aau.at/bib/files/Taschwer_ACM_MM_2014.pdf},
  slides    = {https://www.itec.aau.at/bib/files/Taschwer_ACM_MM_2014_slides.pdf},
  talkdate  = {2014.11.05},
  talktype  = {registered},
}

@techreport{Taschwer2014,
  author      = {Taschwer, Mario},
  institution = {Institute of Information Technology (ITEC), Alpen-Adria-Universit{\"a}t},
  title       = {Textual Methods for Medical Case Retrieval},
  year        = {2014},
  address     = {Klagenfurt, Austria},
  month       = may,
  number      = {TR/ITEC/14/2.01},
  abstract    = {Medical case retrieval (MCR) is information retrieval in a collection of medical case descriptions, where descriptions of patients' symptoms are used as queries. We apply known text retrieval techniques based on query and document expansion to this problem, and combine them with new algorithms to match queries and documents with Medical Subject Headings (MeSH). We ran comprehensive experiments to evaluate 546 method combinations on the ImageCLEF 2013 MCR dataset. Methods combining MeSH query expansion with pseudo-relevance feedback performed best, delivering retrieval performance comparable to or slightly better than the best MCR run submitted to ImageCLEF 2013.},
  language    = {EN},
  pages       = {50},
  pdf         = {https://www.itec.aau.at/bib/files/textual-mcr.pdf},
}

@inproceedings{Taschwer2013,
  author    = {Taschwer, Mario},
  booktitle = {{CLEF} 2013 Evaluation Labs and Workshop, Online Working Notes},
  title     = {Text-Based Medical Case Retrieval Using {MeSH} Ontology},
  year      = {2013},
  address   = {Padua, Italy},
  editor    = {Forner, Pamela and Navigli, Roberto and Tufis, Dan},
  month     = sep,
  pages     = {5},
  publisher = {CLEF Initiative},
  abstract  = {Our approach to the ImageCLEF medical case retrieval task consists of text-only retrieval combined with utilizing the Medical Subject Headings (MeSH) ontology. MeSH terms extracted from the query are used for query expansion or query term weighting. MeSH annotations of documents available from PubMed Central are added to the corpus. Retrieval results improve slightly upon full-text retrieval.},
  isbn13    = {978-88-904810-5-5},
  language  = {EN},
  location  = {Valencia, Spain},
  pdf       = {https://www.itec.aau.at/bib/files/aau_mcr_mesh.pdf},
  slides    = {https://www.itec.aau.at/bib/files/poster_clef2013.pdf},
  talkdate  = {2013.09.24},
  talktype  = {poster},
  url       = {http://www.clef-initiative.eu/documents/71612/4b93fc08-a8fa-4985-873d-c2c18bd4cd3d},
}

@inproceedings{DelFabro_CBMI2013,
  author    = {Del Fabro, Manfred and Schoeffmann, Klaus and Guggenberger, Mario and Taschwer, Mario},
  booktitle = {11th International Workshop on Content-Based Multimedia Indexing},
  title     = {A Filtering Tool to Support Interactive Search in Internet Video Archives},
  year      = {2013},
  address   = {Los Alamitos, CA, USA},
  editor    = {Czuni, Laszlo},
  month     = jun,
  pages     = {7--10},
  publisher = {IEEE Computer Society},
  language  = {EN},
  location  = {Veszprem, Hungary},
  talkdate  = {2013.06.18},
  talktype  = {poster},
}

@inproceedings{Taschwer2012,
  author    = {Taschwer, Mario},
  booktitle = {Advances in Multimedia Modeling},
  title     = {A Key-Frame-Oriented Video Browser},
  year      = {2012},
  address   = {Berlin / Heidelberg},
  editor    = {Schoeffmann, Klaus and Merialdo, Bernard and Hauptmann, Alexander and Ngo, Chong-Wah and Andreopoulos, Yiannis and Breiteneder, Christian},
  month     = jan,
  pages     = {655--657},
  publisher = {Springer},
  series    = {Lecture Notes in Computer Science},
  volume    = {7131},
  abstract  = {We propose a video browser facilitating known-item search in a single video. Key frames are presented as four images at a time and can be navigated quickly in both forward and backward directions using a slider. Alternatively, key frames can be displayed automatically at different frame rates. The user may choose between three mappings of key frames to the four key frame widgets based on video time stamps and color similarity.},
  doi       = {10.1007/978-3-642-27355-1_68},
  isbn13    = {978-3-642-27354-4},
  language  = {EN},
  location  = {Klagenfurt},
  talkdate  = {2012.01.06},
  talktype  = {poster},
  url       = {http://dx.doi.org/10.1007/978-3-642-27355-1_68},
}

% NOTE(review): original had `month = {jan}`, contradicting the talkdate
% (2012.11.01) and venue (ACM MM 2012, Nara, Oct 29 - Nov 2); corrected to nov.
@inproceedings{Lux2012b,
  author    = {Lux, Mathias and Taschwer, Mario and Marques, Oge},
  booktitle = {Proceedings of the 20th ACM international conference on Multimedia},
  title     = {Classification of photos based on good feelings: {ACM MM} 2012 multimedia grand challenge submission},
  year      = {2012},
  address   = {New York, NY, USA},
  editor    = {Aizawa, Kiyoharu and Babaguchi, Noboru and Smith, John},
  month     = nov,
  pages     = {1367--1368},
  publisher = {ACM},
  series    = {MM '12},
  doi       = {10.1145/2393347.2396488},
  keywords  = {affection, image classification, image search, user intentions},
  language  = {EN},
  location  = {Nara, Japan},
  talkdate  = {2012.11.01},
  talktype  = {registered},
  url       = {http://doi.acm.org/10.1145/2393347.2396488},
}

% NOTE(review): original had `month = {jan}`, contradicting the talkdate
% (2012.10.29, CrowdMM workshop at ACM MM 2012); corrected to oct.
@inproceedings{Lux2012a,
  author    = {Lux, Mathias and Taschwer, Mario and Marques, Oge},
  booktitle = {Proceedings of the ACM multimedia 2012 workshop on Crowdsourcing for multimedia},
  title     = {A closer look at photographers' intentions: a test dataset},
  year      = {2012},
  address   = {New York, NY, USA},
  editor    = {Aizawa, Kiyoharu and Babaguchi, Noboru and Smith, John},
  month     = oct,
  pages     = {17--18},
  publisher = {ACM},
  series    = {CrowdMM '12},
  doi       = {10.1145/2390803.2390811},
  keywords  = {digital photos, user intentions},
  language  = {EN},
  location  = {Nara, Japan},
  talkdate  = {2012.10.29},
  talktype  = {registered},
  url       = {http://doi.acm.org/10.1145/2390803.2390811},
}

@inproceedings{DelFabro2012c,
  author    = {Del Fabro, Manfred and Lux, Mathias and Schoeffmann, Klaus and Taschwer, Mario},
  booktitle = {Proceedings of TRECVID 2012},
  title     = {{ITEC-UNIKLU} Known-Item Search Submission 2012},
  year      = {2012},
  address   = {Gaithersburg, USA},
  editor    = {Over, Paul and Awad, George and Michel, Martial and Fiscus, Jonathan and Sanders, Greg and Shaw, Barbara and Kraaij, Wessel and Smeaton, Alan and Qu{\'e}not, Georges},
  month     = nov,
  pages     = {11},
  publisher = {National Institute of Standards and Technology (NIST)},
  abstract  = {In this report we describe our approach to the known-item search task for TRECVID~2012. We describe how we index available metadata and how we gain additional information about the videos using content-based analysis. A rule-based query expansion and query reduction method is applied to increase the number of relevant videos in automatic runs. Furthermore, we describe an approach for quick, interactive filtering of large result sets. We outline how the parameters of our system were tuned for the IACC dataset and discuss our TRECVID 2012 KIS results.},
  language  = {EN},
  location  = {Gaithersburg, USA},
  talkdate  = {2012.11.28},
  talktype  = {poster},
  url       = {http://www-nlpir.nist.gov/projects/tvpubs/tv.pubs.org.html},
}

@inproceedings{schoeffmann_mmsys2010,
  author    = {Schoeffmann, Klaus and Taschwer, Mario and B{\"o}sz{\"o}rmenyi, Laszlo},
  booktitle = {MMSys '10: Proceedings of the first annual ACM SIGMM conference on Multimedia systems},
  title     = {The video explorer: a tool for navigation and searching within a single video based on fast content analysis},
  year      = {2010},
  address   = {New York, NY, USA},
  editor    = {Feng, Wu-chi and Mayer-Patel, Ketan},
  month     = feb,
  pages     = {247--258},
  publisher = {ACM},
  doi       = {10.1145/1730836.1730867},
  language  = {EN},
  location  = {Phoenix, Arizona, USA},
  talkdate  = {2010.02.23},
  talktype  = {registered},
}

@article{Sobe2010,
  author    = {Sobe, Anita and B{\"o}sz{\"o}rmenyi, Laszlo and Taschwer, Mario},
  journal   = {International Journal on Advances in Software},
  title     = {Video Notation ({ViNo}): A Formalism for Describing and Evaluating Non-sequential Multimedia Access},
  year      = {2010},
  issn      = {1942-2628},
  month     = sep,
  number    = {1 \& 2},
  pages     = {19--30},
  volume    = {3},
  abstract  = {The contributions of this paper are threefold: (1) the extensive introduction of a formal Video Notation (ViNo) that allows for describing different multimedia transport techniques for specifying required QoS; (2) the application of this formal notation to analyzing different transport mechanisms without the need of detailed simulations; (3) further application of ViNo to caching techniques, leading to the introduction of two cache admission policies and one replacement policy supporting nonsequential multimedia access. The applicability of ViNo is shown by example and by analysis of an existing CDN simulation. We find that a pure LRU replacement yields significantly lower hit rates than our suggested popularity-based replacement. The evaluation of caches was done by simulation and by usage of ViNo.},
  address   = {Valencia, Spain},
  language  = {EN},
  pdf       = {https://www.itec.aau.at/bib/files/Intl_Journal_Advances_Software_2010_Vol3.pdf},
  publisher = {International Academy, Research and Industry Association (IARIA)},
  url       = {http://www.iariajournals.org},
}

% NOTE(review): original had `month = {jan}`; the TRECVID 2010 workshop took
% place in November — corrected to nov.
@inproceedings{Lux2010e,
  author    = {Lux, Mathias and Schoeffmann, Klaus and del Fabro, Manfred and Kogler, Marian and Taschwer, Mario},
  booktitle = {TRECVID 2010 Participant Notebook Papers},
  title     = {{ITEC-UNIKLU} Known-Item Search Submission},
  year      = {2010},
  address   = {Gaithersburg, USA},
  editor    = {Over, Paul and Awad, George and Fiscus, Jonathan and Michel, Martial and Kraaij, Wessel and Smeaton, Alan and Qu{\'e}not, Georges},
  month     = nov,
  pages     = {9},
  publisher = {National Institute of Standards and Technology (NIST)},
  language  = {EN},
  talktype  = {none},
  url       = {http://www-nlpir.nist.gov/projects/tvpubs/tv.pubs.org.html},
}

% NOTE(review): original stored an ISBN-13 (9781424442911) in the `issn` field;
% moved to isbn13 with standard hyphenation.
@inproceedings{Schoeffmann2009e,
  author    = {Schoeffmann, Klaus and Lux, Mathias and Taschwer, Mario and B{\"o}sz{\"o}rmenyi, Laszlo},
  booktitle = {ICME'09 Proceedings of the 2009 IEEE international Conference on Multimedia and Expo},
  title     = {Visualization of Video Motion in Context of Video Browsing},
  year      = {2009},
  address   = {Los Alamitos, CA, USA},
  editor    = {Lin, CY and Cox, I},
  month     = jul,
  pages     = {658--661},
  publisher = {IEEE},
  abstract  = {We present a new approach for video browsing using visualization of motion direction and motion intensity statistics by color and brightness variations. Statistics are collected from motion vectors of H.264/AVC encoded video streams, so full video decoding is not required. By interpreting visualized motion patterns of video segments, users are able to quickly identify scenes similar to a prototype scene or identify potential scenes of interest. We give some examples of motion patterns with different semantic value, including camera zooms, hill jumps of ski-jumpers, and the repeated appearance of a news speaker. In a user study we show that certain scenes of interest can be found significantly faster using our video browsing tool than using a video player with VCR-like controls.},
  isbn13    = {978-1-4244-4291-1},
  language  = {EN},
  talktype  = {none},
  url       = {http://dl.acm.org/citation.cfm?id=1698924.1699086},
}

@inproceedings{Schoeffmann2009d,
  author    = {Schoeffmann, Klaus and Taschwer, Mario and B{\"o}sz{\"o}rmenyi, Laszlo},
  booktitle = {Proceedings of the International Conference on Multimedia and Expo 2009},
  title     = {Video Browsing Using Motion Visualization},
  year      = {2009},
  address   = {Los Alamitos, CA, USA},
  editor    = {Lin, CY and Cox, I},
  month     = jul,
  pages     = {1835--1836},
  publisher = {IEEE},
  abstract  = {We present a video browsing tool that uses a novel and powerful visualization technique of video motion. The tool provides an interactive navigation index that allows users to quickly and easily recognize content semantics like scenes with fast/slow motion (in general or according to a specific direction), scenes showing still/moving objects in front of a still/moving background, camera pans, or camera zooms. Moreover, the visualization facilitates identification of similar segments in a video. A first user study has shown encouraging results.},
  issn      = {1945-788X},
  language  = {EN},
  talktype  = {none},
}

@techreport{Taschwer2005,
  author      = {Taschwer, Mario and M{\"u}ller, Armin and B{\"o}sz{\"o}rmenyi, Laszlo},
  institution = {Institute of Information Technology ({ITEC}), Klagenfurt University},
  title       = {Integrating Semantic Search and Adaptive Streaming of Video Segments: the {DAHL} Project},
  year        = {2005},
  address     = {Klagenfurt, Austria},
  month       = jan,
  number      = {TR/ITEC/05/2.04},
  type        = {final report},
  abstract    = {The DAHL project aimed at demonstrating some of the research achievements at ITEC by extending an existing web application with content-based search mechanisms and an adaptive streaming environment for video data. The search is based on MPEG-7 descriptions of video data, and video retrieval uses an MPEG-4 conforming adaptive streaming server and player, which allows to adapt the video stream dynamically to client capabilities, user preferences, and available network bandwidth. This report describes the design, implementation, and integration work done in the DAHL project.},
  keywords    = {semantic video querying, adaptive video streaming, {MPEG-7} annotation tool},
  language    = {EN},
  pages       = {34},
}

@inproceedings{Taschwer2001,
  author    = {Taschwer, Mario},
  booktitle = {Kommunikationssicherheit im Zeichen des Internet},
  title     = {Modular Multiplication Using Special Prime Moduli},
  year      = {2001},
  address   = {Braunschweig/Wiesbaden},
  editor    = {Horster, Patrick},
  month     = jan,
  pages     = {346--371},
  publisher = {Vieweg},
  abstract  = {Elliptic curve cryptosystems allow the use of prime fields with special prime moduli that speed up the finite field arithmetic considerably. Two algorithms for reduction with respect to special moduli have been implemented in software on both a 32-bit and a 64-bit platform and compared to well-known generic modular reduction methods. Timing results for multiplications in prime fields of size between 2^{191} and 2^{512} are presented and discussed.},
  isbn10    = {3-528-05763-7},
  language  = {EN},
  pdf       = {https://www.itec.aau.at/bib/files/2001si_modmult.pdf},
  talktype  = {none},
}