@article{KalogerosGDN23, author = {Eleftherios Kalogeros and Manolis Gergatsoulis and Matthew Damigos and Christos Nomikos}, title = {Efficient Query Evaluation Techniques Over Large Amount of Distributed Linked Data}, journal = {Information Systems}, volume = {115}, pages = {102194}, year = {2023}, url = {https://doi.org/10.1016/j.is.2023.102194}, doi = {10.1016/J.IS.2023.102194}, abstract = {As RDF becomes more widely established and the amount of linked data is rapidly increasing, the efficient querying of large amount of data becomes a significant challenge. In this paper, we propose a family of algorithms for querying large amount of linked data in a distributed manner. These query evaluation algorithms are independent of the way the data is stored, as well as of the particular implementation of the query evaluation. We then use the MapReduce paradigm to present a distributed implementation of these algorithms and experimentally evaluate them, although the algorithms could be straightforwardly translated into other distributed processing frameworks. We also investigate and propose multiple query decomposition approaches of Basic Graph Patterns (subclass of SPARQL queries) that are used to improve the overall performance of the distributed query answering. 
A deep analysis of the effectiveness of these decomposition algorithms is also provided.}, keywords = {Linked Data, Graph Querying, Big Data, Map-Reduce, Distributed Processing, Cloud Computing, Semantic Web} } @inproceedings{UhartegaraydDK23, author = {Remi Uhartegaray and Laurent d'Orazio and Matthew Damigos and Eleftherios Kalogeros}, title = {Scalable Computation of Fuzzy Joins Over Large Collections of {JSON} Data}, booktitle = {{IEEE} International Conference on Fuzzy Systems, {FUZZ} 2023, Incheon, Republic of Korea, August 13-17, 2023}, pages = {1--6}, publisher = {{IEEE}}, year = {2023}, url = {https://doi.org/10.1109/FUZZ52849.2023.10309759}, doi = {10.1109/FUZZ52849.2023.10309759}, abstract = {Fuzzy joins are widely used in a variety of data analysis applications such as data integration, data mining, and master data management. In the context of Big Data, computing fuzzy joins is challenging due to the high computational cost required and the communication cost. While on one hand big fuzzy joins on relational data and on the other hand joins on tree-structured data have been investigated in the literature, to the best of our knowledge, combining the two is still an open problem. In this context, we study methods for leveraging distributed environments in order to compute fuzzy joins over large collections of JSON documents. 
Our algorithms take into account both the text-similarity of the joining data, as well as its structural similarity.}, keywords = {Fuzzy Join, Similarity, Trees, JSON, Distributed Computation} } @inproceedings{BountouriDDGK23, author = {Lina Bountouri and Matthew Damigos and Markella Drakiou and Manolis Gergatsoulis and Eleftherios Kalogeros}, editor = {Dion Hoe{-}Lian Goh and Shu{-}Jiun Chen and Suppawong Tuarob}, title = {The Semantic Mapping of RiC-CM to {CIDOC-CRM}}, booktitle = {Leveraging Generative Intelligence in Digital Libraries: Towards Human-Machine Collaboration - 25th International Conference on Asia-Pacific Digital Libraries, {ICADL} 2023, Taipei, Taiwan, December 4-7, 2023, Proceedings, Part {II}}, series = {Lecture Notes in Computer Science}, volume = {14458}, pages = {90--99}, publisher = {Springer}, year = {2023}, url = {https://doi.org/10.1007/978-981-99-8088-8_8}, doi = {10.1007/978-981-99-8088-8_8}, abstract = {This paper investigates the semantic closeness between the Records in Contexts - Conceptual Model (RiC-CM) and the CIDOC Conceptual Reference Model (CIDOC-CRM). The research aims to establish a bridge between these two conceptual models, enabling interoperability and seamless integration of data. In this context, we define a mapping of (1) the main RiC-CM entities (focusing on the record-related entities) to CIDOC-CRM entities, (2) the main RiC-CM attributes to CIDOC-CRM (paths of) properties and entities, and (3) the (mainly, record-related) relations of RiC-CM to CIDOC-CRM (paths of) properties and entities. 
With this research, we achieve a deeper understanding of the semantic relationship between the two models.}, keywords = {Archival Conceptual Models, RiC-CM, CIDOC-CRM, Semantic Mappings, Ontologies} } @inproceedings{AgathosKGP22, author = {Michail Agathos and Eleftherios Kalogeros and Manolis Gergatsoulis and Georgios Papaioannou}, editor = {Yuen{-}Hsien Tseng and Marie Katsurai and Hoa Ngoc Nguyen}, title = {Documenting Architectural Styles Using {CIDOC} {CRM}}, booktitle = {From Born-Physical to Born-Virtual: Augmenting Intelligence in Digital Libraries - 24th International Conference on Asian Digital Libraries, {ICADL} 2022, Hanoi, Vietnam, November 30 - December 2, 2022, Proceedings}, series = {Lecture Notes in Computer Science}, volume = {13636}, pages = {345--359}, publisher = {Springer}, year = {2022}, url = {https://doi.org/10.1007/978-3-031-21756-2_27}, doi = {10.1007/978-3-031-21756-2_27}, abstract = {Documenting cultural heritage information is of major importance and significance nowadays in the emerging world of digital libraries, computing and information studies across the globe. Architecture and architectural styles fall within this big effort, as architecture is strictly connected with human societies and heritage. The CIDOC CRM is a well-established and continuously emerging reference model aiming to represent cultural heritage information. This paper connects the dots and offers for the first time a model for documenting architectural styles via CIDOC CRM and its extensions. In this paper, we discuss the notion of architectural style and the previous work on documenting architecture. 
Via classes and properties of CIDOC CRM and its extensions, we model the historical context of architectural styles/periods, we represent specific elements that characterize architectural styles using Gothic Architecture as an example, and we document various and complex spatial and temporal relationships and influences among architectural styles and periods, such sub-styles of given architectural styles, regional variations among architectural styles, and architectural influences from one architectural style to another.}, keywords = {Architectural Style, CIDOC CRM, Ontologies, Cultural Heritage Documentation} } @inproceedings{SfyridouPGKP22, author = {Eleni Sfyridou and Georgios Papaioannou and Manolis Gergatsoulis and Eleftherios Kalogeros and Konstantinos D. Politis}, editor = {Emmanouel Garoufallou and Andreas Vlachidis}, title = {Epigraphical Heritage Documentation via {CIDOC} {CRM} and CRMtex}, booktitle = {Metadata and Semantic Research - 16th Research Conference, {MTSR} 2022, London, UK, November 7-11, 2022, Revised Selected Papers}, series = {Communications in Computer and Information Science}, volume = {1789}, pages = {65--76}, publisher = {Springer}, year = {2022}, url = {https://doi.org/10.1007/978-3-031-39141-5_6}, doi = {10.1007/978-3-031-39141-5_6}, abstract = {As the need for solid and interoperable heritage documentation models and systems becomes bigger, addressing aspects of representing archaeological and textual/epigraphical information adds special importance and value. Towards this end, this paper explores the use of CIDOC CRM and CRMtex to represent epigraphical heritage information. 
This paper aims to (1) describe, study and explain how ancient funerary inscriptions can be represented through CIDOC CRM and its extensions, (2) examine these archaeological and textual objects at two levels: (a) physical description, including size, shape, material, text and symbols, and b) interpretations, including chronology, text content, other archaeological interpretation. Our work consists of a case study based on a funerary inscription with a Greek inscription of the 5th century AD discovered in Ghor as-Safi (Byzantine Zoora or Zoara) in modern Jordan. This inscription is a typical example of funerary inscriptions in the area and the world, as it contains the basic information frequently found on similar object: name of the deceased, age, date of death, symbols. CIDOC CRM and CRMtex has been used to represent the production and the physical characteristics of a funerary inscription, its textual information (including ancient writing, transcription and translation) and cultural information content, its date, and its current condition and location. This case study initiates the work towards a model and an automated system for archaeological/epigraphical documentation and data integration.}, keywords = {Epigraphy, Cultural Heritage Documentation, Archaeology, Funerary Inscriptions, CIDOC CRM, CRMtex, Ontologies} } @article{KalogerosGDN22, author = {Eleftherios Kalogeros and Manolis Gergatsoulis and Matthew Damigos and Christos Nomikos}, title = {Efficient Query Evaluation Techniques Over Large Amount of Distributed Linked Data}, journal = {CoRR}, volume = {abs/2209.05359}, year = {2022}, url = {https://doi.org/10.48550/arXiv.2209.05359}, doi = {10.48550/ARXIV.2209.05359}, eprinttype = {arXiv}, eprint = {2209.05359}, abstract = {As RDF becomes more widely established and the amount of linked data is rapidly increasing, the efficient querying of large amount of data becomes a significant challenge. 
In this paper, we propose a family of algorithms for querying large amount of linked data in a distributed manner. These query evaluation algorithms are independent of the way the data is stored, as well as of the particular implementation of the query evaluation. We then use the MapReduce paradigm to present a distributed implementation of these algorithms and experimentally evaluate them, although the algorithms could be straightforwardly translated into other distributed processing frameworks. We also investigate and propose multiple query decomposition approaches of Basic Graph Patterns (subclass of SPARQL queries) that are used to improve the overall performance of the distributed query answering. A deep analysis of the effectiveness of these decomposition algorithms is also provided.}, keywords = {Linked Data, Graph Querying, Big Data, Map-Reduce, Distributed Processing, Cloud Computing, Semantic Web} } @inproceedings{GergatsoulisPKMTC21, author = {Manolis Gergatsoulis and Georgios Papaioannou and Eleftherios Kalogeros and Ioannis Mpismpikopoulos and Katerina Tsiouprou and Robert Carter}, editor = {Hao{-}Ren Ke and Chei Sian Lee and Kazunari Sugiyama}, title = {Modelling Archaeological Buildings Using {CIDOC-CRM} and Its Extensions: The Case of Fuwairit, Qatar}, booktitle = {Towards Open and Trustworthy Digital Societies - 23rd International Conference on Asia-Pacific Digital Libraries, {ICADL} 2021, Virtual Event, December 1-3, 2021, Proceedings}, series = {Lecture Notes in Computer Science}, volume = {13133}, pages = {357--372}, publisher = {Springer}, year = {2021}, url = {https://doi.org/10.1007/978-3-030-91669-5_28}, doi = {10.1007/978-3-030-91669-5_28}, abstract = {This paper explores the use of CIDOC CRM and its extensions (CRMba, CRMarchaeo) to represent archaeological buildings that have been studied and interpreted by archaeologists during their work in the field. 
These archaeological observations and reflections usually appear in archaeological reports containing text and visual representations, such as images and photographs, plans and drawings, and maps. For our approach (case study), we used the recent archaeological excavations and other heritage works of the Origins of Doha and Qatar Project in Fuwairit, Qatar. We investigate, explore and review issues related to the application of classes and properties as they appear in the latest versions of the aforementioned models, i.e. CIDOC CRM, and CRMba. We focus on archaeological building construction, on specific building components and materials, on the visual representations of archaeological buildings as well as on issues of archaeological buildings’ chronology and buildings’ information provenance. The proposed data model contributes towards an automated system for archaeological buildings representations, documentation, and heritage data integration.}, keywords = {Archaeology, Archaeology of Buildings, CIDOC CRM, CRMarchaeo, CRMba, Ontologies, Digital Humanities} } @inproceedings{KalogerosDSZDZMPG21, author = {Eleftherios Kalogeros and Matthew Damigos and Michalis Sfakakis and Sofia Zapounidou and Aggeliki Drakopoulou and Costas Zervopoulos and Gerasimos Martinis and Christos Papatheodorou and Manolis Gergatsoulis}, editor = {Emmanouel Garoufallou and Mar{\'{\i}}a Antonia Ovalle{-}Perandones and Andreas Vlachidis}, title = {Digitizing, Transcribing and Publishing the Handwritten Music Score Archives of Ionian Islands Philharmonic Bands}, booktitle = {Metadata and Semantic Research - 15th International Conference, {MTSR} 2021, Virtual Event, November 29 - December 3, 2021, Revised Selected Papers}, series = {Communications in Computer and Information Science}, volume = {1537}, pages = {370--381}, publisher = {Springer}, year = {2021}, url = {https://doi.org/10.1007/978-3-030-98876-0_32}, doi = {10.1007/978-3-030-98876-0_32}, abstract = {During the long history of the 
philharmonic bands in the Ionian Islands, since the mid of the nineteenth century, valuable archives of handwritten music scores have been established. These archives consist of the scores of original works locally created and from adaptations of western music works of Greek and other European composers. For the long-term preservation of the archives of 7 Philharmonic Bands, the handwritten music scores were digitised and a significant amount of (the most important of) them was transcribed into MusicXML. Moreover, all these archives were integrated into, and published as a single archive. All these activities were part of the project “Preservation and Prominence of the Musical Heritage of the Region of Ionian Islands Prefecture through the management of the digital archives of the Philharmonic Orchestras of the Region.” This work presents the challenges, the workflows and the system developed to achieve the objectives of the project.}, keywords = {Music Score Archives, Linked Data, MARC21, RDA, IIIF, MusicXML, Semantic Web} } @article{KalogerosGD20, author = {Eleftherios Kalogeros and Manolis Gergatsoulis and Matthew Damigos}, title = {Document-based {RDF} Storage Method for Parallel Evaluation of Basic Graph Pattern Queries}, journal = {International Journal of Metadata, Semantics and Ontologies}, volume = {14}, number = {1}, pages = {63--80}, year = {2020}, url = {https://doi.org/10.1504/IJMSO.2020.107798}, doi = {10.1504/IJMSO.2020.107798}, abstract = {In this paper, we investigate the problem of efficiently evaluating (Basic Graph Pattern) BGP SPARQL queries over a large amount of RDF data. We propose an effective data model for storing RDF data in a document database using maximum replication factor of 2 (i.e., in the worst case scenario, the data graph will be doubled in storage size). The proposed storage model is utilised for efficiently evaluating SPARQL queries, in a distributed manner. 
Each query is decomposed into a set of generalised star queries, which are queries that allow both subject-object and object-subject edges from a specific node, called central node. The proposed data model ensures that no joining operations over multiple data sets are required to evaluate generalised star queries. The results of the evaluation of the generalised star sub-queries of a query Q are then combined properly, in order to compute the answers of the query Q posed over the RDF data. The proposed approach has been implemented using MongoDB and Apache Spark.}, keywords = {Semantic Web, Parallel Processing, Query Processing, Resource Description Framework, Big Data Applications} } @inproceedings{GergatsoulisPKC20, author = {Manolis Gergatsoulis and Georgios Papaioannou and Eleftherios Kalogeros and Robert Carter}, editor = {Emmanouel Garoufallou and Mar{\'{\i}}a Antonia Ovalle{-}Perandones}, title = {Representing Archeological Excavations Using the {CIDOC} {CRM} Based Conceptual Models}, booktitle = {Metadata and Semantic Research - 14th International Conference, {MTSR} 2020, Madrid, Spain, December 2-4, 2020, Revised Selected Papers}, series = {Communications in Computer and Information Science}, volume = {1355}, pages = {355--366}, publisher = {Springer}, year = {2020}, url = {https://doi.org/10.1007/978-3-030-71903-6_33}, doi = {10.1007/978-3-030-71903-6_33}, abstract = {This paper uses CIDOC CRM and CRM-based models (CRMarchaeo, CRMsci) to represent archaeological excavation activities and the observations of archaeologists during their work in the excavation field. These observations are usually recorded in documents such as context sheets. As an application of our approach (case study), we used the records of the recent archaeological excavations in Fuwairit in Qatar, part of the Origins of Doha and Qatar Project. We explore issues related to the application of classes and properties as they appear in the latest versions of the aforementioned models, i.e. 
CIDOC CRM, CRMarchaeo, and CRMsci. The proposed data model could be used as the basis to create an automated system for archaeological documentation and archeological data integration.}, keywords = {Archaeology, Excavation, Context Sheet, Archaeological Documentation, CIDOC CRM, CRMarchaeo, CRMsci, Ontologies} } @inproceedings{KalogerosGD18, author = {Eleftherios Kalogeros and Manolis Gergatsoulis and Matthew Damigos}, editor = {Emmanouel Garoufallou and Fabio Sartori and Rania Siatri and Marios Zervas}, title = {Document Based {RDF} Storage Method for Efficient Parallel Query Processing}, booktitle = {Metadata and Semantic Research - 12th International Conference, {MTSR} 2018, Limassol, Cyprus, October 23-26, 2018, Revised Selected Papers}, series = {Communications in Computer and Information Science}, volume = {846}, pages = {13--25}, publisher = {Springer}, year = {2018}, url = {https://doi.org/10.1007/978-3-030-14401-2_2}, doi = {10.1007/978-3-030-14401-2_2}, abstract = {In this paper, we investigate the problem of efficiently evaluating SPARQL queries, over large amount of linked data utilizing distributed NoSQL system. We propose an efficient approach for partitioning large linked data graphs using distributed frameworks (MapReduce), as well as an effective data model for storing linked data in a document database using a maximum replication factor of 2 (i.e., in the worst case scenario, the data graph will be doubled in storage size). The model proposed and the partitioning approach ensure high-performance query evaluation and horizontal scaling for the type of queries called generalized star queries (i.e., queries allowing both subject-object and object-subject edges from a central node), due to the fact that no joining operations over multiple datasets are required to evaluate the queries. 
Furthermore, we present an implementation of our approach using MongoDB and an algorithm for translating generalized star queries into MongoDB query language, based on the proposed data model.}, keywords = {RDF, Linked Data, Parallel Processing, NoSQL, Document Databases} } @inproceedings{AgathosKK16, author = {Michail Agathos and Eleftherios Kalogeros and Sarantos Kapidakis}, editor = {Norbert Fuhr and L{\'{a}}szl{\'{o}} Kov{\'{a}}cs and Thomas Risse and Wolfgang Nejdl}, title = {A Case Study of Summarizing and Normalizing the Properties of DBpedia Building Instances}, booktitle = {Research and Advanced Technology for Digital Libraries - 20th International Conference on Theory and Practice of Digital Libraries, {TPDL} 2016, Hannover, Germany, September 5-9, 2016, Proceedings}, series = {Lecture Notes in Computer Science}, volume = {9819}, pages = {398--404}, publisher = {Springer}, year = {2016}, url = {https://doi.org/10.1007/978-3-319-43997-6_33}, doi = {10.1007/978-3-319-43997-6_33}, abstract = {The DBpedia ontology forms the structural backbone of DBpedia linked open dataset. Among its classes dbo:Building and dbo:HistoricBuilding entities, hold information for thousands of important buildings and monuments, thus making DBpedia an international digital repository of the architectural heritage. This knowledge for these architectural structures, in order to be fully exploited for academic research and other purposes, must be homogenized, as its richest source - Wikipedia infobox template system - is a heterogeneous and non-standardized environment. 
The work presented below summarizes the most widely used properties for buildings, categorizes and highlights structural and semantic heterogeneities allowing DBpedia’s users a full exploitation of the available information.}, keywords = {DBpedia, Wikipedia, Digital Libraries, Semantic Web, Historic Buildings, Monuments, Cultural Heritage, RDF} } @inproceedings{KalogerosGD15, author = {Eleftherios Kalogeros and Manolis Gergatsoulis and Matthew Damigos}, editor = {Irfan Awan and Muhammad Younas and Massimo Mecella}, title = {Redundancy in Linked Data Partitioning for Efficient Query Evaluation}, booktitle = {3rd International Conference on Future Internet of Things and Cloud, FiCloud 2015, Rome, Italy, August 24-26, 2015}, pages = {497--504}, publisher = {{IEEE} Computer Society}, year = {2015}, url = {https://doi.org/10.1109/FiCloud.2015.36}, doi = {10.1109/FICLOUD.2015.36}, abstract = {The problem of efficient querying large amount of linked data using Map-Reduce is investigated in this paper. The proposed approach is based on the following assumptions: a) Data graphs are arbitrarily partitioned in the distributed file system in such a way that replication of data triples between the data segments is allowed. b) Data triples are replicated in such a way that answers to a special form of queries, called subject-object star queries, can be obtained from a single data segment. c) Each query posed by the user, can be transformed into a set of subject-object star sub queries. We propose a one and a half phase, scalable, Map-Reduce algorithm that efficiently computes the answers of the initial query by computing and appropriately combining the sub query answers. 
We prove that, under certain conditions, query can be answered in a single map-reduce phase.}, keywords = {Linked Data, MapReduce, Graph Querying, Cloud Computing, Semantic Web} } @inproceedings{NomikosGKD14, author = {Christos Nomikos and Manolis Gergatsoulis and Eleftherios Kalogeros and Matthew Damigos}, editor = {K. Sel{\c{c}}uk Candan and Sihem Amer{-}Yahia and Nicole Schweikardt and Vassilis Christophides and Vincent Leroy}, title = {A Map-Reduce Algorithm for Querying Linked Data Based on Query Decomposition into Stars}, booktitle = {Proceedings of the Workshops of the {EDBT/ICDT} 2014 Joint Conference {(EDBT/ICDT} 2014), Athens, Greece, March 28, 2014}, series = {{CEUR} Workshop Proceedings}, volume = {1133}, pages = {224--231}, publisher = {CEUR-WS.org}, year = {2014}, url = {https://ceur-ws.org/Vol-1133/paper-37.pdf}, abstract = {In this paper, we investigate the problem of efficient querying large amount of linked data using Map-Reduce framework. We assume data graphs that are arbitrarily partitioned in the distributed file system. Our technique focuses on the decomposition of the query posed by the user, which is given in the form of a query graph into star subqueries. We propose a two-phase, scalable Map-Reduce algorithm that efficiently results the answer of the initial query by computing and appropriately combining the subquery answers.}, keywords = {Linked Data, Graph Querying, Map-Reduce, Distributed Processing, Cloud Computing, Semantic Web} } @inproceedings{DamigosGK14, author = {Matthew Damigos and Manolis Gergatsoulis and Eleftherios Kalogeros}, editor = {Sokratis K. Katsikas and Michael Hatzopoulos and Theodoros Apostolopoulos and Dimosthenis Anagnostopoulos and Elias Carayiannis and Theodora A. 
Varvarigou and Mara Nikolaidou}, title = {Distributed Evaluation of XPath Queries Over Large Integrated {XML} Data}, booktitle = {18th Panhellenic Conference on Informatics, {PCI} '14, Athens, Greece, October 2-4, 2014}, pages = {61:1--61:6}, publisher = {{ACM}}, year = {2014}, url = {https://doi.org/10.1145/2645791.2645804}, doi = {10.1145/2645791.2645804}, abstract = {XML is a widespread, text-based format used for exchanging information on the Web and representing metadata. Since the amount of XML information is rapidly increasing, efficient querying of large data repositories, containing XML data, is a significant challenge faced by system designers and data analysts who need to support operational actions and decision-making. In this paper we propose a technique for integrating large amount of XML data and use the Map-Reduce framework to efficiently query the integrated data. Each XML document obtained from the sources is transformed properly in order to fit into a predefined, virtual XML structure. Although the transformed documents are not physically integrated, the user is able to pose queries over a single XML structure. To achieve this feature we propose a single-step, Map-Reduce algorithm which takes advantage of virtual structure and computes efficiently the answer of a given XPath queries in a distributed manner.}, keywords = {Big Data, XML Integration, XPath, MapReduce} } @inproceedings{GergatsoulisNKD13, author = {Manolis Gergatsoulis and Christos Nomikos and Eleftherios Kalogeros and Matthew Damigos}, editor = {Abdelkader Hameurlain and J. Wenny Rahayu and David Taniar}, title = {An Algorithm for Querying Linked Data Using Map-Reduce}, booktitle = {Data Management in Cloud, Grid and {P2P} Systems - 6th International Conference, Globe 2013, Prague, Czech Republic, August 28-29, 2013. 
Proceedings}, series = {Lecture Notes in Computer Science}, volume = {8059}, pages = {51--62}, publisher = {Springer}, year = {2013}, url = {https://doi.org/10.1007/978-3-642-40053-7_5}, doi = {10.1007/978-3-642-40053-7_5}, abstract = {In this paper, we exploit the widely used Map-Reduce framework and propose a generic two-phase, Map-Reduce algorithm for querying large amount of linked data. The algorithm is based on the idea that the data graph can be arbitrarily partitioned into graph segments which can be stored in different nodes of a cluster of commodity computers. To answer a user query Q, Q is also decomposed into a set of subqueries. In the first phase, the subqueries are applied to each graph segment, in isolation, and intermediate results are computed. The intermediate results are combined in the second phase to obtain the answers of the query Q. The proposed algorithm computes the answers to a given query correctly, independently of a) the data graph partitioning, b) how graph segments are stored, c) the query decomposition, and d) the algorithm used for calculating (partial) results.}, keywords = {Linked Data, Graph Querying, MapReduce, Distributed Processing, Cloud Computing, Semantic Web} } @inproceedings{PinoKSK03, author = {Alexandros Pino and Eleftherios Kalogeros and Elias Salemis and Georgios Kouroupetroglou}, editor = {Constantine Stephanidis}, title = {Brain Computer Interface Cursor Measures for Motion-impaired and Able-bodied Users}, booktitle = {Human-Computer Interaction: Universal Access in {HCI:} Inclusive Design in the Information Society, Proceedings of {HCI} International 2003 (the 10th International Conference on Human-Computer Interaction), {HCI} 2003, Crete, Greece, June 22-27, 2003, Volume 4}, pages = {1462--1466}, publisher = {Lawrence Erlbaum}, year = {2003}, url = {https://speech.di.uoa.gr/sppages/spppdf/Final%20BCI%20HCII2003%20_web_.pdf}, abstract = {This paper presents the results of experimental studies that aim to measure 
the effectiveness of a Brain Computer Interface (BCI) against a mouse on "point and click" tasks performed by able-bodied and upper-limb motion-impaired users. Our methodology is based on the ISO 9241-9 guidelines. We examine how Fitts' law fits the tested input devices, and we use gross and detailed trajectory measures in order to quantify cursor movement and evaluate performance. We conclude that Fitts' law can only describe able-bodied users' performance when selecting targets with the mouse. On the other hand, the performance of both user groups with the BCI, and of motion-impaired users with the mouse does not conform to Fitts' law. Tables and charts of results are given, showing that the BCI cannot currently compete with the mouse in terms of usability, but can be used as an alternative for motion actuated devices when no other solution is possible.} } @article{KalogerosS05, title = {Αξιολόγηση Απόδοσης Συσκευών Προσομοίωσης Ποντικιού - Εφαρμογή σε Σύστημα Εγκεφαλικών Σημάτων}, volume = {2}, issn = {1790-4544}, number = {1}, journal = {Επιλεγμένες διπλωματικές και πτυχιακές εργασίες, Τμήμα Πληροφορικής και Τηλεπικοινωνιών, Εθνικό και Καποδιστριακό Πανεπιστήμιο Αθηνών}, author = {Ελευθέριος Καλόγερος and Ηλίας Σαλεμής}, year = {2005}, publisher = {Τμήμα Πληροφορικής και Τηλεπικοινωνιών, Εθνικό και Καποδιστριακό Πανεπιστήμιο Αθηνών}, pages = {107--116}, url = {https://speech.di.uoa.gr/sppages/spppdf/salemis2005.pdf}, abstract = {Στην παρούσα εργασία παρουσιάζουμε τη σχεδίαση και υλοποίηση μιας εφαρμογής λογισμικού που επεκτείνει τις συστάσεις και υποδείξεις του προτύπου ISO 9241-9 για την πραγματοποίηση πολυπαραμετρικών πειραμάτων και αναλύσεων αξιολόγησης των επιδόσεων εργονομίας Διεπαφής Ανθρώπου Υπολογιστή κατά την χρήση οποιασδήποτε συσκευής κατάδειξης και επιλογής (point and click) που προσομοιώνει τις λειτουργίες ενός κοινού ποντικιού. 
Στη συνέχεια εφαρμόζοντας το λογισμικό αυτό εξετάζουμε κατά πόσο μια συσκευή ανίχνευσης εγκεφαλικών κυμάτων που υποστηρίζει επικοινωνία ανθρώπου - υπολογιστή και προσομοιώνει τις λειτουργίες του ποντικιού, είναι αξιόπιστη, ακριβής λειτουργική, ιδιαίτερα στις περιπτώσεις ατόμων με σοβαρές αναπηρίες στα άνω άκρα.} } @phdthesis{KalogerosPhD22, author = {Eleftherios Kalogeros}, title = {Data and Knowledge Management in Cloud Computing Environment}, school = {Department of Archives, Library Science and Museology, Ionian University}, address = {Corfu, Greece}, doi = {10.12681/eadd/51958}, url = {http://hdl.handle.net/10442/hedi/51958}, year = {2022}, abstract = {In this Ph.D. thesis, we study the problem of efficiently evaluating basic graph pattern (BGP) queries over a large amount of linked data (RDF data) in parallel and provide four approaches. In this context, we consider that the data graph has been partitioned into graph segments and the initial query Q is decomposed into a set of BGP subqueries. In the first three approaches, the widely used MapReduce framework is used for querying a large amount of linked data. In the first approach, a generic two-phase, MapReduce algorithm is presented. The algorithm is based on the idea that the data graph has been arbitrarily partitioned into graph segments which are stored in different nodes of a cluster of commodity machines. To answer a user query Q, Q is also decomposed into a set of random BGP subqueries. In the first phase, the subqueries are applied to each graph segment, in isolation, and intermediate results are computed. The intermediate results are appropriately combined in the second phase to obtain the answers of the initial query Q. The proposed algorithm computes the answers to a given query correctly, independently of a) the data graph partitioning, b) the way that graph segments are stored, c) the query decomposition, and d) the algorithm used for calculating (partial) results. 
In the second approach, we present a method which focusing on the decomposition of the query Q, into a set of generalized star queries, which are queries that allow both subject-object and object-subject edges from a specific node, called central node. It is proved that each query Q can be transformed into a set of subject-object star subqueries. The data graph has also been arbitrarily partitioned into graph segments and a two-phase, scalable MapReduce algorithm is proposed that efficiently results from the answer of the initial query Q by computing and appropriately combining the generalized subqueries answers. The third approach is based on the assumptions that data graphs are partitioned in the distributed file system in such a way so as replication of data triples between the data segments is allowed. Data triples are replicated in such a way so as answers of generalized star queries, can be obtained from a single data segment. One and a half phase, scalable, MapReduce algorithm is proposed that efficiently computes the answer of the initial query Q by computing and appropriately combining the subquery answers. It is proved that, under certain conditions, the query can be answered in a single MapReduce phase. In the fourth approach, we propose an effective data model for storing RDF data in a document database using a maximum replication factor of 2 (i.e., in the worst-case scenario, the data graph will be doubled in storage size). The proposed storage model is utilized for efficiently evaluating BGP queries in a distributed manner. Each query is decomposed into a set of generalized star queries. The proposed data model ensures that no joining operations over multiple datasets are required to evaluate generalized star queries. The results of the evaluation of the generalized star subqueries of a query Q are then properly combined, in order to compute the answers of the initial query Q. 
The proposed approach has been implemented using MongoDB and Apache Spark.}, keywords = {Semantic Web; Linked Data; Parallel Query Processing; Distributed Algorithms; RDF; Cloud Computing; MapReduce; Big Data} } @online{SfakakisKDZDZMPG22, author = {Michalis Sfakakis and Eleftherios Kalogeros and Matthew Damigos and Sofia Zapounidou and Aggeliki Drakopoulou and Costas Zervopoulos and Gerasimos Martinis and Christos Papatheodorou and Manolis Gergatsoulis}, title = {Music Score Archives of Ionian Islands (Greece) Philharmonic Bands}, url = {https://youtu.be/inULjf0ulss?t=2239}, organization = {EuropeanaTech & IIIF Webinar}, date = {2022-11-21} }