2024

  • B. Casella, W. Riviera, M. Aldinucci, and G. Menegaz, “Protocol for training MERGE: A federated multi-input neural network for COVID-19 prognosis,” STAR Protocols, 2024. doi:10.1016/j.xpro.2023.102812
    [BibTeX] [Abstract] [Download PDF]

    Federated learning is a cooperative learning approach that has emerged as an effective way to address privacy concerns. Here, we present a protocol for training MERGE: a federated multi-input neural network (NN) for COVID-19 prognosis. We describe steps for collecting and preprocessing datasets. We then detail the process of training a multi-input NN. This protocol can be adapted for use with datasets containing both image- and table-based input sources.

    @article{24:casella:starprotocol,
    title = {Protocol for training {MERGE}: A federated multi-input neural network for {COVID-19} prognosis},
    author = {Casella, Bruno and Riviera, Walter and Aldinucci, Marco and Menegaz, Gloria},
    year = {2024},
    journal = {STAR Protocols},
    doi = {10.1016/j.xpro.2023.102812},
    institution = {Computer Science Department, University of Torino},
    url = {https://prod-shared-star-protocols.s3.amazonaws.com/protocols/3225.pdf},
    keywords = {icsc, epi, federated},
    abstract = {Federated learning is a cooperative learning approach that has emerged as an effective way to address privacy concerns. Here, we present a protocol for training MERGE: a federated multi-input neural network (NN) for COVID-19 prognosis. We describe steps for collecting and preprocessing datasets. We then detail the process of training a multi-input NN. This protocol can be adapted for use with datasets containing both image- and table-based input sources.}
    }

2023

  • G. Mittone, G. Malenza, M. Aldinucci, and R. Birke, “Distributed Edge Inference: an Experimental Study on Multiview Detection,” in UCC ’23: Proceedings of the 16th IEEE/ACM International Conference on Utility and Cloud Computing Companion, Taormina, Italy, 2023.
    [BibTeX] [Abstract] [Download PDF]

    Computing is evolving rapidly to cater to the increasing demand for sophisticated services, and Cloud computing lays a solid foundation for flexible on-demand provisioning. However, as the size of applications grows, the centralised client-server approach used by Cloud computing increasingly limits the applications’ scalability. To achieve ultra-scalability, cloud/edge/fog computing converges into the compute continuum, completely decentralising the infrastructure to encompass universal, pervasive resources. The compute continuum makes devising applications benefitting from this complex environment a challenging research problem. We put the opportunities the compute continuum offers to the test through a real-world multi-view detection model (MvDet) implemented with the FastFL C/C++ high-performance edge inference framework. Computational performance is discussed considering many experimental scenarios, encompassing different edge computational capabilities and network bandwidths. We obtain up to 1.92x speedup in inference time over a centralised solution using the same devices.

    @inproceedings{23:mittone:multiview,
    title = {Distributed Edge Inference: an Experimental Study on Multiview Detection},
    author = {Mittone, Gianluca and Malenza, Giulio and Aldinucci, Marco and Birke, Robert},
    year = {2023},
    month = dec,
    booktitle = {UCC '23: Proceedings of the 16th IEEE/ACM International Conference on Utility and Cloud Computing Companion},
    address = {Taormina, Italy},
    note = {In press},
    abstract = {Computing is evolving rapidly to cater to the increasing demand for sophisticated services, and Cloud computing lays a solid foundation for flexible on-demand provisioning. However, as the size of applications grows, the centralised client-server approach used by Cloud computing increasingly limits the applications’ scalability. To achieve ultra-scalability, cloud/edge/fog computing converges into the compute continuum, completely decentralising the infrastructure to encompass universal, pervasive resources. The compute continuum makes devising applications benefitting from this complex environment a challenging research problem. We put the opportunities the compute continuum offers to the test through a real-world multi-view detection model (MvDet) implemented with the FastFL C/C++ high-performance edge inference framework. Computational performance is discussed considering many experimental scenarios, encompassing different edge computational capabilities and network bandwidths. We obtain up to 1.92x speedup in inference time over a centralised solution using the same devices.},
    institution = {Computer Science Department, University of Torino},
    keywords = {eupilot, icsc, learning, federated},
    url = {https://iris.unito.it/handle/2318/1950083}
    }

  • S. Fonio, L. Paletto, M. Cerrato, D. Ienco, and R. Esposito, “Hierarchical priors for Hyperspherical Prototypical Networks,” in 31st European Symposium on Artificial Neural Networks, Computational Intelligence and Machine Learning, ESANN, Bruges, Belgium, 2023.
    [BibTeX] [Abstract] [Download PDF]

    In this paper, we explore the usage of hierarchical priors to improve learning in contexts where the number of available examples is extremely low. Specifically, we consider a Prototype Learning setting where deep neural networks are used to embed data in hyperspherical geometries.In this scenario, we propose an innovative way to learn the prototypes by combining class separation and hierarchical information. In addition, we introduce a contrastive loss function capable of balancing the exploitation of prototypes through a prototype pruning mechanism. We compare the proposed method with state-of-the-art approaches on two public datasets.

    @inproceedings{23:esann:fonio,
    title = {Hierarchical priors for Hyperspherical Prototypical Networks},
    author = {Fonio, Samuele and Paletto, Lorenzo and Cerrato, Mattia and Ienco, Dino and Esposito, Roberto},
    year = {2023},
    month = oct,
    booktitle = {31st European Symposium on Artificial Neural Networks, Computational Intelligence and Machine Learning, {ESANN}},
    address = {Bruges, Belgium},
    note = {In print},
    abstract = {In this paper, we explore the usage of hierarchical priors to improve learning in contexts where the number of available examples is extremely low. Specifically, we consider a Prototype Learning setting where deep neural networks are used to embed data in hyperspherical geometries.In this scenario, we propose an innovative way to learn the prototypes by combining class separation and hierarchical information. In addition, we introduce a contrastive loss function capable of balancing the exploitation of prototypes through a prototype pruning mechanism. We compare the proposed method with state-of-the-art approaches on two public datasets.},
    url = {https://www.esann.org/sites/default/files/proceedings/2023/ES2023-65.pdf},
    bdsk-url-1 = {https://www.esann.org/sites/default/files/proceedings/2023/ES2023-65.pdf},
    bdsk-url-2 = {https://doi.org/10.14428/esann/2023.ES2023-65},
    keywords = {icsc}
    }

  • G. Mittone and S. Fonio, “Benchmarking Federated Learning Scalability,” in Proceedings of the 2nd Italian Conference on Big Data and Data Science, ITADATA 2023, September 11-13, 2023, Naples, Italy, 2023.
    [BibTeX] [Abstract] [Download PDF]

    Federated Learning (FL) is a widespread Machine Learning paradigm handling distributed Big Data. In this work, we demonstrate that different FL frameworks expose different scaling performances despite adopting the same technologies, highlighting the need for a more comprehensive study on the topic.

    @inproceedings{23:itadata:extabstract:mittone:fonio,
    title = {Benchmarking Federated Learning Scalability},
    author = {Mittone, Gianluca and Fonio, Samuele},
    year = {2023},
    month = sep,
    booktitle = {Proceedings of the 2nd Italian Conference on Big Data and Data Science, {ITADATA} 2023, September 11-13, 2023},
    publisher = {CEUR},
    address = {Naples, Italy},
    note = {In press},
    abstract = {Federated Learning (FL) is a widespread Machine Learning paradigm handling distributed Big Data. In this work, we demonstrate that different FL frameworks expose different scaling performances despite adopting the same technologies, highlighting the need for a more comprehensive study on the topic.},
    keywords = {icsc, eupilot, learning, parallel, federated},
    url = {https://hdl.handle.net/2318/1933852}
    }

  • C. Hong, J. Huang, R. Birke, and L. Y. Chen, “Exploring and Exploiting Data-Free Model Stealing,” in European Conference on Machine Learning and Principles and Practice of Knowledge Discovery in Databases (ECML PKDD), Turin, Italy, 2023. doi:10.1007/978-3-031-43424-2_2
    [BibTeX] [Abstract] [Download PDF]

    Deep machine learning models, e.g., image classifier, are increasingly deployed in the wild to provide services to users. Adversaries are shown capable of stealing the knowledge of these models by sending inference queries and then training substitute models based on query results. The availability and quality of adversarial query inputs are undoubtedly crucial in the stealing process. The recent prior art demonstrates the feasibility of replacing real data by exploring the synthetic adversarial queries, so called data-free attacks, under strong adversarial assumptions, i.e., the deployed classier returns not only class labels but also class probabilities. In this paper, we consider a general adversarial model and propose an effective data-free stealing algorithm, Tandem-GAN, which not only explores synthetic queries but also explicitly exploits the high quality ones. The core of TandemGAN is composed of (i) substitute model which imitates the target model through synthetic queries and their inferred labels; and (ii) a tandem generator consisting of two networks, Gx and Ge, which first explores the synthetic data space via Gx and then exploits high-quality examples via Ge to maximize the knowledge transfer from the target to the substitute model. Our results on four datasets show that the accuracy of our trained substitute model ranges between 96-67\% of the target model and outperforms the existing state-of-the-art data-free model stealing approach by up to 2.5X.

    @inproceedings{23:hong:datafree,
    title = {Exploring and Exploiting Data-Free Model Stealing},
    author = {Hong, Chi and Huang, Jiyue and Birke, Robert and Chen, Lydia Y.},
    year = {2023},
    month = sep,
    booktitle = {European Conference on Machine Learning and Principles and Practice of Knowledge Discovery in Databases ({ECML} {PKDD})},
    address = {Turin, Italy},
    doi = {10.1007/978-3-031-43424-2_2},
    abstract = {Deep machine learning models, e.g., image classifier, are increasingly deployed in the wild to provide services to users. Adversaries are shown capable of stealing the knowledge of these models by sending inference queries and then training substitute models based on query results. The availability and quality of adversarial query inputs are undoubtedly crucial in the stealing process. The recent prior art demonstrates the feasibility of replacing real data by exploring the synthetic adversarial queries, so called data-free attacks, under strong adversarial assumptions, i.e., the deployed classier returns not only class labels but also class probabilities. In this paper, we consider a general adversarial model and propose an effective data-free stealing algorithm, Tandem-GAN, which not only explores synthetic queries but also explicitly exploits the high quality ones. The core of TandemGAN is composed of (i) substitute model which imitates the target model through synthetic queries and their inferred labels; and (ii) a tandem generator consisting of two networks, Gx and Ge, which first explores the synthetic data space via Gx and then exploits high-quality examples via Ge to maximize the knowledge transfer from the target to the substitute model. Our results on four datasets show that the accuracy of our trained substitute model ranges between 96-67\% of the target model and outperforms the existing state-of-the-art data-free model stealing approach by up to 2.5X.},
    url = {https://iris.unito.it/retrieve/ce44dec6-12c9-443d-99e7-f1141e50aa3a/Data-free%20Model%20Stealing.pdf},
    bdsk-url-1 = {https://dx.doi.org/10.1007/978-3-031-43424-2_2},
    keywords = {icsc, eupilot}
    }

  • G. Mittone, W. Riviera, I. Colonnelli, R. Birke, and M. Aldinucci, “Model-Agnostic Federated Learning,” in Euro-Par 2023: Parallel Processing, Limassol, Cyprus, 2023, p. 383–396. doi:10.1007/978-3-031-39698-4_26
    [BibTeX] [Abstract] [Download PDF]

    Since its debut in 2016, Federated Learning (FL) has been tied to the inner workings of Deep Neural Networks (DNNs). On the one hand, this allowed its development and widespread use as DNNs proliferated. On the other hand, it neglected all those scenarios in which using DNNs is not possible or advantageous. The fact that most current FL frameworks only allow training DNNs reinforces this problem. To address the lack of FL solutions for non-DNN-based use cases, we propose MAFL (Model-Agnostic Federated Learning). MAFL marries a model-agnostic FL algorithm, AdaBoost.F, with an open industry-grade FL framework: Intel OpenFL. MAFL is the first FL system not tied to any specific type of machine learning model, allowing exploration of FL scenarios beyond DNNs and trees. We test MAFL from multiple points of view, assessing its correctness, flexibility and scaling properties up to 64 nodes. We optimised the base software achieving a 5.5x speedup on a standard FL scenario. MAFL is compatible with x86-64, ARM-v8, Power and RISC-V.

    @inproceedings{23:mittone:mafl,
    title = {Model-Agnostic Federated Learning},
    author = {Mittone, Gianluca and Riviera, Walter and Colonnelli, Iacopo and Birke, Robert and Aldinucci, Marco},
    year = {2023},
    month = aug,
    booktitle = {Euro-Par 2023: Parallel Processing},
    publisher = {{Springer}},
    address = {Limassol, Cyprus},
    series = {Lecture Notes in Computer Science},
    volume = {14100},
    pages = {383--396},
    doi = {10.1007/978-3-031-39698-4_26},
    abstract = {Since its debut in 2016, Federated Learning (FL) has been tied to the inner workings of Deep Neural Networks (DNNs). On the one hand, this allowed its development and widespread use as DNNs proliferated. On the other hand, it neglected all those scenarios in which using DNNs is not possible or advantageous. The fact that most current FL frameworks only allow training DNNs reinforces this problem. To address the lack of FL solutions for non-DNN-based use cases, we propose MAFL (Model-Agnostic Federated Learning). MAFL marries a model-agnostic FL algorithm, AdaBoost.F, with an open industry-grade FL framework: Intel OpenFL. MAFL is the first FL system not tied to any specific type of machine learning model, allowing exploration of FL scenarios beyond DNNs and trees. We test MAFL from multiple points of view, assessing its correctness, flexibility and scaling properties up to 64 nodes. We optimised the base software achieving a 5.5x speedup on a standard FL scenario. MAFL is compatible with x86-64, ARM-v8, Power and RISC-V.},
    date-added = {2023-03-08 21:51:14 +0000},
    institution = {Computer Science Department, University of Torino},
    keywords = {eupilot, icsc, learning, riscv, federated},
    url = {https://doi.org/10.1007/978-3-031-39698-4_26},
    bdsk-url-1 = {https://doi.org/10.48550/arXiv.2303.04906}
    }

  • J. Valtari, A. Kulmala, S. Schönborn, D. Kozhaya, R. Birke, and J. Reikko, “Real-life Pilot of Virtual Protection and Control – Experiences and Performance Analysis,” in 27th International Conference on Electricity Distribution (CIRED), Rome, Italy, 2023. doi:10.1049/icp.2023.1219
    [BibTeX] [Abstract] [Download PDF]

    Virtualized protection and control (VPC) is seen as a promising evolution for the centralized protection and control (CPC) concept. Centralization of protection functions consolidates the functions of multiple traditional relays into one device. This consolidation reduces communications network complexity and offers effective ways to manage protection applications of the substation. Making the CPC available as a VPC software image instead of a dedicated device creates yet another degree of freedom. The solution becomes hardware independent, bringing more flexibility and scalability to the solution. ABB and Caruna together wanted to explore these possibilities in a real-life substation pilot. This paper describes the piloted VPC environment and the results from the piloting period. The results show that virtualization technology is suitable for time critical protection and control applications, with real-time performance comparable to existing non-virtualized solutions.

    @inproceedings{23:valtari:pilot,
    title = {Real-life Pilot of Virtual Protection and Control -- Experiences and Performance Analysis},
    author = {Valtari, Jani and Kulmala, Anna and Sch{\"o}nborn, Sandro and Kozhaya, David and Birke, Robert and Reikko, Jyrki},
    year = {2023},
    month = jun,
    booktitle = {27th International Conference on Electricity Distribution ({CIRED})},
    address = {Rome, Italy},
    doi = {10.1049/icp.2023.1219},
    abstract = {Virtualized protection and control (VPC) is seen as a promising evolution for the centralized protection and control (CPC) concept. Centralization of protection functions consolidates the functions of multiple traditional relays into one device. This consolidation reduces communications network complexity and offers effective ways to manage protection applications of the substation. Making the CPC available as a VPC software image instead of a dedicated device creates yet another degree of freedom. The solution becomes hardware independent, bringing more flexibility and scalability to the solution. ABB and Caruna together wanted to explore these possibilities in a real-life substation pilot. This paper describes the piloted VPC environment and the results from the piloting period. The results show that virtualization technology is suitable for time critical protection and control applications, with real-time performance comparable to existing non-virtualized solutions.},
    url = {https://iris.unito.it/retrieve/5de5fb00-02bf-4ba8-a4db-5876415d5105/virtualization_full_paper_cired2023_submitted.pdf}
    }

  • A. Antelmi, G. Cordasco, M. Polato, V. Scarano, C. Spagnuolo, and D. Yang, “A Survey on Hypergraph Representation Learning,” ACM Comput. Surv., 2023. doi:10.1145/3605776
    [BibTeX] [Abstract] [Download PDF]

    Hypergraphs have attracted increasing attention in recent years thanks to their flexibility in naturally modeling a broad range of systems where high-order relationships exist among their interacting parts. This survey reviews the newly born hypergraph representation learning problem, whose goal is to learn a function to project objects – most commonly nodes – of an input hyper-network into a latent space such that both the structural and relational properties of the network can be encoded and preserved. We provide a thorough overview of existing literature and offer a new taxonomy of hypergraph embedding methods by identifying three main families of techniques, i.e., spectral, proximity-preserving, and (deep) neural networks. For each family, we describe its characteristics and our insights in a single yet flexible framework and then discuss the peculiarities of individual methods, as well as their pros and cons. We then review the main tasks, datasets, and settings in which hypergraph embeddings are typically used. We finally identify and discuss open challenges that would inspire further research in this field.

    @article{Antelmi_CSUR_23,
    title = {A Survey on Hypergraph Representation Learning},
    author = {Antelmi, Alessia and Cordasco, Gennaro and Polato, Mirko and Scarano, Vittorio and Spagnuolo, Carmine and Yang, Dingqi},
    year = {2023},
    month = jun,
    journal = {ACM Computing Surveys},
    publisher = {Association for Computing Machinery},
    address = {New York, NY, USA},
    doi = {10.1145/3605776},
    issn = {0360-0300},
    abstract = {Hypergraphs have attracted increasing attention in recent years thanks to their flexibility in naturally modeling a broad range of systems where high-order relationships exist among their interacting parts. This survey reviews the newly born hypergraph representation learning problem, whose goal is to learn a function to project objects - most commonly nodes - of an input hyper-network into a latent space such that both the structural and relational properties of the network can be encoded and preserved. We provide a thorough overview of existing literature and offer a new taxonomy of hypergraph embedding methods by identifying three main families of techniques, i.e., spectral, proximity-preserving, and (deep) neural networks. For each family, we describe its characteristics and our insights in a single yet flexible framework and then discuss the peculiarities of individual methods, as well as their pros and cons. We then review the main tasks, datasets, and settings in which hypergraph embeddings are typically used. We finally identify and discuss open challenges that would inspire further research in this field.},
    url = {https://doi.org/10.1145/3605776},
    bdsk-url-1 = {https://doi.org/10.1145/3605776},
    keywords = {analytics}
    }

  • S. Schönborn, R. Birke, D. Kozhaya, and T. Sivanthi, “Real-Time Performance of Virtualised Protection and Control Software,” in 27th International Conference on Electricity Distribution (CIRED), Rome, Italy, 2023. doi:10.1049/icp.2023.1028
    [BibTeX] [Abstract] [Download PDF]

    Substation automation is ever challenged by the integration of distributed energy resources which imposes higher deployment flexibility and adaptability for protection and control. Although virtualization helps to run software applications independent of the underlying platform in IT infrastructures and cloud computing, it is still not commonly used in the field of substation automation. This is mainly due to the real-time performance demands of substation automation protection and control applications. In this article, we present an approach for running substation automation protection and control software in virtual environments. We contrast the real-time performance of different virtualization technologies under different workloads and focus on the performance evaluation of protection and control software in container-based solutions running on Linux with PREEMPT RT. We also present additional results for performance achieved in virtual machines. Our results clearly demonstrate that it is possible to run substation automation protection and control software in virtual environments while still providing the necessary performance. This paves the way for the deployment of substation protection and control software in virtualisation environments.

    @inproceedings{23:schoenborn:vipac,
    title = {Real-Time Performance of Virtualised Protection and Control Software},
    author = {Sch{\"o}nborn, Sandro and Birke, Robert and Kozhaya, David and Sivanthi, Thanikesavan},
    year = {2023},
    month = jun,
    booktitle = {27th International Conference on Electricity Distribution ({CIRED})},
    address = {Rome, Italy},
    doi = {10.1049/icp.2023.1028},
    abstract = {Substation automation is ever challenged by the integration of distributed energy resources which imposes higher deployment flexibility and adaptability for protection and control. Although virtualization helps to run software applications independent of the underlying platform in IT infrastructures and cloud computing, it is still not commonly used in the field of substation automation. This is mainly due to the real-time performance demands of substation automation protection and control applications. In this article, we present an approach for running substation automation protection and control software in virtual environments. We contrast the real-time performance of different virtualization technologies under different workloads and focus on the performance evaluation of protection and control software in container-based solutions running on Linux with PREEMPT RT. We also present additional results for performance achieved in virtual machines. Our results clearly demonstrate that it is possible to run substation automation protection and control software in virtual environments while still providing the necessary performance. This paves the way for the deployment of substation protection and control software in virtualisation environments.},
    url = {https://iris.unito.it/retrieve/eb610327-6e38-4f5e-8673-e62f2d956821/10702-Scho%cc%88nborn.pdf}
    }

  • M. Aldinucci, R. Birke, A. Brogi, E. Carlini, M. Coppola, M. Danelutto, P. Dazzi, L. Ferrucci, S. Forti, H. Kavalionak, G. Mencagli, M. Mordacchin, M. Pasin, F. Paganelli, and M. Torquati, “A Proposal for a Continuum-aware Programming Model: From Workflows to Services Autonomously Interacting in the Compute Continuum,” in 2023 IEEE 47th Annual Computers, Software, and Applications Conference (COMPSAC), Turin, Italy, 2023. doi:10.1109/COMPSAC57700.2023.00287
    [BibTeX] [Abstract] [Download PDF]

    This paper proposes a continuum-aware programming model enabling the execution of application workflows across the compute continuum: cloud, fog and edge resources. It simplifies the management of heterogeneous nodes while alleviating the burden of programmers and unleashing innovation. This model optimizes the continuum through advanced development experiences by transforming workflows into autonomous service collaborations. It reduces complexity in positioning/interconnecting services across the continuum. A meta-model introduces high-level workflow descriptions as service networks with defined contracts and quality of service, thus enabling the deployment/management of workflows as first-class entities. It also provides automation based on policies, monitoring and heuristics. Tailored mechanisms orchestrate/manage services across the continuum, optimizing performance, cost, data protection and sustainability while managing risks. This model facilitates incremental development with visibility of design impacts and seamless evolution of applications and infrastructures. In this work, we explore this new computing paradigm showing how it can trigger the development of a new generation of tools to support the compute continuum progress.

    @inproceedings{23:aldinucci:continuum,
    title = {A Proposal for a Continuum-aware Programming Model: From Workflows to Services Autonomously Interacting in the Compute Continuum},
    author = {Aldinucci, Marco and Birke, Robert and Brogi, Antonio and Carlini, Emanuele and Coppola, Massimo and Danelutto, Marco and Dazzi, Patrizio and Ferrucci, Luca and Forti, Stefano and Kavalionak, Hanna and Mencagli, Gabriele and Mordacchin, Matteo and Pasin, Marcelo and Paganelli, Federica and Torquati, Massimo},
    year = {2023},
    month = jun,
    booktitle = {2023 {IEEE} 47th Annual Computers, Software, and Applications Conference ({COMPSAC})},
    publisher = {{IEEE}},
    address = {Turin, Italy},
    doi = {10.1109/COMPSAC57700.2023.00287},
    abstract = {This paper proposes a continuum-aware programming model enabling the execution of application workflows across the compute continuum: cloud, fog and edge resources. It simplifies the management of heterogeneous nodes while alleviating the burden of programmers and unleashing innovation. This model optimizes the continuum through advanced development experiences by transforming workflows into autonomous service collaborations. It reduces complexity in positioning/interconnecting services across the continuum. A meta-model introduces high-level workflow descriptions as service networks with defined contracts and quality of service, thus enabling the deployment/management of workflows as first-class entities. It also provides automation based on policies, monitoring and heuristics. Tailored mechanisms orchestrate/manage services across the continuum, optimizing performance, cost, data protection and sustainability while managing risks. This model facilitates incremental development with visibility of design impacts and seamless evolution of applications and infrastructures. In this work, we explore this new computing paradigm showing how it can trigger the development of a new generation of tools to support the compute continuum progress.},
    url = {https://iris.unito.it/retrieve/2ae13a33-5814-43da-8ea6-2d3e8b122384/Continuum-aware-PM.pdf},
    bdsk-url-1 = {https://iris.unito.it/retrieve/2ae13a33-5814-43da-8ea6-2d3e8b122384/Continuum-aware-PM.pdf},
    bdsk-url-2 = {https://doi.org/10.1109/COMPSAC57700.2023.00287},
    keywords = {icsc}
    }

  • G. Mittone, N. Tonci, R. Birke, I. Colonnelli, D. Medić, A. Bartolini, R. Esposito, E. Parisi, F. Beneventi, M. Polato, M. Torquati, L. Benini, and M. Aldinucci, “Experimenting with Emerging RISC-V Systems for Decentralised Machine Learning,” in 20th ACM International Conference on Computing Frontiers (CF ’23), Bologna, Italy, 2023. doi:10.1145/3587135.3592211
    [BibTeX] [Abstract] [Download PDF]

    Decentralised Machine Learning (DML) enables collaborative machine learning without centralised input data. Federated Learning (FL) and Edge Inference are examples of DML. While tools for DML (especially FL) are starting to flourish, many are not flexible and portable enough to experiment with novel systems (e.g., RISC-V), non-fully connected topologies, and asynchronous collaboration schemes. We overcome these limitations via a domain-specific language allowing to map DML schemes to an underlying middleware, i.e. the FastFlow parallel programming library. We experiment with it by generating different working DML schemes on two emerging architectures (ARM-v8, RISC-V) and the x86-64 platform. We characterise the performance and energy efficiency of the presented schemes and systems. As a byproduct, we introduce a RISC-V porting of the PyTorch framework, the first publicly available to our knowledge.

    @inproceedings{23:mittone:fl-riscv,
    title = {Experimenting with Emerging {RISC-V} Systems for Decentralised Machine Learning},
    author = {Mittone, Gianluca and Tonci, Nicol{\`o} and Birke, Robert and Colonnelli, Iacopo and Medi\'{c}, Doriana and Bartolini, Andrea and Esposito, Roberto and Parisi, Emanuele and Beneventi, Francesco and Polato, Mirko and Torquati, Massimo and Benini, Luca and Aldinucci, Marco},
    year = {2023},
    month = may,
    booktitle = {20th {ACM} International Conference on Computing Frontiers ({CF} '23)},
    publisher = {{ACM}},
    address = {Bologna, Italy},
    doi = {10.1145/3587135.3592211},
    isbn = {979-8-4007-0140-5},
    note = {https://arxiv.org/abs/2302.07946},
    abstract = {Decentralised Machine Learning (DML) enables collaborative machine learning without centralised input data. Federated Learning (FL) and Edge Inference are examples of DML. While tools for DML (especially FL) are starting to flourish, many are not flexible and portable enough to experiment with novel systems (e.g., RISC-V), non-fully connected topologies, and asynchronous collaboration schemes. We overcome these limitations via a domain-specific language allowing to map DML schemes to an underlying middleware, i.e. the FastFlow parallel programming library. We experiment with it by generating different working DML schemes on two emerging architectures (ARM-v8, RISC-V) and the x86-64 platform. We characterise the performance and energy efficiency of the presented schemes and systems. As a byproduct, we introduce a RISC-V porting of the PyTorch framework, the first publicly available to our knowledge.},
    date-added = {2023-03-14 15:34:00 +0000},
    institution = {Computer Science Department, University of Torino},
    keywords = {eupilot, icsc, learning, parallel, riscv, federated},
    url = {https://dl.acm.org/doi/pdf/10.1145/3587135.3592211}
    }

  • A. Mulone, S. Awad, D. Chiarugi, and M. Aldinucci, “Porting the Variant Calling Pipeline for NGS data in cloud-HPC environment,” in 47th IEEE Annual Computers, Software, and Applications Conference, COMPSAC 2023, Torino, Italy, 2023, p. 1858–1863. doi:10.1109/COMPSAC57700.2023.00288
    [BibTeX] [Abstract] [Download PDF]

    In recent years we have understood the importance of analyzing and sequencing human genetic variation. A relevant aspect that emerged from the Covid-19 pandemic was the need to obtain results very quickly; this involved using High-Performance Computing (HPC) environments to execute the Next Generation Sequencing (NGS) pipeline. However, HPC is not always the most suitable environment for the entire execution of a pipeline, especially when it involves many heterogeneous tools. The ability to execute parts of the pipeline on different environments can lead to higher performance but also cheaper executions. This work shows the design and optimization process that led us to a state-of-the-art Variant Calling hybrid workflow based on the StreamFlow Workflow Management System (WfMS). We also compare StreamFlow with Snakemake, an established WfMS targeting HPC facilities, observing comparable performance on single environments and satisfactory improvements with a hybrid cloud-HPC configuration.

    @inproceedings{23:mulone:wide:vcp,
    title = {Porting the {Variant Calling Pipeline} for {NGS} data in cloud-{HPC} environment},
    author = {Alberto Mulone and Sherine Awad and Davide Chiarugi and Marco Aldinucci},
    year = {2023},
    booktitle = {47th {IEEE} Annual Computers, Software, and Applications Conference, {COMPSAC} 2023},
    publisher = {{IEEE}},
    address = {Torino, Italy},
    pages = {1858--1863},
    doi = {10.1109/COMPSAC57700.2023.00288},
    abstract = {In recent years we have understood the importance of analyzing and sequencing human genetic variation. A relevant aspect that emerged from the Covid-19 pandemic was the need to obtain results very quickly; this involved using High-Performance Computing (HPC) environments to execute the Next Generation Sequencing (NGS) pipeline. However, HPC is not always the most suitable environment for the entire execution of a pipeline, especially when it involves many heterogeneous tools. The ability to execute parts of the pipeline on different environments can lead to higher performance but also cheaper executions. This work shows the design and optimization process that led us to a state-of-the-art Variant Calling hybrid workflow based on the StreamFlow Workflow Management System (WfMS). We also compare StreamFlow with Snakemake, an established WfMS targeting HPC facilities, observing comparable performance on single environments and satisfactory improvements with a hybrid cloud-HPC configuration.},
    editor = {Hossain Shahriar and Yuuichi Teranishi and Alfredo Cuzzocrea and Moushumi Sharmin and Dave Towey and A. K. M. Jahangir Alam Majumder and Hiroki Kashiwazaki and Ji{-}Jiang Yang and Michiharu Takemoto and Nazmus Sakib and Ryohei Banno and Sheikh Iqbal Ahamed},
    url = {https://iris.unito.it/bitstream/2318/1919364/1/paper.pdf},
    bdsk-url-1 = {https://iris.unito.it/bitstream/2318/1919364/1/paper.pdf},
    bdsk-url-2 = {https://doi.org/10.1109/COMPSAC57700.2023.00288},
    keywords = {across, icsc, streamflow}
    }

  • A. Antelmi, D. De Vinco, G. Cordasco, and C. Spagnuolo, “Towards Unraveling Developers Communities in Stack Overflow and Reddit,” in International Conference on Computational Social Science 2023, 2023.
    [BibTeX] [Abstract] [Download PDF]

    This work investigates the developers’ behavior and community formation around the twenty most popular programming languages. We examined two consecutive years of programming-related questions from Stack Overflow and Reddit, performing a longitudinal study on users’ posting activity and their high-order interaction patterns abstracted via hypergraphs. Our analysis highlighted crucial differences in how these QA platforms are utilized by their users. In line with previous literature, it emphasized the constant decline of Stack Overflow in favor of more community-friendly platforms, such as Reddit, which has been growing rapidly lately.

    @inproceedings{Antelmi_IC2S2_2023,
    author     = {Antelmi, Alessia and De Vinco, Daniele and Cordasco, Gennaro and Spagnuolo, Carmine},
    title      = {Towards Unraveling Developers Communities in Stack Overflow and Reddit},
    booktitle  = {International Conference on Computational Social Science 2023},
    year       = {2023},
    url        = {https://openreview.net/forum?id=WP5ZaAFP19},
    bdsk-url-1 = {https://openreview.net/forum?id=WP5ZaAFP19},
    keywords   = {icsc, analytics},
    abstract   = {This work investigates the developers' behavior and community formation around the twenty most popular programming languages. We examined two consecutive years of programming-related questions from Stack Overflow and Reddit, performing a longitudinal study on users' posting activity and their high-order interaction patterns abstracted via hypergraphs. Our analysis highlighted crucial differences in how these QA platforms are utilized by their users. In line with previous literature, it emphasized the constant decline of Stack Overflow in favor of more community-friendly platforms, such as Reddit, which has been growing rapidly lately.}
    }

  • A. Antelmi, “Engagement in Open Data Workshops: The dark side of remote settings,” in Methodologies and Intelligent Systems for Technology Enhanced Learning, 12th International Conference, Cham, 2023.
    [BibTeX] [Abstract] [Download PDF]

    The increasing availability of Open Data gives birth to a fertile field for interested stakeholders to create value out of them; however, limited technical expertise and poor awareness are crucial barriers to their exploitation. Because of these reasons, there is an urge for learners to acquire data and information literacy competencies, which are essential for 21st-century skills, and become familiar with available Open Data sources and their potential uses. To promote the dialogue around activities to boost recognition of Open Data and improve users’ skills to work with them, we proposed a series of workshops to introduce Italian high school learners to searching for, authoring, and building effective communication based on Open Data. This article describes an ongoing activity and details its organization, reports preliminary results on learners’ engagement, and discusses both challenges of the remote setting as well as promising learning outcomes.

    @inproceedings{Antelmi_TEL4FC_2023,
    title = {Engagement in {Open Data} Workshops: The dark side of remote settings},
    author = {Antelmi, Alessia},
    year = {2023},
    booktitle = {Methodologies and Intelligent Systems for Technology Enhanced Learning, 12th International Conference},
    publisher = {Springer International Publishing},
    address = {Cham},
    abstract = {The increasing availability of Open Data gives birth to a fertile field for interested stakeholders to create value out of them; however, limited technical expertise and poor awareness are crucial barriers to their exploitation. Because of these reasons, there is an urge for learners to acquire data and information literacy competencies, which are essential for 21st-century skills, and become familiar with available Open Data sources and their potential uses. To promote the dialogue around activities to boost recognition of Open Data and improve users' skills to work with them, we proposed a series of workshops to introduce Italian high school learners to searching for, authoring, and building effective communication based on Open Data. This article describes an ongoing activity and details its organization, reports preliminary results on learners' engagement, and discusses both challenges of the remote setting as well as promising learning outcomes.},
    url = {https://link.springer.com/chapter/10.1007/978-3-031-42134-1_33},
    bdsk-url-1 = {https://link.springer.com/chapter/10.1007/978-3-031-42134-1_33},
    keywords = {icsc, analytics}
    }

  • D. Medić and M. Aldinucci, “Towards formal model for location aware workflows,” in 47th IEEE Annual Computers, Software, and Applications Conference, COMPSAC 2023, Torino, Italy, 2023, p. 1864–1869. doi:10.1109/COMPSAC57700.2023.00289
    [BibTeX] [Abstract] [Download PDF]

    Designing complex applications and executing them on large-scale topologies of heterogeneous architectures is becoming increasingly crucial in many scientific domains. As a result, diverse workflow modelling paradigms are developed, most of them with no formalisation provided. In these circumstances, comparing two different models or switching from one system to the other becomes a hard nut to crack. This paper investigates the capability of process algebra to model a location aware workflow system. Distributed π-calculus is considered as the base of the formal model due to its ability to describe the communicating components that change their structure as an outcome of the communication. Later, it is discussed how the base model could be extended or modified to capture different features of location aware workflow system. The intention of this paper is to highlight the fact that due to its flexibility, π-calculus, could be a good candidate to represent the behavioural perspective of the workflow system.

    @inproceedings{23:medic:formal-model,
    author     = {Doriana Medi\'{c} and Marco Aldinucci},
    title      = {Towards formal model for location aware workflows},
    booktitle  = {47th {IEEE} Annual Computers, Software, and Applications Conference, {COMPSAC} 2023},
    editor     = {Hossain Shahriar and Yuuichi Teranishi and Alfredo Cuzzocrea and Moushumi Sharmin and Dave Towey and A. K. M. Jahangir Alam Majumder and Hiroki Kashiwazaki and Ji{-}Jiang Yang and Michiharu Takemoto and Nazmus Sakib and Ryohei Banno and Sheikh Iqbal Ahamed},
    publisher  = {{IEEE}},
    address    = {Torino, Italy},
    year       = {2023},
    pages      = {1864--1869},
    doi        = {10.1109/COMPSAC57700.2023.00289},
    url        = {https://iris.unito.it/retrieve/1f9f959c-cd88-4d9c-90ea-54f1c86a15bc/6210-medic.pdf},
    bdsk-url-1 = {https://iris.unito.it/retrieve/1f9f959c-cd88-4d9c-90ea-54f1c86a15bc/6210-medic.pdf},
    bdsk-url-2 = {https://doi.org/10.1109/COMPSAC57700.2023.00289},
    keywords   = {eupex, icsc, semantics},
    abstract   = {Designing complex applications and executing them on large-scale topologies of heterogeneous architectures is becoming increasingly crucial in many scientific domains. As a result, diverse workflow modelling paradigms are developed, most of them with no formalisation provided. In these circumstances, comparing two different models or switching from one system to the other becomes a hard nut to crack. This paper investigates the capability of process algebra to model a location aware workflow system. Distributed $\pi$-calculus is considered as the base of the formal model due to its ability to describe the communicating components that change their structure as an outcome of the communication. Later, it is discussed how the base model could be extended or modified to capture different features of location aware workflow system. The intention of this paper is to highlight the fact that due to its flexibility, $\pi$-calculus, could be a good candidate to represent the behavioural perspective of the workflow system.}
    }

  • A. Antelmi, L. La Cava, and A. Pera, “Tell Me Who You Are and I Will Predict Your Vulnerability to Political Persuasion Techniques,” in The 12th International Conference on Complex Networks and their Applications-Book of Abstracts, 2023.
    [BibTeX] [Abstract] [Download PDF]

    Given the evolving role of social media in political communication and the strategic use of these platforms by politicians to shape public opinion, research has commonly focused on investigating computational propaganda as a means for automated information diffusion. Focusing on a less explored yet promising line, we aim to assess political persuasion in digital contexts by introducing a computational framework that combines Natural Language Processing and Network Science methods to investigate the linkage between persuasion techniques on social media and personality traits of online political audiences. Our final goal is to enhance public awareness of political tactics and encourage critical thinking in response to the online spread of political information.

    @inproceedings{Antelmi_CNA1_2023,
    author     = {Antelmi, Alessia and La Cava, Luca and Pera, Arianna},
    title      = {Tell Me Who You Are and I Will Predict Your Vulnerability to Political Persuasion Techniques},
    booktitle  = {The 12th International Conference on Complex Networks and their Applications-Book of Abstracts},
    year       = {2023},
    url        = {https://iris.unito.it/bitstream/2318/1949370/1/_CNA__23__Personality_vs_propaganda.pdf},
    bdsk-url-1 = {https://iris.unito.it/bitstream/2318/1949370/1/_CNA__23__Personality_vs_propaganda.pdf},
    keywords   = {icsc, analytics},
    abstract   = {Given the evolving role of social media in political communication and the strategic use of these platforms by politicians to shape public opinion, research has commonly focused on investigating computational propaganda as a means for automated information diffusion. Focusing on a less explored yet promising line, we aim to assess political persuasion in digital contexts by introducing a computational framework that combines Natural Language Processing and Network Science methods to investigate the linkage between persuasion techniques on social media and personality traits of online political audiences. Our final goal is to enhance public awareness of political tactics and encourage critical thinking in response to the online spread of political information.}
    }

  • O. de Filippo, F. Bruno, T. H. Pinxterhuis, M. Gasior, L. Perl, L. Gaido, D. Tuttolomondo, A. Greco, R. Verardi, G. Lo Martire, M. Iannaccone, A. Leone, G. Liccardo, S. Caglioni, R. González Ferreiro, G. Rodinò, G. Musumeci, G. Patti, I. Borzillo, G. Tarantini, W. Wańha, B. Casella, E. H. Ploumen, L. Pyka, R. Kornowski, A. Gagnor, R. Piccolo, S. R. Roubin, D. Capodanno, P. Zocca, F. Conrotto, G. M. De Ferrari, C. von Birgelen, and F. D’Ascenzo, “Predictors of target lesion failure after treatment of left main, bifurcation, or chronic total occlusion lesions with ultrathin-strut drug-eluting coronary stents in the ULTRA registry,” Catheterization and Cardiovascular Interventions, 2023. doi:10.1002/ccd.30696
    [BibTeX] [Abstract] [Download PDF]

    Background: Data about the long-term performance of new-generation ultrathin-strut drug-eluting stents (DES) in challenging coronary lesions, such as left main (LM), bifurcation, and chronic total occlusion (CTO) lesions are scant. Methods: The international multicenter retrospective observational ULTRA study included consecutive patients treated from September 2016 to August 2021 with ultrathin-strut (<70µm) DES in challenging de novo lesions. Primary endpoint was target lesion failure (TLF): composite of cardiac death, target-lesion revascularization (TLR), target-vessel myocardial infarction (TVMI), or definite stent thrombosis (ST). Secondary endpoints included all-cause death, acute myocardial infarction (AMI), target vessel revascularization, and TLF components. TLF predictors were assessed with Cox multivariable analysis. Results: Of 1801 patients (age: 66.6±11.2 years; male: 1410 [78.3%]), 170 (9.4%) experienced TLF during follow-up of 3.1±1.4 years. In patients with LM, CTO, and bifurcation lesions, TLF rates were 13.5%, 9.9%, and 8.9%, respectively. Overall, 160 (8.9%) patients died (74 [4.1%] from cardiac causes). AMI and TVMI rates were 6.0% and 3.2%, respectively. ST occurred in 11 (1.1%) patients while 77 (4.3%) underwent TLR. Multivariable analysis identified the following predictors of TLF: age, STEMI with cardiogenic shock, impaired left ventricular ejection fraction, diabetes, and renal dysfunction. Among the procedural variables, total stent length increased TLF risk (HR: 1.01, 95% CI: 1-1.02 per mm increase), while intracoronary imaging reduced the risk substantially (HR: 0.35, 95% CI: 0.12-0.82). Conclusions: Ultrathin-strut DES showed high efficacy and satisfactory safety, even in patients with challenging coronary lesions. Yet, despite using contemporary gold-standard DES, the association persisted between established patient- and procedure-related features of risk and impaired 3-year clinical outcome.

    @article{23:casella:ultra,
    title = {Predictors of target lesion failure after treatment of left main, bifurcation, or chronic total occlusion lesions with ultrathin-strut drug-eluting coronary stents in the {ULTRA} registry},
    author = {de Filippo, Ovidio and Bruno, Francesco and Pinxterhuis, Tineke H. and Gasior, Mariusz and Perl, Leor and Gaido, Luca and Tuttolomondo, Domenico and Greco, Antonio and Verardi, Roberto and Lo Martire, Gianluca and Iannaccone, Mario and Leone, Attilio and Liccardo, Gaetano and Caglioni, Serena and Gonz{\'a}lez Ferreiro, Rocio and Rodin{\`o}, Giulio and Musumeci, Giuseppe and Patti, Giuseppe and Borzillo, Irene and Tarantini, Giuseppe and Wa{\'n}ha, Wojciech and Casella, Bruno and Ploumen, Eline H. and Pyka, Lukasz and Kornowski, Ran and Gagnor, Andrea and Piccolo, Raffaele and Roubin, Sergio Raposeiras and Capodanno, Davide and Zocca, Paolo and Conrotto, Federico and De Ferrari, Gaetano M. and von Birgelen, Clemens and D'Ascenzo, Fabrizio},
    year = {2023},
    journal = {Catheterization and Cardiovascular Interventions},
    doi = {10.1002/ccd.30696},
    abstract = {Background: Data about the long-term performance of new-generation ultrathin-strut drug-eluting stents (DES) in challenging coronary lesions, such as left main (LM), bifurcation, and chronic total occlusion (CTO) lesions are scant. Methods: The international multicenter retrospective observational ULTRA study included consecutive patients treated from September 2016 to August 2021 with ultrathin-strut (<70µm) DES in challenging de novo lesions. Primary endpoint was target lesion failure (TLF): composite of cardiac death, target-lesion revascularization (TLR), target-vessel myocardial infarction (TVMI), or definite stent thrombosis (ST). Secondary endpoints included all-cause death, acute myocardial infarction (AMI), target vessel revascularization, and TLF components. TLF predictors were assessed with Cox multivariable analysis. Results: Of 1801 patients (age: 66.6$\pm$11.2 years; male: 1410 [78.3\%]), 170 (9.4\%) experienced TLF during follow-up of 3.1$\pm$1.4 years. In patients with LM, CTO, and bifurcation lesions, TLF rates were 13.5\%, 9.9\%, and 8.9\%, respectively. Overall, 160 (8.9\%) patients died (74 [4.1\%] from cardiac causes). AMI and TVMI rates were 6.0\% and 3.2\%, respectively. ST occurred in 11 (1.1\%) patients while 77 (4.3\%) underwent TLR. Multivariable analysis identified the following predictors of TLF: age, STEMI with cardiogenic shock, impaired left ventricular ejection fraction, diabetes, and renal dysfunction. Among the procedural variables, total stent length increased TLF risk (HR: 1.01, 95\% CI: 1-1.02 per mm increase), while intracoronary imaging reduced the risk substantially (HR: 0.35, 95\% CI: 0.12-0.82). Conclusions: Ultrathin-strut DES showed high efficacy and satisfactory safety, even in patients with challenging coronary lesions. Yet, despite using contemporary gold-standard DES, the association persisted between established patient- and procedure-related features of risk and impaired 3-year clinical outcome.},
    url = {https://onlinelibrary.wiley.com/doi/full/10.1002/ccd.30696},
    bdsk-url-1 = {https://onlinelibrary.wiley.com/doi/full/10.1002/ccd.30696},
    bdsk-url-2 = {https://doi.org/10.1002/ccd.30696}
    }

  • G. Mittone, F. Svoboda, M. Aldinucci, N. D. Lane, and P. Lio, "A Federated Learning Benchmark for Drug-Target Interaction," in Companion Proceedings of the ACM Web Conference 2023 (WWW '23 Companion), Austin, Texas, 2023. doi:10.1145/3543873.3587687
    [BibTeX] [Abstract] [Download PDF]

    Aggregating pharmaceutical data in the drug-target interaction (DTI) domain has the potential to deliver life-saving breakthroughs. It is, however, notoriously difficult due to regulatory constraints and commercial interests. This work proposes the application of federated learning, which we argue to be reconcilable with the industry's constraints, as it does not require sharing of any information that would reveal the entities' data or any other high-level summary of it. When used on a representative GraphDTA model and the KIBA dataset it achieves up to 15 percent improved performance relative to the best available non-privacy preserving alternative. Our extensive battery of experiments shows that, unlike in other domains, the non-IID data distribution in the DTI datasets does not deteriorate FL performance. Additionally, we identify a material trade-off between the benefits of adding new data, and the cost of adding more clients.

    @inproceedings{23:mittone:dti,
    title = {A Federated Learning Benchmark for Drug-Target Interaction},
    author = {Mittone, Gianluca and Svoboda, Filip and Aldinucci, Marco and Lane, Nicholas D. and Lio, Pietro},
    year = {2023},
    month = apr,
    booktitle = {Companion Proceedings of the ACM Web Conference 2023 (WWW '23 Companion)},
    publisher = {{ACM}},
    address = {Austin, Texas},
    doi = {10.1145/3543873.3587687},
    isbn = {978-1-4503-9419-2},
    eprint = {2302.07684},
    eprinttype = {arXiv},
    note = {https://arxiv.org/abs/2302.07684},
    abstract = {Aggregating pharmaceutical data in the drug-target interaction (DTI) domain has the potential to deliver life-saving breakthroughs. It is, however, notoriously difficult due to regulatory constraints and commercial interests. This work proposes the application of federated learning, which we argue to be reconcilable with the industry's constraints, as it does not require sharing of any information that would reveal the entities' data or any other high-level summary of it. When used on a representative GraphDTA model and the KIBA dataset it achieves up to 15 percent improved performance relative to the best available non-privacy preserving alternative. Our extensive battery of experiments shows that, unlike in other domains, the non-IID data distribution in the DTI datasets does not deteriorate FL performance. Additionally, we identify a material trade-off between the benefits of adding new data, and the cost of adding more clients.},
    date-added = {2023-03-14 15:34:00 +0000},
    institution = {Computer Science Department, University of Torino},
    keywords = {eupilot, icsc, learning, federated},
    url = {https://hdl.handle.net/2318/1898472},
    bdsk-url-1 = {https://hdl.handle.net/2318/1898472},
    bdsk-url-2 = {https://doi.org/10.1145/3543873.3587687}
    }

  • B. Casella, R. Esposito, A. Sciarappa, C. Cavazzoni, and M. Aldinucci, "Experimenting with Normalization Layers in Federated Learning on non-IID scenarios," Computer Science Department, University of Torino 2023.
    [BibTeX] [Abstract] [Download PDF]

    Training Deep Learning (DL) models require large, high-quality datasets, often assembled with data from different institutions. Federated Learning (FL) has been emerging as a method for privacy-preserving pooling of datasets employing collaborative training from different institutions by iteratively globally aggregating locally trained models. One critical performance challenge of FL is operating on datasets not independently and identically distributed (non-IID) among the federation participants. Even though this fragility cannot be eliminated, it can be debunked by a suitable optimization of two hyperparameters: layer normalization methods and collaboration frequency selection. In this work, we benchmark five different normalization layers for training Neural Networks (NNs), two families of non-IID data skew, and two datasets. Results show that Batch Normalization, widely employed for centralized DL, is not the best choice for FL, whereas Group and Layer Normalization consistently outperform Batch Normalization. Similarly, frequent model aggregation decreases convergence speed and mode quality.

    @techreport{23:casella:normalization,
    title = {Experimenting with {Normalization} Layers in {Federated} {Learning} on {non-IID} scenarios},
    author = {Casella, Bruno and Esposito, Roberto and Sciarappa, Antonio and Cavazzoni, Carlo and Aldinucci, Marco},
    year = {2023},
    eprint = {2303.10630},
    eprinttype = {arXiv},
    abstract = {Training Deep Learning (DL) models require large, high-quality datasets, often assembled with data from different institutions. Federated Learning (FL) has been emerging as a method for privacy-preserving pooling of datasets employing collaborative training from different institutions by iteratively globally aggregating locally trained models. One critical performance challenge of FL is operating on datasets not independently and identically distributed (non-IID) among the federation participants. Even though this fragility cannot be eliminated, it can be debunked by a suitable optimization of two hyperparameters: layer normalization methods and collaboration frequency selection. In this work, we benchmark five different normalization layers for training Neural Networks (NNs), two families of non-IID data skew, and two datasets. Results show that Batch Normalization, widely employed for centralized DL, is not the best choice for FL, whereas Group and Layer Normalization consistently outperform Batch Normalization. Similarly, frequent model aggregation decreases convergence speed and model quality.},
    institution = {Computer Science Department, University of Torino},
    url = {https://arxiv.org/pdf/2303.10630.pdf},
    bdsk-url-1 = {https://arxiv.org/pdf/2303.10630.pdf},
    keywords = {epi, icsc}
    }

  • I. Colonnelli, B. Casella, G. Mittone, Y. Arfat, B. Cantalupo, R. Esposito, A. R. Martinelli, D. Medić, and M. Aldinucci, "Federated Learning meets HPC and cloud," in Astrophysics and Space Science Proceedings, Catania, Italy, 2023, p. 193–199. doi:10.1007/978-3-031-34167-0_39
    [BibTeX] [Abstract] [Download PDF]

    HPC and AI are fated to meet for several reasons. This article will discuss some of them and argue why this will happen through the set of methods and technologies that underpin cloud computing. As a paradigmatic example, we present a new federated learning system that collaboratively trains a deep learning model in different supercomputing centers. The system is based on the StreamFlow workflow manager designed for hybrid cloud-HPC infrastructures.

    @inproceedings{22:ml4astro,
    author     = {Iacopo Colonnelli and Bruno Casella and Gianluca Mittone and Yasir Arfat and Barbara Cantalupo and Roberto Esposito and Alberto Riccardo Martinelli and Doriana Medi\'{c} and Marco Aldinucci},
    title      = {Federated Learning meets {HPC} and cloud},
    booktitle  = {Astrophysics and Space Science Proceedings},
    editor     = {Bufano, Filomena and Riggi, Simone and Sciacca, Eva and Schilliro, Francesco},
    publisher  = {Springer},
    address    = {Catania, Italy},
    year       = {2023},
    volume     = {60},
    pages      = {193--199},
    doi        = {10.1007/978-3-031-34167-0_39},
    isbn       = {978-3-031-34167-0},
    url        = {https://iris.unito.it/retrieve/5631da1c-96a0-48c0-a48e-2cdf6b84841d/main.pdf},
    bdsk-url-1 = {https://iris.unito.it/retrieve/5631da1c-96a0-48c0-a48e-2cdf6b84841d/main.pdf},
    keywords   = {across, eupilot, streamflow, federated},
    abstract   = {HPC and AI are fated to meet for several reasons. This article will discuss some of them and argue why this will happen through the set of methods and technologies that underpin cloud computing. As a paradigmatic example, we present a new federated learning system that collaboratively trains a deep learning model in different supercomputing centers. The system is based on the StreamFlow workflow manager designed for hybrid cloud-HPC infrastructures.}
    }

  • Y. Arfat, G. Mittone, I. Colonnelli, F. D'Ascenzo, R. Esposito, and M. Aldinucci, "Pooling critical datasets with Federated Learning," in 31st Euromicro International Conference on Parallel, Distributed and Network-Based Processing, PDP 2023, Napoli, Italy, 2023, p. 329–337. doi:10.1109/PDP59025.2023.00057
    [BibTeX] [Abstract] [Download PDF]

    Federated Learning (FL) is becoming popular in different industrial sectors where data access is critical for security, privacy and the economic value of data itself. Unlike traditional machine learning, where all the data must be globally gathered for analysis, FL makes it possible to extract knowledge from data distributed across different organizations that can be coupled with different Machine Learning paradigms. In this work, we replicate, using Federated Learning, the analysis of a pooled dataset (with AdaBoost) that has been used to define the PRAISE score, which is today among the most accurate scores to evaluate the risk of a second acute myocardial infarction. We show that thanks to the extended-OpenFL framework, which implements AdaBoost.F, we can train a federated PRAISE model that exhibits comparable accuracy and recall as the centralised model. We achieved F1 and F2 scores which are consistently comparable to the PRAISE score study of a 16- parties federation but within an order of magnitude less time.

    @inproceedings{23:praise-fl:pdp,
    title = {Pooling critical datasets with Federated Learning},
    author = {Yasir Arfat and Gianluca Mittone and Iacopo Colonnelli and Fabrizio D'Ascenzo and Roberto Esposito and Marco Aldinucci},
    year = {2023},
    booktitle = {31st Euromicro International Conference on Parallel, Distributed and Network-Based Processing, {PDP} 2023},
    publisher = {{IEEE}},
    address = {Napoli, Italy},
    pages = {329--337},
    doi = {10.1109/PDP59025.2023.00057},
    abstract = {Federated Learning (FL) is becoming popular in different industrial sectors where data access is critical for security, privacy and the economic value of data itself. Unlike traditional machine learning, where all the data must be globally gathered for analysis, FL makes it possible to extract knowledge from data distributed across different organizations that can be coupled with different Machine Learning paradigms. In this work, we replicate, using Federated Learning, the analysis of a pooled dataset (with AdaBoost) that has been used to define the PRAISE score, which is today among the most accurate scores to evaluate the risk of a second acute myocardial infarction. We show that thanks to the extended-OpenFL framework, which implements AdaBoost.F, we can train a federated PRAISE model that exhibits comparable accuracy and recall as the centralised model. We achieved F1 and F2 scores which are consistently comparable to the PRAISE score study of a 16-parties federation but within an order of magnitude less time.},
    date-added = {2023-02-04 18:16:36 +0100},
    date-modified = {2023-02-04 18:34:25 +0100},
    keywords = {admire, hpc4ai, c3s, learning, federated},
    url = {https://iris.unito.it/retrieve/491e22ec-3db5-4989-a063-085a199edd20/23_pdp_fl.pdf},
    bdsk-url-1 = {https://iris.unito.it/retrieve/491e22ec-3db5-4989-a063-085a199edd20/23_pdp_fl.pdf}
    }

  • S. G. Contaldo, L. Alessandri, I. Colonnelli, M. Beccuti, and M. Aldinucci, "Bringing Cell Subpopulation Discovery on a Cloud-HPC Using rCASC and StreamFlow," in Single Cell Transcriptomics: Methods and Protocols, R. A. Calogero and V. Benes, Eds., New York, NY: Springer US, 2023, p. 337–345. doi:10.1007/978-1-0716-2756-3_17
    [BibTeX] [Abstract] [Download PDF]

    The idea behind novel single-cell RNA sequencing (scRNA-seq) pipelines is to isolate single cells through microfluidic approaches and generate sequencing libraries in which the transcripts are tagged to track their cell of origin. Modern scRNA-seq platforms are capable of analyzing up to many thousands of cells in each run. Then, combined with massive high-throughput sequencing producing billions of reads, scRNA-seq allows the assessment of fundamental biological properties of cell populations and biological systems at unprecedented resolution.

    @incollection{Contaldo2023,
    title = {Bringing Cell Subpopulation Discovery on a Cloud-{HPC} Using {rCASC} and {StreamFlow}},
    author = {Contaldo, Sandro Gepiro and Alessandri, Luca and Colonnelli, Iacopo and Beccuti, Marco and Aldinucci, Marco},
    year = {2023},
    booktitle = {Single Cell Transcriptomics: Methods and Protocols},
    publisher = {Springer {US}},
    address = {New York, NY},
    pages = {337--345},
    doi = {10.1007/978-1-0716-2756-3_17},
    isbn = {978-1-0716-2756-3},
    abstract = {The idea behind novel single-cell RNA sequencing (scRNA-seq) pipelines is to isolate single cells through microfluidic approaches and generate sequencing libraries in which the transcripts are tagged to track their cell of origin. Modern scRNA-seq platforms are capable of analyzing up to many thousands of cells in each run. Then, combined with massive high-throughput sequencing producing billions of reads, scRNA-seq allows the assessment of fundamental biological properties of cell populations and biological systems at unprecedented resolution.},
    editor = {Calogero, Raffaele Adolfo and Benes, Vladimir},
    url = {https://datacloud.di.unito.it/index.php/s/KMfKo4m7GTGdZmF},
    bdsk-url-1 = {https://doi.org/10.1007/978-1-0716-2756-3_17},
    keywords = {streamflow}
    }

  • S. Fonio, "Benchmarking Federated Learning Frameworks for Medical Imaging Tasks," in Image Analysis and Processing - ICIAP 2023 - 22nd International Conference - FedMed, Udine, Italy, 2023.
    [BibTeX] [Abstract] [Download PDF]

    This paper presents a comprehensive benchmarking study of various Federated Learning (FL) frameworks applied to the task of Medical Image Classification. The research specifically addresses the often neglected and complex aspects of scalability and usability in off-the-shelf FL frameworks. Through experimental validation using real case deployments, we provide empirical evidence of the performance and practical relevance of open source FL frameworks. Our findings contribute valuable insights for anyone interested in deploying a FL system, with a particular focus on the healthcare domain—an increasingly attractive field for FL applications.

    @inproceedings{23:iciap:fedmed:ws:fonio,
    title = {Benchmarking Federated Learning Frameworks for Medical Imaging Tasks},
    author = {Fonio, Samuele},
    year = {2023},
    month = sep,
    booktitle = {Image Analysis and Processing - {ICIAP} 2023 - 22nd International Conference - FedMed},
    publisher = {Springer LNCS},
    address = {Udine, Italy},
    note = {In print},
    abstract = {This paper presents a comprehensive benchmarking study of various Federated Learning (FL) frameworks applied to the task of Medical Image Classification. The research specifically addresses the often neglected and complex aspects of scalability and usability in off-the-shelf FL frameworks. Through experimental validation using real case deployments, we provide empirical evidence of the performance and practical relevance of open source FL frameworks. Our findings contribute valuable insights for anyone interested in deploying a FL system, with a particular focus on the healthcare domain—an increasingly attractive field for FL applications.},
    url = {https://iris.unito.it/retrieve/c6be8be7-3980-4c4c-874e-68b6fd855ebc/FedMed23-3.pdf},
    keywords = {icsc, eupilot}
    }

  • W. Fornaciari, F. Reghenzani, F. Terraneo, D. Baroffio, C. Metra, M. Omana, J. R. E. Condia, M. S. Reorda, R. Birke, I. Colonnelli, G. Mittone, M. Aldinucci, G. Mencagli, F. Iannone, F. Palombi, G. Zummo, D. Cesarini, and F. Tesser, "RISC-V-based Platforms for HPC: Analyzing Non-functional Properties for Future HPC and Big-Data Clusters," in Embedded Computer Systems: Architectures, Modeling, and Simulation - 23rd International Conference, SAMOS 2023, Samos, Greece, 2023. doi:10.1007/978-3-031-46077-7_26
    [BibTeX] [Abstract] [Download PDF]

    High-Performance Computing (HPC) have evolved to be used to perform simulations of systems where physical experimentation is prohibitively impractical, expensive, or dangerous. This paper provides a general overview and showcases the analysis of non-functional properties in RISC-V-based platforms for HPCs. In particular, our analyses target the evaluation of power and energy control, thermal management, and reliability assessment of promising systems, structures, and technologies devised for current and future generation of HPC machines. The main set of design methodologies and technologies developed within the activities of the Future and HPC & Big Data spoke of the National Centre of HPC, Big Data and Quantum Computing project are described along with the description of the testbed for experimenting two-phase cooling approaches.

    @inproceedings{23:SAMOS,
    title = {{RISC-V}-based Platforms for {HPC}: Analyzing Non-functional Properties for Future {HPC} and {Big-Data} Clusters},
    author = {William Fornaciari and Federico Reghenzani and Federico Terraneo and Davide Baroffio and Cecilia Metra and Martin Omana and Josie E. Rodriguez Condia and Matteo Sonza Reorda and Robert Birke and Iacopo Colonnelli and Gianluca Mittone and Marco Aldinucci and Gabriele Mencagli and Francesco Iannone and Filippo Palombi and Giuseppe Zummo and Daniele Cesarini and Federico Tesser},
    year = {2023},
    booktitle = {Embedded Computer Systems: Architectures, Modeling, and Simulation - 23rd International Conference, {SAMOS} 2023},
    address = {Samos, Greece},
    doi = {10.1007/978-3-031-46077-7_26},
    keywords = {icsc},
    abstract = {High-Performance Computing (HPC) have evolved to be used to perform simulations of systems where physical experimentation is prohibitively impractical, expensive, or dangerous. This paper provides a general overview and showcases the analysis of non-functional properties in RISC-V-based platforms for HPCs. In particular, our analyses target the evaluation of power and energy control, thermal management, and reliability assessment of promising systems, structures, and technologies devised for current and future generation of HPC machines. The main set of design methodologies and technologies developed within the activities of the Future and HPC \& Big Data spoke of the National Centre of HPC, Big Data and Quantum Computing project are described along with the description of the testbed for experimenting two-phase cooling approaches.},
    url = {https://iris.unito.it/retrieve/b627eab0-3aa1-4fd7-8685-f47c62c792b3/SAMOS_2023_CN_HPC_FL1.pdf},
    bdsk-url-1 = {https://iris.unito.it/retrieve/b627eab0-3aa1-4fd7-8685-f47c62c792b3/SAMOS_2023_CN_HPC_FL1.pdf}
    }

  • A. Antelmi, L. La Cava, and A. Pera, "Finding Hidden Swingers in the 2022 Italian Elections Twitter Discourse," in The 12th International Conference on Complex Networks and their Applications-Book of Abstracts, 2023.
    [BibTeX] [Abstract] [Download PDF]

    The volume of the Italian online political discourse on social media has recently increased, but the coverage level does not compare with other Countries such as the US. Nonetheless, researchers focused on studying polarization and homophily with respect to political debates or investigating the role of populism in online engagement. In this research landscape, the analysis of political preference shifts through social media remains to be explored. We aim to bridge this gap by examining the Twitter discourse during the 2022 Italian general elections, with a specific emphasis on political "swingers". In particular, our findings indicate a stable political discourse in Italy, yet they also uncover a growing presence of political swingers willing to shift their support to significantly different factions.

    @inproceedings{Antelmi_CNA_2023,
    title = {Finding Hidden Swingers in the 2022 Italian Elections Twitter Discourse},
    author = {Antelmi, Alessia and La Cava, Luca and Pera, Arianna},
    year = {2023},
    booktitle = {The 12th International Conference on Complex Networks and their Applications-Book of Abstracts},
    abstract = {The volume of the Italian online political discourse on social media has recently increased, but the coverage level does not compare with other Countries such as the US. Nonetheless, researchers focused on studying polarization and homophily with respect to political debates or investigating the role of populism in online engagement. In this research landscape, the analysis of political preference shifts through social media remains to be explored. We aim to bridge this gap by examining the Twitter discourse during the 2022 Italian general elections, with a specific emphasis on political ``swingers''. In particular, our findings indicate a stable political discourse in Italy, yet they also uncover a growing presence of political swingers willing to shift their support to significantly different factions.},
    url = {https://iris.unito.it/bitstream/2318/1949354/1/_CNA__23__TweetYourMind.pdf},
    bdsk-url-1 = {https://iris.unito.it/bitstream/2318/1949354/1/_CNA__23__TweetYourMind.pdf},
    keywords = {icsc, analytics}
    }

  • G. Audrito, A. R. Martinelli, and G. Torta, "Parallelising an Aggregate Programming Framework with Message-Passing Interface," in 2023 IEEE International Conference on Autonomic Computing and Self-Organizing Systems Companion (ACSOS-C), 2023, p. 140–145. doi:10.1109/ACSOS-C58168.2023.00054
    [BibTeX]
    @inproceedings{23:acsos:fcppmpi,
    title = {Parallelising an Aggregate Programming Framework with Message-Passing Interface},
    author = {Giorgio Audrito and Alberto Riccardo Martinelli and Gianluca Torta},
    year = {2023},
    booktitle = {2023 IEEE International Conference on Autonomic Computing and Self-Organizing Systems Companion (ACSOS-C)},
    pages = {140--145},
    doi = {10.1109/ACSOS-C58168.2023.00054},
    keywords = {parallel}
    }

  • A. Antelmi, M. Torquati, D. Gregori, F. Polzella, G. Spinatelli, and M. Aldinucci, "The SWH-Analytics Framework," in Proceedings of the 2nd Italian Conference on Big Data and Data Science (ITADATA 2023), Naples, Italy, September 11-13, 2023, 2023.
    [BibTeX] [Abstract] [Download PDF]

    The Software Heritage (SWH) dataset serves as a vast repository for open-source code, with the ambitious goal of preserving all publicly available open-source projects. Despite being designed to effectively archive project files, its size of nearly 1 petabyte presents challenges in efficiently supporting Big Data MapReduce or AI systems. To address this disparity and enable seamless custom analytics on the SWH dataset, we present the SWH-Analytics (SWHA) architecture. This development environment quickly and transparently runs custom analytic applications on open-source software data preserved over time by SWH.

    @inproceedings{Antelmi_ITADATA_2023,
    title = {The {SWH}-Analytics Framework},
    author = {Antelmi, A. and Torquati, M. and Gregori, D. and Polzella, F. and Spinatelli, G. and Aldinucci, M.},
    year = {2023},
    booktitle = {Proceedings of the 2nd Italian Conference on Big Data and Data Science ({ITADATA} 2023), Naples, Italy, September 11-13, 2023},
    publisher = {CEUR-WS.org},
    series = {{CEUR} Workshop Proceedings},
    volume = {3606},
    editor = {Nicola Bena and Beniamino Di Martino and Antonio Maratea and Alessandro Sperduti and Emanuel Di Nardo and Angelo Ciaramella and Raffaele Montella and Claudio A. Ardagna},
    url = {https://ceur-ws.org/Vol-3606/paper76.pdf},
    bdsk-url-1 = {https://ceur-ws.org/Vol-3606/paper76.pdf},
    abstract = {The Software Heritage (SWH) dataset serves as a vast repository for open-source code, with the ambitious goal of preserving all publicly available open-source projects. Despite being designed to effectively archive project files, its size of nearly 1 petabyte presents challenges in efficiently supporting Big Data MapReduce or AI systems. To address this disparity and enable seamless custom analytics on the SWH dataset, we present the SWH-Analytics (SWHA) architecture. This development environment quickly and transparently runs custom analytic applications on open-source software data preserved over time by SWH.},
    keywords = {admire, icsc, analytics}
    }

  • I. Colonnelli, "Workflow Models for Heterogeneous Distributed Systems," in Proceedings of the 2nd Italian Conference on Big Data and Data Science (ITADATA 2023), Naples, Italy, September 11-13, 2023, 2023.
    [BibTeX] [Abstract] [Download PDF]

    This article introduces a novel hybrid workflow abstraction that injects topology awareness directly into the definition of a distributed workflow model. In particular, the article briefly discusses the advantages brought by this approach to the design and orchestration of large-scale data-oriented workflows, the current level of support from state-of-the-art workflow systems, and some future research directions.

    @inproceedings{23:colonnelli:itadata,
    title = {Workflow Models for Heterogeneous Distributed Systems},
    author = {Iacopo Colonnelli},
    year = {2023},
    booktitle = {Proceedings of the 2nd Italian Conference on Big Data and Data Science ({ITADATA} 2023), Naples, Italy, September 11-13, 2023},
    publisher = {CEUR-WS.org},
    series = {{CEUR} Workshop Proceedings},
    volume = {3606},
    editor = {Nicola Bena and Beniamino Di Martino and Antonio Maratea and Alessandro Sperduti and Emanuel Di Nardo and Angelo Ciaramella and Raffaele Montella and Claudio A. Ardagna},
    url = {https://ceur-ws.org/Vol-3606/invited77.pdf},
    bdsk-url-1 = {https://ceur-ws.org/Vol-3606/invited77.pdf},
    abstract = {This article introduces a novel hybrid workflow abstraction that injects topology awareness directly into the definition of a distributed workflow model. In particular, the article briefly discusses the advantages brought by this approach to the design and orchestration of large-scale data-oriented workflows, the current level of support from state-of-the-art workflow systems, and some future research directions.},
    keywords = {across, eupex, icsc, streamflow, jupyter-workflow}
    }

  • B. Casella and L. Paletto, "Predicting Cryptocurrencies Market Phases through On-Chain Data Long-Term Forecasting," in Proceedings of the 2023 IEEE International Conference on Blockchain and Cryptocurrency (ICBC), 1-5 May 2023, Dubai, 2023. doi:10.1109/ICBC56567.2023.10174989
    [BibTeX] [Abstract] [Download PDF]

    Blockchain, the underlying technology of Bitcoin and several other cryptocurrencies, like Ethereum, produces a massive amount of open-access data that can be analyzed, providing important information about the network's activity and its respective token. The on-chain data have extensively been used as input to Machine Learning algorithms for predicting cryptocurrencies' future prices; however, there is a lack of study in predicting the future behaviour of on-chain data. This study aims to show how on-chain data can be used to detect cryptocurrency market regimes, like minimum and maximum, bear and bull market phases, and how forecasting these data can provide an optimal asset allocation for long-term investors.

    @inproceedings{23:casella:onchain,
    title = {Predicting Cryptocurrencies Market Phases through On-Chain Data Long-Term Forecasting},
    author = {Casella, Bruno and Paletto, Lorenzo},
    year = {2023},
    booktitle = {Proceedings of the 2023 IEEE International Conference on Blockchain and Cryptocurrency (ICBC), 1-5 May 2023, Dubai},
    doi = {10.1109/ICBC56567.2023.10174989},
    note = {https://ieeexplore.ieee.org/document/10174989},
    url = {https://iris.unito.it/bitstream/2318/1902652/1/6.%20ICBC23%20-%20PREDICTING%20BTC.pdf},
    abstract = {Blockchain, the underlying technology of Bitcoin and several other cryptocurrencies, like Ethereum, produces a massive amount of open-access data that can be analyzed, providing important information about the network's activity and its respective token. The on-chain data have extensively been used as input to Machine Learning algorithms for predicting cryptocurrencies' future prices; however, there is a lack of study in predicting the future behaviour of on-chain data. This study aims to show how on-chain data can be used to detect cryptocurrency market regimes, like minimum and maximum, bear and bull market phases, and how forecasting these data can provide an optimal asset allocation for long-term investors.},
    keywords = {icsc, epi}
    }

  • B. Casella and S. Fonio, "Architecture-Based FedAvg for Vertical Federated Learning," in Proceedings of the 3rd Workshop on Distributed Machine Learning for the Intelligent Computing Continuum (DML-ICC), IEEE/ACM UCC 2023, Taormina, Italy, 4 December 2023, 2023. doi:10.1109/ICCVW60793.2023.00362
    [BibTeX] [Abstract] [Download PDF]

    Federated Learning (FL) has emerged as a promising solution to address privacy concerns by collaboratively training Deep Learning (DL) models across distributed parties. This work proposes an architecture-based aggregation strategy in Vertical FL, where parties hold data with different attributes but shared instances. Our approach leverages the identical architectural parts, i.e. neural network layers, of different models to selectively aggregate weights, which is particularly relevant when collaborating with institutions holding different types of datasets, i.e., image, text, or tabular datasets. In a scenario where two entities train DL models, such as a Convolutional Neural Network (CNN) and a Multi-Layer Perceptron (MLP), our strategy computes the average only for architecturally identical segments. This preserves data-specific features learned from demographic and clinical data. We tested our approach on two clinical datasets, i.e., the COVID-CXR dataset and the ADNI study. Results show that our method achieves comparable results with the centralized scenario, in which all the data are collected in a single data lake, and benefits from FL generalizability. In particular, compared to the non-federated models, our proposed proof-of-concept model exhibits a slight performance loss on the COVID-CXR dataset (less than 8%), but outperforms ADNI models by up to 12%. Moreover, communication costs between training rounds are minimized by exchanging only the dense layer parameters.

    @inproceedings{23:casella:architecturalfedavg,
    title = {Architecture-Based FedAvg for Vertical Federated Learning},
    author = {Casella, Bruno and Fonio, Samuele},
    year = {2023},
    booktitle = {Proceedings of the 3rd Workshop on Distributed Machine Learning for the Intelligent Computing Continuum (DML-ICC), IEEE/ACM UCC 2023, Taormina, Italy, 4 December 2023},
    doi = {10.1109/ICCVW60793.2023.00362},
    internal-note = {NOTE(review): this DOI points to the ICCV 2023 workshop volume (ICCVW60793) and is identical to entry 23:casella:ERGANs -- presumably a copy-paste error; verify the correct DML-ICC/UCC 2023 DOI},
    note = {https://iris.unito.it/bitstream/2318/1949730/1/HALF_HVL_for_DML_ICC23___Taormina-2.pdf},
    url = {https://iris.unito.it/bitstream/2318/1949730/1/HALF_HVL_for_DML_ICC23___Taormina-2.pdf},
    keywords = {icsc, epi, ai, federated},
    abstract = {Federated Learning (FL) has emerged as a promising solution to address privacy concerns by collaboratively training Deep Learning (DL) models across distributed parties. This work proposes an architecture-based aggregation strategy in Vertical FL, where parties hold data with different attributes but shared instances. Our approach leverages the identical architectural parts, i.e. neural network layers, of different models to selectively aggregate weights, which is particularly relevant when collaborating with institutions holding different types of datasets, i.e., image, text, or tabular datasets. In a scenario where two entities train DL models, such as a Convolutional Neural Network (CNN) and a Multi-Layer Perceptron (MLP), our strategy computes the average only for architecturally identical segments. This preserves data-specific features learned from demographic and clinical data. We tested our approach on two clinical datasets, i.e., the COVID-CXR dataset and the ADNI study. Results show that our method achieves comparable results with the centralized scenario, in which all the data are collected in a single data lake, and benefits from FL generalizability. In particular, compared to the non-federated models, our proposed proof-of-concept model exhibits a slight performance loss on the COVID-CXR dataset (less than 8%), but outperforms ADNI models by up to 12%. Moreover, communication costs between training rounds are minimized by exchanging only the dense layer parameters.}
    }

  • M. Pennisi, F. Proietto Salanitri, G. Bellitto, B. Casella, M. Aldinucci, S. Palazzo, and C. Spampinato, "Experience Replay as an Effective Strategy for Optimizing Decentralized Federated Learning," in Proceedings of the 1st Workshop on Visual Continual Learning, ICCV 2023, Paris, France, 2 October 2023, 2023. doi:10.1109/ICCVW60793.2023.00362
    [BibTeX] [Abstract] [Download PDF]

    Federated and continual learning are training paradigms addressing data distribution shift in space and time. More specifically, federated learning tackles non-i.i.d data in space as information is distributed in multiple nodes, while continual learning faces with temporal aspect of training as it deals with continuous streams of data. Distribution shifts over space and time is what it happens in real federated learning scenarios that show multiple challenges. First, the federated model needs to learn sequentially while retaining knowledge from the past training rounds. Second, the model has also to deal with concept drift from the distributed data distributions. To address these complexities, we attempt to combine continual and federated learning strategies by proposing a solution inspired by experience replay and generative adversarial concepts for supporting decentralized distributed training. In particular, our approach relies on using limited memory buffers of synthetic privacy-preserving samples and interleaving training on local data and on buffer data. By translating the CL formulation into the task of integrating distributed knowledge with local knowledge, our method enables models to effectively integrate learned representation from local nodes, providing models the capability to generalize across multiple datasets.We test our integrated strategy on two realistic medical image analysis tasks — tuberculosis and melanoma classification — using multiple datasets in order to simulate realistic non-i.i.d. medical data scenarios. Results show that our approach achieves performance comparable to standard (non-federated) learning and significantly outperforms state-of-the-art federated methods in their centralized (thus, more favourable) formulation.

    @inproceedings{23:casella:ERGANs,
    title = {Experience Replay as an Effective Strategy for Optimizing Decentralized Federated Learning},
    author = {Pennisi, Matteo and Proietto Salanitri, Federica and Bellitto, Giovanni and Casella, Bruno and Aldinucci, Marco and Palazzo, Simone and Spampinato, Concetto},
    year = {2023},
    booktitle = {Proceedings of the 1st Workshop on Visual Continual Learning, ICCV 2023, Paris, France, 2 October 2023},
    doi = {10.1109/ICCVW60793.2023.00362},
    note = {https://ieeexplore.ieee.org/document/10350429},
    url = {https://openaccess.thecvf.com/content/ICCV2023W/VCL/papers/Pennisi_Experience_Replay_as_an_Effective_Strategy_for_Optimizing_Decentralized_Federated_ICCVW_2023_paper.pdf},
    keywords = {ai, federated},
    abstract = {Federated and continual learning are training paradigms addressing data distribution shift in space and time. More specifically, federated learning tackles non-i.i.d data in space as information is distributed in multiple nodes, while continual learning faces with temporal aspect of training as it deals with continuous streams of data. Distribution shifts over space and time is what it happens in real federated learning scenarios that show multiple challenges. First, the federated model needs to learn sequentially while retaining knowledge from the past training rounds. Second, the model has also to deal with concept drift from the distributed data distributions. To address these complexities, we attempt to combine continual and federated learning strategies by proposing a solution inspired by experience replay and generative adversarial concepts for supporting decentralized distributed training. In particular, our approach relies on using limited memory buffers of synthetic privacy-preserving samples and interleaving training on local data and on buffer data. By translating the CL formulation into the task of integrating distributed knowledge with local knowledge, our method enables models to effectively integrate learned representation from local nodes, providing models the capability to generalize across multiple datasets.We test our integrated strategy on two realistic medical image analysis tasks — tuberculosis and melanoma classification — using multiple datasets in order to simulate realistic non-i.i.d. medical data scenarios. Results show that our approach achieves performance comparable to standard (non-federated) learning and significantly outperforms state-of-the-art federated methods in their centralized (thus, more favourable) formulation.}
    }

  • Z. Zhao, R. Birke, and L. Y. Chen, "GDTS: GAN-based Distributed Tabular Synthesizer," in 16th IEEE International Conference on Cloud Computing (CLOUD), Chicago, USA, 2023. doi:10.1109/CLOUD60044.2023.00078
    [BibTeX] [Abstract] [Download PDF]

    Generative Adversarial Networks (GANs) are typically trained to synthesize data, from images and more recently tabular data, under the assumption of directly accessible training data. While learning image GANs on Federated Learning (FL) and Multi-Discriminator (MD) systems has just been demonstrated, it is unknown if tabular GANs can be learned from decentralized data sources. Different from image GANs, state-of-the-art tabular GANs require prior knowledge on the data distribution of each (discrete and continuous) column to agree on a common encoding – risking privacy guarantees. In this paper, we propose GDTS, a distributed framework for GAN-based tabular synthesizer. GDTS provides different system architectures to match the two training paradigms termed GDTS FL and GDTS MD. Key to enable learning on distributed data is the proposed novel privacy-preserving multi-source feature encoding to capture the global data properties. In addition GDTS encompasses a weighting strategy based on table similarity to counter the detrimental effects of non-IID data and a validation pipeline to easily assess and compare the performance of different paradigms and hyper parameters. We evaluate the effectiveness of GDTS in terms of synthetic data quality, and overall training scalability. Experiments show that GDTS FL achieves better statistical similarity and machine learning utility between generated and original data compared to GDTS MD.

    @inproceedings{23:cloud:gdts,
    title = {{GDTS}: {GAN}-based Distributed Tabular Synthesizer},
    author = {Zilong Zhao and Robert Birke and Lydia Y. Chen},
    year = {2023},
    month = jul,
    booktitle = {16th {IEEE} International Conference on Cloud Computing ({CLOUD})},
    publisher = {{IEEE}},
    address = {Chicago, USA},
    doi = {10.1109/CLOUD60044.2023.00078},
    abstract = {Generative Adversarial Networks (GANs) are typically trained to synthesize data, from images and more recently tabular data, under the assumption of directly accessible training data. While learning image GANs on Federated Learning (FL) and Multi-Discriminator (MD) systems has just been demonstrated, it is unknown if tabular GANs can be learned from decentralized data sources. Different from image GANs, state-of-the-art tabular GANs require prior knowledge on the data distribution of each (discrete and continuous) column to agree on a common encoding – risking privacy guarantees. In this paper, we propose GDTS, a distributed framework for GAN-based tabular synthesizer. GDTS provides different system architectures to match the two training paradigms termed GDTS FL and GDTS MD. Key to enable learning on distributed data is the proposed novel privacy-preserving multi-source feature encoding to capture the global data properties. In addition GDTS encompasses a weighting strategy based on table similarity to counter the detrimental effects of non-IID data and a validation pipeline to easily assess and compare the performance of different paradigms and hyper parameters. We evaluate the effectiveness of GDTS in terms of synthetic data quality, and overall training scalability. Experiments show that GDTS FL achieves better statistical similarity and machine learning utility between generated and original data compared to GDTS MD.},
    keywords = {ai},
    url = {https://iris.unito.it/retrieve/8bc610de-3ccd-4a0a-b97f-ee329e487b76/GDTS_IEEE_CLOUD_preprint.pdf}
    }

  • A. R. Martinelli, M. Torquati, M. Aldinucci, I. Colonnelli, and B. Cantalupo, "CAPIO: a Middleware for Transparent I/O Streaming in Data-Intensive Workflows," in 2023 IEEE 30th International Conference on High Performance Computing, Data, and Analytics (HiPC), Goa, India, 2023. doi:10.1109/HiPC58850.2023.00031
    [BibTeX] [Abstract] [Download PDF]

    With the increasing amount of digital data available for analysis and simulation, the class of I/O-intensive HPC workflows is fated to quickly expand, further exacerbating the performance gap between computing, memory, and storage technologies. This paper introduces CAPIO (Cross-Application Programmable I/O), a middleware capable of injecting I/O streaming capabilities into file-based workflows, improving the computation-I/O overlap without the need to change the application code. The contribution is twofold: 1) at design time, a new I/O coordination language allows users to annotate workflow data dependencies with synchronization semantics; 2) at run time, a user-space middleware automatically and transparently to the user turns a workflow batch execution into a streaming execution according to the semantics expressed in the configuration file. CAPIO has been tested on synthetic benchmarks simulating typical workflow I/O patterns and two real-world workflows. Experiments show that CAPIO reduces the execution time by 10\% to 66\% for data-intensive workflows that use the file system as a communication medium.

    @inproceedings{23:hipc:capio,
    author = {Martinelli, Alberto Riccardo and Torquati, Massimo and Aldinucci, Marco and Colonnelli, Iacopo and Cantalupo, Barbara},
    title = {{CAPIO}: a Middleware for Transparent I/O Streaming in Data-Intensive Workflows},
    booktitle = {2023 IEEE 30th International Conference on High Performance Computing, Data, and Analytics (HiPC)},
    publisher = {{IEEE}},
    address = {Goa, India},
    year = {2023},
    month = dec,
    doi = {10.1109/HiPC58850.2023.00031},
    url = {https://iris.unito.it/retrieve/27380f37-0978-409e-a9d8-2b5e95a4bb85/CAPIO-HiPC23-preprint.pdf},
    keywords = {admire, eupex, icsc, capio},
    abstract = {With the increasing amount of digital data available for analysis and simulation, the class of I/O-intensive HPC workflows is fated to quickly expand, further exacerbating the performance gap between computing, memory, and storage technologies. This paper introduces CAPIO (Cross-Application Programmable I/O), a middleware capable of injecting I/O streaming capabilities into file-based workflows, improving the computation-I/O overlap without the need to change the application code. The contribution is twofold: 1) at design time, a new I/O coordination language allows users to annotate workflow data dependencies with synchronization semantics; 2) at run time, a user-space middleware automatically and transparently to the user turns a workflow batch execution into a streaming execution according to the semantics expressed in the configuration file. CAPIO has been tested on synthetic benchmarks simulating typical workflow I/O patterns and two real-world workflows. Experiments show that CAPIO reduces the execution time by 10\% to 66\% for data-intensive workflows that use the file system as a communication medium.}
    }

  • M. Pennisi, F. Proietto Salanitri, G. Bellitto, B. Casella, M. Aldinucci, S. Palazzo, and C. Spampinato, "FedER: Federated Learning through Experience Replay and Privacy-Preserving Data Synthesis," Computer Vision and Image Understanding, 2023. doi:10.1016/j.cviu.2023.103882
    [BibTeX] [Abstract] [Download PDF]

    In the medical field, multi-center collaborations are often sought to yield more generalizable findings by leveraging the heterogeneity of patient and clinical data. However, recent privacy regulations hinder the possibility to share data, and consequently, to come up with machine learning-based solutions that support diagnosis and prognosis. Federated learning (FL) aims at sidestepping this limitation by bringing AI-based solutions to data owners and only sharing local AI models, or parts thereof, that need then to be aggregated. However, most of the existing federated learning solutions are still at their infancy and show several shortcomings, from the lack of a reliable and effective aggregation scheme able to retain the knowledge learned locally to weak privacy preservation as real data may be reconstructed from model updates. Furthermore, the majority of these approaches, especially those dealing with medical data, relies on a centralized distributed learning strategy that poses robustness, scalability and trust issues. In this paper we present a federated and decentralized learning strategy, FedER, that, exploiting experience replay and generative adversarial concepts, effectively integrates features from local nodes, providing models able to generalize across multiple datasets while maintaining privacy. FedER is tested on two tasks — tuberculosis and melanoma classification — using multiple datasets in order to simulate realistic non-i.i.d. medical data scenarios. Results show that our approach achieves performance comparable to standard (non-federated) learning and significantly outperforms state-of-the-art federated methods in their centralized (thus, more favourable) formulation. Code is available at https://github.com/perceivelab/FedER

    @article{23:casella:FedER,
    title = {{FedER}: Federated Learning through Experience Replay and Privacy-Preserving Data Synthesis},
    author = {Pennisi, Matteo and Proietto Salanitri, Federica and Bellitto, Giovanni and Casella, Bruno and Aldinucci, Marco and Palazzo, Simone and Spampinato, Concetto},
    year = {2023},
    journal = {Computer Vision and Image Understanding},
    doi = {10.1016/j.cviu.2023.103882},
    note = {https://www.sciencedirect.com/science/article/pii/S107731422300262X?via\%3Dihub},
    institution = {Computer Science Department, University of Torino},
    url = {https://www.sciencedirect.com/science/article/pii/S107731422300262X?via%3Dihub},
    keywords = {ai, federated},
    abstract = {In the medical field, multi-center collaborations are often sought to yield more generalizable findings by leveraging the heterogeneity of patient and clinical data. However, recent privacy regulations hinder the possibility to share data, and consequently, to come up with machine learning-based solutions that support diagnosis and prognosis. Federated learning (FL) aims at sidestepping this limitation by bringing AI-based solutions to data owners and only sharing local AI models, or parts thereof, that need then to be aggregated. However, most of the existing federated learning solutions are still at their infancy and show several shortcomings, from the lack of a reliable and effective aggregation scheme able to retain the knowledge learned locally to weak privacy preservation as real data may be reconstructed from model updates. Furthermore, the majority of these approaches, especially those dealing with medical data, relies on a centralized distributed learning strategy that poses robustness, scalability and trust issues. In this paper we present a federated and decentralized learning strategy, FedER, that, exploiting experience replay and generative adversarial concepts, effectively integrates features from local nodes, providing models able to generalize across multiple datasets while maintaining privacy. FedER is tested on two tasks — tuberculosis and melanoma classification — using multiple datasets in order to simulate realistic non-i.i.d. medical data scenarios. Results show that our approach achieves performance comparable to standard (non-federated) learning and significantly outperforms state-of-the-art federated methods in their centralized (thus, more favourable) formulation. Code is available at https://github.com/perceivelab/FedER}
    }

  • P. Ângelo, V. Bono, M. Dezani-Ciancaglini, and M. Florido, "Gradual Guarantee for FJ with lambda-Expressions," in Proceedings of the 25th ACM International Workshop on Formal Techniques for Java-like Programs, FTfJP 2023, Seattle, WA, USA, 18 July 2023, 2023, p. 32–38. doi:10.1145/3605156.3606453
    [BibTeX] [Download PDF]
    @inproceedings{DBLP:conf/ftfjp/AngeloBDF23,
    author = {Pedro {\^{A}}ngelo and Viviana Bono and Mariangiola Dezani{-}Ciancaglini and M{\'{a}}rio Florido},
    editor = {Aaron Tomb},
    title = {Gradual Guarantee for {FJ} with lambda-Expressions},
    booktitle = {Proceedings of the 25th {ACM} International Workshop on Formal Techniques for Java-like Programs, FTfJP 2023, Seattle, WA, USA, 18 July 2023},
    pages = {32--38},
    publisher = {{ACM}},
    year = {2023},
    doi = {10.1145/3605156.3606453},
    url = {https://doi.org/10.1145/3605156.3606453},
    keywords = {admire, icsc},
    timestamp = {Mon, 17 Jul 2023 13:10:51 +0200},
    biburl = {https://dblp.org/rec/conf/ftfjp/AngeloBDF23.bib},
    bibsource = {dblp computer science bibliography, https://dblp.org},
    bdsk-url-1 = {https://doi.org/10.1145/3605156.3606453}
    }

  • B. Casella, W. Riviera, M. Aldinucci, and G. Menegaz, "MERGE: A model for multi-input biomedical federated learning," Patterns, p. 100856, 2023. doi:10.1016/j.patter.2023.100856
    [BibTeX] [Abstract] [Download PDF]

    Driven by the deep learning (DL) revolution, artificial intelligence (AI) has become a fundamental tool for many biomedical tasks, including analyzing and classifying diagnostic images. Imaging, however, is not the only source of information. Tabular data, such as personal and genomic data and blood test results, are routinely collected but rarely considered in DL pipelines. Nevertheless, DL requires large datasets that often must be pooled from different institutions, raising non-trivial privacy concerns. Federated learning (FL) is a cooperative learning paradigm that aims to address these issues by moving models instead of data across different institutions. Here, we present a federated multi-input architecture using images and tabular data as a methodology to enhance model performance while preserving data privacy. We evaluated it on two showcases: the prognosis of COVID-19 and patients’ stratification in Alzheimer’s disease, providing evidence of enhanced accuracy and F1 scores against single-input models and improved generalizability against non-federated models.

    @article{23:fl:patterns,
    title = {{MERGE}: A model for multi-input biomedical federated learning},
    author = {Bruno Casella and Walter Riviera and Marco Aldinucci and Gloria Menegaz},
    year = {2023},
    journal = {Patterns},
    pages = {100856},
    doi = {10.1016/j.patter.2023.100856},
    issn = {2666-3899},
    url = {https://www.sciencedirect.com/science/article/pii/S2666389923002404},
    keywords = {icsc, epi, ai, federated},
    abstract = {Driven by the deep learning (DL) revolution, artificial intelligence (AI) has become a fundamental tool for many biomedical tasks, including analyzing and classifying diagnostic images. Imaging, however, is not the only source of information. Tabular data, such as personal and genomic data and blood test results, are routinely collected but rarely considered in DL pipelines. Nevertheless, DL requires large datasets that often must be pooled from different institutions, raising non-trivial privacy concerns. Federated learning (FL) is a cooperative learning paradigm that aims to address these issues by moving models instead of data across different institutions. Here, we present a federated multi-input architecture using images and tabular data as a methodology to enhance model performance while preserving data privacy. We evaluated it on two showcases: the prognosis of COVID-19 and patients’ stratification in Alzheimer’s disease, providing evidence of enhanced accuracy and F1 scores against single-input models and improved generalizability against non-federated models.}
    }

  • M. Aldinucci, E. M. Baralis, V. Cardellini, I. Colonnelli, M. Danelutto, S. Decherchi, G. D. Modica, L. Ferrucci, M. Gribaudo, F. Iannone, M. Lapegna, D. Medic, G. Muscianisi, F. Righetti, E. Sciacca, N. Tonellotto, M. Tortonesi, P. Trunfio, and T. Vardanega, "A Systematic Mapping Study of Italian Research on Workflows," in Proceedings of the SC '23 Workshops of The International Conference on High Performance Computing, Network, Storage, and Analysis, SC-W 2023, Denver, CO, USA, 2023, p. 2065–2076. doi:10.1145/3624062.3624285
    [BibTeX] [Abstract] [Download PDF]

    An entire ecosystem of methodologies and tools revolves around scientific workflow management. They cover crucial non-functional requirements that standard workflow models fail to target, such as interactive execution, energy efficiency, performance portability, Big Data management, and intelligent orchestration in the Computing Continuum. Characterizing and monitoring this ecosystem is crucial to develop an informed view of current and future research directions. This work conducts a systematic mapping study of the Italian workflow research community, collecting and analyzing 25 tools and 10 applications from several scientific domains in the context of the ``National Research Centre for HPC, Big Data, and Quantum Computing'' (ICSC). The study aims to outline the main current research directions and determine how they address the critical needs of modern scientific applications. The findings highlight a variegated research ecosystem of tools, with a prominent interest in advanced workflow orchestration and still immature but promising efforts toward energy efficiency.

    @inproceedings{WORKS2023,
    title = {A Systematic Mapping Study of Italian Research on Workflows},
    author = {Aldinucci, Marco and Baralis, Elena Maria and Cardellini, Valeria and Colonnelli, Iacopo and Danelutto, Marco and Decherchi, Sergio and Di Modica, Giuseppe and Ferrucci, Luca and Gribaudo, Marco and Iannone, Francesco and Lapegna, Marco and Medic, Doriana and Muscianisi, Giuseppa and Righetti, Francesca and Sciacca, Eva and Tonellotto, Nicola and Tortonesi, Mauro and Trunfio, Paolo and Vardanega, Tullio},
    year = {2023},
    month = nov,
    booktitle = {Proceedings of the {SC} '23 Workshops of The International Conference on High Performance Computing, Network, Storage, and Analysis, {SC-W} 2023},
    publisher = {{ACM}},
    address = {Denver, CO, USA},
    pages = {2065--2076},
    doi = {10.1145/3624062.3624285},
    abstract = {An entire ecosystem of methodologies and tools revolves around scientific workflow management. They cover crucial non-functional requirements that standard workflow models fail to target, such as interactive execution, energy efficiency, performance portability, Big Data management, and intelligent orchestration in the Computing Continuum. Characterizing and monitoring this ecosystem is crucial to develop an informed view of current and future research directions. This work conducts a systematic mapping study of the Italian workflow research community, collecting and analyzing 25 tools and 10 applications from several scientific domains in the context of the ``National Research Centre for HPC, Big Data, and Quantum Computing'' (ICSC). The study aims to outline the main current research directions and determine how they address the critical needs of modern scientific applications. The findings highlight a variegated research ecosystem of tools, with a prominent interest in advanced workflow orchestration and still immature but promising efforts toward energy efficiency.},
    keywords = {streamflow, jupyter-workflow, icsc},
    url = {https://doi.org/10.1145/3624062.3624285}
    }

  • J. Garcia-Blas, G. Sanchez-Gallegos, C. Petre, A. R. Martinelli, M. Aldinucci, and J. Carretero, "Hercules: Scalable and Network Portable In-Memory Ad-Hoc File System for Data-Centric and High-Performance Applications," in Euro-Par 2023: Parallel Processing, Cham, 2023, p. 679–693.
    [BibTeX] [Abstract]

    The growing demands for data processing by new data-intensive applications are putting pressure on the performance and capacity of HPC storage systems. The advancement in storage technologies, such as NVMe and persistent memory, are aimed at meeting these demands. However, relying solely on ultra-fast storage devices is not cost-effective, leading to the need for multi-tier storage hierarchies to move data based on its usage. To address this issue, ad-hoc file systems have been proposed as a solution. They utilise the available storage of compute nodes, such as memory and persistent storage, to create a temporary file system that adapts to the application behaviour in the HPC environment. This work presents the design, implementation, and evaluation of a distributed ad-hoc in-memory storage system (Hercules), highlighting the new communication model included in Hercules. This communication model takes advantage of the Unified Communication X framework (UCX). This solution leverages the capabilities of RDMA protocols, including Infiniband, Omni-Path, shared memory, and zero-copy transfers. The preliminary evaluation results show excellent network utilisation compared with other existing technologies.

    @inproceedings{10.1007/978-3-031-39698-4_46,
    title = {Hercules: Scalable and Network Portable In-Memory Ad-Hoc File System for Data-Centric and High-Performance Applications},
    author = {Garcia-Blas, Javier and Sanchez-Gallegos, Genaro and Petre, Cosmin and Martinelli, Alberto Riccardo and Aldinucci, Marco and Carretero, Jesus},
    year = {2023},
    booktitle = {Euro-Par 2023: Parallel Processing},
    publisher = {Springer Nature Switzerland},
    address = {Cham},
    pages = {679--693},
    isbn = {978-3-031-39698-4},
    doi = {10.1007/978-3-031-39698-4_46},
    abstract = {The growing demands for data processing by new data-intensive applications are putting pressure on the performance and capacity of HPC storage systems. The advancement in storage technologies, such as NVMe and persistent memory, are aimed at meeting these demands. However, relying solely on ultra-fast storage devices is not cost-effective, leading to the need for multi-tier storage hierarchies to move data based on its usage. To address this issue, ad-hoc file systems have been proposed as a solution. They utilise the available storage of compute nodes, such as memory and persistent storage, to create a temporary file system that adapts to the application behaviour in the HPC environment. This work presents the design, implementation, and evaluation of a distributed ad-hoc in-memory storage system (Hercules), highlighting the new communication model included in Hercules. This communication model takes advantage of the Unified Communication X framework (UCX). This solution leverages the capabilities of RDMA protocols, including Infiniband, Onmipath, shared memory, and zero-copy transfers. The preliminary evaluation results show excellent network utilisation compared with other existing technologies.},
    editor = {Cano, Jos{\'e} and Dikaiakos, Marios D. and Papadopoulos, George A. and Peric{\`a}s, Miquel and Sakellariou, Rizos}
    }

  • J. Carretero, J. Garcia-Blas, M. Aldinucci, J. B. B. Besnard, J. Acquaviva, A. Brinkmann, M. Vef, E. Jeannot, A. Miranda, R. Nou, M. Riedel, M. Torquati, and F. Wolf, "Adaptive multi-tier intelligent data manager for Exascale," in 20th ACM International Conference on Computing Frontiers (CF '23), Bologna, Italy, 2023. doi:10.1145/3587135.3592174
    [BibTeX] [Abstract] [Download PDF]

    The main objective of the ADMIRE project1 is the creation of an active I/O stack that dynamically adjusts computation and storage requirements through intelligent global coordination, the elasticity of computation and I/O, and the scheduling of storage resources along all levels of the storage hierarchy, while offering quality-of-service (QoS), energy efficiency, and resilience for accessing extremely large data sets in very heterogeneous computing and storage environments. We have developed a framework prototype that is able to dynamically adjust computation and storage requirements through intelligent global coordination, separated control, and data paths, the malleability of computation and I/O, the scheduling of storage resources along all levels of the storage hierarchy, and scalable monitoring techniques. The leading idea in ADMIRE is to co-design applications with ad-hoc storage systems that can be deployed with the application and adapt their computing and I/O behaviour on runtime, using malleability techniques, to increase the performance of applications and the throughput of the applications.

    @inproceedings{23:admire:cf,
    title = {Adaptive multi-tier intelligent data manager for Exascale},
    author = {Jesus Carretero and Javier Garcia-Blas and Marco Aldinucci and Besnard, Jean-Baptiste and Jean-Thomas Acquaviva and Andr{\'e} Brinkmann and Marc-Andr{\'e} Vef and Emmanuel Jeannot and Alberto Miranda and Ramon Nou and Morris Riedel and Massimo Torquati and Felix Wolf},
    year = {2023},
    month = may,
    booktitle = {20th {ACM} International Conference on Computing Frontiers ({CF} '23)},
    publisher = {{ACM}},
    address = {Bologna, Italy},
    doi = {10.1145/3587135.3592174},
    abstract = {The main objective of the ADMIRE project1 is the creation of an active I/O stack that dynamically adjusts computation and storage requirements through intelligent global coordination, the elasticity of computation and I/O, and the scheduling of storage resources along all levels of the storage hierarchy, while offering quality-of-service (QoS), energy efficiency, and resilience for accessing extremely large data sets in very heterogeneous computing and storage environments. We have developed a framework prototype that is able to dynamically adjust computation and storage requirements through intelligent global coordination, separated control, and data paths, the malleability of computation and I/O, the scheduling of storage resources along all levels of the storage hierarchy, and scalable monitoring techniques. The leading idea in ADMIRE is to co-design applications with ad-hoc storage systems that can be deployed with the application and adapt their computing and I/O behaviour on runtime, using malleability techniques, to increase the performance of applications and the throughput of the applications.},
    date-added = {2023-03-14 15:34:00 +0000},
    url = {https://dl.acm.org/doi/pdf/10.1145/3587135.3592174},
    bdsk-url-1 = {https://dl.acm.org/doi/pdf/10.1145/3587135.3592174},
    bdsk-url-2 = {https://doi.org/10.1145/3587135.3592174},
    keywords = {admire}
    }

  • Z. Zhao, R. Birke, and L. Y. Chen, "FCT-GAN: Enhancing Global Correlation of Table Synthesis via Fourier Transform," in 32nd ACM International Conference on Information and Knowledge Management (CIKM '23), Birmingham, United Kingdom, 2023. doi:10.1145/3583780.3615202
    [BibTeX] [Abstract] [Download PDF]

    An alternative method for sharing knowledge while complying with strict data access regulations, such as the European General Data Protection Regulation (GDPR), is the emergence of synthetic tabular data. Mainstream table synthesizers utilize methodologies derived from Generative Adversarial Networks (GAN). Although several state-of-the-art (SOTA) tabular GAN algorithms inherit Convolutional Neural Network (CNN)-based architectures, which have proven effective for images, they tend to overlook two critical properties of tabular data: (i) the global correlation across columns, and (ii) the semantic invariance to the column order. Permuting columns in a table does not alter the semantic meaning of the data, but features extracted by CNNs can change significantly due to their limited convolution filter kernel size. To address the above problems, we propose FCT-GAN– the first conditional tabular GAN to adopt Fourier networks into table synthesis. FCT-GAN enhances permutation invariant GAN training by strengthening the learning of global correlations via Fourier layers. Extensive evaluation on benchmarks and real-world datasets show that FCT-GAN can synthesize tabular data with better (up to 27.8%) machine learning utility (i.e. a proxy of global correlations) and higher (up to 26.5%) statistical similarity to real data. FCT-GAN also has the least variation on synthetic data quality among 7 SOTA baselines on 3 different training-data column orders.

    @inproceedings{23:zhao:fctgan,
    title = {{FCT-GAN}: Enhancing Global Correlation of Table Synthesis via {Fourier} Transform},
    author = {Zhao, Zilong and Birke, Robert and Chen, Lydia Y.},
    year = {2023},
    month = oct,
    booktitle = {32nd {ACM} International Conference on Information and Knowledge Management ({CIKM '23})},
    publisher = {{ACM}},
    address = {Birmingham, United Kingdom},
    doi = {10.1145/3583780.3615202},
    abstract = {An alternative method for sharing knowledge while complying with strict data access regulations, such as the European General Data Protection Regulation (GDPR), is the emergence of synthetic tabular data. Mainstream table synthesizers utilize methodologies derived from Generative Adversarial Networks (GAN). Although several state-of-the-art (SOTA) tabular GAN algorithms inherit Convolutional Neural Network (CNN)-based architectures, which have proven effective for images, they tend to overlook two critical properties of tabular data: (i) the global correlation across columns, and (ii) the semantic invariance to the column order. Permuting columns in a table does not alter the semantic meaning of the data, but features extracted by CNNs can change significantly due to their limited convolution filter kernel size. To address the above problems, we propose FCT-GAN-- the first conditional tabular GAN to adopt Fourier networks into table synthesis. FCT-GAN enhances permutation invariant GAN training by strengthening the learning of global correlations via Fourier layers. Extensive evaluation on benchmarks and real-world datasets show that FCT-GAN can synthesize tabular data with better (up to 27.8%) machine learning utility (i.e. a proxy of global correlations) and higher (up to 26.5%) statistical similarity to real data. FCT-GAN also has the least variation on synthetic data quality among 7 SOTA baselines on 3 different training-data column orders.},
    url = {https://iris.unito.it/retrieve/966ba767-dbbd-41e1-b4e3-7ab7ba09303f/FCT-GAN.pdf},
    bdsk-url-1 = {https://iris.unito.it/retrieve/966ba767-dbbd-41e1-b4e3-7ab7ba09303f/FCT-GAN.pdf},
    bdsk-url-2 = {https://doi.org/10.1145/3583780.3615202},
    keywords = {icsc}
    }

  • I. Colonnelli, R. Birke, and M. Aldinucci, "Experimenting with PyTorch on RISC-V," in RISC-V Summit Europe 2023, Barcelona, Spain, 2023.
    [BibTeX] [Abstract] [Download PDF]

    RISC-V is an emerging instruction set architecture. Its modular and extensible open-source royalty-free design is increasingly attracting interest from both research and industry. Nowadays, different RISC-V-based boards can be bought off the shelf. However, software availability is equivalently vital in guaranteeing the RISC-V ecosystem's success. Here we contribute with the first publicly available port of PyTorch. PyTorch is one of the most popular Deep Learning libraries available today. As such, it is a crucial enabler in running state-of-the-art AI applications on RISC-V-based systems and a first step towards a fully democratic end-to-end codesign process.

    @inproceedings{23:risc-v-summit,
    title = {Experimenting with {PyTorch} on {RISC-V}},
    author = {Iacopo Colonnelli and Robert Birke and Marco Aldinucci},
    year = {2023},
    month = jun,
    booktitle = {{RISC-V Summit Europe 2023}},
    address = {Barcelona, Spain},
    note = {Poster},
    abstract = {RISC-V is an emerging instruction set architecture. Its modular and extensible open-source royalty-free design is increasingly attracting interest from both research and industry. Nowadays, different RISC-V-based boards can be bought off the shelf. However, software availability is equivalently vital in guaranteeing the RISC-V ecosystem's success. Here we contribute with the first publicly available port of PyTorch. PyTorch is one of the most popular Deep Learning libraries available today. As such, it is a crucial enabler in running state-of-the-art AI applications on RISC-V-based systems and a first step towards a fully democratic end-to-end codesign process.},
    url = {https://iris.unito.it/retrieve/429bf344-9090-42c3-809c-1b8ac320a930/2023-06-08-Iacopo-COLONNELLI-abstract.pdf},
    bdsk-url-1 = {https://iris.unito.it/retrieve/429bf344-9090-42c3-809c-1b8ac320a930/2023-06-08-Iacopo-COLONNELLI-abstract.pdf}
    }

  • V. Cesare, U. Becciani, A. Vecchiato, M. G. Lattanzi, F. Pitari, M. Aldinucci, and B. Bucciarelli, "The MPI + CUDA Gaia AVU–GSR Parallel Solver Toward Next-generation Exascale Infrastructures," Publications of the Astronomical Society of the Pacific, vol. 135, iss. 1049, 2023. doi:10.1088/1538-3873/acdf1e
    [BibTeX] [Abstract] [Download PDF]

    We ported to the GPU with CUDA the Astrometric Verification Unit–Global Sphere Reconstruction (AVU–GSR) Parallel Solver developed for the ESA Gaia mission, by optimizing a previous OpenACC porting of this application. The code aims to find, with a [10, 100] μarcsec precision, the astrometric parameters of about 10^8 stars, the attitude and instrumental settings of the Gaia satellite, and the global parameter γ of the parametrized Post-Newtonian formalism, by solving a system of linear equations, A × x = b, with the LSQR iterative algorithm. The coefficient matrix A of the final Gaia data set is large, with ∼1011 × 108 elements, and sparse, reaching a size of ∼10–100 TB, typical for the Big Data analysis, which requires an efficient parallelization to obtain scientific results in reasonable timescales. The speedup of the CUDA code over the original AVU–GSR solver, parallelized on the CPU with MPI + OpenMP, increases with the system size and the number of resources, reaching a maximum of ∼14×, >9× over the OpenACC application. This result is obtained by comparing the two codes on the CINECA cluster Marconi100, with 4 V100 GPUs per node. After verifying the agreement between the solutions of a set of systems with different sizes computed with the CUDA and the OpenMP codes and that the solutions showed the required precision, the CUDA code was put in production on Marconi100, essential for an optimal AVU–GSR pipeline and the successive Gaia Data Releases. This analysis represents a first step to understand the (pre-)Exascale behavior of a class of applications that follow the same structure of this code. In the next months, we plan to run this code on the pre-Exascale platform Leonardo of CINECA, with 4 next-generation A200 GPUs per node, toward a porting on this infrastructure, where we expect to obtain even higher performances.

    @article{23:GAIAMPI_PASP,
    title = {The {MPI + CUDA Gaia AVU--GSR} Parallel Solver Toward Next-generation Exascale Infrastructures},
    author = {Valentina Cesare and Ugo Becciani and Alberto Vecchiato and Lattanzi, Mario Gilberto and Fabio Pitari and Marco Aldinucci and Beatrice Bucciarelli},
    year = {2023},
    month = aug,
    journal = {Publications of the Astronomical Society of the Pacific},
    volume = {135},
    doi = {10.1088/1538-3873/acdf1e},
    abstract = {We ported to the GPU with CUDA the Astrometric Verification Unit--Global Sphere Reconstruction (AVU--GSR) Parallel Solver developed for the ESA Gaia mission, by optimizing a previous OpenACC porting of this application. The code aims to find, with a [10, 100] μarcsec precision, the astrometric parameters of about 10^8 stars, the attitude and instrumental settings of the Gaia satellite, and the global parameter γ of the parametrized Post-Newtonian formalism, by solving a system of linear equations, A × x = b, with the LSQR iterative algorithm. The coefficient matrix A of the final Gaia data set is large, with ∼1011 × 108 elements, and sparse, reaching a size of ∼10--100 TB, typical for the Big Data analysis, which requires an efficient parallelization to obtain scientific results in reasonable timescales. The speedup of the CUDA code over the original AVU--GSR solver, parallelized on the CPU with MPI + OpenMP, increases with the system size and the number of resources, reaching a maximum of ∼14×, >9× over the OpenACC application. This result is obtained by comparing the two codes on the CINECA cluster Marconi100, with 4 V100 GPUs per node. After verifying the agreement between the solutions of a set of systems with different sizes computed with the CUDA and the OpenMP codes and that the solutions showed the required precision, the CUDA code was put in production on Marconi100, essential for an optimal AVU--GSR pipeline and the successive Gaia Data Releases. This analysis represents a first step to understand the (pre-)Exascale behavior of a class of applications that follow the same structure of this code. In the next months, we plan to run this code on the pre-Exascale platform Leonardo of CINECA, with 4 next-generation A200 GPUs per node, toward a porting on this infrastructure, where we expect to obtain even higher performances.},
    keywords = {icsc, eupex},
    number = {1049},
    url = {https://iopscience.iop.org/article/10.1088/1538-3873/acdf1e/pdf},
    bdsk-url-1 = {https://iopscience.iop.org/article/10.1088/1538-3873/acdf1e/pdf},
    bdsk-url-2 = {https://doi.org/10.1088/1538-3873/acdf1e}
    }

  • R. Esposito, M. Polato, and M. Aldinucci, "Boosting Methods for Federated Learning," in Proc. of the 31st Italian Symposium on Advanced Database Systems, SEBD 2023, 2023, p. 439–448.
    [BibTeX] [Abstract] [Download PDF]

    Federated Learning (FL) has been proposed to develop better AI systems without compromising the privacy of final users and the legitimate interests of private companies. Initially deployed by Google to predict text input on mobile devices, FL has been deployed in many other industries. Since its introduction, Federated Learning mainly exploited the inner working of neural networks and other gradient descent-based algorithms by either exchanging the weights of the model or the gradients computed during learning. While this approach has been very successful, it rules out applying FL in contexts where other models are preferred, e.g., easier to interpret or known to work better. This paper proposes to leverage distributed versions of the AdaBoost algorithm to acquire strong federated models. In contrast with previous approaches, our proposal does not put any constraint on the client-side learning models and does not rely on inner workings of the learning algorithms used in the clients. We perform a large set of experiments on ten UCI datasets, comparing the algorithms in six non-iidness settings. Results show that the approach is effective, in the case of an IID setting, results are often near to the theoretical optimum (i.e., the performances of AdaBoost on the complete dataset). In case of non-IID settings, results very much depend on the severity of the non-IIDness.

    @inproceedings{DBLP:conf/sebd/Esposito23,
    title = {Boosting Methods for Federated Learning},
    author = {Esposito, Roberto and Polato, Mirko and Aldinucci, Marco},
    year = {2023},
    booktitle = {Proc. of the 31st Italian Symposium on Advanced Database Systems, {SEBD} 2023},
    publisher = {CEUR-WS.org},
    series = {{CEUR} Workshop Proceedings},
    volume = {3478},
    pages = {439--448},
    abstract = {Federated Learning (FL) has been proposed to develop better AI systems without compromising the privacy of final users and the legitimate interests of private companies. Initially deployed by Google to predict text input on mobile devices, FL has been deployed in many other industries. Since its introduction, Federated Learning mainly exploited the inner working of neural networks and other gradient descent-based algorithms by either exchanging the weights of the model or the gradients computed during learning. While this approach has been very successful, it rules out applying FL in contexts where other models are preferred, e.g., easier to interpret or known to work better. This paper proposes to leverage distributed versions of the AdaBoost algorithm to acquire strong federated models. In contrast with previous approaches, our proposal does not put any constraint on the client-side learning models and does not rely on inner workings of the learning algorithms used in the clients. We perform a large set of experiments on ten UCI datasets, comparing the algorithms in six non-iidness settings. Results show that the approach is effective, in the case of an IID setting, results are often near to the theoretical optimum (i.e., the performances of AdaBoost on the complete dataset). In case of non-IID settings, results very much depend on the severity of the non-IIDness.},
    editor = {Diego Calvanese and Claudia Diamantini and Nicola Ferro and Stefano Marchesin and Gianmaria Silvello and Letizia Tanca},
    url = {https://ceur-ws.org/Vol-3478/paper48.pdf},
    bdsk-url-1 = {https://iris.unito.it/retrieve/7c133509-b661-488c-9e3e-793f07fa9b70/paper-ceur.pdf},
    keywords = {eupilot}
    }

  • A. Ghiassi, R. Birke, and L. Chen, "Robust Learning via Golden Symmetric Loss of (un)Trusted Labels," in SDM '23: SIAM International Conference on Data Mining, 2023, p. 568–576. doi:10.1137/1.9781611977653.ch64
    [BibTeX] [Abstract] [Download PDF]

    Learning robust deep models against noisy labels becomes ever critical when today's data is commonly collected from open platforms and subject to adversarial corruption. The information on the label corruption process, i.e., corruption matrix, can greatly enhance the robustness of deep models but still fall behind in combating hard classes. In this paper, we propose to construct a golden symmetric loss (GSL) based on the estimated corruption matrix as to avoid overfitting to noisy labels and learn effectively from hard classes. GSL is the weighted sum of the corrected regular cross entropy and reverse cross entropy. By leveraging a small fraction of trusted clean data, we estimate the corruption matrix and use it to correct the loss as well as to determine the weights of GSL. We theoretically prove the robustness of the proposed loss function in the presence of dirty labels. We provide a heuristics to adaptively tune the loss weights of GSL according to the noise rate and diversity measured from the dataset. We evaluate our proposed golden symmetric loss on both vision and natural language deep models subject to different types of label noise patterns. Empirical results show that GSL can significantly outperform the existing robust training methods on different noise patterns, showing accuracy improvement up to 18% on CIFAR-100 and 1% on real world noisy dataset of Clothing1M.

    @inproceedings{sdm-ghiassi23,
    author     = {Amirmasoud Ghiassi and Robert Birke and Lydia Chen},
    title      = {Robust Learning via Golden Symmetric Loss of (un)Trusted Labels},
    booktitle  = {{SDM} '23: {SIAM} International Conference on Data Mining},
    year       = {2023},
    pages      = {568--576},
    doi        = {10.1137/1.9781611977653.ch64},
    url        = {https://datacloud.di.unito.it/index.php/s/b6z3moNLxnNiCxz},
    keywords   = {textrossa},
    abstract   = {Learning robust deep models against noisy labels becomes ever critical when today's data is commonly collected from open platforms and subject to adversarial corruption. The information on the label corruption process, i.e., corruption matrix, can greatly enhance the robustness of deep models but still fall behind in combating hard classes. In this paper, we propose to construct a golden symmetric loss (GSL) based on the estimated corruption matrix as to avoid overfitting to noisy labels and learn effectively from hard classes. GSL is the weighted sum of the corrected regular cross entropy and reverse cross entropy. By leveraging a small fraction of trusted clean data, we estimate the corruption matrix and use it to correct the loss as well as to determine the weights of GSL. We theoretically prove the robustness of the proposed loss function in the presence of dirty labels. We provide a heuristics to adaptively tune the loss weights of GSL according to the noise rate and diversity measured from the dataset. We evaluate our proposed golden symmetric loss on both vision and natural language deep models subject to different types of label noise patterns. Empirical results show that GSL can significantly outperform the existing robust training methods on different noise patterns, showing accuracy improvement up to 18% on CIFAR-100 and 1% on real world noisy dataset of Clothing1M.},
    bdsk-url-1 = {https://datacloud.di.unito.it/index.php/s/b6z3moNLxnNiCxz},
    bdsk-url-2 = {https://doi.org/10.1137/1.9781611977653.ch64}
    }

2022

  • Y. Zhu, Z. Zhao, R. Birke, and L. Y. Chen, "Permutation-Invariant Tabular Data Synthesis," in IEEE International Conference on Big Data (Big Data), 2022, p. 5855–5864. doi:10.1109/BigData55660.2022.10020639
    [BibTeX] [Abstract] [Download PDF]

    Tabular data synthesis is an emerging approach to circumvent strict regulations on data privacy while discovering knowledge through big data. Although state-of-the-art AI-based tabular data synthesizers, e.g., table-GAN, CTGAN, TVAE, and CTAB-GAN, are effective at generating synthetic tabular data, their training is sensitive to column permutations of input data. In this paper, we first conduct an extensive empirical study to disclose such a property of permutation invariance and an in-depth analysis of the existing synthesizers. We show that changing the input column order worsens the statistical difference between real and synthetic data by up to 38.67\% due to the encoding of tabular data and the network architectures. To fully unleash the potential of big synthetic tabular data, we propose two solutions: (i) AE-GAN, a synthesizer that uses an autoencoder network to represent the tabular data and GAN networks to synthesize the latent representation, and (ii) a feature sorting algorithm to find the suitable column order of input data for CNN-based synthesizers. We evaluate the proposed solutions on five datasets in terms of the sensitivity to the column permutation, the quality of synthetic data, and the utility in downstream analyses. Our results show that we enhance the property of permutation-invariance when training synthesizers and further improve the quality and utility of synthetic data, up to 22\%, compared to the existing synthesizers.

    @inproceedings{bigdata-zhu22,
    title = {Permutation-Invariant Tabular Data Synthesis},
    author = {Yujin Zhu and Zilong Zhao and Robert Birke and Lydia Y. Chen},
    year = {2022},
    month = dec,
    booktitle = {{IEEE} International Conference on Big Data (Big Data)},
    publisher = {{IEEE}},
    pages = {5855--5864},
    doi = {10.1109/BigData55660.2022.10020639},
    abstract = {Tabular data synthesis is an emerging approach to circumvent strict regulations on data privacy while discovering knowledge through big data. Although state-of-the-art AI-based tabular data synthesizers, e.g., table-GAN, CTGAN, TVAE, and CTAB-GAN, are effective at generating synthetic tabular data, their training is sensitive to column permutations of input data. In this paper, we first conduct an extensive empirical study to disclose such a property of permutation invariance and an in-depth analysis of the existing synthesizers. We show that changing the input column order worsens the statistical difference between real and synthetic data by up to 38.67\% due to the encoding of tabular data and the network architectures. To fully unleash the potential of big synthetic tabular data, we propose two solutions: (i) AE-GAN, a synthesizer that uses an autoencoder network to represent the tabular data and GAN networks to synthesize the latent representation, and (ii) a feature sorting algorithm to find the suitable column order of input data for CNN-based synthesizers. We evaluate the proposed solutions on five datasets in terms of the sensitivity to the column permutation, the quality of synthetic data, and the utility in downstream analyses. Our results show that we enhance the property of permutation-invariance when training synthesizers and further improve the quality and utility of synthetic data, up to 22\%, compared to the existing synthesizers.},
    editor = {Shusaku Tsumoto and Yukio Ohsawa and Lei Chen and Dirk Van den Poel and Xiaohua Hu and Yoichi Motomura and Takuya Takagi and Lingfei Wu and Ying Xie and Akihiro Abe and Vijay Raghavan},
    url = {https://datacloud.di.unito.it/index.php/s/b6z3moNLxnNiCxz},
    bdsk-url-1 = {https://datacloud.di.unito.it/index.php/s/b6z3moNLxnNiCxz},
    bdsk-url-2 = {https://doi.org/10.1109/BigData55660.2022.10020639}
    }

  • I. Colonnelli and M. Aldinucci, "Hybrid Workflows For Large-Scale Scientific Applications," in Sixth EAGE High Performance Computing Workshop, Milano, Italy, 2022, p. 1–5. doi:10.3997/2214-4609.2022615029
    [BibTeX] [Abstract] [Download PDF]

    Large-scale scientific applications are facing an irreversible transition from monolithic, high-performance oriented codes to modular and polyglot deployments of specialised (micro-)services. The reasons behind this transition are many: coupling of standard solvers with Deep Learning techniques, offloading of data analysis and visualisation to Cloud, and the advent of specialised hardware accelerators. Topology-aware Workflow Management Systems (WMSs) play a crucial role. In particular, topology-awareness allows an explicit mapping of workflow steps onto heterogeneous locations, allowing automated executions on top of hybrid architectures (e.g., cloud+HPC or classical+quantum). Plus, topology-aware WMSs can offer nonfunctional requirements OOTB, e.g. components' life-cycle orchestration, secure and efficient data transfers, fault tolerance, and cross-cluster execution of urgent workloads. Augmenting interactive Jupyter Notebooks with distributed workflow capabilities allows domain experts to prototype and scale applications using the same technological stack, while relying on a feature-rich and user-friendly web interface. This abstract will showcase how these general methodologies can be applied to a typical geoscience simulation pipeline based on the Full Wavefront Inversion (FWI) technique. In particular, a prototypical Jupyter Notebook will be executed interactively on Cloud. Preliminary data analyses and post-processing will be executed locally, while the computationally demanding optimisation loop will be scheduled on a remote HPC cluster.

    @inproceedings{22:eage-hpc-workshop,
    title = {Hybrid Workflows For Large-Scale Scientific Applications},
    author = {Iacopo Colonnelli and Marco Aldinucci},
    year = {2022},
    month = sep,
    booktitle = {Sixth {EAGE} High Performance Computing Workshop},
    publisher = {European Association of Geoscientists \& Engineers},
    address = {Milano, Italy},
    pages = {1--5},
    doi = {10.3997/2214-4609.2022615029},
    issn = {2214-4609},
    abstract = {Large-scale scientific applications are facing an irreversible transition from monolithic, high-performance oriented codes to modular and polyglot deployments of specialised (micro-)services. The reasons behind this transition are many: coupling of standard solvers with Deep Learning techniques, offloading of data analysis and visualisation to Cloud, and the advent of specialised hardware accelerators. Topology-aware Workflow Management Systems (WMSs) play a crucial role. In particular, topology-awareness allows an explicit mapping of workflow steps onto heterogeneous locations, allowing automated executions on top of hybrid architectures (e.g., cloud+HPC or classical+quantum). Plus, topology-aware WMSs can offer nonfunctional requirements OOTB, e.g. components' life-cycle orchestration, secure and efficient data transfers, fault tolerance, and cross-cluster execution of urgent workloads. Augmenting interactive Jupyter Notebooks with distributed workflow capabilities allows domain experts to prototype and scale applications using the same technological stack, while relying on a feature-rich and user-friendly web interface. This abstract will showcase how these general methodologies can be applied to a typical geoscience simulation pipeline based on the Full Wavefront Inversion (FWI) technique. In particular, a prototypical Jupyter Notebook will be executed interactively on Cloud. Preliminary data analyses and post-processing will be executed locally, while the computationally demanding optimisation loop will be scheduled on a remote HPC cluster.},
    url = {https://iris.unito.it/retrieve/d79ddabb-f9d7-4a55-9f84-1528b1533ba3/Extended_Abstract.pdf},
    bdsk-url-1 = {https://iris.unito.it/retrieve/d79ddabb-f9d7-4a55-9f84-1528b1533ba3/Extended_Abstract.pdf},
    bdsk-url-2 = {https://doi.org/10.3997/2214-4609.2022615029},
    keywords = {across, eupex}
    }

  • C. Stewart, N. Morris, L. Y. Chen, and R. Birke, "Performance Modeling for Short-Term Cache Allocation," in Proceedings of the 51st International Conference on Parallel Processing (ICPP), 2022, p. 31:1–31:11. doi:10.1145/3545008.3545094
    [BibTeX] [Abstract] [Download PDF]

    Short-term cache allocation grants and then revokes access to processor cache lines dynamically. For online services, short-term allocation can speed up targeted query executions and free up cache lines reserved, but normally not needed, for performance. However, in collocated settings, short-term allocation can increase cache contention, slowing down collocated query executions. To offset slowdowns, collocated services may request short-term allocation more often, making the problem worse. Short-term allocation policies manage which queries receive cache allocations and when. In collocated settings, these policies should balance targeted query speedups against slowdowns caused by recurring cache contention. We present a model-driven approach that (1) predicts response time under a given policy, (2) explores competing policies and (3) chooses policies that yield low response time for all collocated services. Our approach profiles cache usage offline, characterizes the effects of cache allocation policies using deep learning techniques and devises novel performance models for short-term allocation with online services. We tested our approach using data processing, cloud, and high-performance computing benchmarks collocated on Intel processors equipped with Cache Allocation Technology. Our models predicted median response time with 11\% absolute percent error. Short-term allocation policies found using our approach out performed state-of-the-art shared cache allocation policies by 1.2-2.3X.

    @inproceedings{icpp-stewart22,
    author     = {Christopher Stewart and Nathaniel Morris and Lydia Y. Chen and Robert Birke},
    title      = {Performance Modeling for Short-Term Cache Allocation},
    booktitle  = {Proceedings of the 51st International Conference on Parallel Processing ({ICPP})},
    publisher  = {{ACM}},
    year       = {2022},
    month      = aug,
    pages      = {31:1--31:11},
    doi        = {10.1145/3545008.3545094},
    url        = {https://doi.org/10.1145/3545008.3545094},
    abstract   = {Short-term cache allocation grants and then revokes access to processor cache lines dynamically. For online services, short-term allocation can speed up targeted query executions and free up cache lines reserved, but normally not needed, for performance. However, in collocated settings, short-term allocation can increase cache contention, slowing down collocated query executions. To offset slowdowns, collocated services may request short-term allocation more often, making the problem worse. Short-term allocation policies manage which queries receive cache allocations and when. In collocated settings, these policies should balance targeted query speedups against slowdowns caused by recurring cache contention. We present a model-driven approach that (1) predicts response time under a given policy, (2) explores competing policies and (3) chooses policies that yield low response time for all collocated services. Our approach profiles cache usage offline, characterizes the effects of cache allocation policies using deep learning techniques and devises novel performance models for short-term allocation with online services. We tested our approach using data processing, cloud, and high-performance computing benchmarks collocated on Intel processors equipped with Cache Allocation Technology. Our models predicted median response time with 11\% absolute percent error. Short-term allocation policies found using our approach out performed state-of-the-art shared cache allocation policies by 1.2-2.3X.},
    bdsk-url-1 = {https://doi.org/10.1145/3545008.3545094}
    }

  • M. Polato, R. Esposito, and M. Aldinucci, "Boosting the Federation: Cross-Silo Federated Learning without Gradient Descent," in Intl. Joint Conference on Neural Networks (IJCNN), Padua, Italy, 2022, p. 1–10. doi:10.1109/IJCNN55064.2022.9892284
    [BibTeX] [Abstract] [Download PDF]

    Federated Learning has been proposed to develop better AI systems without compromising the privacy of final users and the legitimate interests of private companies. Initially deployed by Google to predict text input on mobile devices, FL has been deployed in many other industries. Since its introduction, Federated Learning mainly exploited the inner working of neural networks and other gradient descent-based algorithms by either exchanging the weights of the model or the gradients computed during learning. While this approach has been very successful, it rules out applying FL in contexts where other models are preferred, e.g., easier to interpret or known to work better. This paper proposes FL algorithms that build federated models without relying on gradient descent-based methods. Specifically, we leverage distributed versions of the AdaBoost algorithm to acquire strong federated models. In contrast with previous approaches, our proposal does not put any constraint on the client-side learning models. We perform a large set of experiments on ten UCI datasets, comparing the algorithms in six non-iidness settings.

    @inproceedings{22:fl:ijcnn,
    title = {Boosting the Federation: Cross-Silo Federated Learning without Gradient Descent},
    author = {Mirko Polato and Roberto Esposito and Marco Aldinucci},
    year = {2022},
    month = jul,
    booktitle = {Intl. Joint Conference on Neural Networks ({IJCNN})},
    publisher = {{IEEE}},
    address = {Padua, Italy},
    pages = {1--10},
    doi = {10.1109/IJCNN55064.2022.9892284},
    abstract = {Federated Learning has been proposed to develop better AI systems without compromising the privacy of final users and the legitimate interests of private companies. Initially deployed by Google to predict text input on mobile devices, FL has been deployed in many other industries. Since its introduction, Federated Learning mainly exploited the inner working of neural networks and other gradient descent-based algorithms by either exchanging the weights of the model or the gradients computed during learning. While this approach has been very successful, it rules out applying FL in contexts where other models are preferred, e.g., easier to interpret or known to work better. This paper proposes FL algorithms that build federated models without relying on gradient descent-based methods. Specifically, we leverage distributed versions of the AdaBoost algorithm to acquire strong federated models. In contrast with previous approaches, our proposal does not put any constraint on the client-side learning models. We perform a large set of experiments on ten UCI datasets, comparing the algorithms in six non-iidness settings.},
    url = {https://iris.unito.it/retrieve/03a7b692-aecc-43db-a792-874c553d9ebe/ijcnn22-internal.pdf},
    bdsk-url-1 = {https://iris.unito.it/retrieve/03a7b692-aecc-43db-a792-874c553d9ebe/ijcnn22-internal.pdf},
    bdsk-url-2 = {https://doi.org/10.1109/IJCNN55064.2022.9892284},
    keywords = {hpc4ai,eupilot}
    }

  • A. Ghiassi, R. Birke, and L. Y. Chen, "LABNET: A Collaborative Method for DNN Training and Label Aggregation," in 14th International Conference on Agents and Artificial Intelligence (ICAART), 2022, p. 56–66. doi:10.5220/0010770400003116
    [BibTeX] [Abstract] [Download PDF]

    Today, to label the massive datasets needed to train Deep Neural Networks (DNNs), cheap and error-prone methods such as crowdsourcing are used. Label aggregation methods aim to infer the true labels from noisy labels annotated by crowdsourcing workers via labels statistics features. Aggregated labels are the main data source to train deep neural networks, and their accuracy directly affects the deep neural network performance. In this paper, we argue that training DNN and aggregating labels are not two separate tasks. Incorporation between DNN training and label aggregation connects data features, noisy labels, and aggregated labels. Since each image contains valuable knowledge about its label, the data features help aggregation methods enhance their performance. We propose LABNET an iterative two-step method. Step one: the label aggregation algorithm provides labels to train the DNN. Step two: the DNN shares a representation of the data features with the label aggregation algorithm. These steps are repeated until the converging label aggregation error rate. To evaluate LABNET we conduct an extensive empirical comparison on CIFAR-10 and CIFAR-100 under different noise and worker statistics. Our evaluation results show that LABNET achieves the highest mean accuracy with an increase of at least 8% to 0.6% and lowest error rate with a reduction of 7.5% to 0.25% against existing aggregation and training methods in most cases.

    @inproceedings{ghiassi/iccart22,
    author     = {Amirmasoud Ghiassi and Robert Birke and Lydia Y. Chen},
    title      = {{LABNET:} {A} Collaborative Method for {DNN} Training and Label Aggregation},
    booktitle  = {14th International Conference on Agents and Artificial Intelligence ({ICAART})},
    editor     = {Ana Paula Rocha and Luc Steels and H. Jaap van den Herik},
    publisher  = {{SCITEPRESS}},
    year       = {2022},
    month      = feb,
    pages      = {56--66},
    doi        = {10.5220/0010770400003116},
    url        = {https://www.scitepress.org/Link.aspx?doi=10.5220/0010770400003116},
    abstract   = {Today, to label the massive datasets needed to train Deep Neural Networks (DNNs), cheap and error-prone methods such as crowdsourcing are used. Label aggregation methods aim to infer the true labels from noisy labels annotated by crowdsourcing workers via labels statistics features. Aggregated labels are the main data source to train deep neural networks, and their accuracy directly affects the deep neural network performance. In this paper, we argue that training DNN and aggregating labels are not two separate tasks. Incorporation between DNN training and label aggregation connects data features, noisy labels, and aggregated labels. Since each image contains valuable knowledge about its label, the data features help aggregation methods enhance their performance. We propose LABNET an iterative two-step method. Step one: the label aggregation algorithm provides labels to train the DNN. Step two: the DNN shares a representation of the data features with the label aggregation algorithm. These steps are repeated until the converging label aggregation error rate. To evaluate LABNET we conduct an extensive empirical comparison on CIFAR-10 and CIFAR-100 under different noise and worker statistics. Our evaluation results show that LABNET achieves the highest mean accuracy with an increase of at least 8% to 0.6% and lowest error rate with a reduction of 7.5% to 0.25% against existing aggregation and training methods in most cases.},
    bdsk-url-1 = {https://www.scitepress.org/Link.aspx?doi=10.5220/0010770400003116},
    bdsk-url-2 = {https://doi.org/10.5220/0010770400003116}
    }

  • E. Sulis, I. A. Amantea, M. Aldinucci, G. Boella, R. Marinello, M. Grosso, P. Platter, and S. Ambrosini, "An ambient assisted living architecture for hospital at home coupled with a process-oriented perspective," Journal of Ambient Intelligence and Humanized Computing, 2022. doi:10.1007/s12652-022-04388-6
    [BibTeX] [Abstract] [Download PDF]

    The growing number of next-generation applications offers a relevant opportunity for healthcare services, generating an urgent need for architectures for systems integration. Moreover, the huge amount of stored information related to events can be explored by adopting a process-oriented perspective. This paper discusses an Ambient Assisted Living healthcare architecture to manage hospital home-care services. The proposed solution relies on adopting an event manager to integrate sources ranging from personal devices to web-based applications. Data are processed on a federated cloud platform offering computing infrastructure and storage resources to improve scientific research. In a second step, a business process analysis of telehealth and telemedicine applications is considered. An initial study explored the business process flow to capture the main sequences of tasks, activities, events. This step paves the way for the integration of process mining techniques to compliance monitoring in an AAL architecture framework.

    @article{Sulis2022,
    title = {An ambient assisted living architecture for hospital at home coupled with a process-oriented perspective},
    author = {Sulis, Emilio and Amantea, Ilaria Angela and Aldinucci, Marco and Boella, Guido and Marinello, Renata and Grosso, Marco and Platter, Paolo and Ambrosini, Serena},
    year = {2022},
    journal = {Journal of Ambient Intelligence and Humanized Computing},
    doi = {10.1007/s12652-022-04388-6},
    issn = {1868-5145},
    abstract = {The growing number of next-generation applications offers a relevant opportunity for healthcare services, generating an urgent need for architectures for systems integration. Moreover, the huge amount of stored information related to events can be explored by adopting a process-oriented perspective. This paper discusses an Ambient Assisted Living healthcare architecture to manage hospital home-care services. The proposed solution relies on adopting an event manager to integrate sources ranging from personal devices to web-based applications. Data are processed on a federated cloud platform offering computing infrastructure and storage resources to improve scientific research. In a second step, a business process analysis of telehealth and telemedicine applications is considered. An initial study explored the business process flow to capture the main sequences of tasks, activities, events. This step paves the way for the integration of process mining techniques to compliance monitoring in an AAL architecture framework.},
    date = {2022/09/21},
    date-added = {2023-02-04 18:45:08 +0100},
    date-modified = {2023-02-04 18:45:08 +0100},
    url = {https://iris.unito.it/retrieve/c7eaab0b-f78b-4af0-8c17-fa5479d776e6/jaihc2021-preprint.pdf},
    bdsk-url-1 = {https://iris.unito.it/retrieve/c7eaab0b-f78b-4af0-8c17-fa5479d776e6/jaihc2021-preprint.pdf},
    bdsk-url-2 = {https://doi.org/10.1007/s12652-022-04388-6}
    }

  • D. Oniga, B. Cantalupo, E. Tartaglione, D. Perlo, M. Grangetto, M. Aldinucci, F. Bolelli, F. Pollastri, M. Cancilla, L. Canalini, C. Grana, C. M. Alcalde, F. A. Cardillo, and M. Florea, "Applications of AI and HPC in the Health Domain," in HPC, Big Data, and AI Convergence Towards Exascale: Challenge and Vision, O. Terzo and J. Martinovič, Eds., Boca Raton, Florida: CRC Press, 2022, p. 217–239. doi:10.1201/9781003176664
    [BibTeX] [Abstract]

    This chapter presents the applications of artificial intelligence (AI) and high-computing performance (HPC) in the health domain, illustrated by the description of five of the use cases that are developed in the DeepHealth project. In the context of the European Commission supporting the use of AI and HPC in the health sector, DeepHealth Project is helping health experts process large quantities of images, putting at their disposal DeepLearning and computer vision techniques, combined in the DeepHealth toolkit and HPC infrastructures. The DeepHealth toolkit is tested and validated through 15 use cases, each of them representing a biomedical application. The most promising use cases are described in the chapter, which concludes with the value proposition and the benefits that DeepHealth toolkit offers to future end users.

    @incollection{22:applications:HPCbook,
    author     = {Dana Oniga and Barbara Cantalupo and Enzo Tartaglione and Daniele Perlo and Marco Grangetto and Marco Aldinucci and Federico Bolelli and Federico Pollastri and Michele Cancilla and Laura Canalini and Costantino Grana and Cristina Mu{\~n}oz Alcalde and Franco Alberto Cardillo and Monica Florea},
    title      = {Applications of {AI} and {HPC} in the Health Domain},
    booktitle  = {{HPC}, Big Data, and {AI} Convergence Towards Exascale: Challenge and Vision},
    editor     = {Olivier Terzo and Jan Martinovi\v{c}},
    chapter    = {11},
    pages      = {217--239},
    publisher  = {{CRC} Press},
    address    = {Boca Raton, Florida},
    year       = {2022},
    doi        = {10.1201/9781003176664},
    isbn       = {978-1-0320-0984-1},
    keywords   = {deephealth, streamflow},
    abstract   = {This chapter presents the applications of artificial intelligence (AI) and high-computing performance (HPC) in the health domain, illustrated by the description of five of the use cases that are developed in the DeepHealth project. In the context of the European Commission supporting the use of AI and HPC in the health sector, DeepHealth Project is helping health experts process large quantities of images, putting at their disposal DeepLearning and computer vision techniques, combined in the DeepHealth toolkit and HPC infrastructures. The DeepHealth toolkit is tested and validated through 15 use cases, each of them representing a biomedical application. The most promising use cases are described in the chapter, which concludes with the value proposition and the benefits that DeepHealth toolkit offers to future end users.},
    bdsk-url-1 = {https://doi.org/10.1201/9781003176664}
    }

  • B. Cox, R. Birke, and L. Y. Chen, "Memory-aware and context-aware multi-DNN inference on the edge," Pervasive and Mobile Computing, vol. 83, p. 1–16, 2022. doi:10.1016/j.pmcj.2022.101594
    [BibTeX] [Abstract] [Download PDF]

    Deep neural networks (DNNs) are becoming the core components of many applications running on edge devices, especially for real time image-based analysis. Increasingly, multi-faced knowledge is extracted by executing multiple DNNs inference models, e.g., identifying objects, faces, and genders from images. It is of paramount importance to guarantee low response times of such multi-DNN executions as it affects not only users quality of experience but also safety. The challenge, largely unaddressed by the state of the art, is how to overcome the memory limitation of edge devices without altering the DNN models. In this paper, we design and implement Masa, a responsive memory-aware multi-DNN execution and scheduling framework, which requires no modification of DNN models. The aim of Masa is to consistently ensure the average response time when deterministically and stochastically executing multiple DNN-based image analyses. The enabling features of Masa are (i) modeling inter- and intra-network dependency, (ii) leveraging complimentary memory usage of each layer, and (iii) exploring the context dependency of DNNs. We verify the correctness and scheduling optimality via mixed integer programming. We extensively evaluate two versions of Masa, context-oblivious and context-aware, on three configurations of Raspberry Pi and a large set of popular DNN models triggered by different generation patterns of images. Our evaluation results show that Masa can achieve lower average response times by up to 90% on devices with small memory, i.e., 512 MB to 1 GB, compared to the state of the art multi-DNN scheduling solutions.

    @article{COX2022101594,
    title = {Memory-aware and context-aware multi-{DNN} inference on the edge},
    author = {Bart Cox and Robert Birke and Lydia Y. Chen},
    year = {2022},
    journal = {Pervasive and Mobile Computing},
    volume = {83},
    pages = {1--16},
    doi = {10.1016/j.pmcj.2022.101594},
    issn = {1574-1192},
    abstract = {Deep neural networks (DNNs) are becoming the core components of many applications running on edge devices, especially for real time image-based analysis. Increasingly, multi-faced knowledge is extracted by executing multiple DNNs inference models, e.g., identifying objects, faces, and genders from images. It is of paramount importance to guarantee low response times of such multi-DNN executions as it affects not only users quality of experience but also safety. The challenge, largely unaddressed by the state of the art, is how to overcome the memory limitation of edge devices without altering the DNN models. In this paper, we design and implement Masa, a responsive memory-aware multi-DNN execution and scheduling framework, which requires no modification of DNN models. The aim of Masa is to consistently ensure the average response time when deterministically and stochastically executing multiple DNN-based image analyses. The enabling features of Masa are (i) modeling inter- and intra-network dependency, (ii) leveraging complimentary memory usage of each layer, and (iii) exploring the context dependency of DNNs. We verify the correctness and scheduling optimality via mixed integer programming. We extensively evaluate two versions of Masa, context-oblivious and context-aware, on three configurations of Raspberry Pi and a large set of popular DNN models triggered by different generation patterns of images. Our evaluation results show that Masa can achieve lower average response times by up to 90% on devices with small memory, i.e., 512 MB to 1 GB, compared to the state of the art multi-DNN scheduling solutions.},
    url = {https://www.sciencedirect.com/science/article/pii/S1574119222000372},
    bdsk-url-1 = {https://www.sciencedirect.com/science/article/pii/S1574119222000372},
    bdsk-url-2 = {https://doi.org/10.1016/j.pmcj.2022.101594}
    }

  • I. Colonnelli, M. Aldinucci, B. Cantalupo, L. Padovani, S. Rabellino, C. Spampinato, R. Morelli, R. Di Carlo, N. Magini, and C. Cavazzoni, "Distributed workflows with Jupyter," Future Generation Computer Systems, vol. 128, p. 282–298, 2022. doi:10.1016/j.future.2021.10.007
    [BibTeX] [Abstract] [Download PDF]

    The designers of a new coordination interface enacting complex workflows have to tackle a dichotomy: choosing a language-independent or language-dependent approach. Language-independent approaches decouple workflow models from the host code's business logic and advocate portability. Language-dependent approaches foster flexibility and performance by adopting the same host language for business and coordination code. Jupyter Notebooks, with their capability to describe both imperative and declarative code in a unique format, allow taking the best of the two approaches, maintaining a clear separation between application and coordination layers but still providing a unified interface to both aspects. We advocate the Jupyter Notebooks' potential to express complex distributed workflows, identifying the general requirements for a Jupyter-based Workflow Management System (WMS) and introducing a proof-of-concept portable implementation working on hybrid Cloud-HPC infrastructures. As a byproduct, we extended the vanilla IPython kernel with workflow-based parallel and distributed execution capabilities. The proposed Jupyter-workflow (Jw) system is evaluated on common scenarios for High Performance Computing (HPC) and Cloud, showing its potential in lowering the barriers between prototypical Notebooks and production-ready implementations.

    @article{21:FGCS:jupyflow,
    author = {Iacopo Colonnelli and Marco Aldinucci and Barbara Cantalupo and Luca Padovani and Sergio Rabellino and Concetto Spampinato and Roberto Morelli and Rosario {Di Carlo} and Nicol{\`o} Magini and Carlo Cavazzoni},
    title = {Distributed workflows with {Jupyter}},
    journal = {Future Generation Computer Systems},
    year = {2022},
    volume = {128},
    pages = {282--298},
    issn = {0167-739X},
    doi = {10.1016/j.future.2021.10.007},
    url = {https://www.sciencedirect.com/science/article/pii/S0167739X21003976},
    keywords = {streamflow, jupyter-workflow, deephealth, across},
    abstract = {The designers of a new coordination interface enacting complex workflows have to tackle a dichotomy: choosing a language-independent or language-dependent approach. Language-independent approaches decouple workflow models from the host code's business logic and advocate portability. Language-dependent approaches foster flexibility and performance by adopting the same host language for business and coordination code. Jupyter Notebooks, with their capability to describe both imperative and declarative code in a unique format, allow taking the best of the two approaches, maintaining a clear separation between application and coordination layers but still providing a unified interface to both aspects. We advocate the Jupyter Notebooks' potential to express complex distributed workflows, identifying the general requirements for a Jupyter-based Workflow Management System (WMS) and introducing a proof-of-concept portable implementation working on hybrid Cloud-HPC infrastructures. As a byproduct, we extended the vanilla IPython kernel with workflow-based parallel and distributed execution capabilities. The proposed Jupyter-workflow (Jw) system is evaluated on common scenarios for High Performance Computing (HPC) and Cloud, showing its potential in lowering the barriers between prototypical Notebooks and production-ready implementations.},
    bdsk-url-1 = {https://www.sciencedirect.com/science/article/pii/S0167739X21003976},
    bdsk-url-2 = {https://doi.org/10.1016/j.future.2021.10.007}
    }

  • E. Quiñones, J. Perales, J. Ejarque, A. Badouh, S. Marco, F. Auzanneau, F. Galea, D. González, J. R. Hervás, T. Silva, I. Colonnelli, B. Cantalupo, M. Aldinucci, E. Tartaglione, R. Tornero, J. Flich, J. M. Martinez, D. Rodriguez, I. Catalán, J. Garcia, and C. Hernández, "The DeepHealth HPC Infrastructure: Leveraging Heterogenous HPC and Cloud Computing Infrastructures for IA-based Medical Solutions," in HPC, Big Data, and AI Convergence Towards Exascale: Challenge and Vision, O. Terzo and J. Martinovič, Eds., Boca Raton, Florida: CRC Press, 2022, p. 191–216. doi:10.1201/9781003176664
    [BibTeX] [Abstract] [Download PDF]

    This chapter presents the DeepHealth HPC toolkit for an efficient execution of deep learning (DL) medical application into HPC and cloud-computing infrastructures, featuring many-core, GPU, and FPGA acceleration devices. The toolkit offers to the European Computer Vision Library and the European Distributed Deep Learning Library (EDDL), developed in the DeepHealth project as well, the mechanisms to distribute and parallelize DL operations on HPC and cloud infrastructures in a fully transparent way. The toolkit implements workflow managers used to orchestrate HPC workloads for an efficient parallelization of EDDL training operations on HPC and cloud infrastructures, and includes the parallel programming models for an efficient execution EDDL inference and training operations on many-core, GPUs and FPGAs acceleration devices.

    @incollection{22:deephealth:HPCbook,
    author = {Eduardo Qui\~{n}ones and Jesus Perales and Jorge Ejarque and Asaf Badouh and Santiago Marco and Fabrice Auzanneau and Fran\c{c}ois Galea and David Gonz\'{a}lez and Jos\'{e} Ram\'{o}n Herv\'{a}s and Tatiana Silva and Iacopo Colonnelli and Barbara Cantalupo and Marco Aldinucci and Enzo Tartaglione and Rafael Tornero and Jos\'{e} Flich and Jose Maria Martinez and David Rodriguez and Izan Catal\'{a}n and Jorge Garcia and Carles Hern\'{a}ndez},
    title = {The {DeepHealth} {HPC} Infrastructure: Leveraging Heterogenous {HPC} and Cloud Computing Infrastructures for {IA}-based Medical Solutions},
    booktitle = {{HPC}, Big Data, and {AI} Convergence Towards Exascale: Challenge and Vision},
    editor = {Olivier Terzo and Jan Martinovi\v{c}},
    chapter = {10},
    pages = {191--216},
    publisher = {{CRC} Press},
    address = {Boca Raton, Florida},
    year = {2022},
    isbn = {978-1-0320-0984-1},
    doi = {10.1201/9781003176664},
    url = {https://iris.unito.it/retrieve/handle/2318/1832050/912413/Preprint.pdf},
    keywords = {deephealth, streamflow},
    abstract = {This chapter presents the DeepHealth HPC toolkit for an efficient execution of deep learning (DL) medical application into HPC and cloud-computing infrastructures, featuring many-core, GPU, and FPGA acceleration devices. The toolkit offers to the European Computer Vision Library and the European Distributed Deep Learning Library (EDDL), developed in the DeepHealth project as well, the mechanisms to distribute and parallelize DL operations on HPC and cloud infrastructures in a fully transparent way. The toolkit implements workflow managers used to orchestrate HPC workloads for an efficient parallelization of EDDL training operations on HPC and cloud infrastructures, and includes the parallel programming models for an efficient execution EDDL inference and training operations on many-core, GPUs and FPGAs acceleration devices.},
    bdsk-url-1 = {https://iris.unito.it/retrieve/handle/2318/1832050/912413/Preprint.pdf},
    bdsk-url-2 = {https://doi.org/10.1201/9781003176664}
    }

  • M. Golasowski, J. Martinovič, M. Levrier, S. Hachinger, S. Karagiorgou, A. Papapostolou, S. Mouzakitis, I. Tsapelas, M. Caballero, M. Aldinucci, J. A. Gómez, A. Chazapis, and J. Acquaviva, "Toward the Convergence of High-Performance Computing, Cloud, and Big Data Domains," in HPC, Big Data, and AI Convergence Towards Exascale: Challenge and Vision, O. Terzo and J. Martinovič, Eds., Boca Raton, Florida: CRC Press, 2022, p. 1–16. doi:10.1201/9781003176664
    [BibTeX] [Abstract]

    Convergence between big data, high-performance computing, and the cloud is the key driving factor for sustainable economic growth in the future. Technological advances in many fields are determined by competence to gain precise information from the large amounts of data collected, which in turn requires powerful computing resources. This chapter provides an overview on the evolution of the three fields and four different points of view on their convergence provided by the CYBELE, DeepHealth, Evolve, and LEXIS projects funded by the European Union under the Horizon 2020 Programme.

    @incollection{22:intro:HPCbook,
    title = {Toward the Convergence of High-Performance Computing, Cloud, and Big Data Domains},
    author = {Martin Golasowski and Jan Martinovi\v{c} and Marc Levrier and Stephan Hachinger and Sophia Karagiorgou and Aikaterini Papapostolou and Spiros Mouzakitis and Ioannis Tsapelas and Monica Caballero and Marco Aldinucci and Jon Ander G\'{o}mez and Antony Chazapis and Jean-Thomas Acquaviva},
    year = {2022},
    booktitle = {{HPC}, Big Data, and {AI} Convergence Towards Exascale: Challenge and Vision},
    publisher = {{CRC} Press},
    address = {Boca Raton, Florida},
    pages = {1--16},
    doi = {10.1201/9781003176664},
    isbn = {978-1-0320-0984-1},
    abstract = {Convergence between big data, high-performance computing, and the cloud is the key driving factor for sustainable economic growth in the future. Technological advances in many fields are determined by competence to gain precise information from the large amounts of data collected, which in turn requires powerful computing resources. This chapter provides an overview on the evolution of the three fields and four different points of view on their convergence provided by the CYBELE, DeepHealth, Evolve, and LEXIS projects funded by the European Union under the Horizon 2020 Programme.},
    chapter = {1},
    editor = {Olivier Terzo and Jan Martinovi\v{c}},
    bdsk-url-1 = {https://doi.org/10.1201/9781003176664},
    keywords = {deephealth, streamflow}
    }

  • F. Proietto Salanitri, G. Bellitto, S. Palazzo, I. Irmakci, M. B. Wallace, C. W. Bolan, M. Engels, S. Hoogenboom, M. Aldinucci, U. Bagci, D. Giordano, and C. Spampinato, "Neural Transformers for Intraductal Papillary Mucosal Neoplasms (IPMN) Classification in MRI images," in 44th Annual International Conference of the IEEE Engineering in Medicine & Biology Society, EMBC 2022, Glasgow, Scotland, United Kingdom, July 11-15, 2022, 2022, p. 475–479. doi:10.1109/EMBC48229.2022.9871547
    [BibTeX] [Download PDF]
    @inproceedings{DBLP:conf/embc/SalanitriBPIWBE22,
    title = {Neural Transformers for Intraductal Papillary Mucosal Neoplasms {(IPMN)} Classification in {MRI} images},
    author = {Proietto Salanitri, Federica and Bellitto, Giovanni and Palazzo, Simone and Irmakci, Ismail and Wallace, Michael B. and Bolan, Candice W. and Engels, Megan and Hoogenboom, Sanne and Aldinucci, Marco and Bagci, Ulas and Giordano, Daniela and Spampinato, Concetto},
    year = {2022},
    booktitle = {44th Annual International Conference of the {IEEE} Engineering in Medicine {\&} Biology Society, {EMBC} 2022, Glasgow, Scotland, United Kingdom, July 11-15, 2022},
    publisher = {{IEEE}},
    pages = {475--479},
    doi = {10.1109/EMBC48229.2022.9871547},
    url = {https://doi.org/10.1109/EMBC48229.2022.9871547},
    bdsk-url-1 = {https://doi.org/10.1109/EMBC48229.2022.9871547},
    keywords = {hpc4ai, canp}
    }

  • B. Casella, A. Chisari, S. Battiato, and M. Giuffrida, "Transfer Learning via Test-time Neural Networks Aggregation," in Proceedings of the 17th International Joint Conference on Computer Vision, Imaging and Computer Graphics Theory and Applications, VISIGRAPP 2022, Volume 5: VISAPP, Online Streaming, February 6-8, 2022, 2022, p. 642–649. doi:10.5220/0010907900003124
    [BibTeX] [Abstract] [Download PDF]

    It has been demonstrated that deep neural networks outperform traditional machine learning. However, deep networks lack generalisability, that is, they will not perform as good as in a new (testing) set drawn from a different distribution due to the domain shift. In order to tackle this known issue, several transfer learning approaches have been proposed, where the knowledge of a trained model is transferred into another to improve performance with different data. However, most of these approaches require additional training steps, or they suffer from catastrophic forgetting that occurs when a trained model has overwritten previously learnt knowledge. We address both problems with a novel transfer learning approach that uses network aggregation. We train dataset-specific networks together with an aggregation network in a unified framework. The loss function includes two main components: a task-specific loss (such as cross-entropy) and an aggregation loss. The proposed aggregation loss allows our model to learn how trained deep network parameters can be aggregated with an aggregation operator. We demonstrate that the proposed approach learns model aggregation at test time without any further training step, reducing the burden of transfer learning to a simple arithmetical operation. The proposed approach achieves comparable performance w.r.t. the baseline. Besides, if the aggregation operator has an inverse, we will show that our model also inherently allows for selective forgetting, i.e., the aggregated model can forget one of the datasets it was trained on, retaining information on the others.

    @inproceedings{22:VISAPP:transferlearning,
    title = {Transfer Learning via Test-time Neural Networks Aggregation},
    author = {Bruno Casella and Alessio Chisari and Sebastiano Battiato and Mario Giuffrida},
    year = {2022},
    booktitle = {Proceedings of the 17th International Joint Conference on Computer Vision, Imaging and Computer Graphics Theory and Applications, {VISIGRAPP} 2022, Volume 5: VISAPP, Online Streaming, February 6-8, 2022},
    publisher = {SciTePress},
    pages = {642--649},
    doi = {10.5220/0010907900003124},
    isbn = {978-989-758-555-5},
    abstract = {It has been demonstrated that deep neural networks outperform traditional machine learning. However, deep networks lack generalisability, that is, they will not perform as good as in a new (testing) set drawn from a different distribution due to the domain shift. In order to tackle this known issue, several transfer learning approaches have been proposed, where the knowledge of a trained model is transferred into another to improve performance with different data. However, most of these approaches require additional training steps, or they suffer from catastrophic forgetting that occurs when a trained model has overwritten previously learnt knowledge. We address both problems with a novel transfer learning approach that uses network aggregation. We train dataset-specific networks together with an aggregation network in a unified framework. The loss function includes two main components: a task-specific loss (such as cross-entropy) and an aggregation loss. The proposed aggregation loss allows our model to learn how trained deep network parameters can be aggregated with an aggregation operator. We demonstrate that the proposed approach learns model aggregation at test time without any further training step, reducing the burden of transfer learning to a simple arithmetical operation. The proposed approach achieves comparable performance w.r.t. the baseline. Besides, if the aggregation operator has an inverse, we will show that our model also inherently allows for selective forgetting, i.e., the aggregated model can forget one of the datasets it was trained on, retaining information on the others.},
    editor = {Giovanni Maria Farinella and Petia Radeva and Kadi Bouatouch},
    organization = {INSTICC},
    url = {https://iris.unito.it/retrieve/handle/2318/1844159/947123/TRANSFER_LEARNING_VIA_TEST_TIME_NEURAL_NETWORKS_AGGREGATION.pdf},
    bdsk-url-1 = {https://iris.unito.it/retrieve/handle/2318/1844159/947123/TRANSFER_LEARNING_VIA_TEST_TIME_NEURAL_NETWORKS_AGGREGATION.pdf},
    bdsk-url-2 = {https://doi.org/10.5220/0010907900003124}
    }

  • M. Aldinucci, D. Atienza, F. Bolelli, M. Caballero, I. Colonnelli, J. Flich, J. A. Gómez, D. González, C. Grana, M. Grangetto, S. Leo, P. López, D. Oniga, R. Paredes, L. Pireddu, E. Quiñones, T. Silva, E. Tartaglione, and M. Zapater, "The DeepHealth Toolkit: A Key European Free and Open-Source Software for Deep Learning and Computer Vision Ready to Exploit Heterogeneous HPC and Cloud Architectures," in Technologies and Applications for Big Data Value, E. Curry, S. Auer, A. J. Berre, A. Metzger, M. S. Perez, and S. Zillner, Eds., Cham: Springer International Publishing, 2022, p. 183–202. doi:10.1007/978-3-030-78307-5_9
    [BibTeX] [Abstract] [Download PDF]

    At the present time, we are immersed in the convergence between Big Data, High-Performance Computing and Artificial Intelligence. Technological progress in these three areas has accelerated in recent years, forcing different players like software companies and stakeholders to move quickly. The European Union is dedicating a lot of resources to maintain its relevant position in this scenario, funding projects to implement large-scale pilot testbeds that combine the latest advances in Artificial Intelligence, High-Performance Computing, Cloud and Big Data technologies. The DeepHealth project is an example focused on the health sector whose main outcome is the DeepHealth toolkit, a European unified framework that offers deep learning and computer vision capabilities, completely adapted to exploit underlying heterogeneous High-Performance Computing, Big Data and cloud architectures, and ready to be integrated into any software platform to facilitate the development and deployment of new applications for specific problems in any sector. This toolkit is intended to be one of the European contributions to the field of AI. This chapter introduces the toolkit with its main components and complementary tools, providing a clear view to facilitate and encourage its adoption and wide use by the European community of developers of AI-based solutions and data scientists working in the healthcare sector and others.

    @incollection{22:TABDV,
    title = {The {DeepHealth} Toolkit: A Key European Free and Open-Source Software for Deep Learning and Computer Vision Ready to Exploit Heterogeneous {HPC} and {Cloud} Architectures},
    author = {Marco Aldinucci and David Atienza and Federico Bolelli and M\'{o}nica Caballero and Iacopo Colonnelli and Jos\'{e} Flich and Jon Ander G\'{o}mez and David Gonz\'{a}lez and Costantino Grana and Marco Grangetto and Simone Leo and Pedro L\'{o}pez and Dana Oniga and Roberto Paredes and Luca Pireddu and Eduardo Qui\~{n}ones and Tatiana Silva and Enzo Tartaglione and Marina Zapater},
    year = {2022},
    booktitle = {Technologies and Applications for Big Data Value},
    publisher = {Springer International Publishing},
    address = {Cham},
    pages = {183--202},
    doi = {10.1007/978-3-030-78307-5_9},
    isbn = {978-3-030-78307-5},
    abstract = {At the present time, we are immersed in the convergence between Big Data, High-Performance Computing and Artificial Intelligence. Technological progress in these three areas has accelerated in recent years, forcing different players like software companies and stakeholders to move quickly. The European Union is dedicating a lot of resources to maintain its relevant position in this scenario, funding projects to implement large-scale pilot testbeds that combine the latest advances in Artificial Intelligence, High-Performance Computing, Cloud and Big Data technologies. The DeepHealth project is an example focused on the health sector whose main outcome is the DeepHealth toolkit, a European unified framework that offers deep learning and computer vision capabilities, completely adapted to exploit underlying heterogeneous High-Performance Computing, Big Data and cloud architectures, and ready to be integrated into any software platform to facilitate the development and deployment of new applications for specific problems in any sector. This toolkit is intended to be one of the European contributions to the field of AI. This chapter introduces the toolkit with its main components and complementary tools, providing a clear view to facilitate and encourage its adoption and wide use by the European community of developers of AI-based solutions and data scientists working in the healthcare sector and others.},
    chapter = {9},
    editor = {Edward Curry and S\"{o}ren Auer and Arne J. Berre and Andreas Metzger and Maria S. Perez and Sonja Zillner},
    url = {https://link.springer.com/content/pdf/10.1007/978-3-030-78307-5_9.pdf},
    bdsk-url-1 = {https://link.springer.com/content/pdf/10.1007/978-3-030-78307-5_9.pdf},
    bdsk-url-2 = {https://doi.org/10.1007/978-3-030-78307-5_9},
    keywords = {deephealth, streamflow}
    }

  • B. Casella, R. Esposito, C. Cavazzoni, and M. Aldinucci, "Benchmarking FedAvg and FedCurv for Image Classification Tasks," in Proceedings of the 1st Italian Conference on Big Data and Data Science, ITADATA 2022, September 20-21, 2022, 2022.
    [BibTeX] [Abstract] [Download PDF]

    Classic Machine Learning (ML) techniques require training on data available in a single data lake (either centralized or distributed). However, aggregating data from different owners is not always convenient for different reasons, including security, privacy and secrecy. Data carry a value that might vanish when shared with others; the ability to avoid sharing the data enables industrial applications where security and privacy are of paramount importance, making it possible to train global models by implementing only local policies which can be run independently and even on air-gapped data centres. Federated Learning (FL) is a distributed machine learning approach which has emerged as an effective way to address privacy concerns by only sharing local AI models while keeping the data decentralized. Two critical challenges of Federated Learning are managing the heterogeneous systems in the same federated network and dealing with real data, which are often not independently and identically distributed (non-IID) among the clients. In this paper, we focus on the second problem, i.e., the problem of statistical heterogeneity of the data in the same federated network. In this setting, local models might be strayed far from the local optimum of the complete dataset, thus possibly hindering the convergence of the federated model. Several Federated Learning algorithms, such as FedAvg, FedProx and Federated Curvature (FedCurv), aiming at tackling the non-IID setting, have already been proposed. This work provides an empirical assessment of the behaviour of FedAvg and FedCurv in common non-IID scenarios. Results show that the number of epochs per round is an important hyper-parameter that, when tuned appropriately, can lead to significant performance gains while reducing the communication cost. As a side product of this work, we release the non-IID version of the datasets we used so to facilitate further comparisons from the FL community.

    @inproceedings{casella2022benchmarking,
    title = {Benchmarking {FedAvg} and {FedCurv} for Image Classification Tasks},
    author = {Bruno Casella and Roberto Esposito and Carlo Cavazzoni and Marco Aldinucci},
    year = {2022},
    booktitle = {Proceedings of the 1st Italian Conference on Big Data and Data Science, {ITADATA} 2022, September 20-21, 2022},
    publisher = {CEUR-WS.org},
    series = {{CEUR} Workshop Proceedings},
    volume = {3340},
    editor = {Marco Anisetti and Angela Bonifati and Nicola Bena and Claudio Ardagna and Donato Malerba},
    url = {https://ceur-ws.org/Vol-3340/paper40.pdf},
    abstract = {Classic Machine Learning (ML) techniques require training on data available in a single data lake (either centralized or distributed). However, aggregating data from different owners is not always convenient for different reasons, including security, privacy and secrecy. Data carry a value that might vanish when shared with others; the ability to avoid sharing the data enables industrial applications where security and privacy are of paramount importance, making it possible to train global models by implementing only local policies which can be run independently and even on air-gapped data centres. Federated Learning (FL) is a distributed machine learning approach which has emerged as an effective way to address privacy concerns by only sharing local AI models while keeping the data decentralized. Two critical challenges of Federated Learning are managing the heterogeneous systems in the same federated network and dealing with real data, which are often not independently and identically distributed (non-IID) among the clients. In this paper, we focus on the second problem, i.e., the problem of statistical heterogeneity of the data in the same federated network. In this setting, local models might be strayed far from the local optimum of the complete dataset, thus possibly hindering the convergence of the federated model. Several Federated Learning algorithms, such as FedAvg, FedProx and Federated Curvature (FedCurv), aiming at tackling the non-IID setting, have already been proposed. This work provides an empirical assessment of the behaviour of FedAvg and FedCurv in common non-IID scenarios. Results show that the number of epochs per round is an important hyper-parameter that, when tuned appropriately, can lead to significant performance gains while reducing the communication cost. As a side product of this work, we release the non-IID version of the datasets we used so to facilitate further comparisons from the FL community.},
    bdsk-url-1 = {https://iris.unito.it/bitstream/2318/1870961/1/Benchmarking_FedAvg_and_FedCurv_for_Image_Classification_Tasks.pdf},
    keywords = {eupilot}
    }

  • G. Agosta, M. Aldinucci, C. Alvarez, R. Ammendola, Y. Arfat, O. Beaumont, M. Bernaschi, A. Biagioni, T. Boccali, B. Bramas, C. Brandolese, B. Cantalupo, M. Carrozzo, D. Cattaneo, A. Celestini, M. Celino, I. Colonnelli, P. Cretaro, P. D'Ambra, M. Danelutto, R. Esposito, L. Eyraud-Dubois, A. Filgueras, W. Fornaciari, O. Frezza, A. Galimberti, F. Giacomini, B. Goglin, D. Gregori, A. Guermouche, F. Iannone, M. Kulczewski, F. Lo Cicero, A. Lonardo, A. R. Martinelli, M. Martinelli, X. Martorell, G. Massari, S. Montangero, G. Mittone, R. Namyst, A. Oleksiak, P. Palazzari, P. S. Paolucci, F. Reghenzani, C. Rossi, S. Saponara, F. Simula, F. Terraneo, S. Thibault, M. Torquati, M. Turisini, P. Vicini, M. Vidal, D. Zoni, and G. Zummo, "Towards EXtreme scale technologies and accelerators for euROhpc hw/Sw supercomputing applications for exascale: The TEXTAROSSA approach," Microprocessors and Microsystems, vol. 95, p. 104679, 2022. doi:10.1016/j.micpro.2022.104679
    [BibTeX] [Abstract]

    In the near future, Exascale systems will need to bridge three technology gaps to achieve high performance while remaining under tight power constraints: energy efficiency and thermal control; extreme computation efficiency via HW acceleration and new arithmetic; methods and tools for seamless integration of reconfigurable accelerators in heterogeneous HPC multi-node platforms. TEXTAROSSA addresses these gaps through a co-design approach to heterogeneous HPC solutions, supported by the integration and extension of HW and SW IPs, programming models, and tools derived from European research.

    @article{textarossa2022micpro:,
    title = {Towards {EXtreme} scale technologies and accelerators for {euROhpc} {hw/Sw} supercomputing applications for exascale: The {TEXTAROSSA} approach},
    author = {Giovanni Agosta and Marco Aldinucci and Carlos Alvarez and Roberto Ammendola and Yasir Arfat and Olivier Beaumont and Massimo Bernaschi and Andrea Biagioni and Tommaso Boccali and Berenger Bramas and Carlo Brandolese and Barbara Cantalupo and Mauro Carrozzo and Daniele Cattaneo and Alessandro Celestini and Massimo Celino and Iacopo Colonnelli and Paolo Cretaro and Pasqua D'Ambra and Marco Danelutto and Roberto Esposito and Lionel Eyraud-Dubois and Antonio Filgueras and William Fornaciari and Ottorino Frezza and Andrea Galimberti and Francesco Giacomini and Brice Goglin and Daniele Gregori and Abdou Guermouche and Francesco Iannone and Michal Kulczewski and Francesca {Lo Cicero} and Alessandro Lonardo and Alberto R. Martinelli and Michele Martinelli and Xavier Martorell and Giuseppe Massari and Simone Montangero and Gianluca Mittone and Raymond Namyst and Ariel Oleksiak and Paolo Palazzari and Pier Stanislao Paolucci and Federico Reghenzani and Cristian Rossi and Sergio Saponara and Francesco Simula and Federico Terraneo and Samuel Thibault and Massimo Torquati and Matteo Turisini and Piero Vicini and Miquel Vidal and Davide Zoni and Giuseppe Zummo},
    year = {2022},
    journal = {Microprocessors and Microsystems},
    volume = {95},
    pages = {104679},
    doi = {10.1016/j.micpro.2022.104679},
    issn = {0141-9331},
    abstract = {In the near future, Exascale systems will need to bridge three technology gaps to achieve high performance while remaining under tight power constraints: energy efficiency and thermal control; extreme computation efficiency via HW acceleration and new arithmetic; methods and tools for seamless integration of reconfigurable accelerators in heterogeneous HPC multi-node platforms. TEXTAROSSA addresses these gaps through a co-design approach to heterogeneous HPC solutions, supported by the integration and extension of HW and SW IPs, programming models, and tools derived from European research.},
    bdsk-url-1 = {https://doi.org/10.1016/j.micpro.2022.104679},
    keywords = {textrossa}
    }

  • V. Cesare, U. Becciani, A. Vecchiato, M. G. Lattanzi, F. Pitari, M. Raciti, G. Tudisco, M. Aldinucci, and B. Bucciarelli, "The Gaia AVU-GSR parallel solver: Preliminary studies of a LSQR-based application in perspective of exascale systems," Astronomy and Computing, p. 100660, 2022. doi:10.1016/j.ascom.2022.100660
    [BibTeX] [Abstract] [Download PDF]

    The Gaia Astrometric Verification Unit–Global Sphere Reconstruction (AVU–GSR) Parallel Solver aims to find the astrometric parameters for circa 10^8 stars in the Milky Way, the attitude and the instrumental specifications of the Gaia satellite, and the global parameter γ of the post Newtonian formalism. The code iteratively solves a system of linear equations, A×x=b, where the coefficient matrix A is large (circa 10^11×10^8 elements) and sparse. To solve this system of equations, the code exploits a hybrid implementation of the iterative PC-LSQR algorithm, where the computation related to different horizontal portions of the coefficient matrix is assigned to separate MPI processes. In the original code, each matrix portion is further parallelized over the OpenMP threads. To further improve the code performance, we ported the application to the GPU, replacing the OpenMP parallelization language with OpenACC. In this port, ∼95\% of the data is copied from the host to the device at the beginning of the entire cycle of iterations, making the code compute bound rather than data-transfer bound. The OpenACC code presents a speedup of circa 1.5 over the OpenMP version but further optimizations are in progress to obtain higher gains. The code runs on multiple GPUs and it was tested on the CINECA supercomputer Marconi100, in anticipation of a port to the pre-exascale system Leonardo, that will be installed at CINECA in 2022.

    @article{CESARE2022100660,
    title = {The Gaia {AVU-GSR} parallel solver: Preliminary studies of a {LSQR}-based application in perspective of exascale systems},
    author = {Cesare, Valentina and Becciani, Ugo and Vecchiato, Alberto and Lattanzi, Mario Gilberto and Pitari, Fabio and Raciti, Mario and Tudisco, Giuseppe and Aldinucci, Marco and Bucciarelli, Beatrice},
    year = {2022},
    journal = {Astronomy and Computing},
    pages = {100660},
    doi = {10.1016/j.ascom.2022.100660},
    issn = {2213-1337},
    abstract = {The Gaia Astrometric Verification Unit--Global Sphere Reconstruction (AVU--GSR) Parallel Solver aims to find the astrometric parameters for circa $10^8$ stars in the Milky Way, the attitude and the instrumental specifications of the Gaia satellite, and the global parameter $\gamma$ of the post Newtonian formalism. The code iteratively solves a system of linear equations, $A \times x = b$, where the coefficient matrix $A$ is large (circa $10^{11} \times 10^8$ elements) and sparse. To solve this system of equations, the code exploits a hybrid implementation of the iterative PC-LSQR algorithm, where the computation related to different horizontal portions of the coefficient matrix is assigned to separate MPI processes. In the original code, each matrix portion is further parallelized over the OpenMP threads. To further improve the code performance, we ported the application to the GPU, replacing the OpenMP parallelization language with OpenACC. In this port, $\sim$95\% of the data is copied from the host to the device at the beginning of the entire cycle of iterations, making the code compute bound rather than data-transfer bound. The OpenACC code presents a speedup of circa 1.5 over the OpenMP version but further optimizations are in progress to obtain higher gains. The code runs on multiple GPUs and it was tested on the CINECA supercomputer Marconi100, in anticipation of a port to the pre-exascale system Leonardo, that will be installed at CINECA in 2022.},
    url = {https://openaccess.inaf.it/handle/20.500.12386/32451},
    bdsk-url-1 = {https://openaccess.inaf.it/handle/20.500.12386/32451},
    bdsk-url-2 = {https://doi.org/10.1016/j.ascom.2022.100660},
    keywords = {eupex}
    }

  • G. Gallone, J. Kang, F. Bruno, J. Han, O. De Filippo, H. Yang, M. Doronzo, K. Park, G. Mittone, H. Kang, R. Parma, H. Gwon, E. Cerrato, W. J. Chun, G. Smolka, S. Hur, G. Helft, S. H. Han, S. Muscoli, Y. B. Song, F. Figini, K. H. Choi, G. Boccuzzi, S. Hong, D. Trabattoni, C. Nam, M. Giammaria, H. Kim, F. Conrotto, J. Escaned, C. Di Mario, F. D'Ascenzo, B. Koo, and G. M. de Ferrari, "Impact of Left Ventricular Ejection Fraction on Procedural and Long-Term Outcomes of Bifurcation Percutaneous Coronary Intervention," The American Journal of Cardiology, vol. 172, p. 18–25, 2022. doi:https://doi.org/10.1016/j.amjcard.2022.02.015
    [BibTeX] [Abstract] [Download PDF]

    {The association of left ventricular ejection fraction (LVEF) with procedural and long-term outcomes after state-of-the-art percutaneous coronary intervention (PCI) of bifurcation lesions remains unsettled. A total of 5,333 patients who underwent contemporary coronary bifurcation PCI were included in the intercontinental retrospective combined insights from the unified RAIN (veRy thin stents for patients with left mAIn or bifurcatioN in real life) and COBIS (COronary BIfurcation Stenting) III bifurcation registries. Of 5,003 patients (93.8%) with known baseline LVEF, 244 (4.9%) had LVEF <40% (bifurcation with reduced ejection fraction [BIFrEF] group), 430 (8.6%) had LVEF 40% to 49% (bifurcation with mildly reduced ejection fraction [BIFmEF] group) and 4,329 (86.5%) had ejection fraction (EF) ≥50% (bifurcation with preserved ejection fraction [BIFpEF] group). The primary end point was the Kaplan-Meier estimate of major adverse cardiac events (MACEs) (a composite of all-cause death, myocardial infarction, and target vessel revascularization). Patients with BIFrEF had a more complex clinical profile and coronary anatomy. No difference in procedural (30 days) MACE was observed across EF categories, also after adjustment for in-study outcome predictors (BIFrEF vs BIFmEF: adjusted hazard ratio [adj-HR] 1.39, 95% confidence interval [CI] 0.37 to 5.21

    @article{GALLONE202218,
    title = {Impact of Left Ventricular Ejection Fraction on Procedural and Long-Term Outcomes of Bifurcation Percutaneous Coronary Intervention},
    author = {Guglielmo Gallone and Jeehoon Kang and Francesco Bruno and Jung-Kyu Han and Ovidio {De Filippo} and Han-Mo Yang and Mattia Doronzo and Kyung-Woo Park and Gianluca Mittone and Hyun-Jae Kang and Radoslaw Parma and Hyeon-Cheol Gwon and Enrico Cerrato and Woo Jung Chun and Grzegorz Smolka and Seung-Ho Hur and Gerard Helft and Seung Hwan Han and Saverio Muscoli and Young Bin Song and Filippo Figini and Ki Hong Choi and Giacomo Boccuzzi and Soon-Jun Hong and Daniela Trabattoni and Chang-Wook Nam and Massimo Giammaria and Hyo-Soo Kim and Federico Conrotto and Javier Escaned and Carlo {Di Mario} and Fabrizio D'Ascenzo and Bon-Kwon Koo and Gaetano Maria {de Ferrari}},
    year = {2022},
    journal = {The American Journal of Cardiology},
    volume = {172},
    pages = {18--25},
    doi = {10.1016/j.amjcard.2022.02.015},
    issn = {0002-9149},
    abstract = {The association of left ventricular ejection fraction (LVEF) with procedural and long-term outcomes after state-of-the-art percutaneous coronary intervention (PCI) of bifurcation lesions remains unsettled. A total of 5,333 patients who underwent contemporary coronary bifurcation PCI were included in the intercontinental retrospective combined insights from the unified RAIN (veRy thin stents for patients with left mAIn or bifurcatioN in real life) and COBIS (COronary BIfurcation Stenting) III bifurcation registries. Of 5,003 patients (93.8\%) with known baseline LVEF, 244 (4.9\%) had LVEF <40\% (bifurcation with reduced ejection fraction [BIFrEF] group), 430 (8.6\%) had LVEF 40\% to 49\% (bifurcation with mildly reduced ejection fraction [BIFmEF] group) and 4,329 (86.5\%) had ejection fraction (EF) ≥50\% (bifurcation with preserved ejection fraction [BIFpEF] group). The primary end point was the Kaplan-Meier estimate of major adverse cardiac events (MACEs) (a composite of all-cause death, myocardial infarction, and target vessel revascularization). Patients with BIFrEF had a more complex clinical profile and coronary anatomy. No difference in procedural (30 days) MACE was observed across EF categories, also after adjustment for in-study outcome predictors (BIFrEF vs BIFmEF: adjusted hazard ratio [adj-HR] 1.39, 95\% confidence interval [CI] 0.37 to 5.21, p = 0.626; BIFrEF vs BIFpEF: adj-HR 1.11, 95\% CI 0.25 to 2.87, p = 0.883; BIFmEF vs BIFpEF: adj-HR 0.81, 95\% CI 0.29 to 2.27, p = 0.683). BIFrEF was independently associated with long-term MACE (median follow-up 21 months, interquartile range 10 to 21 months) than both BIFmEF (adj-HR 2.20, 95\% CI 1.41 to 3.41, p <0.001) and BIFpEF (adj-HR 1.91, 95\% CI 1.41 to 2.60, p <0.001) groups, although no difference was observed between BIFmEF and BIFpEF groups (adj-HR 0.87, 95\% CI 0.61 to 1.24, p = 0.449). In conclusion, in patients who underwent PCI of a coronary bifurcation lesion according to contemporary clinical practice, reduced LVEF (<40\%), although a strong predictor of long-term MACEs, does not affect procedural outcomes.},
    url = {https://www.sciencedirect.com/science/article/pii/S0002914922001692},
    bdsk-url-1 = {https://www.sciencedirect.com/science/article/pii/S0002914922001692},
    bdsk-url-2 = {https://doi.org/10.1016/j.amjcard.2022.02.015}
    }

2021

  • A. Ghiassi, R. Birke, and L. Y. Chen, "TrustNet: Learning from Trusted Data Against (A)symmetric Label Noise," in 8th IEEE/ACM International Conference on Big Data Computing, Applications and Technologies (BDCAT), 2021, p. 52–62. doi:10.1145/3492324.3494166
    [BibTeX] [Abstract] [Download PDF]

    Big Data systems allow collecting massive datasets to feed the data hungry deep learning. Labelling these ever-bigger datasets is increasingly challenging and label errors affect even highly curated sets. This makes robustness to label noise a critical property for weakly-supervised classifiers. The related works on resilient deep networks tend to focus on a limited set of synthetic noise patterns, and with disparate views on their impacts, e.g., robustness against symmetric v.s. asymmetric noise patterns. In this paper, we first extend the theoretical analysis of test accuracy for any given noise patterns. Based on the insights, we design TrustNet that first learns the pattern of noise corruption, being it both symmetric or asymmetric, from a small set of trusted data. Then, TrustNet is trained via a robust loss function, which weights the given labels against the inferred labels from the learned noise pattern. The weight is adjusted based on model uncertainty across training epochs. We evaluate TrustNet on synthetic label noise for CIFAR-10, CIFAR-100 and big real-world data with label noise, i.e., Clothing1M. We compare against state-of-the-art methods demonstrating the strong robustness of TrustNet under a diverse set of noise patterns.

    @inproceedings{bdcat-ghiassi21,
    author = {Ghiassi, Amirmasoud and Birke, Robert and Chen, Lydia Y.},
    title = {{TrustNet}: Learning from Trusted Data Against (A)symmetric Label Noise},
    booktitle = {8th {IEEE/ACM} International Conference on Big Data Computing, Applications and Technologies ({BDCAT})},
    publisher = {{ACM}},
    year = {2021},
    month = dec,
    pages = {52--62},
    doi = {10.1145/3492324.3494166},
    url = {https://doi.org/10.1145/3492324.3494166},
    bdsk-url-1 = {https://doi.org/10.1145/3492324.3494166},
    abstract = {Big Data systems allow collecting massive datasets to feed the data hungry deep learning. Labelling these ever-bigger datasets is increasingly challenging and label errors affect even highly curated sets. This makes robustness to label noise a critical property for weakly-supervised classifiers. The related works on resilient deep networks tend to focus on a limited set of synthetic noise patterns, and with disparate views on their impacts, e.g., robustness against symmetric v.s. asymmetric noise patterns. In this paper, we first extend the theoretical analysis of test accuracy for any given noise patterns. Based on the insights, we design TrustNet that first learns the pattern of noise corruption, being it both symmetric or asymmetric, from a small set of trusted data. Then, TrustNet is trained via a robust loss function, which weights the given labels against the inferred labels from the learned noise pattern. The weight is adjusted based on model uncertainty across training epochs. We evaluate TrustNet on synthetic label noise for CIFAR-10, CIFAR-100 and big real-world data with label noise, i.e., Clothing1M. We compare against state-of-the-art methods demonstrating the strong robustness of TrustNet under a diverse set of noise patterns.}
    }

  • T. Younesian, Z. Zhao, A. Ghiassi, R. Birke, and L. Y. Chen, "QActor: Active Learning on Noisy Labels," in Proceedings of The 13th Asian Conference on Machine Learning, 2021, p. 548–563.
    [BibTeX] [Abstract] [Download PDF]

    Noisy labeled data is more a norm than a rarity for self-generated content that is continuously published on the web and social media from non-experts. Active querying experts are conventionally adopted to provide labels for the informative samples which don't have labels, instead of possibly incorrect labels. The new challenge that arises here is how to discern the informative and noisy labels which benefit from expert cleaning. In this paper, we aim to leverage the stringent oracle budget to robustly maximize learning accuracy. We propose a noise-aware active learning framework, QActor, and a novel measure \emph{CENT}, which considers both cross-entropy and entropy to select informative and noisy labels for an expert cleansing. QActor iteratively cleans samples via quality models and actively querying an expert on those noisy yet informative samples. To adapt to learning capacity per iteration, QActor dynamically adjusts the query limit according to the learning loss for each learning iteration. We extensively evaluate different image datasets with noise label ratios ranging between 30% and 60%. Our results show that QActor can nearly match the optimal accuracy achieved using only clean data at the cost of only an additional 10% of ground truth data from the oracle.

    @inproceedings{pmlr-v157-younesian21a,
    title = {{QActor}: Active Learning on Noisy Labels},
    author = {Younesian, Taraneh and Zhao, Zilong and Ghiassi, Amirmasoud and Birke, Robert and Chen, Lydia Y.},
    year = {2021},
    month = nov,
    booktitle = {Proceedings of The 13th Asian Conference on Machine Learning},
    publisher = {PMLR},
    series = {Proceedings of Machine Learning Research},
    volume = {157},
    pages = {548--563},
    abstract = {Noisy labeled data is more a norm than a rarity for self-generated content that is continuously published on the web and social media from non-experts. Active querying experts are conventionally adopted to provide labels for the informative samples which don't have labels, instead of possibly incorrect labels. The new challenge that arises here is how to discern the informative and noisy labels which benefit from expert cleaning. In this paper, we aim to leverage the stringent oracle budget to robustly maximize learning accuracy. We propose a noise-aware active learning framework, QActor, and a novel measure \emph{CENT}, which considers both cross-entropy and entropy to select informative and noisy labels for an expert cleansing. QActor iteratively cleans samples via quality models and actively querying an expert on those noisy yet informative samples. To adapt to learning capacity per iteration, QActor dynamically adjusts the query limit according to the learning loss for each learning iteration. We extensively evaluate different image datasets with noise label ratios ranging between 30\% and 60\%. Our results show that QActor can nearly match the optimal accuracy achieved using only clean data at the cost of only an additional 10\% of ground truth data from the oracle.},
    editor = {Balasubramanian, Vineeth N. and Tsang, Ivor},
    pdf = {https://proceedings.mlr.press/v157/younesian21a/younesian21a.pdf},
    url = {https://proceedings.mlr.press/v157/younesian21a.html},
    bdsk-url-1 = {https://proceedings.mlr.press/v157/younesian21a.html}
    }

  • Z. Zhao, A. Kunar, R. Birke, and L. Y. Chen, "CTAB-GAN: Effective Table Data Synthesizing," in Proceedings of The 13th Asian Conference on Machine Learning, 2021, p. 97–112.
    [BibTeX] [Abstract] [Download PDF]

    While data sharing is crucial for knowledge development, privacy concerns and strict regulation (e.g., European General Data Protection Regulation (GDPR)) unfortunately limit its full effectiveness. Synthetic tabular data emerges as an alternative to enable data sharing while fulfilling regulatory and privacy constraints. The state-of-the-art tabular data synthesizers draw methodologies from Generative Adversarial Networks (GAN) and address two main data types in industry, i.e., continuous and categorical. In this paper, we develop CTAB-GAN, a novel conditional table GAN architecture that can effectively model diverse data types, including a mix of continuous and categorical variables. Moreover, we address data imbalance and long tail issues, i.e., certain variables have drastic frequency differences across large values. To achieve those aims, we first introduce the information loss, classification loss and generator loss to the conditional GAN. Secondly, we design a novel conditional vector, which efficiently encodes the mixed data type and skewed distribution of data variable. We extensively evaluate CTAB-GAN with the state of the art GANs that generate synthetic tables, in terms of data similarity and analysis utility. The results on five datasets show that the synthetic data of CTAB-GAN remarkably resembles the real data for all three types of variables and results into higher accuracy for five machine learning algorithms, by up to 17%.

    @inproceedings{pmlr-v157-zhao21a,
    title = {{CTAB-GAN}: Effective Table Data Synthesizing},
    author = {Zhao, Zilong and Kunar, Aditya and Birke, Robert and Chen, Lydia Y.},
    year = {2021},
    month = nov,
    booktitle = {Proceedings of The 13th Asian Conference on Machine Learning},
    publisher = {PMLR},
    series = {Proceedings of Machine Learning Research},
    volume = {157},
    pages = {97--112},
    abstract = {While data sharing is crucial for knowledge development, privacy concerns and strict regulation (e.g., European General Data Protection Regulation (GDPR)) unfortunately limit its full effectiveness. Synthetic tabular data emerges as an alternative to enable data sharing while fulfilling regulatory and privacy constraints. The state-of-the-art tabular data synthesizers draw methodologies from Generative Adversarial Networks (GAN) and address two main data types in industry, i.e., continuous and categorical. In this paper, we develop CTAB-GAN, a novel conditional table GAN architecture that can effectively model diverse data types, including a mix of continuous and categorical variables. Moreover, we address data imbalance and long tail issues, i.e., certain variables have drastic frequency differences across large values. To achieve those aims, we first introduce the information loss, classification loss and generator loss to the conditional GAN. Secondly, we design a novel conditional vector, which efficiently encodes the mixed data type and skewed distribution of data variable. We extensively evaluate CTAB-GAN with the state of the art GANs that generate synthetic tables, in terms of data similarity and analysis utility. The results on five datasets show that the synthetic data of CTAB-GAN remarkably resembles the real data for all three types of variables and results into higher accuracy for five machine learning algorithms, by up to 17\%.},
    editor = {Balasubramanian, Vineeth N. and Tsang, Ivor},
    pdf = {https://proceedings.mlr.press/v157/zhao21a/zhao21a.pdf},
    url = {https://proceedings.mlr.press/v157/zhao21a.html},
    bdsk-url-1 = {https://proceedings.mlr.press/v157/zhao21a.html}
    }

  • G. Albanese, R. Birke, G. Giannopoulou, S. Schönborn, and T. Sivanthi, "Evaluation of Networking Options for Containerized Deployment of Real-Time Applications," in 26th IEEE International Conference on Emerging Technologies and Factory Automation (ETFA), 2021, p. 1–8. doi:10.1109/ETFA45728.2021.9613320
    [BibTeX] [Abstract] [Download PDF]

    Enterprises in the field of industrial automation experience an increasing demand for providing virtualized software solutions. Inspired by the recent trends in serverless and cloud computing, software virtualization is considered even for safety-critical applications with hard real-time requirements, as a means of avoiding hardware vendor lock-in and reducing volume and maintenance cost of devices. In this work, we evaluate the applicability of OS-level virtualization to an industrial automation use case. Our application runs in Docker containers on top of Linux patched with PREEMPT_RT. We investigate the ability of Docker coupled with diverse networking technologies to fulfill the latency requirements of the application under normal or heavy system load. We empirically compare four networking technologies with respect to communication latency and frequency of missing packets. The results indicate that Docker with certain technologies, such as the Single Root I/O Virtualization interface, performs robustly even under heavy load, enabling sufficient performance isolation and low overhead that does not jeopardise the real-time performance of our application.

    @inproceedings{etfa-albanese21,
    title = {Evaluation of Networking Options for Containerized Deployment of Real-Time Applications},
    author = {Giuliano Albanese and Robert Birke and Georgia Giannopoulou and Sandro Sch{\"o}nborn and Thanikesavan Sivanthi},
    year = {2021},
    month = sep,
    booktitle = {26th {IEEE} International Conference on Emerging Technologies and Factory Automation ({ETFA})},
    publisher = {{IEEE}},
    pages = {1--8},
    doi = {10.1109/ETFA45728.2021.9613320},
    abstract = {Enterprises in the field of industrial automation experience an increasing demand for providing virtualized software solutions. Inspired by the recent trends in serverless and cloud computing, software virtualization is considered even for safety-critical applications with hard real-time requirements, as a means of avoiding hardware vendor lock-in and reducing volume and maintenance cost of devices. In this work, we evaluate the applicability of OS-level virtualization to an industrial automation use case. Our application runs in Docker containers on top of Linux patched with PREEMPT\_RT. We investigate the ability of Docker coupled with diverse networking technologies to fulfill the latency requirements of the application under normal or heavy system load. We empirically compare four networking technologies with respect to communication latency and frequency of missing packets. The results indicate that Docker with certain technologies, such as the Single Root I/O Virtualization interface, performs robustly even under heavy load, enabling sufficient performance isolation and low overhead that does not jeopardise the real-time performance of our application.},
    url = {https://doi.org/10.1109/ETFA45728.2021.9613320},
    bdsk-url-1 = {https://doi.org/10.1109/ETFA45728.2021.9613320}
    }

  • G. Agosta, W. Fornaciari, A. Galimberti, G. Massari, F. Reghenzani, F. Terraneo, D. Zoni, C. Brandolese, M. Celino, F. Iannone, P. Palazzari, G. Zummo, M. Bernaschi, P. D'Ambra, S. Saponara, M. Danelutto, M. Torquati, M. Aldinucci, Y. Arfat, B. Cantalupo, I. Colonnelli, R. Esposito, A. R. Martinelli, G. Mittone, O. Beaumont, B. Bramas, L. Eyraud-Dubois, B. Goglin, A. Guermouche, R. Namyst, S. Thibault, A. Filgueras, M. Vidal, C. Alvarez, X. Martorell, A. Oleksiak, M. Kulczewski, A. Lonardo, P. Vicini, F. L. Cicero, F. Simula, A. Biagioni, P. Cretaro, O. Frezza, P. S. Paolucci, M. Turisini, F. Giacomini, T. Boccali, S. Montangero, and R. Ammendola, "TEXTAROSSA: Towards EXtreme scale Technologies and Accelerators for euROhpc hw/Sw Supercomputing Applications for exascale," in Proc. of the 24th Euromicro Conference on Digital System Design (DSD), Palermo, Italy, 2021. doi:10.1109/DSD53832.2021.00051
    [BibTeX] [Abstract]

    To achieve high performance and high energy efficiency on near-future exascale computing systems, three key technology gaps needs to be bridged. These gaps include: energy efficiency and thermal control; extreme computation efficiency via HW acceleration and new arithmetics; methods and tools for seamless integration of reconfigurable accelerators in heterogeneous HPC multi-node platforms. TEXTAROSSA aims at tackling this gap through a co-design approach to heterogeneous HPC solutions, supported by the integration and extension of HW and SW IPs, programming models and tools derived from European research.

    @inproceedings{21:DSD:textarossa,
    title = {{TEXTAROSSA}: Towards {EXtreme} scale Technologies and Accelerators for {euROhpc} hw/Sw Supercomputing Applications for exascale},
    author = {Giovanni Agosta and William Fornaciari and Andrea Galimberti and Giuseppe Massari and Federico Reghenzani and Federico Terraneo and Davide Zoni and Carlo Brandolese and Massimo Celino and Francesco Iannone and Paolo Palazzari and Giuseppe Zummo and Massimo Bernaschi and Pasqua D'Ambra and Sergio Saponara and Marco Danelutto and Massimo Torquati and Marco Aldinucci and Yasir Arfat and Barbara Cantalupo and Iacopo Colonnelli and Roberto Esposito and Alberto Riccardo Martinelli and Gianluca Mittone and Olivier Beaumont and Berenger Bramas and Lionel Eyraud-Dubois and Brice Goglin and Abdou Guermouche and Raymond Namyst and Samuel Thibault and Antonio Filgueras and Miquel Vidal and Carlos Alvarez and Xavier Martorell and Ariel Oleksiak and Michal Kulczewski and Alessandro Lonardo and Piero Vicini and Francesco Lo Cicero and Francesco Simula and Andrea Biagioni and Paolo Cretaro and Ottorino Frezza and Pier Stanislao Paolucci and Matteo Turisini and Francesco Giacomini and Tommaso Boccali and Simone Montangero and Roberto Ammendola},
    year = {2021},
    month = aug,
    booktitle = {Proc. of the 24th Euromicro Conference on Digital System Design ({DSD})},
    publisher = {IEEE},
    address = {Palermo, Italy},
    doi = {10.1109/DSD53832.2021.00051},
    abstract = {To achieve high performance and high energy efficiency on near-future exascale computing systems, three key technology gaps needs to be bridged. These gaps include: energy efficiency and thermal control; extreme computation efficiency via HW acceleration and new arithmetics; methods and tools for seamless integration of reconfigurable accelerators in heterogeneous HPC multi-node platforms. TEXTAROSSA aims at tackling this gap through a co-design approach to heterogeneous HPC solutions, supported by the integration and extension of HW and SW IPs, programming models and tools derived from European research.},
    date-added = {2021-09-04 12:07:42 +0200},
    date-modified = {2021-09-04 12:23:41 +0200},
    bdsk-url-1 = {https://doi.org/10.1109/DSD53832.2021.00051},
    keywords = {textarossa, streamflow}
    }

  • A. Ghiassi, R. Birke, R. Han, and L. Y. Chen, "LABELNET: Recovering Noisy Labels," in International Joint Conference on Neural Networks (IJCNN), 2021, p. 1–8. doi:10.1109/IJCNN52387.2021.9533562
    [BibTeX] [Abstract] [Download PDF]

    Today's available datasets in the wild, e.g., from social media and open platforms, present tremendous opportunities and challenges for deep learning, as there is a significant portion of tagged images, but often with noisy, i.e. erroneous, labels. Recent studies improve the robustness of deep models against noisy labels without the knowledge of true labels. In this paper, we advocate to derive a stronger classifier which proactively makes use of the noisy labels in addition to the original images - turning noisy labels into learning features. To such an end, we propose a novel framework, LABELNET, composed of Amateur and Expert, which iteratively learn from each other. Amateur is a regular image classifier trained by the feedback of Expert, which imitates how human experts would correct the predicted labels from Amateur using the noise pattern learnt from the knowledge of both the noisy and ground truth labels. The trained Amateur and Expert proactively leverage the images and their noisy labels to infer image classes. Our empirical evaluations on noisy versions of MNIST, CIFAR-10, CIFAR-100 and real-world data of Clothing1M show that the proposed model can achieve robust classification against a wide range of noise ratios and with as little as 20-50% training data, compared to state-of-the-art deep models that solely focus on distilling the impact of noisy labels.

    @inproceedings{ijcnn-ghiassi21,
    author = {Ghiassi, Amirmasoud and Birke, Robert and Han, Rui and Chen, Lydia Y.},
    title = {{LABELNET:} Recovering Noisy Labels},
    booktitle = {International Joint Conference on Neural Networks ({IJCNN})},
    publisher = {{IEEE}},
    year = {2021},
    month = jul,
    pages = {1--8},
    doi = {10.1109/IJCNN52387.2021.9533562},
    url = {https://doi.org/10.1109/IJCNN52387.2021.9533562},
    bdsk-url-1 = {https://doi.org/10.1109/IJCNN52387.2021.9533562},
    abstract = {Today's available datasets in the wild, e.g., from social media and open platforms, present tremendous opportunities and challenges for deep learning, as there is a significant portion of tagged images, but often with noisy, i.e. erroneous, labels. Recent studies improve the robustness of deep models against noisy labels without the knowledge of true labels. In this paper, we advocate to derive a stronger classifier which proactively makes use of the noisy labels in addition to the original images - turning noisy labels into learning features. To such an end, we propose a novel framework, LABELNET, composed of Amateur and Expert, which iteratively learn from each other. Amateur is a regular image classifier trained by the feedback of Expert, which imitates how human experts would correct the predicted labels from Amateur using the noise pattern learnt from the knowledge of both the noisy and ground truth labels. The trained Amateur and Expert proactively leverage the images and their noisy labels to infer image classes. Our empirical evaluations on noisy versions of MNIST, CIFAR-10, CIFAR-100 and real-world data of Clothing1M show that the proposed model can achieve robust classification against a wide range of noise ratios and with as little as 20-50% training data, compared to state-of-the-art deep models that solely focus on distilling the impact of noisy labels.}
    }

  • M. Aldinucci, G. Agosta, A. Andreini, C. A. Ardagna, A. Bartolini, A. Cilardo, B. Cosenza, M. Danelutto, R. Esposito, W. Fornaciari, R. Giorgi, D. Lengani, R. Montella, M. Olivieri, S. Saponara, D. Simoni, and M. Torquati, "The Italian research on HPC key technologies across EuroHPC," in ACM Computing Frontiers, Virtual Conference, Italy, 2021, p. 279–286. doi:10.1145/3457388.3458508
    [BibTeX] [Abstract] [Download PDF]

    High-Performance Computing (HPC) is one of the strategic priorities for research and innovation worldwide due to its relevance for industrial and scientific applications. We envision HPC as composed of three pillars: infrastructures, applications, and key technologies and tools. While infrastructures are by construction centralized in large-scale HPC centers, and applications are generally within the purview of domain-specific organizations, key technologies fall in an intermediate case where coordination is needed, but design and development are often decentralized. A large group of Italian researchers has started a dedicated laboratory within the National Interuniversity Consortium for Informatics (CINI) to address this challenge. The laboratory, albeit young, has managed to succeed in its first attempts to propose a coordinated approach to HPC research within the EuroHPC Joint Undertaking, participating in the calls 2019-20 to five successful proposals for an aggregate total cost of 95M Euro. In this paper, we outline the working group's scope and goals and provide an overview of the five funded projects, which become fully operational in March 2021, and cover a selection of key technologies provided by the working group partners, highlighting their usage development within the projects.

    @inproceedings{21:CINI_acm_CF,
      author        = {Marco Aldinucci and Giovanni Agosta and Antonio Andreini and Claudio A. Ardagna and Andrea Bartolini and Alessandro Cilardo and Biagio Cosenza and Marco Danelutto and Roberto Esposito and William Fornaciari and Roberto Giorgi and Davide Lengani and Raffaele Montella and Mauro Olivieri and Sergio Saponara and Daniele Simoni and Massimo Torquati},
      title         = {The {Italian} research on {HPC} key technologies across {EuroHPC}},
      booktitle     = {{ACM Computing Frontiers}},
      publisher     = {ACM},
      address       = {Virtual Conference, Italy},
      pages         = {279--286},
      year          = {2021},
      month         = may,
      doi           = {10.1145/3457388.3458508},
      url           = {https://iris.unito.it/retrieve/handle/2318/1783118/744641/preprint.pdf},
      abstract      = {High-Performance Computing (HPC) is one of the strategic priorities for research and innovation worldwide due to its relevance for industrial and scientific applications. We envision HPC as composed of three pillars: infrastructures, applications, and key technologies and tools. While infrastructures are by construction centralized in large-scale HPC centers, and applications are generally within the purview of domain-specific organizations, key technologies fall in an intermediate case where coordination is needed, but design and development are often decentralized. A large group of Italian researchers has started a dedicated laboratory within the National Interuniversity Consortium for Informatics (CINI) to address this challenge. The laboratory, albeit young, has managed to succeed in its first attempts to propose a coordinated approach to HPC research within the EuroHPC Joint Undertaking, participating in the calls 2019-20 to five successful proposals for an aggregate total cost of 95M Euro. In this paper, we outline the working group's scope and goals and provide an overview of the five funded projects, which become fully operational in March 2021, and cover a selection of key technologies provided by the working group partners, highlighting their usage development within the projects.},
      keywords      = {across, admire, textarossa, eupex, eupilot},
      date-added    = {2021-03-26 15:14:04 +0100},
      date-modified = {2021-03-26 23:18:32 +0100},
      bdsk-url-1    = {https://iris.unito.it/retrieve/handle/2318/1783118/744641/preprint.pdf},
      bdsk-url-2    = {https://doi.org/10.1145/3457388.3458508},
    }

  • C. Hong, A. Ghiassi, Y. Zhou, R. Birke, and L. Y. Chen, "Online Label Aggregation: A Variational Bayesian Approach," in WWW '21: The Web Conference 2021, 2021, p. 1904–1915. doi:10.1145/3442381.3449933
    [BibTeX] [Abstract] [Download PDF]

    Noisy labeled data is more a norm than a rarity for crowd sourced contents. It is effective to distill noise and infer correct labels through aggregating results from crowd workers. To ensure the time relevance and overcome slow responses of workers, online label aggregation is increasingly requested, calling for solutions that can incrementally infer true label distribution via subsets of data items. In this paper, we propose a novel online label aggregation framework, BiLA , which employs variational Bayesian inference method and designs a novel stochastic optimization scheme for incremental training. BiLA is flexible to accommodate any generating distribution of labels by the exact computation of its posterior distribution. We also derive the convergence bound of the proposed optimizer. We compare BiLA with the state of the art based on minimax entropy, neural networks and expectation maximization algorithms, on synthetic and real-world data sets. Our evaluation results on various online scenarios show that BiLA can effectively infer the true labels, with an error rate reduction of at least 10 to 1.5 percent points for synthetic and real-world datasets, respectively.

    @inproceedings{www-hong21,
      author     = {Chi Hong and Amirmasoud Ghiassi and Yichi Zhou and Robert Birke and Lydia Y. Chen},
      title      = {Online Label Aggregation: {A} Variational Bayesian Approach},
      booktitle  = {{WWW} '21: The Web Conference 2021},
      editor     = {Jure Leskovec and Marko Grobelnik and Marc Najork and Jie Tang and Leila Zia},
      publisher  = {{ACM} / {IW3C2}},
      pages      = {1904--1915},
      year       = {2021},
      month      = apr,
      doi        = {10.1145/3442381.3449933},
      url        = {https://doi.org/10.1145/3442381.3449933},
      abstract   = {Noisy labeled data is more a norm than a rarity for crowd sourced contents. It is effective to distill noise and infer correct labels through aggregating results from crowd workers. To ensure the time relevance and overcome slow responses of workers, online label aggregation is increasingly requested, calling for solutions that can incrementally infer true label distribution via subsets of data items. In this paper, we propose a novel online label aggregation framework, BiLA , which employs variational Bayesian inference method and designs a novel stochastic optimization scheme for incremental training. BiLA is flexible to accommodate any generating distribution of labels by the exact computation of its posterior distribution. We also derive the convergence bound of the proposed optimizer. We compare BiLA with the state of the art based on minimax entropy, neural networks and expectation maximization algorithms, on synthetic and real-world data sets. Our evaluation results on various online scenarios show that BiLA can effectively infer the true labels, with an error rate reduction of at least 10 to 1.5 percent points for synthetic and real-world datasets, respectively.},
      bdsk-url-1 = {https://doi.org/10.1145/3442381.3449933},
    }

  • C. Pino, S. Palazzo, F. Trenta, F. Cordero, U. Bagci, F. Rundo, S. Battiato, D. Giordano, M. Aldinucci, and C. Spampinato, "Interpretable Deep Model for Predicting Gene-Addicted Non-Small-Cell Lung Cancer in CT Scans," in 18th IEEE Intl. Symposium on Biomedical Imaging (ISBI), Nice, France, 2021. doi:10.1109/ISBI48211.2021.9433832
    [BibTeX] [Abstract] [Download PDF]

    Genetic profiling and characterization of lung cancers have recently emerged as a new technique for targeted therapeutic treatment based on immunotherapy or molecular drugs. However, the most effective way to discover specific gene mutations through tissue biopsy has several limitations, from invasiveness to being a risky procedure. Recently, quantitative assessment of visual features from CT data has been demonstrated to be a valid alternative to biopsy for the diagnosis of gene-addicted tumors. In this paper, we present a deep model for automated lesion segmentation and classification as gene-addicted or not. The segmentation approach extends the 2D Tiramisu architecture for 3D segmentation through dense blocks and squeeze-and-excitation layers, while a multi-scale 3D CNN is used for lesion classification. We also train our model with adversarial samples, and show that this approach acts as a gradient regularizer and enhances model interpretability. We also built a dataset, the first of its nature, consisting of 73 CT scans annotated with the presence of a specific genomics profile. We test our approach on this dataset achieving a segmentation accuracy of 93.11\% (Dice score) and a classification accuracy in identifying oncogene-addicted lung tumors of 82.00\%.

    @inproceedings{21:ct:isbi,
      author     = {Carmelo Pino and Simone Palazzo and Francesca Trenta and Francesca Cordero and Ulas Bagci and Francesco Rundo and Sebastiano Battiato and Daniela Giordano and Marco Aldinucci and Concetto Spampinato},
      title      = {Interpretable Deep Model for Predicting Gene-Addicted Non-Small-Cell Lung Cancer in {CT} Scans},
      booktitle  = {18th {IEEE} Intl. Symposium on Biomedical Imaging {(ISBI)}},
      publisher  = {IEEE},
      address    = {Nice, France},
      year       = {2021},
      month      = apr,
      doi        = {10.1109/ISBI48211.2021.9433832},
      url        = {https://iris.unito.it/retrieve/handle/2318/1790376/764762/21_ISBI_smallcell.pdf},
      abstract   = {Genetic profiling and characterization of lung cancers have recently emerged as a new technique for targeted therapeutic treatment based on immunotherapy or molecular drugs. However, the most effective way to discover specific gene mutations through tissue biopsy has several limitations, from invasiveness to being a risky procedure. Recently, quantitative assessment of visual features from CT data has been demonstrated to be a valid alternative to biopsy for the diagnosis of gene-addicted tumors. In this paper, we present a deep model for automated lesion segmentation and classification as gene-addicted or not. The segmentation approach extends the 2D Tiramisu architecture for 3D segmentation through dense blocks and squeeze-and-excitation layers, while a multi-scale 3D CNN is used for lesion classification. We also train our model with adversarial samples, and show that this approach acts as a gradient regularizer and enhances model interpretability. We also built a dataset, the first of its nature, consisting of 73 CT scans annotated with the presence of a specific genomics profile. We test our approach on this dataset achieving a segmentation accuracy of 93.11\% (Dice score) and a classification accuracy in identifying oncogene-addicted lung tumors of 82.00\%.},
      keywords   = {deephealth},
      bdsk-url-1 = {https://iris.unito.it/retrieve/handle/2318/1790376/764762/21_ISBI_smallcell.pdf},
      bdsk-url-2 = {https://doi.org/10.1109/ISBI48211.2021.9433832},
    }

  • J. Galjaard, B. Cox, A. Ghiassi, L. Y. Chen, and R. Birke, "MemA: Fast Inference of Multiple Deep Models," in 19th IEEE International Conference on Pervasive Computing and Communications Workshops and other Affiliated Events, 2021, p. 281–286. doi:10.1109/PerComWorkshops51409.2021.9430952
    [BibTeX] [Abstract] [Download PDF]

    The execution of deep neural network (DNN) inference jobs on edge devices has become increasingly popular. Multiple of such inference models can concurrently analyse the on-device data, e.g. images, to extract valuable insights. Prior art focuses on low-power accelerators, compressed neural network architectures, and specialized frameworks to reduce execution time of single inference jobs on edge devices which are resource constrained. However, it is little known how different scheduling policies can further improve the runtime performance of multi-inference jobs without additional edge resources. To enable the exploration of scheduling policies, we first develop an execution framework, EdgeCaffe, which splits the DNN inference jobs by loading and execution of each network layer. We empirically characterize the impact of loading and scheduling policies on the execution time of multi-inference jobs and point out their dependency on the available memory space. We propose a novel memory-aware scheduling policy, MemA, which opportunistically interleaves the executions of different types of DNN layers based on their estimated run-time memory demands. Our evaluation on exhaustive combinations of five networks, data inputs, and memory configurations show that MemA can alleviate the degradation of execution times of multi-inference (up to 5×) under severely constrained memory compared to standard scheduling policies without affecting accuracy.

    @inproceedings{percom-galjaard21,
      author     = {Jeroen Galjaard and Bart Cox and Amirmasoud Ghiassi and Lydia Y. Chen and Robert Birke},
      title      = {{MemA}: Fast Inference of Multiple Deep Models},
      booktitle  = {19th {IEEE} International Conference on Pervasive Computing and Communications Workshops and other Affiliated Events},
      publisher  = {{IEEE}},
      pages      = {281--286},
      year       = {2021},
      month      = mar,
      doi        = {10.1109/PerComWorkshops51409.2021.9430952},
      url        = {https://doi.org/10.1109/PerComWorkshops51409.2021.9430952},
      abstract   = {The execution of deep neural network (DNN) inference jobs on edge devices has become increasingly popular. Multiple of such inference models can concurrently analyse the on-device data, e.g. images, to extract valuable insights. Prior art focuses on low-power accelerators, compressed neural network architectures, and specialized frameworks to reduce execution time of single inference jobs on edge devices which are resource constrained. However, it is little known how different scheduling policies can further improve the runtime performance of multi-inference jobs without additional edge resources. To enable the exploration of scheduling policies, we first develop an execution framework, EdgeCaffe, which splits the DNN inference jobs by loading and execution of each network layer. We empirically characterize the impact of loading and scheduling policies on the execution time of multi-inference jobs and point out their dependency on the available memory space. We propose a novel memory-aware scheduling policy, MemA, which opportunistically interleaves the executions of different types of DNN layers based on their estimated run-time memory demands. Our evaluation on exhaustive combinations of five networks, data inputs, and memory configurations show that MemA can alleviate the degradation of execution times of multi-inference (up to 5×) under severely constrained memory compared to standard scheduling policies without affecting accuracy.},
      bdsk-url-1 = {https://doi.org/10.1109/PerComWorkshops51409.2021.9430952},
    }

  • B. Cox, J. Galjaard, A. Ghiassi, R. Birke, and L. Y. Chen, "Masa: Responsive Multi-DNN Inference on the Edge," in 19th IEEE International Conference on Pervasive Computing and Communications (PerCom), 2021, p. 1–10. doi:10.1109/PERCOM50583.2021.9439111
    [BibTeX] [Abstract] [Download PDF]

    Deep neural networks (DNNs) are becoming the core components of many applications running on edge devices, especially for real time image-based analysis. Increasingly, multi-faced knowledge is extracted via executing multiple DNNs inference models, e.g., identifying objects, faces, and genders from images. The response times of multi-DNN highly affect users' quality of experience and safety as well. Different DNNs exhibit diversified resource requirements and execution patterns across layers and networks, which may easily exceed the available device memory and riskily degrade the responsiveness. In this paper, we design and implement Masa, a responsive memory-aware multi-DNN execution framework, an on-device middleware featuring on modeling inter- and intra-network dependency and leveraging complimentary memory usage of each layer. Masa can consistently ensure the average response time when deterministically and stochastically executing multiple DNN-based image analyses. We extensively evaluate Masa on three configurations of Raspberry Pi and a large set of popular DNN models triggered by different generation patterns of images. Our evaluation results show that Masa can achieve lower average response times by up to 90% on devices with small memory, i.e., 512 MB to 1 GB, compared to the state of the art multi-DNN scheduling solutions.

    @inproceedings{percom-cox21a,
    title = {{Masa}: Responsive Multi-{DNN} Inference on the Edge},
    author = {Bart Cox and Jeroen Galjaard and Amirmasoud Ghiassi and Robert Birke and Lydia Y. Chen},
    year = {2021},
    month = mar,
    booktitle = {19th {IEEE} International Conference on Pervasive Computing and Communications ({PerCom})},
    publisher = {{IEEE}},
    pages = {1--10},
    doi = {10.1109/PERCOM50583.2021.9439111},
    abstract = {Deep neural networks (DNNs) are becoming the core components of many applications running on edge devices, especially for real time image-based analysis. Increasingly, multi-faced knowledge is extracted via executing multiple DNNs inference models, e.g., identifying objects, faces, and genders from images. The response times of multi-DNN highly affect users' quality of experience and safety as well. Different DNNs exhibit diversified resource requirements and execution patterns across layers and networks, which may easily exceed the available device memory and riskily degrade the responsiveness. In this paper, we design and implement Masa, a responsive memory-aware multi-DNN execution framework, an on-device middleware featuring on modeling inter- and intra-network dependency and leveraging complimentary memory usage of each layer. Masa can consistently ensure the average response time when deterministically and stochastically executing multiple DNN-based image analyses. We extensively evaluate Masa on three configurations of Raspberry Pi and a large set of popular DNN models triggered by different generation patterns of images. Our evaluation results show that Masa can achieve lower average response times by up to 90% on devices with small memory, i.e., 512 MB to 1 GB, compared to the state of the art multi-DNN scheduling solutions.},
    url = {https://doi.org/10.1109/PERCOM50583.2021.9439111},
    bdsk-url-1 = {https://doi.org/10.1109/PERCOM50583.2021.9439111}
    }

  • I. Colonnelli, B. Cantalupo, I. Merelli, and M. Aldinucci, "StreamFlow: cross-breeding cloud with HPC," IEEE Transactions on Emerging Topics in Computing, vol. 9, iss. 4, p. 1723–1737, 2021. doi:10.1109/TETC.2020.3019202
    [BibTeX] [Abstract] [Download PDF]

    Workflows are among the most commonly used tools in a variety of execution environments. Many of them target a specific environment; few of them make it possible to execute an entire workflow in different environments, e.g. Kubernetes and batch clusters. We present a novel approach to workflow execution, called StreamFlow, that complements the workflow graph with the declarative description of potentially complex execution environments, and that makes it possible the execution onto multiple sites not sharing a common data space. StreamFlow is then exemplified on a novel bioinformatics pipeline for single cell transcriptomic data analysis workflow.

    @article{20Lstreamflow:tetc,
    title = {{StreamFlow}: cross-breeding cloud with {HPC}},
    author = {Iacopo Colonnelli and Barbara Cantalupo and Ivan Merelli and Marco Aldinucci},
    year = {2021},
    journal = {{IEEE} Transactions on Emerging Topics in Computing},
    volume = {9},
    pages = {1723--1737},
    doi = {10.1109/TETC.2020.3019202},
    abstract = {Workflows are among the most commonly used tools in a variety of execution environments. Many of them target a specific environment; few of them make it possible to execute an entire workflow in different environments, e.g. Kubernetes and batch clusters. We present a novel approach to workflow execution, called StreamFlow, that complements the workflow graph with the declarative description of potentially complex execution environments, and that makes it possible the execution onto multiple sites not sharing a common data space. StreamFlow is then exemplified on a novel bioinformatics pipeline for single cell transcriptomic data analysis workflow.},
    date-added = {2020-08-27 09:29:49 +0200},
    date-modified = {2020-08-27 09:36:33 +0200},
    number = {4},
    url = {https://arxiv.org/pdf/2002.01558},
    bdsk-url-1 = {https://arxiv.org/pdf/2002.01558},
    bdsk-url-2 = {https://doi.org/10.1109/TETC.2020.3019202},
    keywords = {deephealth, hpc4ai, streamflow}
    }

  • R. Birke, J. F. Pérez, Z. Qiu, M. Björkqvist, and L. Y. Chen, "sPARE: Partial Replication for Multi-Tier Applications in the Cloud," IEEE Trans. Serv. Comput., vol. 14, iss. 2, p. 574–588, 2021. doi:10.1109/TSC.2017.2780845
    [BibTeX] [Abstract] [Download PDF]

    Offering consistent low latency remains a key challenge for distributed applications, especially when deployed on the cloud where virtual machines (VMs) suffer from capacity variability caused by co-located tenants. Replicating redundant requests was shown to be an effective mechanism to defend application performance from high capacity variability. While the prior art centers on single-tier systems, it still remains an open question how to design replication strategies for distributed multi-tier systems. In this paper, we design a first of its kind PArtial REplication system, sPARE, that replicates and dispatches read-only workloads for distributed multi-tier web applications. The two key components of sPARE are (i) the variability-aware replicator that coordinates the replication levels on all tiers via an iterative searching algorithm, and (ii) the replication-aware arbiter that uses a novel token-based arbitration algorithm (TAD) to dispatch requests in each tier. We evaluate sPARE on web serving and searching applications, i.e., MediaWiki and Solr, the former deployed on our private cloud and the latter on Amazon EC2. Our results based on various interference patterns and traffic loads show that sPARE is able to improve the tail latency of MediaWiki and Solr by a factor of almost 2.7x and 2.9x, respectively.

    @article{BirkePQBC21,
    title = {{sPARE}: Partial Replication for Multi-Tier Applications in the Cloud},
    author = {Robert Birke and Juan F. P{\'{e}}rez and Zhan Qiu and Mathias Bj{\"{o}}rkqvist and Lydia Y. Chen},
    year = {2021},
    journal = {{IEEE} Trans. Serv. Comput.},
    volume = {14},
    pages = {574--588},
    doi = {10.1109/TSC.2017.2780845},
    abstract = {Offering consistent low latency remains a key challenge for distributed applications, especially when deployed on the cloud where virtual machines (VMs) suffer from capacity variability caused by co-located tenants. Replicating redundant requests was shown to be an effective mechanism to defend application performance from high capacity variability. While the prior art centers on single-tier systems, it still remains an open question how to design replication strategies for distributed multi-tier systems. In this paper, we design a first of its kind PArtial REplication system, sPARE, that replicates and dispatches read-only workloads for distributed multi-tier web applications. The two key components of sPARE are (i) the variability-aware replicator that coordinates the replication levels on all tiers via an iterative searching algorithm, and (ii) the replication-aware arbiter that uses a novel token-based arbitration algorithm (TAD) to dispatch requests in each tier. We evaluate sPARE on web serving and searching applications, i.e., MediaWiki and Solr, the former deployed on our private cloud and the latter on Amazon EC2. Our results based on various interference patterns and traffic loads show that sPARE is able to improve the tail latency of MediaWiki and Solr by a factor of almost 2.7x and 2.9x, respectively.},
    number = {2},
    url = {https://doi.org/10.1109/TSC.2017.2780845},
    bdsk-url-1 = {https://doi.org/10.1109/TSC.2017.2780845}
    }

  • Z. Zhao, R. Birke, R. Han, B. Robu, S. Bouchenak, S. B. Mokhtar, and L. Y. Chen, "Enhancing Robustness of On-Line Learning Models on Highly Noisy Data," IEEE Trans. Dependable Secur. Comput., vol. 18, iss. 5, p. 2177–2192, 2021. doi:10.1109/TDSC.2021.3063947
    [BibTeX] [Abstract] [Download PDF]

    Classification algorithms have been widely adopted to detect anomalies for various systems, e.g., IoT, cloud and face recognition, under the common assumption that the data source is clean, i.e., features and labels are correctly set. However, data collected from the wild can be unreliable due to careless annotations or malicious data transformation for incorrect anomaly detection. In this article, we extend a two-layer on-line data selection framework: Robust Anomaly Detector (RAD) with a newly designed ensemble prediction where both layers contribute to the final anomaly detection decision. To adapt to the on-line nature of anomaly detection, we consider additional features of conflicting opinions of classifiers, repetitive cleaning, and oracle knowledge. We on-line learn from incoming data streams and continuously cleanse the data, so as to adapt to the increasing learning capacity from the larger accumulated data set. Moreover, we explore the concept of oracle learning that provides additional information of true labels for difficult data points. We specifically focus on three use cases, (i) detecting 10 classes of IoT attacks, (ii) predicting 4 classes of task failures of big data jobs, and (iii) recognising 100 celebrities faces. Our evaluation results show that RAD can robustly improve the accuracy of anomaly detection, to reach up to 98.95 percent for IoT device attacks (i.e., +7%), up to 85.03 percent for cloud task failures (i.e., +14%) under 40 percent label noise, and for its extension, it can reach up to 77.51 percent for face recognition (i.e., +39%) under 30 percent label noise. The proposed RAD and its extensions are general and can be applied to different anomaly detection algorithms.

    @article{ZhaoBHRBMC21,
      author     = {Zilong Zhao and Robert Birke and Rui Han and Bogdan Robu and Sara Bouchenak and Sonia Ben Mokhtar and Lydia Y. Chen},
      title      = {Enhancing Robustness of On-Line Learning Models on Highly Noisy Data},
      journal    = {{IEEE} Trans. Dependable Secur. Comput.},
      volume     = {18},
      number     = {5},
      pages      = {2177--2192},
      year       = {2021},
      doi        = {10.1109/TDSC.2021.3063947},
      url        = {https://doi.org/10.1109/TDSC.2021.3063947},
      abstract   = {Classification algorithms have been widely adopted to detect anomalies for various systems, e.g., IoT, cloud and face recognition, under the common assumption that the data source is clean, i.e., features and labels are correctly set. However, data collected from the wild can be unreliable due to careless annotations or malicious data transformation for incorrect anomaly detection. In this article, we extend a two-layer on-line data selection framework: Robust Anomaly Detector (RAD) with a newly designed ensemble prediction where both layers contribute to the final anomaly detection decision. To adapt to the on-line nature of anomaly detection, we consider additional features of conflicting opinions of classifiers, repetitive cleaning, and oracle knowledge. We on-line learn from incoming data streams and continuously cleanse the data, so as to adapt to the increasing learning capacity from the larger accumulated data set. Moreover, we explore the concept of oracle learning that provides additional information of true labels for difficult data points. We specifically focus on three use cases, (i) detecting 10 classes of IoT attacks, (ii) predicting 4 classes of task failures of big data jobs, and (iii) recognising 100 celebrities faces. Our evaluation results show that RAD can robustly improve the accuracy of anomaly detection, to reach up to 98.95 percent for IoT device attacks (i.e., +7%), up to 85.03 percent for cloud task failures (i.e., +14%) under 40 percent label noise, and for its extension, it can reach up to 77.51 percent for face recognition (i.e., +39%) under 30 percent label noise. The proposed RAD and its extensions are general and can be applied to different anomaly detection algorithms.},
      bdsk-url-1 = {https://doi.org/10.1109/TDSC.2021.3063947},
    }

  • M. Pennisi, I. Kavasidis, C. Spampinato, V. Schinina, S. Palazzo, F. P. Salanitri, G. Bellitto, F. Rundo, M. Aldinucci, M. Cristofaro, and others, "An Explainable AI System for Automated COVID-19 Assessment and Lesion Categorization from CT-scans," Artificial Intelligence in Medicine, p. 102114, 2021. doi:10.1016/j.artmed.2021.102114
    [BibTeX] [Abstract] [Download PDF]

    COVID-19 infection caused by SARS-CoV-2 pathogen has been a catastrophic pandemic outbreak all over the world, with exponential increasing of confirmed cases and, unfortunately, deaths. In this work we propose an AI-powered pipeline, based on the deep-learning paradigm, for automated COVID-19 detection and lesion categorization from CT scans. We first propose a new segmentation module aimed at automatically identifying lung parenchyma and lobes. Next, we combine the segmentation network with classification networks for COVID-19 identification and lesion categorization. We compare the model's classification results with those obtained by three expert radiologists on a dataset of 166 CT scans. Results showed a sensitivity of 90.3\% and a specificity of 93.5\% for COVID-19 detection, at least on par with those yielded by the expert radiologists, and an average lesion categorization accuracy of about 84\%. Moreover, a significant role is played by prior lung and lobe segmentation, that allowed us to enhance classification performance by over 6 percent points. The interpretation of the trained AI models reveals that the most significant areas for supporting the decision on COVID-19 identification are consistent with the lesions clinically associated to the virus, i.e., crazy paving, consolidation and ground glass. This means that the artificial models are able to discriminate a positive patient from a negative one (both controls and patients with interstitial pneumonia tested negative to COVID) by evaluating the presence of those lesions into CT scans. Finally, the AI models are integrated into a user-friendly GUI to support AI explainability for radiologists, which is publicly available at http://perceivelab.com/covid-ai. 
The whole AI system is unique since, to the best of our knowledge, it is the first AI-based software, publicly available, that attempts to explain to radiologists what information is used by AI methods for making decisions and that proactively involves them in the decision loop to further improve the COVID-19 understanding.

    @article{pennisi2021explainable,
    title = {An Explainable {AI} System for Automated {COVID-19} Assessment and Lesion Categorization from {CT-scans}},
    author = {Pennisi, Matteo and Kavasidis, Isaak and Spampinato, Concetto and Schinina, Vincenzo and Palazzo, Simone and Salanitri, Federica Proietto and Bellitto, Giovanni and Rundo, Francesco and Aldinucci, Marco and Cristofaro, Massimo and others},
    year = {2021},
    journal = {Artificial Intelligence in Medicine},
    publisher = {Elsevier},
    pages = {102114},
    doi = {10.1016/j.artmed.2021.102114},
    abstract = {COVID-19 infection caused by SARS-CoV-2 pathogen has been a catastrophic pandemic outbreak all over the world, with exponential increasing of confirmed cases and, unfortunately, deaths. In this work we propose an AI-powered pipeline, based on the deep-learning paradigm, for automated COVID-19 detection and lesion categorization from CT scans. We first propose a new segmentation module aimed at automatically identifying lung parenchyma and lobes. Next, we combine the segmentation network with classification networks for COVID-19 identification and lesion categorization. We compare the model's classification results with those obtained by three expert radiologists on a dataset of 166 CT scans. Results showed a sensitivity of 90.3\% and a specificity of 93.5\% for COVID-19 detection, at least on par with those yielded by the expert radiologists, and an average lesion categorization accuracy of about 84\%. Moreover, a significant role is played by prior lung and lobe segmentation, that allowed us to enhance classification performance by over 6 percent points. The interpretation of the trained AI models reveals that the most significant areas for supporting the decision on COVID-19 identification are consistent with the lesions clinically associated to the virus, i.e., crazy paving, consolidation and ground glass. This means that the artificial models are able to discriminate a positive patient from a negative one (both controls and patients with interstitial pneumonia tested negative to COVID) by evaluating the presence of those lesions into CT scans. Finally, the AI models are integrated into a user-friendly GUI to support AI explainability for radiologists, which is publicly available at http://perceivelab.com/covid-ai. 
The whole AI system is unique since, to the best of our knowledge, it is the first AI-based software, publicly available, that attempts to explain to radiologists what information is used by AI methods for making decisions and that proactively involves them in the decision loop to further improve the COVID-19 understanding.},
    keywords = {deephealth},
    url = {https://iris.unito.it/retrieve/handle/2318/1792619/770952/2021_COVID_AIM_preprint.pdf},
    bdsk-url-1 = {https://iris.unito.it/retrieve/handle/2318/1792619/770952/2021_COVID_AIM_preprint.pdf},
    bdsk-url-2 = {https://doi.org/10.1016/j.artmed.2021.102114}
    }

  • I. Lanese, D. Medić, and C. A. Mezzina, "Static versus dynamic reversibility in CCS," Acta Informatica, vol. 58, p. 1–34, 2021. doi:10.1007/s00236-019-00346-6
    [BibTeX] [Abstract] [Download PDF]

    The notion of reversible computing is attracting interest because of its applications in diverse fields, in particular the study of programming abstractions for fault tolerant systems. Most computational models are not naturally reversible since computation causes loss of information, and history information must be stored to enable reversibility. In the literature, two approaches to reverse the CCS process calculus exist, differing on how history information is kept. Reversible CCS (RCCS), proposed by Danos and Krivine, exploits dedicated stacks of memories attached to each thread. CCS with Keys (CCSK), proposed by Phillips and Ulidowski, makes CCS operators static so that computation does not cause information loss. In this paper we show that RCCS and CCSK are equivalent in terms of LTS isomorphism.

    @article{21:journals:LaneseMM21,
    title = {Static versus dynamic reversibility in {CCS}},
    author = {Ivan Lanese and Doriana Medi{\'c} and Claudio Antares Mezzina},
    year = {2021},
    journal = {Acta Informatica},
    volume = {58},
    pages = {1--34},
    doi = {10.1007/s00236-019-00346-6},
    abstract = {The notion of reversible computing is attracting interest because of its applications in diverse fields, in particular the study of programming abstractions for fault tolerant systems. Most computational models are not naturally reversible since computation causes loss of information, and history information must be stored to enable reversibility. In the literature, two approaches to reverse the CCS process calculus exist, differing on how history information is kept. Reversible CCS (RCCS), proposed by Danos and Krivine, exploits dedicated stacks of memories attached to each thread. CCS with Keys (CCSK), proposed by Phillips and Ulidowski, makes CCS operators static so that computation does not cause information loss. In this paper we show that RCCS and CCSK are equivalent in terms of LTS isomorphism.},
    keywords = {semantics},
    url = {https://doi.org/10.1007/s00236-019-00346-6},
    bdsk-url-1 = {https://doi.org/10.1007/s00236-019-00346-6}
    }

  • M. Aldinucci, High-performance computing and AI team up for COVID-19 diagnostic imaging, 2021.
    [BibTeX] [Abstract] [Download PDF]

    The Confederation of Laboratories for Artificial Intelligence Research in Europe (CLAIRE) taskforce on AI & COVID-19 supported the creation of a research group focused on AI-assisted diagnosis of COVID-19 pneumonia. The first results demonstrate the great potential of AI-assisted diagnostic imaging. Furthermore, the impact of the taskforce work is much larger, and it embraces the cross-fertilisation of artificial intelligence (AI) and high-performance computing (HPC): a partnership with rocketing potential for many scientific domains.

    @misc{21:covid:aihub,
    title = {High-performance computing and {AI} team up for {COVID-19} diagnostic imaging},
    author = {Marco Aldinucci},
    year = {2021},
    month = jan,
    note = {(magazine)},
    abstract = {The Confederation of Laboratories for Artificial Intelligence Research in Europe (CLAIRE) taskforce on AI \& COVID-19 supported the creation of a research group focused on AI-assisted diagnosis of COVID-19 pneumonia. The first results demonstrate the great potential of AI-assisted diagnostic imaging. Furthermore, the impact of the taskforce work is much larger, and it embraces the cross-fertilisation of artificial intelligence (AI) and high-performance computing (HPC): a partnership with rocketing potential for many scientific domains.},
    date-modified = {2021-04-17 00:39:37 +0200},
    howpublished = {AIhub},
    url = {https://aihub.org/2021/01/12/high-performance-computing-and-ai-team-up-for-covid-19-diagnostic-imaging/},
    bdsk-url-1 = {https://aihub.org/2021/01/12/high-performance-computing-and-ai-team-up-for-covid-19-diagnostic-imaging/},
    keywords = {deephealth, claire, hpc4ai}
    }

  • C. Aubert and D. Medić, "Explicit Identifiers and Contexts in Reversible Concurrent Calculus," in Reversible Computation - 13th International Conference, RC 2021, Virtual Event, July 7-8, 2021, Proceedings, 2021. doi:10.1007/978-3-030-79837-6_9
    [BibTeX] [Abstract] [Download PDF]

    Existing formalisms for the algebraic specification and representation of networks of reversible agents suffer some shortcomings. Despite multiple attempts, reversible declensions of the Calculus of Communicating Systems (CCS) do not offer satisfactory adaptation of notions usual in "forward-only" process algebras, such as replication or context. Existing formalisms disallow the "hot-plugging" of processes during their execution in contexts with their own past. They also assume the existence of "eternally fresh" keys or identifiers that, if implemented poorly, could result in unnecessary bottlenecks and look-ups involving all the threads. In this paper, we begin investigating those issues, by first designing a process algebra endowed with a mechanism to generate identifiers without the need to consult with the other threads. We use this calculus to recast the possible representations of non-determinism in CCS, and as a by-product establish a simple and straightforward definition of concurrency. Our reversible calculus is then proven to satisfy expected properties. We also observe that none of the reversible bisimulations defined thus far are congruences under our notion of "reversible" contexts.

    @inproceedings{21:RC:AubertM21,
    title = {Explicit Identifiers and Contexts in Reversible Concurrent Calculus},
    author = {Cl{\'{e}}ment Aubert and Doriana Medi{\'c}},
    year = {2021},
    booktitle = {Reversible Computation - 13th International Conference, {RC} 2021, Virtual Event, July 7-8, 2021, Proceedings},
    publisher = {Springer},
    doi = {10.1007/978-3-030-79837-6_9},
    abstract = {Existing formalisms for the algebraic specification and representation of networks of reversible agents suffer some shortcomings. Despite multiple attempts, reversible declensions of the Calculus of Communicating Systems (CCS) do not offer satisfactory adaptation of notions usual in ``forward-only'' process algebras, such as replication or context. Existing formalisms disallow the ``hot-plugging'' of processes during their execution in contexts with their own past. They also assume the existence of ``eternally fresh'' keys or identifiers that, if implemented poorly, could result in unnecessary bottlenecks and look-ups involving all the threads. In this paper, we begin investigating those issues, by first designing a process algebra endowed with a mechanism to generate identifiers without the need to consult with the other threads. We use this calculus to recast the possible representations of non-determinism in CCS, and as a by-product establish a simple and straightforward definition of concurrency. Our reversible calculus is then proven to satisfy expected properties. We also observe that none of the reversible bisimulations defined thus far are congruences under our notion of ``reversible'' contexts.},
    bdsk-url-1 = {https://doi.org/10.1007/978-3-030-79837-6_9},
    keywords = {semantics},
    url = {https://doi.org/10.1007/978-3-030-79837-6_9}
    }

  • I. Colonnelli, B. Cantalupo, R. Esposito, M. Pennisi, C. Spampinato, and M. Aldinucci, "HPC Application Cloudification: The StreamFlow Toolkit," in 12th Workshop on Parallel Programming and Run-Time Management Techniques for Many-core Architectures and 10th Workshop on Design Tools and Architectures for Multicore Embedded Computing Platforms (PARMA-DITAM 2021), Dagstuhl, Germany, 2021, p. 5:1–5:13. doi:10.4230/OASIcs.PARMA-DITAM.2021.5
    [BibTeX] [Abstract] [Download PDF]

    Finding an effective way to improve accessibility to High-Performance Computing facilities, still anchored to SSH-based remote shells and queue-based job submission mechanisms, is an open problem in computer science. This work advocates a cloudification of HPC applications through a cluster-as-accelerator pattern, where computationally demanding portions of the main execution flow hosted on a Cloud infrastructure can be offloaded to HPC environments to speed them up. We introduce StreamFlow, a novel Workflow Management System that supports such a design pattern and makes it possible to run the steps of a standard workflow model on independent processing elements with no shared storage. We validated the proposed approach's effectiveness on the CLAIRE COVID-19 universal pipeline, i.e. a reproducible workflow capable of automating the comparison of (possibly all) state-of-the-art pipelines for the diagnosis of COVID-19 interstitial pneumonia from CT scans images based on Deep Neural Networks (DNNs).

    @inproceedings{colonnelli_et_al:OASIcs.PARMA-DITAM.2021.5,
    title = {{HPC} Application Cloudification: The {StreamFlow} Toolkit},
    author = {Colonnelli, Iacopo and Cantalupo, Barbara and Esposito, Roberto and Pennisi, Matteo and Spampinato, Concetto and Aldinucci, Marco},
    year = {2021},
    booktitle = {12th Workshop on Parallel Programming and Run-Time Management Techniques for Many-core Architectures and 10th Workshop on Design Tools and Architectures for Multicore Embedded Computing Platforms (PARMA-DITAM 2021)},
    publisher = {Schloss Dagstuhl -- Leibniz-Zentrum f{\"u}r Informatik},
    address = {Dagstuhl, Germany},
    series = {Open Access Series in Informatics (OASIcs)},
    volume = {88},
    pages = {5:1--5:13},
    doi = {10.4230/OASIcs.PARMA-DITAM.2021.5},
    isbn = {978-3-95977-181-8},
    issn = {2190-6807},
    abstract = {Finding an effective way to improve accessibility to High-Performance Computing facilities, still anchored to SSH-based remote shells and queue-based job submission mechanisms, is an open problem in computer science. This work advocates a cloudification of HPC applications through a cluster-as-accelerator pattern, where computationally demanding portions of the main execution flow hosted on a Cloud infrastructure can be offloaded to HPC environments to speed them up. We introduce StreamFlow, a novel Workflow Management System that supports such a design pattern and makes it possible to run the steps of a standard workflow model on independent processing elements with no shared storage. We validated the proposed approach's effectiveness on the CLAIRE COVID-19 universal pipeline, i.e. a reproducible workflow capable of automating the comparison of (possibly all) state-of-the-art pipelines for the diagnosis of COVID-19 interstitial pneumonia from CT scans images based on Deep Neural Networks (DNNs).},
    annote = {Projects: cloud computing, distributed computing, high-performance computing, streamflow, workflow management systems},
    editor = {Bispo, Jo\~{a}o and Cherubin, Stefano and Flich, Jos\'{e}},
    url = {https://drops.dagstuhl.de/opus/volltexte/2021/13641/pdf/OASIcs-PARMA-DITAM-2021-5.pdf},
    urn = {urn:nbn:de:0030-drops-136419},
    bdsk-url-1 = {https://drops.dagstuhl.de/opus/volltexte/2021/13641/pdf/OASIcs-PARMA-DITAM-2021-5.pdf},
    bdsk-url-2 = {https://doi.org/10.4230/OASIcs.PARMA-DITAM.2021.5},
    keywords = {deephealth, hpc4ai, streamflow}
    }

  • G. Bontempi, R. Chavarriaga, H. De Canck, E. Girardi, H. Hoos, I. Kilbane‐Dawe, T. Ball, A. Nowé, J. Sousa, D. Bacciu, M. Aldinucci, M. De Domenico, A. Saffiotti, and M. Maratea, "The CLAIRE COVID-19 initiative: approach, experiences and recommendations," Ethics and Information Technology, 2021. doi:10.1007/s10676-020-09567-7
    [BibTeX] [Abstract] [Download PDF]

    A volunteer effort by Artificial Intelligence (AI) researchers has shown it can deliver significant research outcomes rapidly to help tackle COVID-19. Within two months, CLAIRE's self-organising volunteers delivered the World's first comprehensive curated repository of COVID-19-related datasets useful for drug-repurposing, drafted review papers on the role CT/X-ray scan analysis and robotics could play, and progressed research in other areas. Given the pace required and nature of voluntary efforts, the teams faced a number of challenges. These offer insights in how better to prepare for future volunteer scientific efforts and large scale, data-dependent AI collaborations in general. We offer seven recommendations on how to best leverage such efforts and collaborations in the context of managing future crises.

    @article{21:eit:covidclaire,
    title = {The {CLAIRE COVID-19} initiative: approach, experiences and recommendations},
    author = {Bontempi, Gianluca and Chavarriaga, Ricardo and De Canck, Hans and Girardi, Emanuela and Hoos, Holger and Kilbane-Dawe, Iarla and Ball, Tonio and Now{\'e}, Ann and Sousa, Jose and Bacciu, Davide and Aldinucci, Marco and De Domenico, Manlio and Saffiotti, Alessandro and Maratea, Marco},
    year = {2021},
    month = feb,
    journal = {Ethics and Information Technology},
    publisher = {Springer},
    doi = {10.1007/s10676-020-09567-7},
    abstract = {A volunteer effort by Artificial Intelligence (AI) researchers has shown it can deliver significant research outcomes rapidly to help tackle COVID-19. Within two months, CLAIRE's self-organising volunteers delivered the World's first comprehensive curated repository of COVID-19-related datasets useful for drug-repurposing, drafted review papers on the role CT/X-ray scan analysis and robotics could play, and progressed research in other areas. Given the pace required and nature of voluntary efforts, the teams faced a number of challenges. These offer insights in how better to prepare for future volunteer scientific efforts and large scale, data-dependent AI collaborations in general. We offer seven recommendations on how to best leverage such efforts and collaborations in the context of managing future crises.},
    date-added = {2021-04-04 01:03:31 +0200},
    date-modified = {2021-08-29 16:12:25 +0200},
    url = {https://iris.unito.it/retrieve/handle/2318/1784271/747923/Bontempi2021_Article_TheCLAIRECOVID-19InitiativeApp-3.pdf},
    bdsk-url-1 = {https://iris.unito.it/retrieve/handle/2318/1784271/747923/Bontempi2021_Article_TheCLAIRECOVID-19InitiativeApp-3.pdf},
    bdsk-url-2 = {https://doi.org/10.1007/s10676-020-09567-7},
    keywords = {claire, deephealth}
    }

  • M. Aldinucci, "L'infrastruttura necessaria per creare interoperabilità tra pubbliche amministrazioni," in L'amministrazione pubblica con i big data: da Torino un dibattito sull'intelligenza artificiale, R. Cavallo Perin, Ed., , 2021, p. 225–232.
    [BibTeX] [Abstract] [Download PDF]

    L'articolo affronta il tema dell'interoperabilità dal punto di vista informatico, ponendo l'accento sulle infrastrutture necessarie affinché la comunicazione tra sistemi informatici pubblici sia possibile. La struttura a silos su cui si basa il sistema informativo della pubblica amministrazione italiana risulta inadeguato all'approccio della big data analysis che, a contrario, richiede la piena comunicabilità tra sistemi informativi affinché il reperimento dei dati su cui condurre sperimentazioni sia quanto più facile e mirato.

    @incollection{21:bigdata:ius,
    author = {Marco Aldinucci},
    title = {L'infrastruttura necessaria per creare interoperabilit{\`a} tra pubbliche amministrazioni},
    booktitle = {L'amministrazione pubblica con i big data: da Torino un dibattito sull'intelligenza artificiale},
    editor = {Cavallo Perin, Roberto},
    chapter = {15},
    pages = {225--232},
    year = {2021},
    isbn = {9788875901806},
    abstract = {L'articolo affronta il tema dell'interoperabilit{\`a} dal punto di vista informatico, ponendo l'accento sulle infrastrutture necessarie affinch{\'e} la comunicazione tra sistemi informatici pubblici sia possibile. La struttura a silos su cui si basa il sistema informativo della pubblica amministrazione italiana risulta inadeguato all'approccio della big data analysis che, a contrario, richiede la piena comunicabilit{\`a} tra sistemi informativi affinch{\'e} il reperimento dei dati su cui condurre sperimentazioni sia quanto pi{\`u} facile e mirato.},
    url = {https://iris.unito.it/retrieve/handle/2318/1784335/748058/15.Aldinucci.pdf},
    bdsk-url-1 = {https://iris.unito.it/retrieve/handle/2318/1784335/748058/15.Aldinucci.pdf}
    }

  • D. D'Agostino, P. Liò, M. Aldinucci, and I. Merelli, "Advantages of using graph databases to explore chromatin conformation capture experiments," BMC Bioinformatics, vol. 22, iss. 2, p. 43–58, 2021. doi:10.1186/s12859-020-03937-0
    [BibTeX] [Abstract] [Download PDF]

    High-throughput sequencing Chromosome Conformation Capture (Hi-C) allows the study of DNA interactions and 3D chromosome folding at the genome-wide scale. Usually, these data are represented as matrices describing the binary contacts among the different chromosome regions. On the other hand, a graph-based representation can be advantageous to describe the complex topology achieved by the DNA in the nucleus of eukaryotic cells.

    @article{21:neohic:bmc,
    title = {Advantages of using graph databases to explore chromatin conformation capture experiments},
    author = {D'Agostino, Daniele and Li{\`o}, Pietro and Aldinucci, Marco and Merelli, Ivan},
    year = {2021},
    journal = {{BMC} Bioinformatics},
    volume = {22},
    pages = {43--58},
    doi = {10.1186/s12859-020-03937-0},
    issn = {1471-2105},
    abstract = {High-throughput sequencing Chromosome Conformation Capture (Hi-C) allows the study of DNA interactions and 3D chromosome folding at the genome-wide scale. Usually, these data are represented as matrices describing the binary contacts among the different chromosome regions. On the other hand, a graph-based representation can be advantageous to describe the complex topology achieved by the DNA in the nucleus of eukaryotic cells.},
    annote = {https://iris.unito.it/retrieve/handle/2318/1787302/756557/2021_neohic_BMC.pdf},
    da = {2021/04/26},
    date-added = {2021-04-26 09:50:50 +0200},
    date-modified = {2021-04-26 15:42:52 +0200},
    number = {2},
    url = {https://bmcbioinformatics.biomedcentral.com/track/pdf/10.1186/s12859-020-03937-0.pdf},
    bdsk-url-1 = {https://doi.org/10.1186/s12859-020-03937-0},
    keywords = {deephealth, hpc4ai}
    }

  • D. D'Agostino, I. Merelli, M. Aldinucci, and D. Cesini, "Hardware and Software Solutions for Energy-Efficient Computing in Scientific Programming," Scientific Programming, vol. 2021, p. 5514284, 2021. doi:10.1155/2021/5514284
    [BibTeX] [Abstract] [Download PDF]

    Energy consumption is one of the major issues in today's computer science, and an increasing number of scientific communities are interested in evaluating the tradeoff between time-to-solution and energy-to-solution. Despite, in the last two decades, computing which revolved around centralized computing infrastructures, such as supercomputing and data centers, the wide adoption of the Internet of Things (IoT) paradigm is currently inverting this trend due to the huge amount of data it generates, pushing computing power back to places where the data are generated—the so-called fog/edge computing. This shift towards a decentralized model requires an equivalent change in the software engineering paradigms, development environments, hardware tools, languages, and computation models for scientific programming because the local computational capabilities are typically limited and require a careful evaluation of power consumption. This paper aims to present how these concepts can be actually implemented in scientific software by presenting the state of the art of powerful, less power-hungry processors from one side and energy-aware tools and techniques from the other one.

    @article{21:dagostino:lowpower,
    title = {Hardware and Software Solutions for Energy-Efficient Computing in Scientific Programming},
    author = {D'Agostino, Daniele and Merelli, Ivan and Aldinucci, Marco and Cesini, Daniele},
    year = {2021},
    journal = {Scientific Programming},
    publisher = {Hindawi},
    volume = {2021},
    pages = {5514284},
    doi = {10.1155/2021/5514284},
    issn = {1058-9244},
    abstract = {Energy consumption is one of the major issues in today's computer science, and an increasing number of scientific communities are interested in evaluating the tradeoff between time-to-solution and energy-to-solution. Despite, in the last two decades, computing which revolved around centralized computing infrastructures, such as supercomputing and data centers, the wide adoption of the Internet of Things (IoT) paradigm is currently inverting this trend due to the huge amount of data it generates, pushing computing power back to places where the data are generated---the so-called fog/edge computing. This shift towards a decentralized model requires an equivalent change in the software engineering paradigms, development environments, hardware tools, languages, and computation models for scientific programming because the local computational capabilities are typically limited and require a careful evaluation of power consumption. This paper aims to present how these concepts can be actually implemented in scientific software by presenting the state of the art of powerful, less power-hungry processors from one side and energy-aware tools and techniques from the other one.},
    da = {2021/06/09},
    date-added = {2021-06-10 22:03:27 +0200},
    date-modified = {2021-06-10 22:46:34 +0200},
    ty = {JOUR},
    url = {https://downloads.hindawi.com/journals/sp/2021/5514284.pdf},
    bdsk-url-1 = {https://doi.org/10.1155/2021/5514284}
    }

  • M. Aldinucci, V. Cesare, I. Colonnelli, A. R. Martinelli, G. Mittone, B. Cantalupo, C. Cavazzoni, and M. Drocco, "Practical Parallelization of Scientific Applications with OpenMP, OpenACC and MPI," Journal of Parallel and Distributed Computing, vol. 157, p. 13–29, 2021. doi:10.1016/j.jpdc.2021.05.017
    [BibTeX] [Abstract] [Download PDF]

    This work aims at distilling a systematic methodology to modernize existing sequential scientific codes with a little re-designing effort, turning an old codebase into modern code, i.e., parallel and robust code. We propose a semi-automatic methodology to parallelize scientific applications designed with a purely sequential programming mindset, possibly using global variables, aliasing, random number generators, and stateful functions. We demonstrate that the same methodology works for the parallelization in the shared memory model (via OpenMP), message passing model (via MPI), and General Purpose Computing on GPU model (via OpenACC). The method is demonstrated parallelizing four real-world sequential codes in the domain of physics and material science. The methodology itself has been distilled in collaboration with MSc students of the Parallel Computing course at the University of Torino, that applied it for the first time to the project works that they presented for the final exam of the course. Every year the course hosts some special lectures from industry representatives, who present how they use parallel computing and offer codes to be parallelized.

    @article{21:jpdc:loop,
    author = {Aldinucci, Marco and Cesare, Valentina and Colonnelli, Iacopo and Martinelli, Alberto Riccardo and Mittone, Gianluca and Cantalupo, Barbara and Cavazzoni, Carlo and Drocco, Maurizio},
    title = {Practical Parallelization of Scientific Applications with {OpenMP, OpenACC and MPI}},
    journal = {Journal of Parallel and Distributed Computing},
    volume = {157},
    pages = {13--29},
    year = {2021},
    doi = {10.1016/j.jpdc.2021.05.017},
    abstract = {This work aims at distilling a systematic methodology to modernize existing sequential scientific codes with a little re-designing effort, turning an old codebase into \emph{modern} code, i.e., parallel and robust code. We propose a semi-automatic methodology to parallelize scientific applications designed with a purely sequential programming mindset, possibly using global variables, aliasing, random number generators, and stateful functions. We demonstrate that the same methodology works for the parallelization in the shared memory model (via OpenMP), message passing model (via MPI), and General Purpose Computing on GPU model (via OpenACC). The method is demonstrated parallelizing four real-world sequential codes in the domain of physics and material science. The methodology itself has been distilled in collaboration with MSc students of the Parallel Computing course at the University of Torino, that applied it for the first time to the project works that they presented for the final exam of the course. Every year the course hosts some special lectures from industry representatives, who present how they use parallel computing and offer codes to be parallelized.},
    date-added = {2021-06-10 22:05:54 +0200},
    date-modified = {2021-06-10 22:30:05 +0200},
    url = {https://iris.unito.it/retrieve/handle/2318/1792557/770851/Practical_Parallelization_JPDC_preprint.pdf},
    bdsk-url-1 = {https://iris.unito.it/retrieve/handle/2318/1792557/770851/Practical_Parallelization_JPDC_preprint.pdf},
    bdsk-url-2 = {https://doi.org/10.1016/j.jpdc.2021.05.017},
    keywords = {saperi}
    }

  • Y. Arfat, G. Mittone, R. Esposito, B. Cantalupo, G. M. De Ferrari, and M. Aldinucci, "A Review of Machine Learning for Cardiology," Minerva cardiology and angiology, 2021. doi:10.23736/s2724-5683.21.05709-4
    [BibTeX] [Abstract] [Download PDF]

    This paper reviews recent cardiology literature and reports how Artificial Intelligence Tools (specifically, Machine Learning techniques) are being used by physicians in the field. Each technique is introduced with enough details to allow the understanding of how it works and its intent, but without delving into details that do not add immediate benefits and require expertise in the field. We specifically focus on the principal Machine Learning based risk scores used in cardiovascular research. After introducing them and summarizing their assumptions and biases, we discuss their merits and shortcomings. We report on how frequently they are adopted in the field and suggest why this is the case based on our expertise in Machine Learning. We complete the analysis by reviewing how corresponding statistical approaches compare with them. Finally, we discuss the main open issues in applying Machine Learning tools to cardiology tasks, also drafting possible future directions. Despite the growing interest in these tools, we argue that there are many still underutilized techniques: while Neural Networks are slowly being incorporated in cardiovascular research, other important techniques such as Semi-Supervised Learning and Federated Learning are still underutilized. The former would allow practitioners to harness the information contained in large datasets that are only partially labeled, while the latter would foster collaboration between institutions allowing building larger and better models.

    @article{21:ai4numbers:minerva,
    title = {A Review of Machine Learning for Cardiology},
    author = {Yasir Arfat and Gianluca Mittone and Roberto Esposito and Barbara Cantalupo and Gaetano Maria {De Ferrari} and Marco Aldinucci},
    year = {2021},
    journal = {Minerva Cardiology and Angiology},
    doi = {10.23736/s2724-5683.21.05709-4},
    abstract = {This paper reviews recent cardiology literature and reports how Artificial Intelligence Tools (specifically, Machine Learning techniques) are being used by physicians in the field. Each technique is introduced with enough details to allow the understanding of how it works and its intent, but without delving into details that do not add immediate benefits and require expertise in the field. We specifically focus on the principal Machine Learning based risk scores used in cardiovascular research. After introducing them and summarizing their assumptions and biases, we discuss their merits and shortcomings. We report on how frequently they are adopted in the field and suggest why this is the case based on our expertise in Machine Learning. We complete the analysis by reviewing how corresponding statistical approaches compare with them. Finally, we discuss the main open issues in applying Machine Learning tools to cardiology tasks, also drafting possible future directions. Despite the growing interest in these tools, we argue that there are many still underutilized techniques: while Neural Networks are slowly being incorporated in cardiovascular research, other important techniques such as Semi-Supervised Learning and Federated Learning are still underutilized. The former would allow practitioners to harness the information contained in large datasets that are only partially labeled, while the latter would foster collaboration between institutions allowing building larger and better models.},
    date-added = {2021-08-09 23:00:12 +0200},
    date-modified = {2021-08-09 23:05:36 +0200},
    keywords = {deephealth, hpc4ai, learning},
    url = {https://iris.unito.it/retrieve/handle/2318/1796298/780512/21_AI4numbers-preprint.pdf},
    bdsk-url-1 = {https://iris.unito.it/retrieve/handle/2318/1796298/780512/21_AI4numbers-preprint.pdf},
    bdsk-url-2 = {https://doi.org/10.23736/s2724-5683.21.05709-4}
    }

  • O. D. Filippo, J. Kang, F. Bruno, J. Han, A. Saglietto, H. Yang, G. Patti, K. Park, R. Parma, H. Kim, L. D. Luca, H. Gwon, M. Iannaccone, W. J. Chun, G. Smolka, S. Hur, E. Cerrato, S. H. Han, C. di Mario, Y. B. Song, J. Escaned, K. H. Choi, G. Helft, J. Doh, A. T. Giachet, S. Hong, S. Muscoli, C. Nam, G. Gallone, D. Capodanno, D. Trabattoni, Y. Imori, V. Dusi, B. Cortese, A. Montefusco, F. Conrotto, I. Colonnelli, I. Sheiban, G. M. de Ferrari, B. Koo, and F. D'Ascenzo, "Benefit of Extended Dual Antiplatelet Therapy Duration in Acute Coronary Syndrome Patients Treated with Drug Eluting Stents for Coronary Bifurcation Lesions (from the BIFURCAT Registry)," The American Journal of Cardiology, 2021. doi:10.1016/j.amjcard.2021.07.005
    [BibTeX] [Abstract] [Download PDF]

    Optimal dual antiplatelet therapy (DAPT) duration for patients undergoing percutaneous coronary intervention (PCI) for coronary bifurcations is an unmet issue. The BIFURCAT registry was obtained by merging two registries on coronary bifurcations. Three groups were compared in a two-by-two fashion: short-term DAPT (≤ 6 months), intermediate-term DAPT (6-12 months) and extended DAPT (>12 months). Major adverse cardiac events (MACE) (a composite of all-cause death, myocardial infarction (MI), target-lesion revascularization and stent thrombosis) were the primary endpoint. Single components of MACE were the secondary endpoints. Events were appraised according to the clinical presentation: chronic coronary syndrome (CCS) versus acute coronary syndrome (ACS). 5537 patients (3231 ACS, 2306 CCS) were included. After a median follow-up of 2.1 years (IQR 0.9-2.2), extended DAPT was associated with a lower incidence of MACE compared with intermediate-term DAPT (2.8% versus 3.4%, adjusted HR 0.23 [0.1-0.54], p <0.001), driven by a reduction of all-cause death in the ACS cohort. In the CCS cohort, an extended DAPT strategy was not associated with a reduced risk of MACE. In conclusion, among real-world patients receiving PCI for coronary bifurcation, an extended DAPT strategy was associated with a reduction of MACE in ACS but not in CCS patients.

    @article{21:ajc:bifurcat,
    title = {Benefit of Extended Dual Antiplatelet Therapy Duration in Acute Coronary Syndrome Patients Treated with Drug Eluting Stents for Coronary Bifurcation Lesions (from the {BIFURCAT} Registry)},
    author = {Ovidio De Filippo and Jeehoon Kang and Francesco Bruno and Jung-Kyu Han and Andrea Saglietto and Han-Mo Yang and Giuseppe Patti and Kyung-Woo Park and Radoslaw Parma and Hyo-Soo Kim and Leonardo De Luca and Hyeon-Cheol Gwon and Mario Iannaccone and Woo Jung Chun and Grzegorz Smolka and Seung-Ho Hur and Enrico Cerrato and Seung Hwan Han and Carlo di Mario and Young Bin Song and Javier Escaned and Ki Hong Choi and Gerard Helft and Joon-Hyung Doh and Alessandra Truffa Giachet and Soon-Jun Hong and Saverio Muscoli and Chang-Wook Nam and Guglielmo Gallone and Davide Capodanno and Daniela Trabattoni and Yoichi Imori and Veronica Dusi and Bernardo Cortese and Antonio Montefusco and Federico Conrotto and Iacopo Colonnelli and Imad Sheiban and Gaetano Maria de Ferrari and Bon-Kwon Koo and Fabrizio D'Ascenzo},
    year = {2021},
    journal = {The American Journal of Cardiology},
    doi = {10.1016/j.amjcard.2021.07.005},
    issn = {0002-9149},
    abstract = {Optimal dual antiplatelet therapy (DAPT) duration for patients undergoing percutaneous coronary intervention (PCI) for coronary bifurcations is an unmet issue. The BIFURCAT registry was obtained by merging two registries on coronary bifurcations. Three groups were compared in a two-by-two fashion: short-term DAPT ($\leq$ 6 months), intermediate-term DAPT (6-12 months) and extended DAPT ($>$12 months). Major adverse cardiac events (MACE) (a composite of all-cause death, myocardial infarction (MI), target-lesion revascularization and stent thrombosis) were the primary endpoint. Single components of MACE were the secondary endpoints. Events were appraised according to the clinical presentation: chronic coronary syndrome (CCS) versus acute coronary syndrome (ACS). 5537 patients (3231 ACS, 2306 CCS) were included. After a median follow-up of 2.1 years (IQR 0.9-2.2), extended DAPT was associated with a lower incidence of MACE compared with intermediate-term DAPT (2.8\% versus 3.4\%, adjusted HR 0.23 [0.1-0.54], p $<$0.001), driven by a reduction of all-cause death in the ACS cohort. In the CCS cohort, an extended DAPT strategy was not associated with a reduced risk of MACE. In conclusion, among real-world patients receiving PCI for coronary bifurcation, an extended DAPT strategy was associated with a reduction of MACE in ACS but not in CCS patients.},
    url = {https://www.sciencedirect.com/science/article/pii/S0002914921006354},
    bdsk-url-1 = {https://www.sciencedirect.com/science/article/pii/S0002914921006354},
    bdsk-url-2 = {https://doi.org/10.1016/j.amjcard.2021.07.005}
    }

  • I. Colonnelli, B. Cantalupo, C. Spampinato, M. Pennisi, and M. Aldinucci, "Bringing AI pipelines onto cloud-HPC: setting a baseline for accuracy of COVID-19 diagnosis," in ENEA CRESCO in the fight against COVID-19, 2021. doi:10.5281/zenodo.5151511
    [BibTeX] [Abstract] [Download PDF]

    HPC is an enabling platform for AI. The introduction of AI workloads in the HPC applications basket has non-trivial consequences both on the way of designing AI applications and on the way of providing HPC computing. This is the leitmotif of the convergence between HPC and AI. The formalized definition of AI pipelines is one of the milestones of HPC-AI convergence. If well conducted, it allows, on the one hand, to obtain portable and scalable applications. On the other hand, it is crucial for the reproducibility of scientific pipelines. In this work, we advocate the StreamFlow Workflow Management System as a crucial ingredient to define a parametric pipeline, called ``CLAIRE COVID-19 Universal Pipeline'', which is able to explore the optimization space of methods to classify COVID-19 lung lesions from CT scans, compare them for accuracy, and therefore set a performance baseline. The universal pipeline automatizes the training of many different Deep Neural Networks (DNNs) and many different hyperparameters. It, therefore, requires a massive computing power, which is found in traditional HPC infrastructure thanks to the portability-by-design of pipelines designed with StreamFlow. Using the universal pipeline, we identified a DNN reaching over 90\% accuracy in detecting COVID-19 lesions in CT scans.

    @inproceedings{21:covi:enea,
    title = {Bringing {AI} pipelines onto cloud-{HPC}: setting a baseline for accuracy of {COVID-19} diagnosis},
    author = {Colonnelli, Iacopo and Cantalupo, Barbara and Spampinato, Concetto and Pennisi, Matteo and Aldinucci, Marco},
    year = {2021},
    booktitle = {ENEA CRESCO in the fight against COVID-19},
    publisher = {ENEA},
    doi = {10.5281/zenodo.5151511},
    abstract = {HPC is an enabling platform for AI. The introduction of AI workloads in the HPC applications basket has non-trivial consequences both on the way of designing AI applications and on the way of providing HPC computing. This is the leitmotif of the convergence between HPC and AI. The formalized definition of AI pipelines is one of the milestones of HPC-AI convergence. If well conducted, it allows, on the one hand, to obtain portable and scalable applications. On the other hand, it is crucial for the reproducibility of scientific pipelines. In this work, we advocate the StreamFlow Workflow Management System as a crucial ingredient to define a parametric pipeline, called ``CLAIRE COVID-19 Universal Pipeline'', which is able to explore the optimization space of methods to classify COVID-19 lung lesions from CT scans, compare them for accuracy, and therefore set a performance baseline. The universal pipeline automatizes the training of many different Deep Neural Networks (DNNs) and many different hyperparameters. It, therefore, requires a massive computing power, which is found in traditional HPC infrastructure thanks to the portability-by-design of pipelines designed with StreamFlow. Using the universal pipeline, we identified a DNN reaching over 90\% accuracy in detecting COVID-19 lesions in CT scans.},
    editor = {Francesco Iannone},
    url = {https://iris.unito.it/retrieve/handle/2318/1796029/779853/21_AI-pipelines_ENEA-COVID19.pdf},
    bdsk-url-1 = {https://iris.unito.it/retrieve/handle/2318/1796029/779853/21_AI-pipelines_ENEA-COVID19.pdf},
    bdsk-url-2 = {https://doi.org/10.5281/zenodo.5151511},
    keywords = {streamflow}
    }

  • M. Aldinucci, V. Cesare, I. Colonnelli, A. R. Martinelli, G. Mittone, and B. Cantalupo, "Practical Parallelizazion of a Laplace Solver with MPI," in ENEA CRESCO in the fight against COVID-19, 2021, p. 21–24.
    [BibTeX] [Abstract]

    This work exposes a practical methodology for the semi-automatic parallelization of existing code. We show how a scientific sequential code can be parallelized through our approach. The obtained parallel code is only slightly different from the starting sequential one, providing an example of how little re-designing our methodology involves. The performance of the parallelized code, executed on the CRESCO6 cluster, is then exposed and discussed. We also believe in the educational value of this approach and suggest its use as a teaching device for students.

    @inproceedings{21:laplace:enea,
    author = {Aldinucci, Marco and Cesare, Valentina and Colonnelli, Iacopo and Martinelli, Alberto Riccardo and Mittone, Gianluca and Cantalupo, Barbara},
    title = {Practical Parallelizazion of a {Laplace} Solver with {MPI}},
    booktitle = {ENEA CRESCO in the fight against COVID-19},
    editor = {Francesco Iannone},
    publisher = {ENEA},
    year = {2021},
    pages = {21--24},
    abstract = {This work exposes a practical methodology for the semi-automatic parallelization of existing code. We show how a scientific sequential code can be parallelized through our approach. The obtained parallel code is only slightly different from the starting sequential one, providing an example of how little re-designing our methodology involves. The performance of the parallelized code, executed on the CRESCO6 cluster, is then exposed and discussed. We also believe in the educational value of this approach and suggest its use as a teaching device for students.},
    keywords = {hpc4ai}
    }

  • C. Pino, G. Vecchio, M. Fronda, M. Calandri, M. Aldinucci, and C. Spampinato, "TwinLiverNet: Predicting TACE Treatment Outcome from CT scans for Hepatocellular Carcinoma using Deep Capsule Networks," in 43rd Annual International Conference of the IEEE Engineering in Medicine & Biology Society, EMBC 2021, Mexico, November 1-5, 2021, 2021, p. 3039–3043. doi:10.1109/EMBC46164.2021.9630913
    [BibTeX] [Abstract] [Download PDF]

    Predicting response to treatment plays a key role to assist radiologists in hepato-cellular carcinoma (HCC) therapy planning. The most widely used treatment for unresectable HCC is the trans-arterial chemoembolization (TACE). A complete radiological response after the first TACE is a reliable predictor of treatment favourable outcome. However, visual inspection of contrast-enhanced CT scans is time-consuming, error prone and too operator-dependent. Thus, in this paper we propose TwinLiverNet: a deep neural network that is able to predict TACE treatment outcome through learning visual cue from CT scans. TwinLiverNet, specifically, integrates 3D convolutions and capsule networks and is designed to process simultaneously late arterial and delayed phases from contrast-enhanced CTs. Experimental results carried out on a dataset consisting of 126 HCC lesions show that TwinLiverNet reaches an average accuracy of 82% in predicting complete response to TACE treatment. Furthermore, combining multiple CT phases (specifically, late arterial and delayed ones) yields a performance increase of over 12 percent points. Finally, the introduction of capsule layers into the model avoids the model to overfit, while enhancing accuracy. Clinical relevance — TwinLiverNet supports radiologists in visual inspection of CT scans to assess TACE treatment outcome, while reducing inter-operator variability.

    @inproceedings{21:DBLP:conf/embc/PinoVFCAS21,
    title = {{TwinLiverNet}: Predicting {TACE} Treatment Outcome from {CT} scans for Hepatocellular Carcinoma using Deep Capsule Networks},
    author = {Carmelo Pino and Giuseppe Vecchio and Marco Fronda and Marco Calandri and Marco Aldinucci and Concetto Spampinato},
    year = {2021},
    booktitle = {43rd Annual International Conference of the {IEEE} Engineering in Medicine {\&} Biology Society, {EMBC} 2021, Mexico, November 1-5, 2021},
    publisher = {{IEEE}},
    pages = {3039--3043},
    doi = {10.1109/EMBC46164.2021.9630913},
    abstract = {Predicting response to treatment plays a key role to assist radiologists in hepato-cellular carcinoma (HCC) therapy planning. The most widely used treatment for unresectable HCC is the trans-arterial chemoembolization (TACE). A complete radiological response after the first TACE is a reliable predictor of treatment favourable outcome. However, visual inspection of contrast-enhanced CT scans is time-consuming, error prone and too operator-dependent. Thus, in this paper we propose TwinLiverNet: a deep neural network that is able to predict TACE treatment outcome through learning visual cue from CT scans. TwinLiverNet, specifically, integrates 3D convolutions and capsule networks and is designed to process simultaneously late arterial and delayed phases from contrast-enhanced CTs. Experimental results carried out on a dataset consisting of 126 HCC lesions show that TwinLiverNet reaches an average accuracy of 82\% in predicting complete response to TACE treatment. Furthermore, combining multiple CT phases (specifically, late arterial and delayed ones) yields a performance increase of over 12 percent points. Finally, the introduction of capsule layers into the model avoids the model to overfit, while enhancing accuracy. Clinical relevance--- TwinLiverNet supports radiologists in visual inspection of CT scans to assess TACE treatment outcome, while reducing inter-operator variability.},
    bibsource = {dblp computer science bibliography, https://dblp.org},
    biburl = {https://dblp.org/rec/conf/embc/PinoVFCAS21.bib},
    timestamp = {Thu, 06 Jan 2022 07:54:56 +0100},
    url = {https://doi.org/10.1109/EMBC46164.2021.9630913},
    bdsk-url-1 = {https://doi.org/10.1109/EMBC46164.2021.9630913}
    }

  • F. D'Ascenzo, O. De Filippo, G. Gallone, G. Mittone, M. A. Deriu, M. Iannaccone, A. Ariza-Solé, C. Liebetrau, S. Manzano-Fernández, G. Quadri, T. Kinnaird, G. Campo, J. P. Simao Henriques, J. M. Hughes, A. Dominguez-Rodriguez, M. Aldinucci, U. Morbiducci, G. Patti, S. Raposeiras-Roubin, E. Abu-Assi, G. M. De Ferrari, F. Piroli, A. Saglietto, F. Conrotto, P. Omedé, A. Montefusco, M. Pennone, F. Bruno, P. P. Bocchino, G. Boccuzzi, E. Cerrato, F. Varbella, M. Sperti, S. B. Wilton, L. Velicki, I. Xanthopoulou, A. Cequier, A. Iniguez-Romo, I. Munoz Pousa, M. Cespon Fernandez, B. Caneiro Queija, R. Cobas-Paz, A. Lopez-Cuenca, A. Garay, P. F. Blanco, A. Rognoni, G. Biondi Zoccai, S. Biscaglia, I. Nunez-Gil, T. Fujii, A. Durante, X. Song, T. Kawaji, D. Alexopoulos, Z. Huczek, J. R. Gonzalez Juanatey, S. Nie, M. Kawashiri, I. Colonnelli, B. Cantalupo, R. Esposito, S. Leonardi, W. Grosso Marra, A. Chieffo, U. Michelucci, D. Piga, M. Malavolta, S. Gili, M. Mennuni, C. Montalto, L. Oltrona Visconti, and Y. Arfat, "Machine learning-based prediction of adverse events following an acute coronary syndrome (PRAISE): a modelling study of pooled datasets," The Lancet, vol. 397, iss. 10270, p. 199–207, 2021. doi:10.1016/S0140-6736(20)32519-8
    [BibTeX] [Abstract] [Download PDF]

    Background The accuracy of current prediction tools for ischaemic and bleeding events after an acute coronary syndrome (ACS) remains insufficient for individualised patient management strategies. We developed a machine learning-based risk stratification model to predict all-cause death, recurrent acute myocardial infarction, and major bleeding after ACS. Methods Different machine learning models for the prediction of 1-year post-discharge all-cause death, myocardial infarction, and major bleeding (defined as Bleeding Academic Research Consortium type 3 or 5) were trained on a cohort of 19826 adult patients with ACS (split into a training cohort [80%] and internal validation cohort [20%]) from the BleeMACS and RENAMI registries, which included patients across several continents. 25 clinical features routinely assessed at discharge were used to inform the models. The best-performing model for each study outcome (the PRAISE score) was tested in an external validation cohort of 3444 patients with ACS pooled from a randomised controlled trial and three prospective registries. Model performance was assessed according to a range of learning metrics including area under the receiver operating characteristic curve (AUC). Findings The PRAISE score showed an AUC of 0.82 (95% CI 0.78-0.85) in the internal validation cohort and 0.92 (0.90-0.93) in the external validation cohort for 1-year all-cause death; an AUC of 0.74 (0.70-0.78) in the internal validation cohort and 0.81 (0.76-0.85) in the external validation cohort for 1-year myocardial infarction; and an AUC of 0.70 (0.66-0.75) in the internal validation cohort and 0.86 (0.82-0.89) in the external validation cohort for 1-year major bleeding. Interpretation A machine learning-based approach for the identification of predictors of events after an ACS is feasible and effective. 
The PRAISE score showed accurate discriminative capabilities for the prediction of all-cause death, myocardial infarction, and major bleeding, and might be useful to guide clinical decision making.

    @article{21:lancet,
    title = {Machine learning-based prediction of adverse events following an acute coronary syndrome {(PRAISE)}: a modelling study of pooled datasets},
    author = {Fabrizio D'Ascenzo and Ovidio {De Filippo} and Guglielmo Gallone and Gianluca Mittone and Marco Agostino Deriu and Mario Iannaccone and Albert Ariza-Sol\'e and Christoph Liebetrau and Sergio Manzano-Fern\'andez and Giorgio Quadri and Tim Kinnaird and Gianluca Campo and Jose Paulo {Simao Henriques} and James M Hughes and Alberto Dominguez-Rodriguez and Marco Aldinucci and Umberto Morbiducci and Giuseppe Patti and Sergio Raposeiras-Roubin and Emad Abu-Assi and Gaetano Maria {De Ferrari} and Francesco Piroli and Andrea Saglietto and Federico Conrotto and Pierluigi Omed\'e and Antonio Montefusco and Mauro Pennone and Francesco Bruno and Pier Paolo Bocchino and Giacomo Boccuzzi and Enrico Cerrato and Ferdinando Varbella and Michela Sperti and Stephen B. Wilton and Lazar Velicki and Ioanna Xanthopoulou and Angel Cequier and Andres Iniguez-Romo and Isabel {Munoz Pousa} and Maria {Cespon Fernandez} and Berenice {Caneiro Queija} and Rafael Cobas-Paz and Angel Lopez-Cuenca and Alberto Garay and Pedro Flores Blanco and Andrea Rognoni and Giuseppe {Biondi Zoccai} and Simone Biscaglia and Ivan Nunez-Gil and Toshiharu Fujii and Alessandro Durante and Xiantao Song and Tetsuma Kawaji and Dimitrios Alexopoulos and Zenon Huczek and Jose Ramon {Gonzalez Juanatey} and Shao-Ping Nie and Masa-aki Kawashiri and Iacopo Colonnelli and Barbara Cantalupo and Roberto Esposito and Sergio Leonardi and Walter {Grosso Marra} and Alaide Chieffo and Umberto Michelucci and Dario Piga and Marta Malavolta and Sebastiano Gili and Marco Mennuni and Claudio Montalto and Luigi {Oltrona Visconti} and Yasir Arfat},
    year = {2021},
    journal = {The Lancet},
    volume = {397},
    pages = {199--207},
    doi = {10.1016/S0140-6736(20)32519-8},
    issn = {0140-6736},
    abstract = {Background The accuracy of current prediction tools for ischaemic and bleeding events after an acute coronary syndrome (ACS) remains insufficient for individualised patient management strategies. We developed a machine learning-based risk stratification model to predict all-cause death, recurrent acute myocardial infarction, and major bleeding after ACS. Methods Different machine learning models for the prediction of 1-year post-discharge all-cause death, myocardial infarction, and major bleeding (defined as Bleeding Academic Research Consortium type 3 or 5) were trained on a cohort of 19826 adult patients with ACS (split into a training cohort [80\%] and internal validation cohort [20\%]) from the BleeMACS and RENAMI registries, which included patients across several continents. 25 clinical features routinely assessed at discharge were used to inform the models. The best-performing model for each study outcome (the PRAISE score) was tested in an external validation cohort of 3444 patients with ACS pooled from a randomised controlled trial and three prospective registries. Model performance was assessed according to a range of learning metrics including area under the receiver operating characteristic curve (AUC). Findings The PRAISE score showed an AUC of 0.82 (95\% CI 0.78-0.85) in the internal validation cohort and 0.92 (0.90-0.93) in the external validation cohort for 1-year all-cause death; an AUC of 0.74 (0.70-0.78) in the internal validation cohort and 0.81 (0.76-0.85) in the external validation cohort for 1-year myocardial infarction; and an AUC of 0.70 (0.66-0.75) in the internal validation cohort and 0.86 (0.82-0.89) in the external validation cohort for 1-year major bleeding. Interpretation A machine learning-based approach for the identification of predictors of events after an ACS is feasible and effective. The PRAISE score showed accurate discriminative capabilities for the prediction of all-cause death, myocardial infarction, and major bleeding, and might be useful to guide clinical decision making.},
    date-modified = {2021-03-26 23:53:19 +0100},
    keywords = {deephealth, hpc4ai, learning},
    number = {10270},
    url = {https://www.researchgate.net/profile/James_Hughes3/publication/348501148_Machine_learning-based_prediction_of_adverse_events_following_an_acute_coronary_syndrome_PRAISE_a_modelling_study_of_pooled_datasets/links/6002a81ba6fdccdcb858b6c2/Machine-learning-based-prediction-of-adverse-events-following-an-acute-coronary-syndrome-PRAISE-a-modelling-study-of-pooled-datasets.pdf},
    bdsk-url-1 = {https://www.researchgate.net/profile/James_Hughes3/publication/348501148_Machine_learning-based_prediction_of_adverse_events_following_an_acute_coronary_syndrome_PRAISE_a_modelling_study_of_pooled_datasets/links/6002a81ba6fdccdcb858b6c2/Machine-learning-based-prediction-of-adverse-events-following-an-acute-coronary-syndrome-PRAISE-a-modelling-study-of-pooled-datasets.pdf},
    bdsk-url-2 = {https://doi.org/10.1016/S0140-6736(20)32519-8}
    }

2020

  • M. Aldinucci, Polmonite da COVID-19, diagnosi con l'intelligenza artificiale: Italia in prima fila, 2020.
    [BibTeX] [Abstract] [Download PDF]

    La Task Force su AI&COVID-19 della confederazione europea dei laboratori di ricerca sull'intelligenza artificiale (CLAIRE) ha sostenuto la creazione di un gruppo di ricerca focalizzato sulla diagnosi della polmonite da COVID assistita dall'Intelligenza Artificiale. I primi risultati sono incoraggianti

    @misc{20:covid:ag,
    title = {Polmonite da {COVID-19}, diagnosi con l'intelligenza artificiale: Italia in prima fila},
    author = {Marco Aldinucci},
    year = {2020},
    month = nov,
    note = {(magazine)},
    abstract = {La Task Force su AI\&COVID-19 della confederazione europea dei laboratori di ricerca sull'intelligenza artificiale (CLAIRE) ha sostenuto la creazione di un gruppo di ricerca focalizzato sulla diagnosi della polmonite da COVID assistita dall'Intelligenza Artificiale. I primi risultati sono incoraggianti},
    date-modified = {2021-04-17 00:42:11 +0200},
    howpublished = {Agenda Digitale},
    url = {https://www.agendadigitale.eu/sanita/polmonite-da-covid-19-allo-studio-la-diagnosi-tramite-intelligenza-artificiale-italia-in-prima-fila/},
    bdsk-url-1 = {https://www.agendadigitale.eu/sanita/polmonite-da-covid-19-allo-studio-la-diagnosi-tramite-intelligenza-artificiale-italia-in-prima-fila/},
    keywords = {deephealth, claire, hpc4ai}
    }

  • D. Medić, C. A. Mezzina, I. Phillips, and N. Yoshida, "A parametric framework for reversible π-calculi," Information and Computation, vol. 275, p. 104644, 2020. doi:10.1016/j.ic.2020.104644
    [BibTeX] [Abstract] [Download PDF]

    This paper presents a study of causality in a reversible, concurrent setting. There exist various notions of causality in Pi-calculus, which differ in the treatment of parallel extrusions of the same name. Hence, by using a parametric way of bookkeeping the order and the dependencies among extruders it is possible to map different causal semantics into the same framework. Starting from this simple observation, we present a uniform framework for reversible π-calculi that is parametric with respect to a data structure that stores information about the extrusion of a name. Different data structures yield different approaches to the parallel extrusion problem. We map three well-known causal semantics into our framework. We prove causal-consistency for the three instances of our framework. Furthermore, we prove a causal correspondence between the appropriate instances of the framework and the Boreale-Sangiorgi semantics and an operational correspondence with the reversible π-calculus causal semantics.

    @article{20:journals:MedicMPY20,
    title = {A parametric framework for reversible {\(\pi\)}-calculi},
    author = {Doriana Medi\'{c} and Claudio Antares Mezzina and Iain Phillips and Nobuko Yoshida},
    year = {2020},
    journal = {Information and Computation},
    volume = {275},
    pages = {104644},
    doi = {10.1016/j.ic.2020.104644},
    abstract = {This paper presents a study of causality in a reversible, concurrent setting. There exist various notions of causality in Pi-calculus, which differ in the treatment of parallel extrusions of the same name. Hence, by using a parametric way of bookkeeping the order and the dependencies among extruders it is possible to map different causal semantics into the same framework. Starting from this simple observation, we present a uniform framework for reversible {\(\pi\)}-calculi that is parametric with respect to a data structure that stores information about the extrusion of a name. Different data structures yield different approaches to the parallel extrusion problem. We map three well-known causal semantics into our framework. We prove causal-consistency for the three instances of our framework. Furthermore, we prove a causal correspondence between the appropriate instances of the framework and the Boreale-Sangiorgi semantics and an operational correspondence with the reversible {\(\pi\)}-calculus causal semantics.},
    keywords = {semantics},
    url = {https://doi.org/10.1016/j.ic.2020.104644},
    bdsk-url-1 = {https://doi.org/10.1016/j.ic.2020.104644}
    }

  • I. Lanese and D. Medić, "A General Approach to Derive Uncontrolled Reversible Semantics," in 31st International Conference on Concurrency Theory, CONCUR 2020, September 1-4, 2020, Vienna, Austria (Virtual Conference), 2020, p. 33:1–33:24. doi:10.4230/LIPIcs.CONCUR.2020.33
    [BibTeX] [Abstract] [Download PDF]

    Reversible computing is a paradigm where programs can execute backward as well as in the usual forward direction. Reversible computing is attracting interest due to its applications in areas as different as biochemical modelling, simulation, robotics and debugging, among others. In concurrent systems the main notion of reversible computing is called causal-consistent reversibility, and it allows one to undo an action if and only if its consequences, if any, have already been undone. This paper presents a general and automatic technique to define a causal-consistent reversible extension for given forward models. We support models defined using a reduction semantics in a specific format and consider a causality relation based on resources consumed and produced. The considered format is general enough to fit many formalisms studied in the literature on causal-consistent reversibility, notably Higher-Order π-calculus and Core Erlang, an intermediate language in the Erlang compilation. Reversible extensions of these models in the literature are ad hoc, while we build them using the same general technique. This also allows us to show in a uniform way that a number of relevant properties, causal-consistency in particular, hold in the reversible extensions we build. Our technique also allows us to go beyond the reversible models in the literature: we cover a larger fragment of Core Erlang, including remote error handling based on links, which has never been considered in the reversibility literature.

    @inproceedings{20:concur:LaneseM20,
    title = {A General Approach to Derive Uncontrolled Reversible Semantics},
    author = {Ivan Lanese and Doriana Medi\'{c}},
    year = {2020},
    booktitle = {31st International Conference on Concurrency Theory, {CONCUR} 2020, September 1-4, 2020, Vienna, Austria (Virtual Conference)},
    publisher = {Schloss Dagstuhl - Leibniz-Zentrum f{\"{u}}r Informatik},
    series = {LIPIcs},
    volume = {171},
    pages = {33:1--33:24},
    doi = {10.4230/LIPIcs.CONCUR.2020.33},
    abstract = {Reversible computing is a paradigm where programs can execute backward as well as in the usual forward direction. Reversible computing is attracting interest due to its applications in areas as different as biochemical modelling, simulation, robotics and debugging, among others. In concurrent systems the main notion of reversible computing is called causal-consistent reversibility, and it allows one to undo an action if and only if its consequences, if any, have already been undone. This paper presents a general and automatic technique to define a causal-consistent reversible extension for given forward models. We support models defined using a reduction semantics in a specific format and consider a causality relation based on resources consumed and produced. The considered format is general enough to fit many formalisms studied in the literature on causal-consistent reversibility, notably Higher-Order {\(\pi\)}-calculus and Core Erlang, an intermediate language in the Erlang compilation. Reversible extensions of these models in the literature are ad hoc, while we build them using the same general technique. This also allows us to show in a uniform way that a number of relevant properties, causal-consistency in particular, hold in the reversible extensions we build. Our technique also allows us to go beyond the reversible models in the literature: we cover a larger fragment of Core Erlang, including remote error handling based on links, which has never been considered in the reversibility literature.},
    keywords = {semantics},
    url = {https://doi.org/10.4230/LIPIcs.CONCUR.2020.33},
    bdsk-url-1 = {https://doi.org/10.4230/LIPIcs.CONCUR.2020.33}
    }

  • D. Medić, C. A. Mezzina, I. Phillips, and N. Yoshida, "Towards a Formal Account for Software Transactional Memory," in Reversible Computation - 12th International Conference, RC 2020, Oslo, Norway, July 9-10, 2020, Proceedings, 2020, p. 255–263. doi:10.1007/978-3-030-52482-1_16
    [BibTeX] [Abstract] [Download PDF]

    Software transactional memory (STM) is a concurrency control mechanism for shared memory systems. It is opposite to the lock based mechanism, as it allows multiple processes to access the same set of variables in a concurrent way. Then according to the used policy, the effect of accessing to shared variables can be committed (hence, made permanent) or undone. In this paper, we define a formal framework for describing STMs and show how with a minor variation of the rules it is possible to model two common policies for STM: reader preference and writer preference.

    @inproceedings{20:RC:MedicM0Y20,
    title = {Towards a Formal Account for Software Transactional Memory},
    author = {Doriana Medi\'{c} and Claudio Antares Mezzina and Iain Phillips and Nobuko Yoshida},
    year = {2020},
    booktitle = {Reversible Computation - 12th International Conference, {RC} 2020, Oslo, Norway, July 9-10, 2020, Proceedings},
    publisher = {Springer},
    series = {Lecture Notes in Computer Science},
    volume = {12227},
    pages = {255--263},
    doi = {10.1007/978-3-030-52482-1_16},
    abstract = {Software transactional memory (STM) is a concurrency control mechanism for shared memory systems. It is opposite to the lock based mechanism, as it allows multiple processes to access the same set of variables in a concurrent way. Then according to the used policy, the effect of accessing to shared variables can be committed (hence, made permanent) or undone. In this paper, we define a formal framework for describing STMs and show how with a minor variation of the rules it is possible to model two common policies for STM: reader preference and writer preference.},
    keywords = {semantics},
    url = {https://doi.org/10.1007/978-3-030-52482-1_16},
    bdsk-url-1 = {https://doi.org/10.1007/978-3-030-52482-1_16}
    }

  • P. Metzger, M. Cole, C. Fensch, M. Aldinucci, and E. Bini, "Enforcing Deadlines for Skeleton-based Parallel Programming," in 26th IEEE Real-Time and Embedded Technology and Applications Symposium (RTAS), Sydney, Australia, 2020. doi:10.1109/RTAS48715.2020.000-7
    [BibTeX] [Abstract] [Download PDF]

    High throughput applications with real-time guar- antees are increasingly relevant. For these applications, parallelism must be exposed to meet deadlines. Directed Acyclic Graphs (DAGs) are a popular and very general application model that can capture any possible interaction among threads. However, we argue that by constraining the application structure to a set of composable ``skeletons'', at the price of losing some generality w.r.t. DAGs, the following advantages are gained: (i) a finer model of the application enables tighter analysis, (ii) specialised scheduling policies are applicable, (iii) programming is simplified, (iv) specialised implementation techniques can be exploited transparently, and (v) the program can be automatically tuned to minimise resource usage while still meeting its hard deadlines. As a first step towards a set of real-time skeletons we conduct a case study with the job farm skeleton and the hard real- time XMOS xCore-200 microcontroller. We present an analytical framework for job farms that reduces the number of required cores by scheduling jobs in batches, while ensuring that deadlines are still met. Our experimental results demonstrate that batching reduces the minimum sustainable period by up to 22%, leading to a reduced number of required cores. The framework chooses the best parameters in 83% of cases and never selects parameters that cause deadline misses. Finally, we show that the overheads introduced by the skeleton abstraction layer are negligible.

    @inproceedings{20:farm:rtas,
    title = {Enforcing Deadlines for Skeleton-based Parallel Programming},
    author = {Paul Metzger and Murray Cole and Christian Fensch and Marco Aldinucci and Enrico Bini},
    year = {2020},
    booktitle = {26th {IEEE} Real-Time and Embedded Technology and Applications Symposium ({RTAS})},
    address = {Sydney, Australia},
    doi = {10.1109/RTAS48715.2020.000-7},
    abstract = {High throughput applications with real-time guarantees are increasingly relevant. For these applications, parallelism must be exposed to meet deadlines. Directed Acyclic Graphs (DAGs) are a popular and very general application model that can capture any possible interaction among threads. However, we argue that by constraining the application structure to a set of composable ``skeletons'', at the price of losing some generality w.r.t. DAGs, the following advantages are gained: (i) a finer model of the application enables tighter analysis, (ii) specialised scheduling policies are applicable, (iii) programming is simplified, (iv) specialised implementation techniques can be exploited transparently, and (v) the program can be automatically tuned to minimise resource usage while still meeting its hard deadlines. As a first step towards a set of real-time skeletons we conduct a case study with the job farm skeleton and the hard real-time XMOS xCore-200 microcontroller. We present an analytical framework for job farms that reduces the number of required cores by scheduling jobs in batches, while ensuring that deadlines are still met. Our experimental results demonstrate that batching reduces the minimum sustainable period by up to 22%, leading to a reduced number of required cores. The framework chooses the best parameters in 83% of cases and never selects parameters that cause deadline misses. Finally, we show that the overheads introduced by the skeleton abstraction layer are negligible.},
    date-added = {2020-02-26 23:38:11 +0100},
    date-modified = {2020-02-27 00:13:59 +0100},
    url = {https://iris.unito.it/retrieve/handle/2318/1741320/616056/20_ske_RTAS.pdf},
    bdsk-url-1 = {https://iris.unito.it/retrieve/handle/2318/1741320/616056/20_ske_RTAS.pdf},
    bdsk-url-2 = {https://doi.org/10.1109/RTAS48715.2020.000-7}
    }

  • V. Reniers, Y. Gao, R. Zhang, P. Viviani, A. Madhusudan, B. Lagaisse, S. Nikova, D. Van Landuyt, R. Lombardi, B. Preneel, and W. Joosen, "Authenticated and Auditable Data Sharing via Smart Contract," in Proceedings of the 35th ACM/SIGAPP Symposium on Applied Computing, New York, NY, USA, 2020, p. 1–8.
    [BibTeX] [Abstract]

    Our main use case features multiple companies that iteratively optimize on the architectural properties of aircraft components in a decentralized manner. In each optimization step of the so-called multi-disciplinary optimization (MDO) process, sensitive data is exchanged, and we require auditability and traceability of actions taken to assure compliance with signed legal agreements. In this paper, we present a distributed protocol that coordinates authenticated and auditable exchanges of files, leveraging a smart contract. The entire life cycle of a file exchange, including file registration, access request and key distribution, is recorded and traceable via the smart contract. Moreover, when one party raises a dispute, the smart contract can identify the dishonest party without compromising the file's confidentiality. The proposed protocol provides a simple, novel, yet efficient approach to exchange files with support for data access auditability between companies involved in a private consortium with no incentive to share files outside of the protocol. We implemented the protocol in Solidity, deployed it on a private Ethereum blockchain, and validated it within the use case of a decentralized workflow.

    @inproceedings{20:sac:blockchain,
    title = {Authenticated and Auditable Data Sharing via Smart Contract},
    author = {Reniers, Vincent and Gao, Yuan and Zhang, Ren and Viviani, Paolo and Madhusudan, Akash and Lagaisse, Bert and Nikova, Svetla and Van Landuyt, Dimitri and Lombardi, Riccardo and Preneel, Bart and Joosen, Wouter},
    year = {2020},
    booktitle = {Proceedings of the 35th ACM/SIGAPP Symposium on Applied Computing},
    publisher = {ACM},
    address = {New York, NY, USA},
    series = {SAC '20},
    pages = {1--8},
    abstract = {Our main use case features multiple companies that iteratively optimize on the architectural properties of aircraft components in a decentralized manner. In each optimization step of the so-called multi-disciplinary optimization (MDO) process, sensitive data is exchanged, and we require auditability and traceability of actions taken to assure compliance with signed legal agreements. In this paper, we present a distributed protocol that coordinates authenticated and auditable exchanges of files, leveraging a smart contract. The entire life cycle of a file exchange, including file registration, access request and key distribution, is recorded and traceable via the smart contract. Moreover, when one party raises a dispute, the smart contract can identify the dishonest party without compromising the file's confidentiality. The proposed protocol provides a simple, novel, yet efficient approach to exchange files with support for data access auditability between companies involved in a private consortium with no incentive to share files outside of the protocol. We implemented the protocol in Solidity, deployed it on a private Ethereum blockchain, and validated it within the use case of a decentralized workflow.},
    date-modified = {2021-04-24 23:21:47 +0200},
    location = {Brno, Czech Republic},
    numpages = {8}
    }

  • V. Cesare, I. Colonnelli, and M. Aldinucci, "Practical Parallelization of Scientific Applications," in Proc. of 28th Euromicro Intl. Conference on Parallel Distributed and network-based Processing (PDP), Västerås, Sweden, 2020, p. 376–384. doi:10.1109/PDP50117.2020.00064
    [BibTeX] [Abstract] [Download PDF]

    This work aims at distilling a systematic methodology to modernize existing sequential scientific codes with a limited re-designing effort, turning an old codebase into modern code, i.e., parallel and robust code. We propose an automatable methodology to parallelize scientific applications designed with a purely sequential programming mindset, thus possibly using global variables, aliasing, random number generators, and stateful functions. We demonstrate the methodology by way of an astrophysical application, where we model at the same time the kinematic profiles of 30 disk galaxies with a Monte Carlo Markov Chain (MCMC), which is sequential by definition. The parallel code exhibits a 12 times speedup on a 48-core platform.

    @inproceedings{20:looppar:pdp,
      author        = {Valentina Cesare and Iacopo Colonnelli and Marco Aldinucci},
      title         = {Practical Parallelization of Scientific Applications},
      booktitle     = {Proc. of 28th Euromicro Intl. Conference on Parallel Distributed and network-based Processing (PDP)},
      publisher     = {IEEE},
      address       = {V{\"a}ster{\aa}s, Sweden},
      year          = {2020},
      pages         = {376--384},
      doi           = {10.1109/PDP50117.2020.00064},
      url           = {https://iris.unito.it/retrieve/handle/2318/1735377/601141/2020_looppar_PDP.pdf},
      abstract      = {This work aims at distilling a systematic methodology to modernize existing sequential scientific codes with a limited re-designing effort, turning an old codebase into modern code, i.e., parallel and robust code. We propose an automatable methodology to parallelize scientific applications designed with a purely sequential programming mindset, thus possibly using global variables, aliasing, random number generators, and stateful functions. We demonstrate the methodology by way of an astrophysical application, where we model at the same time the kinematic profiles of 30 disk galaxies with a Monte Carlo Markov Chain (MCMC), which is sequential by definition. The parallel code exhibits a 12 times speedup on a 48-core platform.},
      keywords      = {hpc4ai, c3s},
      date-modified = {2020-04-05 02:21:31 +0200},
      bdsk-url-1    = {https://doi.org/10.1109/PDP50117.2020.00064},
      bdsk-url-2    = {https://iris.unito.it/retrieve/handle/2318/1735377/601141/2020_looppar_PDP.pdf}
    }

  • J. D. Garcia, J. D. del Rio, M. Aldinucci, F. Tordini, M. Danelutto, G. Mencagli, and M. Torquati, "Challenging the abstraction penalty in parallel patterns libraries: Adding FastFlow support to GrPPI," The Journal of Supercomputing, vol. 76, iss. 7, p. 5139–5159, 2020. doi:10.1007/s11227-019-02826-5
    [BibTeX] [Abstract] [Download PDF]

    In the last years, pattern-based programming has been recognized as a good practice for efficiently exploiting parallel hardware resources. Following this approach, multiple libraries have been designed for providing such high-level abstractions to ease the parallel programming. However, those libraries do not share a common interface. To pave the way, GrPPI has been designed for providing an intermediate abstraction layer between application developers and existing parallel programming frameworks like OpenMP, Intel TBB or ISO C++ threads. On the other hand, FastFlow has been adopted as an efficient object-based programming framework that may benefit from being supported as an additional GrPPI backend. However, the object-based approach presents some major challenges to be incorporated under the GrPPI type safe functional programming style. In this paper, we present the integration of FastFlow as a new GrPPI backend to demonstrate that structured parallel programming frameworks perfectly fit the GrPPI design. Additionally, we also demonstrate that GrPPI does not incur in additional overheads for providing its abstraction layer, and we study the programmability in terms of lines of code and cyclomatic complexity. In general, the presented work acts as reciprocal validation of both FastFlow (as an efficient, native structured parallel programming framework) and GrPPI (as an efficient abstraction layer on top of existing parallel programming frameworks).

    @article{19:jsupe:grppi,
    title = {Challenging the abstraction penalty in parallel patterns libraries: Adding FastFlow support to GrPPI},
    author = {Garcia, Jose Daniel and del Rio, Jose Daniel and Aldinucci, Marco and Tordini, Fabio and Danelutto, Marco and Mencagli, Gabriele and Torquati, Massimo},
    year = {2020},
    journal = {The Journal of Supercomputing},
    volume = {76},
    number = {7},
    pages = {5139--5159},
    doi = {10.1007/s11227-019-02826-5},
    abstract = {In the last years, pattern-based programming has been recognized as a good practice for efficiently exploiting parallel hardware resources. Following this approach, multiple libraries have been designed for providing such high-level abstractions to ease the parallel programming. However, those libraries do not share a common interface. To pave the way, GrPPI has been designed for providing an intermediate abstraction layer between application developers and existing parallel programming frameworks like OpenMP, Intel TBB or ISO C++ threads. On the other hand, FastFlow has been adopted as an efficient object-based programming framework that may benefit from being supported as an additional GrPPI backend. However, the object-based approach presents some major challenges to be incorporated under the GrPPI type safe functional programming style. In this paper, we present the integration of FastFlow as a new GrPPI backend to demonstrate that structured parallel programming frameworks perfectly fit the GrPPI design. Additionally, we also demonstrate that GrPPI does not incur in additional overheads for providing its abstraction layer, and we study the programmability in terms of lines of code and cyclomatic complexity. In general, the presented work acts as reciprocal validation of both FastFlow (as an efficient, native structured parallel programming framework) and GrPPI (as an efficient abstraction layer on top of existing parallel programming frameworks).},
    date-added = {2019-03-23 10:08:27 +0100},
    date-modified = {2020-11-15 19:15:18 +0100},
    url = {https://iris.unito.it/retrieve/handle/2318/1762686/744894/2020-js-grppi-postprint.pdf},
    bdsk-url-1 = {https://doi.org/10.1007/s11227-019-02826-5},
    keywords = {rephrase, fastflow}
    }

  • V. Amaral, B. Norberto, M. Goulão, M. Aldinucci, S. Benkner, A. Bracciali, P. Carreira, E. Celms, L. Correia, C. Grelck, H. Karatza, C. Kessler, P. Kilpatrick, H. Martiniano, I. Mavridis, S. Pllana, A. Respício, J. Simão, L. Veiga, and A. Visa, "Programming languages for data-Intensive HPC applications: A systematic mapping study," Parallel Computing, p. 102584, 2020. doi:10.1016/j.parco.2019.102584
    [BibTeX] [Abstract] [Download PDF]

    A major challenge in modelling and simulation is the need to combine expertise in both software technologies and a given scientific domain. When High-Performance Computing (HPC) is required to solve a scientific problem, software development becomes a problematic issue. Considering the complexity of the software for HPC, it is useful to identify programming languages that can be used to alleviate this issue. Because the existing literature on the topic of HPC is very dispersed, we performed a Systematic Mapping Study (SMS) in the context of the European COST Action cHiPSet. This literature study maps characteristics of various programming languages for data-intensive HPC applications, including category, typical user profiles, effectiveness, and type of articles. We organised the SMS in two phases. In the first phase, relevant articles are identified employing an automated keyword-based search in eight digital libraries. This lead to an initial sample of 420 papers, which was then narrowed down in a second phase by human inspection of article abstracts, titles and projects to 152 relevant articles published in the period 2006–2018. The analysis of these articles enabled us to identify 26 programming languages referred to in 33 of relevant articles. We compared the outcome of the mapping study with results of our questionnaire-based survey that involved 57 HPC experts. The mapping study and the survey revealed that the desired features of programming languages for data-intensive HPC applications are portability, performance and usability. Furthermore, we observed that the majority of the programming languages used in the context of data-intensive HPC applications are text-based general-purpose programming languages. Typically these have a steep learning curve, which makes them difficult to adopt. We believe that the outcome of this study will inspire future research and development in programming languages for data-intensive HPC applications.

    @article{20:sms:chipset,
    title = {Programming languages for data-Intensive HPC applications: A systematic mapping study},
    author = {Vasco Amaral and Beatriz Norberto and Miguel Goul{\~a}o and Marco Aldinucci and Siegfried Benkner and Andrea Bracciali and Paulo Carreira and Edgars Celms and Lu{\'\i}s Correia and Clemens Grelck and Helen Karatza and Christoph Kessler and Peter Kilpatrick and Hugo Martiniano and Ilias Mavridis and Sabri Pllana and Ana Resp{\'\i}cio and Jos{\'e} Sim{\~a}o and Lu{\'\i}s Veiga and Ari Visa},
    year = {2020},
    journal = {Parallel Computing},
    pages = {102584},
    doi = {10.1016/j.parco.2019.102584},
    issn = {0167-8191},
    abstract = {A major challenge in modelling and simulation is the need to combine expertise in both software technologies and a given scientific domain. When High-Performance Computing (HPC) is required to solve a scientific problem, software development becomes a problematic issue. Considering the complexity of the software for HPC, it is useful to identify programming languages that can be used to alleviate this issue. Because the existing literature on the topic of HPC is very dispersed, we performed a Systematic Mapping Study (SMS) in the context of the European COST Action cHiPSet. This literature study maps characteristics of various programming languages for data-intensive HPC applications, including category, typical user profiles, effectiveness, and type of articles. We organised the SMS in two phases. In the first phase, relevant articles are identified employing an automated keyword-based search in eight digital libraries. This lead to an initial sample of 420 papers, which was then narrowed down in a second phase by human inspection of article abstracts, titles and projects to 152 relevant articles published in the period 2006--2018. The analysis of these articles enabled us to identify 26 programming languages referred to in 33 of relevant articles. We compared the outcome of the mapping study with results of our questionnaire-based survey that involved 57 HPC experts. The mapping study and the survey revealed that the desired features of programming languages for data-intensive HPC applications are portability, performance and usability. Furthermore, we observed that the majority of the programming languages used in the context of data-intensive HPC applications are text-based general-purpose programming languages. Typically these have a steep learning curve, which makes them difficult to adopt. We believe that the outcome of this study will inspire future research and development in programming languages for data-intensive HPC applications.},
    date-modified = {2020-11-15 17:21:48 +0100},
    url = {https://iris.unito.it/retrieve/689605/1-s2.0-S0167819119301759-main.pdf},
    bdsk-url-1 = {http://www.sciencedirect.com/science/article/pii/S0167819119301759},
    bdsk-url-2 = {https://doi.org/10.1016/j.parco.2019.102584},
    keywords = {HPC, bigdata, chipset}
    }

  • D. D'Agostino, P. Liò, M. Aldinucci, and I. Merelli, "NeoHiC: A web application for the analysis of Hi-C data," in Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics), 2020, p. 98–107. doi:10.1007/978-3-030-63061-4_10
    [BibTeX] [Abstract] [Download PDF]

    High-throughput sequencing Chromosome Conformation Capture (Hi-C) allows the study of chromatin interactions and 3D chromosome folding on a larger scale. A graph-based multi-level representation of Hi-C data is essential for proper visualisation of the spatial pattern they represent, in particular for comparing different experiments or for re-mapping omics-data in a space-aware context. The size of the HiC data hampers the straightforward use of currently available graph visualisation tools and libraries. In this paper, we present the first version of NeoHiC, a user-friendly web application for the progressive graph visualisation of Hi-C data based on the use of the Neo4j graph database. The user could select the richness of the environment of the query gene by choosing among a large number of proximity and distance metrics.

    @inproceedings{20:neohic:cibb,
      author        = {Daniele D'Agostino and Pietro Li{\`o} and Marco Aldinucci and Ivan Merelli},
      title         = {{NeoHiC}: A web application for the analysis of {Hi-C} data},
      booktitle     = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics)},
      year          = {2020},
      volume        = {12313},
      pages         = {98--107},
      doi           = {10.1007/978-3-030-63061-4_10},
      isbn          = {978-3-030-63061-4},
      url           = {https://iris.unito.it/retrieve/handle/2318/1766001/690791/20_neohic_cibb.pdf},
      abstract      = {High-throughput sequencing Chromosome Conformation Capture (Hi-C) allows the study of chromatin interactions and 3D chromosome folding on a larger scale. A graph-based multi-level representation of Hi-C data is essential for proper visualisation of the spatial pattern they represent, in particular for comparing different experiments or for re-mapping omics-data in a space-aware context. The size of the HiC data hampers the straightforward use of currently available graph visualisation tools and libraries. In this paper, we present the first version of NeoHiC, a user-friendly web application for the progressive graph visualisation of Hi-C data based on the use of the Neo4j graph database. The user could select the richness of the environment of the query gene by choosing among a large number of proximity and distance metrics.},
      date-modified = {2021-01-07 10:59:27 +0100},
      bdsk-url-1    = {https://iris.unito.it/retrieve/handle/2318/1766001/690791/20_neohic_cibb.pdf},
      bdsk-url-2    = {https://doi.org/10.1007/978-3-030-63061-4_10}
    }

2019

  • P. Viviani, "Deep Learning at Scale with Nearest Neighbours Communications," PhD Thesis, 2019. doi:10.5281/zenodo.3516093
    [BibTeX] [Abstract] [Download PDF]

    As deep learning techniques become more and more popular, there is the need to move these applications from the data scientist's Jupyter notebook to efficient and reliable enterprise solutions. Moreover, distributed training of deep learning models will happen more and more outside the well-known borders of cloud and HPC infrastructure and will move to edge and mobile platforms. Current techniques for distributed deep learning have drawbacks in both these scenarios, limiting their long-term applicability. After a critical review of the established techniques for Data Parallel training from both a distributed computing and deep learning perspective, a novel approach based on nearest-neighbour communications is presented in order to overcome some of the issues related to mainstream approaches, such as global communication patterns. Moreover, in order to validate the proposed strategy, the Flexible Asynchronous Scalable Training (FAST) framework is introduced, which allows to apply the nearest-neighbours communications approach to a deep learning framework of choice. Finally, a relevant use-case is deployed on a medium-scale infrastructure to demonstrate both the framework and the methodology presented. Training convergence and scalability results are presented and discussed in comparison to a baseline defined by using state-of-the-art distributed training tools provided by a well-known deep learning framework.

    @phdthesis{19:dl:viviani:thesis,
      author     = {Paolo Viviani},
      title      = {Deep Learning at Scale with Nearest Neighbours Communications},
      school     = {Computer Science Department, University of Torino},
      year       = {2019},
      month      = sep,
      doi        = {10.5281/zenodo.3516093},
      url        = {https://zenodo.org/record/3516093/files/20190910_final_pdf.pdf},
      abstract   = {As deep learning techniques become more and more popular, there is the need to move these applications from the data scientist's Jupyter notebook to efficient and reliable enterprise solutions. Moreover, distributed training of deep learning models will happen more and more outside the well-known borders of cloud and HPC infrastructure and will move to edge and mobile platforms. Current techniques for distributed deep learning have drawbacks in both these scenarios, limiting their long-term applicability. After a critical review of the established techniques for Data Parallel training from both a distributed computing and deep learning perspective, a novel approach based on nearest-neighbour communications is presented in order to overcome some of the issues related to mainstream approaches, such as global communication patterns. Moreover, in order to validate the proposed strategy, the Flexible Asynchronous Scalable Training (FAST) framework is introduced, which allows to apply the nearest-neighbours communications approach to a deep learning framework of choice. Finally, a relevant use-case is deployed on a medium-scale infrastructure to demonstrate both the framework and the methodology presented. Training convergence and scalability results are presented and discussed in comparison to a baseline defined by using state-of-the-art distributed training tools provided by a well-known deep learning framework.},
      keywords   = {fortissimo},
      bdsk-url-1 = {https://zenodo.org/record/3516093/files/20190910_final_pdf.pdf},
      bdsk-url-2 = {https://doi.org/10.5281/zenodo.3516093}
    }

  • M. Aldinucci, S. Bagnasco, M. Concas, S. Lusso, S. Rabellino, D. Demarchi, and S. Vallero, "Managing a heterogeneous scientific computing cluster with cloud-like tools: ideas and experience," in European Physical Journal Web of Conferences, 2019, p. 7030. doi:10.1051/epjconf/201921407030
    [BibTeX] [Abstract] [Download PDF]

    Obtaining CPU cycles on an HPC cluster is nowadays relatively simple and sometimes even cheap for academic institutions. However, in most of the cases providers of HPC services would not allow changes on the configuration, implementation of special features or a lower-level control on the computing infrastructure, for example for testing experimental configurations. The variety of use cases proposed by several departments of the University of Torino, including ones from solid-state chemistry, computational biology, genomics and many others, called for different and sometimes conflicting configurations; furthermore, several R&D activities in the field of scientific computing, with topics ranging from GPU acceleration to Cloud Computing technologies, needed a platform to be carried out on. The Open Computing Cluster for Advanced data Manipulation (OCCAM) is a multi-purpose flexible HPC cluster designed and operated by a collaboration between the University of Torino and the Torino branch of the Istituto Nazionale di Fisica Nucleare. It is aimed at providing a flexible and reconfigurable infrastructure to cater to a wide range of different scientific computing needs, as well as a platform for R&D activities on computational technologies themselves. We describe some of the use cases that prompted the design and construction of the system, its architecture and a first characterisation of its performance by some synthetic benchmark tools and a few realistic use-case tests.

    @inproceedings{2019EPJWC.21407030A,
    title = {Managing a heterogeneous scientific computing cluster with cloud-like tools: ideas and experience},
    author = {Aldinucci, Marco and Bagnasco, Stefano and Concas, Matteo and Lusso, Stefano and Rabellino, Sergio and Demarchi, Danilo and Vallero, Sara},
    year = {2019},
    month = jul,
    booktitle = {European Physical Journal Web of Conferences},
    series = {European Physical Journal Web of Conferences},
    volume = {214},
    pages = {07030},
    doi = {10.1051/epjconf/201921407030},
    abstract = {Obtaining CPU cycles on an HPC cluster is nowadays relatively simple and sometimes even cheap for academic institutions. However, in most of the cases providers of HPC services would not allow changes on the configuration, implementation of special features or a lower-level control on the computing infrastructure, for example for testing experimental configurations. The variety of use cases proposed by several departments of the University of Torino, including ones from solid-state chemistry, computational biology, genomics and many others, called for different and sometimes conflicting configurations; furthermore, several R&D activities in the field of scientific computing, with topics ranging from GPU acceleration to Cloud Computing technologies, needed a platform to be carried out on. The Open Computing Cluster for Advanced data Manipulation (OCCAM) is a multi-purpose flexible HPC cluster designed and operated by a collaboration between the University of Torino and the Torino branch of the Istituto Nazionale di Fisica Nucleare. It is aimed at providing a flexible and reconfigurable infrastructure to cater to a wide range of different scientific computing needs, as well as a platform for R&D activities on computational technologies themselves. We describe some of the use cases that prompted the design and construction of the system, its architecture and a first characterisation of its performance by some synthetic benchmark tools and a few realistic use-case tests.},
    adsnote = {Provided by the SAO/NASA Astrophysics Data System},
    adsurl = {https://ui.adsabs.harvard.edu/abs/2019EPJWC.21407030A},
    eid = {07030},
    url = {https://iris.unito.it/retrieve/533279/epjconf_chep2018_07030.pdf},
    bdsk-url-1 = {https://iris.unito.it/retrieve/533279/epjconf_chep2018_07030.pdf},
    bdsk-url-2 = {https://doi.org/10.1051/epjconf/201921407030}
    }

  • D. Medić, "Relative expressiveness of calculi for reversible concurrency," Bull. EATCS, vol. 129, 2019.
    [BibTeX] [Abstract] [Download PDF]

    A number of formalisms have been proposed to model various approaches to reversibility and to better understand its properties and characteristics. However, the relation between these formalisms has hardly been studied. This paper examines the expressiveness of the causal-consistent reversibility in process algebras CCS and π-calculus. In particular, we show, by means of encodings, that LTSs of two reversible extensions of CCS, Reversible CCS [1] and CCS with Keys [2], are isomorphic up to some structural transformations of processes. To study different causal semantics for π-calculus, we devise a uniform framework for reversible π-calculi that is parametric with respect to a data structure that stores information about the extrusion of a name. Depending on the used data structure, different causal semantics can be obtained. We show that reversibility induced by our framework when instantiated with three different data structures is causally-consistent and prove a causal correspondence between certain causal semantics and matching instance of the framework.

    @article{19:eatcs:Medic19,
    title = {Relative expressiveness of calculi for reversible concurrency},
    author = {Doriana Medi{\'c}},
    year = {2019},
    journal = {Bull. {EATCS}},
    volume = {129},
    abstract = {A number of formalisms have been proposed to model various approaches to reversibility and to better understand its properties and characteristics. However, the relation between these formalisms has hardly been studied. This paper examines the expressiveness of the causal-consistent reversibility in process algebras CCS and \emph{{\(\pi\)}}-calculus. In particular, we show, by means of encodings, that LTSs of two reversible extensions of CCS, Reversible CCS [1] and CCS with Keys [2], are isomorphic up to some structural transformations of processes. To study different causal semantics for \emph{{\(\pi\)}}-calculus, we devise a uniform framework for reversible \emph{{\(\pi\)}}-calculi that is parametric with respect to a data structure that stores information about the extrusion of a name. Depending on the used data structure, different causal semantics can be obtained. We show that reversibility induced by our framework when instantiated with three different data structures is causally-consistent and prove a causal correspondence between certain causal semantics and matching instance of the framework.},
    keywords = {semantics},
    url = {http://bulletin.eatcs.org/index.php/beatcs/article/view/590/601},
    bdsk-url-1 = {http://bulletin.eatcs.org/index.php/beatcs/article/view/590/601}
    }

  • P. Viviani, M. Drocco, D. Baccega, I. Colonnelli, and M. Aldinucci, "Deep Learning at Scale," in Proc. of 27th Euromicro Intl. Conference on Parallel Distributed and network-based Processing (PDP), Pavia, Italy, 2019, p. 124–131. doi:10.1109/EMPDP.2019.8671552
    [BibTeX] [Abstract] [Download PDF]

    This work presents a novel approach to distributed training of deep neural networks (DNNs) that aims to overcome the issues related to mainstream approaches to data parallel training. Established techniques for data parallel training are discussed from both a parallel computing and deep learning perspective, then a different approach is presented that is meant to allow DNN training to scale while retaining good convergence properties. Moreover, an experimental implementation is presented as well as some preliminary results.

    @inproceedings{19:deeplearn:pdp,
    author        = {Paolo Viviani and Maurizio Drocco and Daniele Baccega and Iacopo Colonnelli and Marco Aldinucci},
    title         = {Deep Learning at Scale},
    booktitle     = {Proc. of 27th Euromicro Intl. Conference on Parallel Distributed and network-based Processing (PDP)},
    publisher     = {IEEE},
    address       = {Pavia, Italy},
    year          = {2019},
    pages         = {124--131},
    doi           = {10.1109/EMPDP.2019.8671552},
    url           = {https://iris.unito.it/retrieve/handle/2318/1695211/487778/19_deeplearning_PDP.pdf},
    abstract      = {This work presents a novel approach to distributed training of deep neural networks (DNNs) that aims to overcome the issues related to mainstream approaches to data parallel training. Established techniques for data parallel training are discussed from both a parallel computing and deep learning perspective, then a different approach is presented that is meant to allow DNN training to scale while retaining good convergence properties. Moreover, an experimental implementation is presented as well as some preliminary results.},
    keywords      = {machine learning},
    date-added    = {2020-01-30 10:48:12 +0100},
    date-modified = {2020-11-15 15:00:34 +0100},
    bdsk-url-1    = {https://iris.unito.it/retrieve/handle/2318/1695211/487778/19_deeplearning_PDP.pdf},
    bdsk-url-2    = {https://doi.org/10.1109/EMPDP.2019.8671552}
    }

  • I. Merelli, F. Fornari, F. Tordini, D. D'Agostino, M. Aldinucci, and D. Cesini, "Exploiting Docker containers over Grid computing for a comprehensive study of chromatin conformation in different cell types," Journal of Parallel and Distributed Computing, vol. 134, p. 116–127, 2019. doi:10.1016/j.jpdc.2019.08.002
    [BibTeX] [Abstract] [Download PDF]

    Many bioinformatic applications require to exploit the capabilities of several computational resources to effectively access and process large and distributed datasets. In this context, Grid computing has been largely used to face unprecedented challenges in Computational Biology, at the cost of complex workarounds needed to make applications successfully running. The Grid computing paradigm, in fact, has always suffered from a lack of flexibility. Although this has been partially solved by Cloud computing, the on-demand approach is way distant from the original idea of volunteering computing that boosted the Grid paradigm. A solution to outpace the impossibility of creating custom environments for running applications in Grid is represented by the containerization technology. In this paper, we describe our experience in exploiting a Docker-based approach to run in a Grid environment a novel, computationally intensive, bioinformatic application, which models the DNA spatial conformation inside the nucleus of eukaryotic cells. Results assess the feasibility of this approach in terms of performance and efforts to run large experiments.

    @article{19:merelli:jpdc,
    title = {Exploiting {Docker} containers over {Grid} computing for a comprehensive study of chromatin conformation in different cell types},
    author = {Ivan Merelli and Federico Fornari and Fabio Tordini and Daniele D'Agostino and Marco Aldinucci and Daniele Cesini},
    year = {2019},
    journal = {Journal of Parallel and Distributed Computing},
    volume = {134},
    pages = {116--127},
    doi = {10.1016/j.jpdc.2019.08.002},
    issn = {0743-7315},
    abstract = {Many bioinformatic applications require to exploit the capabilities of several computational resources to effectively access and process large and distributed datasets. In this context, Grid computing has been largely used to face unprecedented challenges in Computational Biology, at the cost of complex workarounds needed to make applications successfully running. The Grid computing paradigm, in fact, has always suffered from a lack of flexibility. Although this has been partially solved by Cloud computing, the on-demand approach is way distant from the original idea of volunteering computing that boosted the Grid paradigm. A solution to outpace the impossibility of creating custom environments for running applications in Grid is represented by the containerization technology. In this paper, we describe our experience in exploiting a Docker-based approach to run in a Grid environment a novel, computationally intensive, bioinformatic application, which models the DNA spatial conformation inside the nucleus of eukaryotic cells. Results assess the feasibility of this approach in terms of performance and efforts to run large experiments.},
    date-added = {2019-09-04 15:24:03 +0200},
    date-modified = {2019-09-04 15:25:57 +0200},
    url = {https://iris.unito.it/retrieve/handle/2318/1711684/532767/2019_Nuchart_JPDC_open.pdf},
    bdsk-url-1 = {https://iris.unito.it/retrieve/handle/2318/1711684/532767/2019_Nuchart_JPDC_open.pdf},
    bdsk-url-2 = {https://doi.org/10.1016/j.jpdc.2019.08.002},
    keywords = {bioinformatics}
    }

  • M. Aldinucci, C. Berzovini, C. Grana, M. Grangetto, L. Pireddu, and G. Zanetti, Deep Learning e calcolo ad alte prestazioni per l'elaborazione di immagini biomediche, 2019.
    [BibTeX] [Abstract]

    Il progetto DeepHealth, recentemente finanziato dalla Commissione Europea, ha come obiettivo la realizzazione di un ecosistema europeo costituito da piattaforme di calcolo ad alte prestazioni, librerie software e competenze multi-disciplinari di intelligenza artificiale, calcolo parallelo e scienze mediche per l'elaborazione e la diagnosi basata su immagini. Il contributo presenta sinteticamente le competenze e le infrastrutture nazionali coinvolte nel progetto.

    @misc{19:italia,
    title = {{Deep Learning} e calcolo ad alte prestazioni per l'elaborazione di immagini biomediche},
    author = {Marco Aldinucci and Claudio Berzovini and Costantino Grana and Marco Grangetto and Luca Pireddu and Gianluigi Zanetti},
    year = {2019},
    month = mar,
    abstract = {Il progetto DeepHealth, recentemente finanziato dalla Commissione Europea, ha come obiettivo la realizzazione di un ecosistema europeo costituito da piattaforme di calcolo ad alte prestazioni, librerie software e competenze multi-disciplinari di intelligenza artificiale, calcolo parallelo e scienze mediche per l'elaborazione e la diagnosi basata su immagini. Il contributo presenta sinteticamente le competenze e le infrastrutture nazionali coinvolte nel progetto.},
    date-added = {2019-04-06 00:17:32 +0200},
    date-modified = {2021-03-27 00:03:43 +0100},
    howpublished = {{Ital-IA}: Convegno Nazionale CINI sull'Intelligenza Artificiale},
    keywords = {hpc4ai}
    }

  • C. Grelck, E. Niewiadomska-Szynkiewicz, M. Aldinucci, A. Bracciali, and E. Larsson, "Why High-Performance Modelling and Simulation for Big Data Applications Matters," in High-Performance Modelling and Simulation for Big Data Applications: Selected Results of the COST Action IC1406 cHiPSet, J. Kołodziej and H. González-Vélez, Eds., Cham: Springer International Publishing, 2019, p. 1–35. doi:10.1007/978-3-030-16272-6_1
    [BibTeX] [Abstract] [Download PDF]

    Modelling and Simulation (M&S) offer adequate abstractions to manage the complexity of analysing big data in scientific and engineering domains. Unfortunately, big data problems are often not easily amenable to efficient and effective use of High Performance Computing (HPC) facilities and technologies. Furthermore, M&S communities typically lack the detailed expertise required to exploit the full potential of HPC solutions while HPC specialists may not be fully aware of specific modelling and simulation requirements and applications.

    @incollection{Grelck2019,
    title = {Why High-Performance Modelling and Simulation for Big Data Applications Matters},
    author = {Grelck, Clemens and Niewiadomska-Szynkiewicz, Ewa and Aldinucci, Marco and Bracciali, Andrea and Larsson, Elisabeth},
    year = {2019},
    booktitle = {High-Performance Modelling and Simulation for Big Data Applications: Selected Results of the COST Action IC1406 cHiPSet},
    publisher = {Springer International Publishing},
    address = {Cham},
    series = {LNCS},
    pages = {1--35},
    doi = {10.1007/978-3-030-16272-6_1},
    isbn = {978-3-030-16272-6},
    abstract = {Modelling and Simulation (M{\&}S) offer adequate abstractions to manage the complexity of analysing big data in scientific and engineering domains. Unfortunately, big data problems are often not easily amenable to efficient and effective use of High Performance Computing (HPC) facilities and technologies. Furthermore, M{\&}S communities typically lack the detailed expertise required to exploit the full potential of HPC solutions while HPC specialists may not be fully aware of specific modelling and simulation requirements and applications.},
    date-added = {2019-03-25 23:09:24 +0100},
    date-modified = {2019-03-25 23:09:42 +0100},
    editor = {Ko{\l}odziej, Joanna and Gonz{\'a}lez-V{\'e}lez, Horacio},
    number = {11400},
    url = {https://link.springer.com/content/pdf/10.1007%2F978-3-030-16272-6_1.pdf},
    bdsk-url-1 = {https://link.springer.com/content/pdf/10.1007%2F978-3-030-16272-6_1.pdf},
    bdsk-url-2 = {https://doi.org/10.1007/978-3-030-16272-6_1},
    keywords = {chipset}
    }

  • M. Drocco, P. Viviani, I. Colonnelli, M. Aldinucci, and M. Grangetto, "Accelerating spectral graph analysis through wavefronts of linear algebra operations," in Proc. of 27th Euromicro Intl. Conference on Parallel Distributed and network-based Processing (PDP), Pavia, Italy, 2019, p. 9–16. doi:10.1109/EMPDP.2019.8671640
    [BibTeX] [Abstract] [Download PDF]

    The wavefront pattern captures the unfolding of a parallel computation in which data elements are laid out as a logical multidimensional grid and the dependency graph favours a diagonal sweep across the grid. In the emerging area of spectral graph analysis, the computing often consists in a wavefront running over a tiled matrix, involving expensive linear algebra kernels. While these applications might benefit from parallel heterogeneous platforms (multi-core with GPUs), programming wavefront applications directly with high-performance linear algebra libraries yields code that is complex to write and optimize for the specific application. We advocate a methodology based on two abstractions (linear algebra and parallel pattern-based run-time), that allows to develop portable, self-configuring, and easy-to-profile code on hybrid platforms.

    @inproceedings{19:gsp:pdp,
    title = {Accelerating spectral graph analysis through wavefronts of linear algebra operations},
    author = {Maurizio Drocco and Paolo Viviani and Iacopo Colonnelli and Marco Aldinucci and Marco Grangetto},
    year = {2019},
    booktitle = {Proc. of 27th Euromicro Intl. Conference on Parallel Distributed and network-based Processing (PDP)},
    publisher = {IEEE},
    address = {Pavia, Italy},
    pages = {9--16},
    doi = {10.1109/EMPDP.2019.8671640},
    abstract = {The wavefront pattern captures the unfolding of a parallel computation in which data elements are laid out as a logical multidimensional grid and the dependency graph favours a diagonal sweep across the grid. In the emerging area of spectral graph analysis, the computing often consists in a wavefront running over a tiled matrix, involving expensive linear algebra kernels. While these applications might benefit from parallel heterogeneous platforms (multi-core with GPUs), programming wavefront applications directly with high-performance linear algebra libraries yields code that is complex to write and optimize for the specific application. We advocate a methodology based on two abstractions (linear algebra and parallel pattern-based run-time), that allows to develop portable, self-configuring, and easy-to-profile code on hybrid platforms.},
    date-modified = {2021-04-24 23:22:22 +0200},
    url = {https://iris.unito.it/retrieve/handle/2318/1695315/488105/19_wavefront_PDP.pdf},
    bdsk-url-1 = {https://iris.unito.it/retrieve/handle/2318/1695315/488105/19_wavefront_PDP.pdf},
    bdsk-url-2 = {https://doi.org/10.1109/EMPDP.2019.8671640}
    }

  • V. Reniers, D. Van Landuyt, P. Viviani, B. Lagaisse, R. Lombardi, and W. Joosen, "Analysis of Architectural Variants for Auditable Blockchain-based Private Data Sharing," in Proceedings of the 34th ACM/SIGAPP Symposium on Applied Computing, New York, NY, USA, 2019, p. 346–354. doi:10.1145/3297280.3297316
    [BibTeX] [Abstract] [Download PDF]

    Many applications by design depend on costly trusted third-party auditors. One such example is the industrial application case of federated multi-disciplinary optimization (MDO), in which different organizations contribute to a complex engineering design effort. Although blockchain and distributed ledger technology (DLT) has strong potential in reducing the dependence on such intermediaries, the architectural complexity involved in designing a solution is daunting. In this paper, we analyze the architectural variants for decentralized private data sharing while guaranteeing auditability in terms of data access operations. Non-repudiation of actions taken by each party is a key requirement, as is availability of the shared data. The architectural variants analyzed focus on attaining: (i) confidential data exchange, (ii) maintaining and governing access to the shared data, (iii) providing data access auditability, (iv) data validation or conflict resolution, and to a lesser degree (v) transaction and identity privacy. We systematically enumerate architectural decisions at the levels of: storage, policy-based file access control, data encryption methods, and auditability mechanisms for private data. This analysis is based on extensive assessment of the state of the art on decentralized private data access management using static or dynamic policies, and private data validation without exposing confidential information. The main contribution of this work is a comprehensive overview of architectural variants for decentralized control of private, encrypted data, and the involved trade-offs in terms of performance, auditable trust and security. These findings are validated in the context of the aforementioned industry case that involves federated multi-disciplinary optimization (MDO).

    @inproceedings{19:sac:blockchain,
    title = {Analysis of Architectural Variants for Auditable Blockchain-based Private Data Sharing},
    author = {Reniers, Vincent and Van Landuyt, Dimitri and Viviani, Paolo and Lagaisse, Bert and Lombardi, Riccardo and Joosen, Wouter},
    year = {2019},
    booktitle = {Proceedings of the 34th ACM/SIGAPP Symposium on Applied Computing},
    publisher = {ACM},
    address = {New York, NY, USA},
    series = {SAC '19},
    pages = {346--354},
    doi = {10.1145/3297280.3297316},
    isbn = {978-1-4503-5933-7},
    abstract = {Many applications by design depend on costly trusted third-party auditors. One such example is the industrial application case of federated multi-disciplinary optimization (MDO), in which different organizations contribute to a complex engineering design effort. Although blockchain and distributed ledger technology (DLT) has strong potential in reducing the dependence on such intermediaries, the architectural complexity involved in designing a solution is daunting. In this paper, we analyze the architectural variants for decentralized private data sharing while guaranteeing auditability in terms of data access operations. Non-repudiation of actions taken by each party is a key requirement, as is availability of the shared data. The architectural variants analyzed focus on attaining:~(i)~confidential data exchange, (ii)~maintaining and governing access to the shared data, (iii)~providing data access auditability, (iv)~data validation or conflict resolution, and to a lesser degree (v)~transaction and identity privacy. We systematically enumerate architectural decisions at the levels of:~storage, policy-based file access control, data encryption methods, and auditability mechanisms for private data. This analysis is based on extensive assessment of the state of the art on decentralized private data access management using static or dynamic policies, and private data validation without exposing confidential information. The main contribution of this work is a comprehensive overview of architectural variants for decentralized control of private, encrypted data, and the involved trade-offs in terms of performance, auditable trust and security. These findings are validated in the context of the aforementioned industry case that involves federated multi-disciplinary optimization (MDO).},
    acmid = {3297316},
    date-modified = {2021-04-24 23:21:37 +0200},
    location = {Limassol, Cyprus},
    numpages = {9},
    url = {https://doi.acm.org/10.1145/3297280.3297316},
    bdsk-url-1 = {https://doi.acm.org/10.1145/3297280.3297316},
    bdsk-url-2 = {https://doi.org/10.1145/3297280.3297316}
    }

  • M. Torquati, D. De Sensi, G. Mencagli, M. Aldinucci, and M. Danelutto, "Power-Aware Pipelining with Automatic Concurrency Control," Concurrency and Computation: Practice and Experience, vol. 31, iss. 5, 2019. doi:10.1002/cpe.4652
    [BibTeX] [Abstract] [Download PDF]

    Continuous streaming computations are usually composed of different modules, exchanging data through shared message queues. The selection of the algorithm used to access such queues (i.e. the concurrency control) is a critical aspect both for performance and power consumption. In this paper we describe the design of automatic concurrency control algorithm for implementing power-efficient communications on shared-memory multicores. The algorithm automatically switches between nonblocking and blocking concurrency protocols, getting the best from the two worlds, i.e. obtaining the same throughput offered by the nonblocking implementation and the same power efficiency of the blocking concurrency protocol. We demonstrate the effectiveness of our approach using two micro-benchmarks and two real streaming applications.

    @article{18:dynqueue:ccpe,
    title = {Power-Aware Pipelining with Automatic Concurrency Control},
    author = {Massimo Torquati and De Sensi, Daniele and Gabriele Mencagli and Marco Aldinucci and Marco Danelutto},
    year = {2019},
    journal = {Concurrency and Computation: Practice and Experience},
    volume = {31},
    doi = {10.1002/cpe.4652},
    abstract = {Continuous streaming computations are usually composed of different modules, exchanging data through shared message queues. The selection of the algorithm used to access such queues (i.e. the concurrency control) is a critical aspect both for performance and power consumption. In this paper we describe the design of automatic concurrency control algorithm for implementing power-efficient communications on shared-memory multicores. The algorithm automatically switches between nonblocking and blocking concurrency protocols, getting the best from the two worlds, i.e. obtaining the same throughput offered by the nonblocking implementation and the same power efficiency of the blocking concurrency protocol. We demonstrate the effectiveness of our approach using two micro-benchmarks and two real streaming applications.},
    date-added = {2018-03-13 16:50:46 +0000},
    date-modified = {2019-03-22 23:49:37 +0100},
    number = {5},
    url = {https://iris.unito.it/retrieve/handle/2318/1668445/414282/2018_CCPE.pdf},
    bdsk-url-1 = {https://iris.unito.it/retrieve/handle/2318/1668445/414282/2018_CCPE.pdf},
    bdsk-url-2 = {https://doi.org/10.1002/cpe.4652},
    keywords = {rephrase}
    }

  • M. Aldinucci, M. Drocco, C. Misale, and G. Tremblay, "Languages for Big Data analysis," in Encyclopedia of Big Data Technologies, S. Sakr and A. Zomaya, Eds., Cham: Springer International Publishing, 2019. doi:10.1007/978-3-319-63962-8_142-1
    [BibTeX] [Abstract] [Download PDF]

    In this chapter, some of the most common tools for Big Data analytics are surveyed, inter-alia, Apache Spark, Flink, Storm, and Beam. They are compared against well-defined features concerning programming model (language expressivity and semantics), and execution model (parallel behaviour and run-time support). The implementation of a running example is provided for all of them.

    @incollection{bigdata:encyclopedia:18,
    title = {Languages for Big Data analysis},
    author = {Aldinucci, Marco and Drocco, Maurizio and Misale, Claudia and Tremblay, Guy},
    year = {2019},
    booktitle = {Encyclopedia of Big Data Technologies},
    publisher = {Springer International Publishing},
    address = {Cham},
    doi = {10.1007/978-3-319-63962-8_142-1},
    isbn = {978-3-319-63962-8},
    abstract = {In this chapter, some of the most common tools for Big Data analytics are surveyed, inter-alia, Apache Spark, Flink, Storm, and Beam. They are compared against well-defined features concerning programming model (language expressivity and semantics), and execution model (parallel behaviour and run-time support). The implementation of a running example is provided for all of them.},
    date-modified = {2019-03-22 08:13:33 +0100},
    editor = {Sakr, Sherif and Zomaya, Albert},
    url = {https://iris.unito.it/retrieve/handle/2318/1668051/413363/2019_bigdataframeworks_enc.pdf},
    bdsk-url-1 = {https://iris.unito.it/retrieve/handle/2318/1668051/413363/2019_bigdataframeworks_enc.pdf},
    bdsk-url-2 = {https://doi.org/10.1007/978-3-319-63962-8_142-1}
    }

  • M. Danelutto, T. De Matteis, D. De Sensi, G. Mencagli, M. Torquati, M. Aldinucci, and P. Kilpatrick, "The RePhrase Extended Pattern Set for Data Intensive Parallel Computing," International Journal of Parallel Programming, vol. 47, iss. 1, p. 74–93, 2019. doi:10.1007/s10766-017-0540-z
    [BibTeX] [Abstract] [Download PDF]

    We discuss the extended parallel pattern set identified within the EU-funded project RePhrase as a candidate pattern set to support data intensive applications targeting heterogeneous architectures. The set has been designed to include three classes of pattern, namely i) core patterns, modelling common, not necessarily data intensive parallelism exploitation patterns, usually to be used in composition; ii) high level patterns, modelling common, complex and complete parallelism exploitation patterns; and iii) building block patterns, modelling the single components of data intensive applications, suitable for use–in composition–to implement patterns not covered by the core and high level patterns. We discuss the expressive power of the RePhrase extended pattern set and results illustrating the performances that may be achieved with the FastFlow implementation of the high level patterns.

    @article{17:rephrasepatterns:ijpp,
    title = {The {RePhrase} Extended Pattern Set for Data Intensive Parallel Computing},
    author = {Marco Danelutto and De Matteis, Tiziano and De Sensi, Daniele and Gabriele Mencagli and Massimo Torquati and Marco Aldinucci and Peter Kilpatrick},
    year = {2019},
    journal = {International Journal of Parallel Programming},
    volume = {47},
    pages = {74--93},
    doi = {10.1007/s10766-017-0540-z},
    abstract = {We discuss the extended parallel pattern set identified within the EU-funded project RePhrase as a candidate pattern set to support data intensive applications targeting heterogeneous architectures. The set has been designed to include three classes of pattern, namely i) core patterns, modelling common, not necessarily data intensive parallelism exploitation patterns, usually to be used in composition; ii) high level patterns, modelling common, complex and complete parallelism exploitation patterns; and iii) building block patterns, modelling the single components of data intensive applications, suitable for use--in composition--to implement patterns not covered by the core and high level patterns. We discuss the expressive power of the RePhrase extended pattern set and results illustrating the performances that may be achieved with the FastFlow implementation of the high level patterns.},
    date-added = {2017-11-25 15:30:59 +0000},
    date-modified = {2019-03-22 23:49:02 +0100},
    number = {1},
    url = {https://iris.unito.it/retrieve/handle/2318/1659336/387667/2017_ijpp_rephrase.pdf},
    bdsk-url-1 = {https://iris.unito.it/retrieve/handle/2318/1659336/387667/2017_ijpp_rephrase.pdf},
    bdsk-url-2 = {https://doi.org/10.1007/s10766-017-0540-z},
    keywords = {rephrase, fastflow}
    }

  • M. Torquati, G. Mencagli, M. Drocco, M. Aldinucci, T. De Matteis, and M. Danelutto, "On Dynamic Memory Allocation in Sliding-Window Parallel Patterns for Streaming Analytics," The Journal of Supercomputing, vol. 75, iss. 8, p. 4114–4131, 2019. doi:10.1007/s11227-017-2152-1
    [BibTeX] [Abstract] [Download PDF]

    This work studies the issues related to dynamic memory management in Data Stream Processing, an emerging paradigm enabling the real-time processing of live data streams. In this paper we consider two streaming parallel patterns and we discuss different implementation variants related on how dynamic memory is managed. The results show that the standard mechanisms provided by modern C++ are not entirely adequate for maximizing the performance. Instead, the combined use of an efficient general-purpose memory allocator, a custom allocator optimized for the pattern considered and a custom variant of the C++ shared pointer mechanism, provides a performance improvement up to 16% on the best case.

    @article{17:dmadasp:jsupe,
    author     = {Massimo Torquati and Gabriele Mencagli and Maurizio Drocco and Marco Aldinucci and De Matteis, Tiziano and Marco Danelutto},
    title      = {On Dynamic Memory Allocation in Sliding-Window Parallel Patterns for Streaming Analytics},
    journal    = {The Journal of Supercomputing},
    year       = {2019},
    volume     = {75},
    number     = {8},
    pages      = {4114--4131},
    doi        = {10.1007/s11227-017-2152-1},
    url        = {https://iris.unito.it/retrieve/handle/2318/1648626/362381/17_torquati_jsc.pdf},
    abstract   = {This work studies the issues related to dynamic memory management in Data Stream Processing, an emerging paradigm enabling the real-time processing of live data streams. In this paper we consider two streaming parallel patterns and we discuss different implementation variants related on how dynamic memory is managed. The results show that the standard mechanisms provided by modern C++ are not entirely adequate for maximizing the performance. Instead, the combined use of an efficient general-purpose memory allocator, a custom allocator optimized for the pattern considered and a custom variant of the C++ shared pointer mechanism, provides a performance improvement up to 16{\%} on the best case.},
    keywords   = {rephrase, fastflow},
    bdsk-url-1 = {https://iris.unito.it/retrieve/handle/2318/1648626/362381/17_torquati_jsc.pdf},
    bdsk-url-2 = {https://doi.org/10.1007/s11227-017-2152-1}
    }

2018

  • C. Misale, M. Drocco, G. Tremblay, and M. Aldinucci, "PiCo: a Novel Approach to Stream Data Analytics," in Proc. of Euro-Par Workshops: 1st Intl. Workshop on Autonomic Solutions for Parallel and Distributed Data Stream Processing (Auto-DaSP 2017), Santiago de Compostela, Spain, 2018. doi:10.1007/978-3-319-75178-8_10
    [BibTeX] [Abstract] [Download PDF]

    In this paper, we present a new C++ API with a fluent interface called PiCo (Pipeline Composition). PiCo's programming model aims at making easier the programming of data analytics applications while preserving or enhancing their performance. This is attained through three key design choices: 1) unifying batch and stream data access models, 2) decoupling processing from data layout, and 3) exploiting a stream-oriented, scalable, efficient C++11 runtime system. PiCo proposes a programming model based on pipelines and operators that are polymorphic with respect to data types in the sense that it is possible to re-use the same algorithms and pipelines on different data models (e.g., streams, lists, sets, etc.). Preliminary results show that PiCo can attain better performances in terms of execution times and hugely improve memory utilization when compared to Spark and Flink in both batch and stream processing.

    @inproceedings{pico:autodasp:17,
    title = {{PiCo}: a Novel Approach to Stream Data Analytics},
    author = {Claudia Misale and Maurizio Drocco and Guy Tremblay and Marco Aldinucci},
    year = {2018},
    month = aug,
    booktitle = {Proc. of Euro-Par Workshops: 1st Intl. Workshop on Autonomic Solutions for Parallel and Distributed Data Stream Processing (Auto-DaSP 2017)},
    publisher = {Springer},
    address = {Santiago de Compostela, Spain},
    series = {{LNCS}},
    volume = {10659},
    doi = {10.1007/978-3-319-75178-8_10},
    abstract = {In this paper, we present a new C++ API with a fluent interface called PiCo (Pipeline Composition). PiCo's programming model aims at making easier the programming of data analytics applications while preserving or enhancing their performance. This is attained through three key design choices: 1) unifying batch and stream data access models, 2) decoupling processing from data layout, and 3) exploiting a stream-oriented, scalable, efficient C++11 runtime system. PiCo proposes a programming model based on pipelines and operators that are polymorphic with respect to data types in the sense that it is possible to re-use the same algorithms and pipelines on different data models (e.g., streams, lists, sets, etc.). Preliminary results show that PiCo can attain better performances in terms of execution times and hugely improve memory utilization when compared to Spark and Flink in both batch and stream processing.},
    date-added = {2017-11-12 10:17:46 +0000},
    date-modified = {2018-01-21 16:08:28 +0000},
    url = {https://iris.unito.it/retrieve/handle/2318/1659344/409520/autodasp.pdf},
    bdsk-url-1 = {https://dx.doi.org/10.1007/978-3-319-75178-8_10},
    bdsk-url-2 = {https://iris.unito.it/retrieve/handle/2318/1659344/409520/autodasp.pdf},
    keywords = {rephrase, toreador, ibm}
    }

  • G. Mencagli, M. Torquati, F. Lucattini, S. Cuomo, and M. Aldinucci, "Harnessing sliding-window execution semantics for parallel stream processing," Journal of Parallel and Distributed Computing, vol. 116, p. 74–88, 2018. doi:10.1016/j.jpdc.2017.10.021
    [BibTeX] [Abstract] [Download PDF]

    According to the recent trend in data acquisition and processing technology, big data are increasingly available in the form of unbounded streams of elementary data items to be processed in real-time. In this paper we study in detail the paradigm of sliding windows, a well-known technique for approximated queries that update their results continuously as new fresh data arrive from the stream. In this work we focus on the relationship between the various existing sliding window semantics and the way the query processing is performed from the parallelism perspective. From this study two alternative parallel models are identified, each covering semantics with very precise properties. Each model is described in terms of its pros and cons, and parallel implementations in the FastFlow framework are analyzed by discussing the layout of the concurrent data structures used for the efficient windows representation in each model.

    @article{17:slidingwindows:jpdc,
    title = {Harnessing sliding-window execution semantics for parallel stream processing},
    author = {Gabriele Mencagli and Massimo Torquati and Fabio Lucattini and Salvatore Cuomo and Marco Aldinucci},
    year = {2018},
    month = jun,
    journal = {Journal of Parallel and Distributed Computing},
    volume = {116},
    pages = {74--88},
    doi = {10.1016/j.jpdc.2017.10.021},
    issn = {0743-7315},
    abstract = {According to the recent trend in data acquisition and processing technology, big data are increasingly available in the form of unbounded streams of elementary data items to be processed in real-time. In this paper we study in detail the paradigm of sliding windows, a well-known technique for approximated queries that update their results continuously as new fresh data arrive from the stream. In this work we focus on the relationship between the various existing sliding window semantics and the way the query processing is performed from the parallelism perspective. From this study two alternative parallel models are identified, each covering semantics with very precise properties. Each model is described in terms of its pros and cons, and parallel implementations in the FastFlow framework are analyzed by discussing the layout of the concurrent data structures used for the efficient windows representation in each model.},
    date-modified = {2018-12-27 18:24:34 +0100},
    url = {https://iris.unito.it/retrieve/e27ce42c-1381-2581-e053-d805fe0acbaa/preprint-jpdc-2017.pdf},
    bdsk-url-1 = {https://iris.unito.it/retrieve/e27ce42c-1381-2581-e053-d805fe0acbaa/preprint-jpdc-2017.pdf},
    bdsk-url-2 = {http://dx.doi.org/10.1016/j.jpdc.2017.10.021},
    keywords = {rephrase, fastflow}
    }

  • M. Aldinucci, S. Rabellino, M. Pironti, F. Spiga, P. Viviani, M. Drocco, M. Guerzoni, G. Boella, M. Mellia, P. Margara, I. Drago, R. Marturano, G. Marchetto, E. Piccolo, S. Bagnasco, S. Lusso, S. Vallero, G. Attardi, A. Barchiesi, A. Colla, and F. Galeazzi, "HPC4AI, an AI-on-demand federated platform endeavour," in ACM Computing Frontiers, Ischia, Italy, 2018. doi:10.1145/3203217.3205340
    [BibTeX] [Abstract] [Download PDF]

    In April 2018, under the auspices of the POR-FESR 2014-2020 program of Italian Piedmont Region, the Turin's Centre on High-Performance Computing for Artificial Intelligence (HPC4AI) was funded with a capital investment of 4.5M€ and it began its deployment. HPC4AI aims to facilitate scientific research and engineering in the areas of Artificial Intelligence and Big Data Analytics. HPC4AI will specifically focus on methods for the on-demand provisioning of AI and BDA Cloud services to the regional and national industrial community, which includes the large regional ecosystem of Small-Medium Enterprises (SMEs) active in many different sectors such as automotive, aerospace, mechatronics, manufacturing, health and agrifood.

    @inproceedings{18:hpc4ai_acm_CF,
    author = {Marco Aldinucci and Sergio Rabellino and Marco Pironti and Filippo Spiga and Paolo Viviani and Maurizio Drocco and Marco Guerzoni and Guido Boella and Marco Mellia and Paolo Margara and Idillio Drago and Roberto Marturano and Guido Marchetto and Elio Piccolo and Stefano Bagnasco and Stefano Lusso and Sara Vallero and Giuseppe Attardi and Alex Barchiesi and Alberto Colla and Fulvio Galeazzi},
    title = {{HPC4AI}, an {AI-on-demand} federated platform endeavour},
    booktitle = {ACM Computing Frontiers},
    address = {Ischia, Italy},
    year = {2018},
    month = may,
    doi = {10.1145/3203217.3205340},
    abstract = {In April 2018, under the auspices of the POR-FESR 2014-2020 program of Italian Piedmont Region, the Turin's Centre on High-Performance Computing for Artificial Intelligence (HPC4AI) was funded with a capital investment of 4.5Me and it began its deployment. HPC4AI aims to facilitate scientific research and engineering in the areas of Artificial Intelligence and Big Data Analytics. HPC4AI will specifically focus on methods for the on-demand provisioning of AI and BDA Cloud services to the regional and national industrial community, which includes the large regional ecosystem of Small-Medium Enterprises (SMEs) active in many different sectors such as automotive, aerospace, mechatronics, manufacturing, health and agrifood.},
    url = {https://iris.unito.it/retrieve/handle/2318/1765596/689772/2018_hpc4ai_ACM_CF.pdf},
    date-added = {2018-04-21 14:18:48 +0000},
    date-modified = {2018-12-17 23:57:55 +0100},
    bdsk-url-1 = {https://iris.unito.it/retrieve/handle/2318/1765596/689772/2018_hpc4ai_ACM_CF.pdf},
    bdsk-url-2 = {https://doi.org/10.1145/3203217.3205340},
    keywords = {hpc4ai, toreador, rephrase}
    }

  • P. Viviani, M. Drocco, and M. Aldinucci, "Pushing the boundaries of parallel Deep Learning - A practical approach," CoRR, vol. abs/1806.09528, 2018.
    [BibTeX] [Abstract] [Download PDF]

    This work aims to assess the state of the art of data parallel deep neural network training, trying to identify potential research tracks to be exploited for performance improvement. Beside, it presents a design for a practical C++ library dedicated at implementing and unifying the current state of the art methodologies for parallel training in a performance-conscious framework, allowing the user to explore novel strategies without departing significantly from its usual work-flow.

    @article{18:arxiv:deeplearning,
    title = {Pushing the boundaries of parallel {Deep Learning} - {A} practical approach},
    author = {Paolo Viviani and Maurizio Drocco and Marco Aldinucci},
    year = {2018},
    journal = {CoRR},
    volume = {abs/1806.09528},
    eprint = {1806.09528},
    archiveprefix = {arXiv},
    abstract = {This work aims to assess the state of the art of data parallel deep neural network training, trying to identify potential research tracks to be exploited for performance improvement. Beside, it presents a design for a practical C++ library dedicated at implementing and unifying the current state of the art methodologies for parallel training in a performance-conscious framework, allowing the user to explore novel strategies without departing significantly from its usual work-flow.},
    url = {https://arxiv.org/pdf/1806.09528},
    bdsk-url-1 = {https://arxiv.org/pdf/1806.09528}
    }

  • C. Misale, M. Drocco, G. Tremblay, A. R. Martinelli, and M. Aldinucci, "PiCo: High-performance data analytics pipelines in modern C++," Future Generation Computer Systems, vol. 87, p. 392–403, 2018. doi:10.1016/j.future.2018.05.030
    [BibTeX] [Abstract] [Download PDF]

    In this paper, we present a new C++ API with a fluent interface called PiCo (Pipeline Composition). PiCo's programming model aims at making easier the programming of data analytics applications while preserving or enhancing their performance. This is attained through three key design choices: (1) unifying batch and stream data access models, (2) decoupling processing from data layout, and (3) exploiting a stream-oriented, scalable, efficient C++11 runtime system. PiCo proposes a programming model based on pipelines and operators that are polymorphic with respect to data types in the sense that it is possible to reuse the same algorithms and pipelines on different data models (e.g., streams, lists, sets, etc.). Preliminary results show that PiCo, when compared to Spark and Flink, can attain better performances in terms of execution times and can hugely improve memory utilization, both for batch and stream processing.

    @article{18:fgcs:pico,
    title = {{PiCo}: High-performance data analytics pipelines in modern {C++}},
    author = {Claudia Misale and Maurizio Drocco and Guy Tremblay and Alberto R. Martinelli and Marco Aldinucci},
    year = {2018},
    journal = {Future Generation Computer Systems},
    volume = {87},
    pages = {392--403},
    doi = {10.1016/j.future.2018.05.030},
    abstract = {In this paper, we present a new C++ API with a fluent interface called PiCo (Pipeline Composition). PiCo's programming model aims at making easier the programming of data analytics applications while preserving or enhancing their performance. This is attained through three key design choices: (1) unifying batch and stream data access models, (2) decoupling processing from data layout, and (3) exploiting a stream-oriented, scalable, efficient C++11 runtime system. PiCo proposes a programming model based on pipelines and operators that are polymorphic with respect to data types in the sense that it is possible to reuse the same algorithms and pipelines on different data models (e.g., streams, lists, sets, etc.). Preliminary results show that PiCo, when compared to Spark and Flink, can attain better performances in terms of execution times and can hugely improve memory utilization, both for batch and stream processing.},
    date-added = {2018-05-18 21:24:31 +0000},
    date-modified = {2020-11-15 17:22:30 +0100},
    url = {https://iris.unito.it/retrieve/handle/2318/1668444/414280/fgcs_pico.pdf},
    bdsk-url-1 = {https://iris.unito.it/retrieve/handle/2318/1668444/414280/fgcs_pico.pdf},
    bdsk-url-2 = {https://doi.org/10.1016/j.future.2018.05.030},
    keywords = {toreador, bigdata, fastflow}
    }

  • P. Viviani, M. Drocco, and M. Aldinucci, "Scaling Dense Linear Algebra on Multicore and Beyond: a Survey," in Proc. of 26th Euromicro Intl. Conference on Parallel Distributed and network-based Processing (PDP), Cambridge, United Kingdom, 2018. doi:10.1109/PDP2018.2018.00122
    [BibTeX] [Abstract] [Download PDF]

    The present trend in big-data analytics is to exploit algorithms with (sub-)linear time complexity, in this sense it is usually worth to investigate if the available techniques can be approximated to reach an affordable complexity. However, there are still problems in data science and engineering that involve algorithms with higher time complexity, like matrix inversion or Singular Value Decomposition (SVD). This work presents the results of a survey that reviews a number of tools meant to perform dense linear algebra at ``Big Data'' scale: namely, the proposed approach aims first to define a feasibility boundary for the problem size of shared-memory matrix factorizations, then to understand whether it is convenient to employ specific tools meant to scale out such dense linear algebra tasks on distributed platforms. The survey will eventually discuss the presented tools from the point of view of domain experts (data scientist, engineers), hence focusing on the trade-off between usability and performance.

    @inproceedings{svd:pdp:18,
    author = {Paolo Viviani and Maurizio Drocco and Marco Aldinucci},
    title = {Scaling Dense Linear Algebra on Multicore and Beyond: a Survey},
    booktitle = {Proc. of 26th Euromicro Intl. Conference on Parallel Distributed and network-based Processing (PDP)},
    address = {Cambridge, United Kingdom},
    publisher = {IEEE},
    year = {2018},
    doi = {10.1109/PDP2018.2018.00122},
    abstract = {The present trend in big-data analytics is to exploit algorithms with (sub-)linear time complexity, in this sense it is usually worth to investigate if the available techniques can be approximated to reach an affordable complexity. However, there are still problems in data science and engineering that involve algorithms with higher time complexity, like matrix inversion or Singular Value Decomposition (SVD). This work presents the results of a survey that reviews a number of tools meant to perform dense linear algebra at ``Big Data'' scale: namely, the proposed approach aims first to define a feasibility boundary for the problem size of shared-memory matrix factorizations, then to understand whether it is convenient to employ specific tools meant to scale out such dense linear algebra tasks on distributed platforms. The survey will eventually discuss the presented tools from the point of view of domain experts (data scientist, engineers), hence focusing on the trade-off between usability and performance.},
    url = {https://iris.unito.it/retrieve/handle/2318/1659340/387685/preprint_aperto.pdf},
    date-modified = {2021-04-24 23:23:42 +0200},
    bdsk-url-1 = {https://iris.unito.it/retrieve/handle/2318/1659340/387685/preprint_aperto.pdf},
    bdsk-url-2 = {https://doi.org/10.1109/PDP2018.2018.00122}
    }

  • F. Tordini, M. Aldinucci, P. Viviani, I. Merelli, and P. Liò, "Scientific Workflows on Clouds with Heterogeneous and Preemptible Instances," in Proc. of the Intl. Conference on Parallel Computing, ParCo 2017, 12-15 September 2017, Bologna, Italy, 2018. doi:10.3233/978-1-61499-843-3-605
    [BibTeX] [Abstract] [Download PDF]

    The cloud environment is increasingly appealing for the HPC community, which has always dealt with scientific applications. However, there is still some skepticism about moving from traditional physical infrastructures to virtual HPC clusters. This mistrusting probably originates from some well known factors, including the effective economy of using cloud services, data and software availability, and the longstanding matter of data stewardship. In this work we discuss the design of a framework (based on Mesos) aimed at achieving a cost-effective and efficient usage of heterogeneous Processing Elements (PEs) for workflow execution, which supports hybrid cloud bursting over preemptible cloud Virtual Machines.

    @inproceedings{18:parco:workflow,
    author = {Fabio Tordini and Marco Aldinucci and Paolo Viviani and Ivan Merelli and Pietro Li{\`{o}}},
    title = {Scientific Workflows on Clouds with Heterogeneous and Preemptible Instances},
    booktitle = {Proc. of the Intl. Conference on Parallel Computing, ParCo 2017, 12-15 September 2017, Bologna, Italy},
    series = {Advances in Parallel Computing},
    publisher = {{IOS} Press},
    year = {2018},
    doi = {10.3233/978-1-61499-843-3-605},
    abstract = {The cloud environment is increasingly appealing for the HPC community, which has always dealt with scientific applications. However, there is still some skepticism about moving from traditional physical infrastructures to virtual HPC clusters. This mistrusting probably originates from some well known factors, including the effective economy of using cloud services, data and software availability, and the longstanding matter of data stewardship. In this work we discuss the design of a framework (based on Mesos) aimed at achieving a cost-effective and efficient usage of heterogeneous Processing Elements (PEs) for workflow execution, which supports hybrid cloud bursting over preemptible cloud Virtual Machines.},
    url = {https://iris.unito.it/retrieve/handle/2318/1658510/385411/main.pdf},
    date-added = {2018-01-21 15:15:01 +0000},
    date-modified = {2018-03-13 16:44:11 +0000},
    bdsk-url-1 = {https://iris.unito.it/retrieve/handle/2318/1658510/385411/main.pdf},
    bdsk-url-2 = {https://doi.org/10.3233/978-1-61499-843-3-605},
    keywords = {rephrase}
    }

  • P. Viviani, M. Aldinucci, R. d'Ippolito, J. Lemeire, and D. Vucinic, "A Flexible Numerical Framework for Engineering—A Response Surface Modelling Application," in Improved Performance of Materials: Design and Experimental Approaches, Cham: Springer International Publishing, 2018, p. 93–106. doi:10.1007/978-3-319-59590-0_9
    [BibTeX] [Abstract]

    This work presents an innovative approach adopted for the development of a new numerical software framework for accelerating dense linear algebra calculations and its application within an engineering context. In particular, response surface models (RSM) are a key tool to reduce the computational effort involved in engineering design processes like design optimization. However, RSMs may prove to be too expensive to be computed when the dimensionality of the system and/or the size of the dataset to be synthesized is significantly high or when a large number of different response surfaces has to be calculated in order to improve the overall accuracy (e.g. like when using ensemble modelling techniques). On the other hand, the potential of modern hybrid hardware (e.g. multicore, GPUs) is not exploited by current engineering tools, while they can lead to a significant performance improvement. To fill this gap, a software framework is being developed that enables the hybrid and scalable acceleration of the linear algebra core for engineering applications and especially of RSMs calculations with a user-friendly syntax that allows good portability between different hardware architectures, with no need of specific expertise in parallel programming and accelerator technology. The effectiveness of this framework is shown by comparing an accelerated code to a single-core calculation of a radial basis function RSM on some benchmark datasets. This approach is then validated within a real-life engineering application and the achievements are presented and discussed.

    @incollection{17:viviani:advstruct,
    title = {A Flexible Numerical Framework for Engineering---A Response Surface Modelling Application},
    author = {Viviani, P. and Aldinucci, M. and d'Ippolito, R. and Lemeire, J. and Vucinic, D.},
    editor = {{\"O}chsner, Andreas and Altenbach, Holm},
    year = {2018},
    booktitle = {Improved Performance of Materials: Design and Experimental Approaches},
    publisher = {Springer International Publishing},
    address = {Cham},
    pages = {93--106},
    doi = {10.1007/978-3-319-59590-0_9},
    isbn = {978-3-319-59590-0},
    abstract = {This work presents an innovative approach adopted for the development of a new numerical software framework for accelerating dense linear algebra calculations and its application within an engineering context. In particular, response surface models (RSM) are a key tool to reduce the computational effort involved in engineering design processes like design optimization. However, RSMs may prove to be too expensive to be computed when the dimensionality of the system and/or the size of the dataset to be synthesized is significantly high or when a large number of different response surfaces has to be calculated in order to improve the overall accuracy (e.g. like when using ensemble modelling techniques). On the other hand, the potential of modern hybrid hardware (e.g. multicore, GPUs) is not exploited by current engineering tools, while they can lead to a significant performance improvement. To fill this gap, a software framework is being developed that enables the hybrid and scalable acceleration of the linear algebra core for engineering applications and especially of RSMs calculations with a user-friendly syntax that allows good portability between different hardware architectures, with no need of specific expertise in parallel programming and accelerator technology. The effectiveness of this framework is shown by comparing an accelerated code to a single-core calculation of a radial basis function RSM on some benchmark datasets. This approach is then validated within a real-life engineering application and the achievements are presented and discussed.},
    date-modified = {2018-03-13 16:40:21 +0000},
    bdsk-url-1 = {https://doi.org/10.1007/978-3-319-59590-0_9},
    keywords = {repara, rephrase}
    }

  • M. Aldinucci, M. Danelutto, M. Drocco, P. Kilpatrick, C. Misale, G. Peretti Pezzi, and M. Torquati, "A Parallel Pattern for Iterative Stencil + Reduce," Journal of Supercomputing, vol. 74, iss. 11, p. 5690–5705, 2018. doi:10.1007/s11227-016-1871-z
    [BibTeX] [Abstract] [Download PDF]

    We advocate the Loop-of-stencil-reduce pattern as a means of simplifying the implementation of data-parallel programs on heterogeneous multi-core platforms. Loop-of-stencil-reduce is general enough to subsume map, reduce, map-reduce, stencil, stencil-reduce, and, crucially, their usage in a loop in both data-parallel and streaming applications, or a combination of both. The pattern makes it possible to deploy a single stencil computation kernel on different GPUs. We discuss the implementation of Loop-of-stencil-reduce in FastFlow, a framework for the implementation of applications based on the parallel patterns. Experiments are presented to illustrate the use of Loop-of-stencil-reduce in developing data-parallel kernels running on heterogeneous systems.

    @article{16:stencilreduce:jsupe,
    author = {Marco Aldinucci and Marco Danelutto and Maurizio Drocco and Peter Kilpatrick and Claudia Misale and Guilherme {Peretti Pezzi} and Massimo Torquati},
    title = {A Parallel Pattern for Iterative Stencil + Reduce},
    journal = {Journal of Supercomputing},
    year = {2018},
    volume = {74},
    number = {11},
    pages = {5690--5705},
    doi = {10.1007/s11227-016-1871-z},
    abstract = {We advocate the Loop-of-stencil-reduce pattern as a means of simplifying the implementation of data-parallel programs on heterogeneous multi-core platforms. Loop-of-stencil-reduce is general enough to subsume map, reduce, map-reduce, stencil, stencil-reduce, and, crucially, their usage in a loop in both data-parallel and streaming applications, or a combination of both. The pattern makes it possible to deploy a single stencil computation kernel on different GPUs. We discuss the implementation of Loop-of-stencil-reduce in FastFlow, a framework for the implementation of applications based on the parallel patterns. Experiments are presented to illustrate the use of Loop-of-stencil-reduce in developing data-parallel kernels running on heterogeneous systems.},
    url = {https://iris.unito.it/retrieve/0716fc42-53d7-48c0-9469-697aabfe7759/jspaper.pdf},
    date-added = {2016-08-19 21:52:17 +0000},
    date-modified = {2018-12-27 18:19:41 +0100},
    bdsk-url-1 = {http://dx.doi.org/10.1007/s11227-016-1871-z},
    bdsk-url-2 = {http://arxiv.org/pdf/1609.04567v1.pdf},
    keywords = {nvidia, repara, rephrase}
    }

2017

  • S. Cuomo, M. Aldinucci, and M. Torquati, "Guest Editorial for Programming Models and Algorithms for Data Analysis in HPC Systems," International Journal of Parallel Programming, p. 1–3, 2017. doi:10.1007/s10766-017-0531-0
    [BibTeX] [Abstract] [Download PDF]

    Performance is still the hottest keyword in parallel and distributed systems: performance evaluation, design for performance, performance portability and scalability are just a few of the many possible declinations that nowadays are of paramount scientific importance. To tackle these challenges, system architects, applications programmers and data center managers need methodological tools to fit at best the overall workload and the available architecture, maximizing the overall performances and minimizing overheads, energy consumption or idle time while application developers mainly aim at algorithmic and software oriented performances. Proper methodologies for modeling and analysis are the way to turn complexity into opportunities. This Special Issue of the International Journal of Parallel Programming welcomes papers that present practical and methodological approaches to analytical and simulative performance evaluation for architecturally complex systems and high-performance parallel and computing algorithm. Successful contributions have been done on specific technologies, applications and innovative solutions to system specifications and algorithmic schemes both.

    @article{17:ijpp:cuomo:editorial,
    title = {Guest Editorial for Programming Models and Algorithms for Data Analysis in HPC Systems},
    author = {Cuomo, Salvatore and Aldinucci, Marco and Torquati, Massimo},
    year = {2017},
    month = oct,
    journal = {International Journal of Parallel Programming},
    pages = {1--3},
    doi = {10.1007/s10766-017-0531-0},
    issn = {0885-7458},
    note = {Editorial},
    abstract = {
    Performance is still the hottest keyword in parallel and distributed systems: performance evaluation, design for performance, performance portability and scalability are just a few of the many possible declinations that nowadays are of paramount scientific importance. To tackle these challenges, system architects, applications programmers and data center managers need methodological tools to fit at best the overall workload and the available architecture, maximizing the overall performances and minimizing overheads, energy consumption or idle time while application developers mainly aim at algorithmic and software oriented performances. Proper methodologies for modeling and analysis are the way to turn complexity into opportunities.
    This Special Issue of the International Journal of Parallel Programming welcomes papers that present practical and methodological approaches to analytical and simulative performance evaluation for architecturally complex systems and high-performance parallel and computing algorithm. Successful contributions have been done on specific technologies, applications and innovative solutions to system specifications and algorithmic schemes both.
    },
    date-added = {2017-11-12 11:10:10 +0000},
    date-modified = {2017-11-12 14:08:30 +0000},
    url = {https://doi.org/10.1007/s10766-017-0531-0},
    bdsk-url-1 = {https://doi.org/10.1007/s10766-017-0531-0},
    bdsk-url-2 = {https://dx.doi.org/10.1007/s10766-017-0531-0},
    keywords = {editorial}
    }

  • M. Drocco, "Parallel Programming with Global Asynchronous Memory: Models, C++ APIs and Implementations," PhD Thesis, 2017. doi:10.5281/zenodo.1037585
    [BibTeX] [Abstract] [Download PDF]

    In the realm of High Performance Computing (HPC), message passing has been the programming paradigm of choice for over twenty years. The durable MPI (Message Passing Interface) standard, with send/receive communication, broadcast, gather/scatter, and reduction collectives is still used to construct parallel programs where each communication is orchestrated by the developer-based precise knowledge of data distribution and overheads; collective communications simplify the orchestration but might induce excessive synchronization. Early attempts to bring shared-memory programming model—with its programming advantages—to distributed computing, referred as the Distributed Shared Memory (DSM) model, faded away; one of the main issue was to combine performance and programmability with the memory consistency model. The recently proposed Partitioned Global Address Space (PGAS) model is a modern revamp of DSM that exposes data placement to enable optimizations based on locality, but it still addresses (simple) data-parallelism only and it relies on expensive sharing protocols. We advocate an alternative programming model for distributed computing based on a Global Asynchronous Memory (GAM), aiming to avoid coherency and consistency problems rather than solving them. We materialize GAM by designing and implementing a distributed smart pointers library, inspired by C++ smart pointers. In this model, public and private pointers (resembling C++ shared and unique pointers, respectively) are moved around instead of messages (i.e., data), thus alleviating the user from the burden of minimizing transfers. On top of smart pointers, we propose a high-level C++ template library for writing applications in terms of dataflow-like networks, namely GAM nets, consisting of stateful processors exchanging pointers in fully asynchronous fashion.
    We demonstrate the validity of the proposed approach, from the expressiveness perspective, by showing how GAM nets can be exploited to implement higher-level parallel programming models, such as data and task parallelism. As for the performance perspective, the execution of two non-toy benchmarks on a number of different small-scale HPC clusters exhibits both close-to-ideal scalability and negligible overhead with respect to state-of-the-art benchmark implementations. For instance, the GAM implementation of a high-quality video restoration filter sustains a 100 fps throughput over 70%-noisy high-quality video streams on a 4-node cluster of Graphics Processing Units (GPUs), with minimal programming effort.

    @phdthesis{17:gam:drocco:thesis,
    author = {Maurizio Drocco},
    title = {Parallel Programming with Global Asynchronous Memory: Models, {C++} {API}s and Implementations},
    school = {Computer Science Department, University of Torino},
    year = {2017},
    month = oct,
    doi = {10.5281/zenodo.1037585},
    abstract = {In the realm of High Performance Computing (HPC), message passing has been the programming paradigm of choice for over twenty years. The durable MPI (Message Passing Interface) standard, with send/receive communication, broadcast, gather/scatter, and reduction collectives is still used to construct parallel programs where each communication is orchestrated by the de\-vel\-oper-based precise knowledge of data distribution and overheads; collective communications simplify the orchestration but might induce excessive synchronization. Early attempts to bring shared-memory programming model---with its programming adv\-antages---to distributed computing, referred as the Distributed Shared Memory (DSM) model, faded away; one of the main issue was to combine performance and programmability with the memory consistency model. The recently proposed Partitioned Global Address Space (PGAS) model is a modern revamp of DSM that exposes data placement to enable optimizations based on locality, but it still addresses (simple) data-parallelism only and it relies on expensive sharing protocols. We advocate an alternative programming model for distributed computing based on a Global Asynchronous Memory (GAM), aiming to \emph{avoid} coherency and consistency problems rather than solving them. We materialize GAM by designing and implementing a \emph{distributed smart pointers} library, inspired by C++ smart pointers. In this model, public and private pointers (resembling C++ shared and unique pointers, respectively) are moved around instead of messages (i.e., data), thus alleviating the user from the burden of minimizing transfers. On top of smart pointers, we propose a high-level C++ template library for writing applications in terms of dataflow-like networks, namely GAM nets, consisting of stateful processors exchanging pointers in fully asynchronous fashion. 
We demonstrate the validity of the proposed approach, from the expressiveness perspective, by showing how GAM nets can be exploited to implement higher-level parallel programming models, such as data and task parallelism. As for the performance perspective, the execution of two non-toy benchmarks on a number of different small-scale HPC clusters exhibits both close-to-ideal scalability and negligible overhead with respect to state-of-the-art benchmark implementations. For instance, the GAM implementation of a high-quality video restoration filter sustains a 100 fps throughput over 70\%-noisy high-quality video streams on a 4-node cluster of Graphics Processing Units (GPUs), with minimal programming effort.},
    url = {https://zenodo.org/record/1037585/files/Drocco_phd_thesis.pdf},
    date-modified = {2017-12-12 15:09:35 +0000},
    bdsk-url-1 = {https://zenodo.org/record/1037585/files/Drocco_phd_thesis.pdf},
    bdsk-url-2 = {http://dx.doi.org/10.5281/zenodo.1037585},
    keywords = {fastflow, rephrase, toreador, repara, paraphrase}
    }

  • P. Severi, L. Padovani, E. Tuosto, and M. Dezani-Ciancaglini, "On Sessions and Infinite Data," Logical Methods in Computer Science, vol. {Volume 13, Issue 2}, 2017. doi:10.23638/LMCS-13(2:9)2017
    [BibTeX] [Download PDF]
    @article{lmcs:3725,
    title = {On Sessions and Infinite Data},
    author = {Severi, Paula and Padovani, Luca and Tuosto, Emilio and Dezani-Ciancaglini, Mariangiola},
    year = {2017},
    month = jun,
    journal = {Logical Methods in Computer Science},
    volume = {13},
    number = {2},
    doi = {10.23638/LMCS-13(2:9)2017},
    url = {http://lmcs.episciences.org/3725},
    bdsk-url-1 = {http://lmcs.episciences.org/3725},
    bdsk-url-2 = {http://dx.doi.org/10.23638/LMCS-13(2:9)2017},
    keywords = {rephrase, lambda}
    }

  • M. Drocco, C. Misale, G. Tremblay, and M. Aldinucci, "A Formal Semantics for Data Analytics Pipelines," Computer Science Department, University of Torino 2017. doi:10.5281/zenodo.571802
    [BibTeX] [Download PDF]
    @techreport{17:drocco:techreport,
    title = {A Formal Semantics for Data Analytics Pipelines},
    author = {Drocco, Maurizio and Misale, Claudia and Tremblay, Guy and Aldinucci, Marco},
    year = {2017},
    month = may,
    doi = {10.5281/zenodo.571802},
    note = {https://arxiv.org/abs/1705.01629},
    date-added = {2017-06-19 15:45:02 +0000},
    date-modified = {2018-03-13 16:45:57 +0000},
    institution = {Computer Science Department, University of Torino},
    url = {https://doi.org/10.5281/zenodo.571802},
    bdsk-url-1 = {https://doi.org/10.5281/zenodo.571802},
    bdsk-url-2 = {http://dx.doi.org/10.5281/zenodo.571802},
    keywords = {rephrase, toreador}
    }

  • C. Misale, "PiCo: A Domain-Specific Language for Data Analytics Pipelines," PhD Thesis, 2017. doi:10.5281/zenodo.579753
    [BibTeX] [Abstract] [Download PDF]

    In the world of Big Data analytics, there is a series of tools aiming at simplifying programming applications to be executed on clusters. Although each tool claims to provide better programming, data and execution models–-for which only informal (and often confusing) semantics is generally provided–-all share a common under- lying model, namely, the Dataflow model. Using this model as a starting point, it is possible to categorize and analyze almost all aspects about Big Data analytics tools from a high level perspective. This analysis can be considered as a first step toward a formal model to be exploited in the design of a (new) framework for Big Data analytics. By putting clear separations between all levels of abstraction (i.e., from the runtime to the user API), it is easier for a programmer or software designer to avoid mixing low level with high level aspects, as we are often used to see in state-of-the-art Big Data analytics frameworks. From the user-level perspective, we think that a clearer and simple semantics is preferable, together with a strong separation of concerns. For this reason, we use the Dataflow model as a starting point to build a programming environment with a simplified programming model implemented as a Domain-Specific Language, that is on top of a stack of layers that build a prototypical framework for Big Data analytics. The contribution of this thesis is twofold: first, we show that the proposed model is (at least) as general as existing batch and streaming frameworks (e.g., Spark, Flink, Storm, Google Dataflow), thus making it easier to understand high-level data-processing applications written in such frameworks. As result of this analysis, we provide a layered model that can represent tools and applications following the Dataflow paradigm and we show how the analyzed tools fit in each level. 
Second, we propose a programming environment based on such layered model in the form of a Domain-Specific Language (DSL) for processing data collections, called PiCo (Pipeline Composition). The main entity of this programming model is the Pipeline, basically a DAG-composition of processing elements. This model is intended to give the user an unique interface for both stream and batch processing, hiding completely data management and focusing only on operations, which are represented by Pipeline stages. Our DSL will be built on top of the FastFlow library, exploiting both shared and distributed parallelism, and implemented in C++11/14 with the aim of porting C++ into the Big Data world.

    @phdthesis{17:pico:misale:thesis,
    title = {PiCo: A Domain-Specific Language for Data Analytics Pipelines},
    author = {Misale, Claudia},
    year = {2017},
    month = may,
    doi = {10.5281/zenodo.579753},
    abstract = {
    In the world of Big Data analytics, there is a series of tools aiming at simplifying programming applications to be executed on clusters. Although each tool claims to provide better programming, data and execution models---for which only informal (and often confusing) semantics is generally provided---all share a common under- lying model, namely, the Dataflow model. Using this model as a starting point, it is possible to categorize and analyze almost all aspects about Big Data analytics tools from a high level perspective. This analysis can be considered as a first step toward a formal model to be exploited in the design of a (new) framework for Big Data analytics. By putting clear separations between all levels of abstraction (i.e., from the runtime to the user API), it is easier for a programmer or software designer to avoid mixing low level with high level aspects, as we are often used to see in state-of-the-art Big Data analytics frameworks.
    From the user-level perspective, we think that a clearer and simple semantics is preferable, together with a strong separation of concerns. For this reason, we use the Dataflow model as a starting point to build a programming environment with a simplified programming model implemented as a Domain-Specific Language, that is on top of a stack of layers that build a prototypical framework for Big Data analytics.
    The contribution of this thesis is twofold: first, we show that the proposed model is (at least) as general as existing batch and streaming frameworks (e.g., Spark, Flink, Storm, Google Dataflow), thus making it easier to understand high-level data-processing applications written in such frameworks. As result of this analysis, we provide a layered model that can represent tools and applications following the Dataflow paradigm and we show how the analyzed tools fit in each level.
    Second, we propose a programming environment based on such layered model in the form of a Domain-Specific Language (DSL) for processing data collections, called PiCo (Pipeline Composition). The main entity of this programming model is the Pipeline, basically a DAG-composition of processing elements. This model is intended to give the user an unique interface for both stream and batch processing, hiding completely data management and focusing only on operations, which are represented by Pipeline stages. Our DSL will be built on top of the FastFlow library, exploiting both shared and distributed parallelism, and implemented in C++11/14 with the aim of porting C++ into the Big Data world.
    },
    date-added = {2017-06-19 15:15:52 +0000},
    date-modified = {2017-06-19 15:55:21 +0000},
    school = {Computer Science Department, University of Torino},
    url = {https://iris.unito.it/retrieve/handle/2318/1633743/320170/Misale_thesis.pdf},
    bdsk-url-1 = {https://iris.unito.it/retrieve/handle/2318/1633743/320170/Misale_thesis.pdf},
    bdsk-url-2 = {http://dx.doi.org/10.5281/zenodo.579753},
    keywords = {fastflow, rephrase, toreador, repara, paraphrase}
    }

  • P. Viviani, M. Torquati, M. Aldinucci, and R. d'Ippolito, "Multiple back-end support for the Armadillo linear algebra interface," in Proc. of the 32nd ACM Symposium on Applied Computing (SAC), Marrakesh, Morocco, 2017, p. 1566–1573.
    [BibTeX] [Abstract] [Download PDF]

    The Armadillo C++ library provides programmers with a high-level Matlab-like syntax for linear algebra. Its design aims at providing a good balance between speed and ease of use. It can be linked with different back-ends, i.e. different LAPACK-compliant libraries. In this work we present a novel run-time support of Armadillo, which gracefully extends mainstream implementation to enable back-end switching without recompilation and multiple back-end support. The extension is specifically designed to not affect Armadillo class template prototypes, thus to be easily interoperable with future evolutions of the Armadillo library itself. The proposed software stack is then tested for functionality and performance against a kernel code extracted from an industrial application.

    @inproceedings{17:sac:armadillo,
    title = {Multiple back-end support for the Armadillo linear algebra interface},
    author = {Paolo Viviani and Massimo Torquati and Marco Aldinucci and Roberto d'Ippolito},
    year = {2017},
    month = apr,
    booktitle = {Proc. of the 32nd ACM Symposium on Applied Computing (SAC)},
    address = {Marrakesh, Morocco},
    pages = {1566--1573},
    abstract = {The Armadillo C++ library provides programmers with a high-level Matlab-like syntax for linear algebra. Its design aims at providing a good balance between speed and ease of use. It can be linked with different back-ends, i.e. different LAPACK-compliant libraries. In this work we present a novel run-time support of Armadillo, which gracefully extends mainstream implementation to enable back-end switching without recompilation and multiple back-end support. The extension is specifically designed to not affect Armadillo class template prototypes, thus to be easily interoperable with future evolutions of the Armadillo library itself. The proposed software stack is then tested for functionality and performance against a kernel code extracted from an industrial application.},
    date-added = {2016-08-19 21:47:45 +0000},
    date-modified = {2017-06-13 15:54:43 +0000},
    url = {https://iris.unito.it/retrieve/handle/2318/1626229/299089/armadillo_4aperto.pdf},
    bdsk-url-1 = {https://iris.unito.it/retrieve/handle/2318/1626229/299089/armadillo_4aperto.pdf},
    keywords = {nvidia, repara, rephrase, itea2}
    }

  • M. Aldinucci, M. Danelutto, D. De Sensi, G. Mencagli, and M. Torquati, "Towards Power-Aware Data Pipelining on Multicores," in Proceedings of the 10th International Symposium on High-Level Parallel Programming and Applications, Valladolid, Spain, 2017.
    [BibTeX] [Abstract] [Download PDF]

    Power consumption management has become a major concern in software development. Continuous streaming computations are usually com- posed by different modules, exchanging data through shared message queues. The selection of the algorithm used to access such queues (i.e., the concurrency control) is a critical aspect for both performance and power consumption. In this paper, we describe the design of an adaptive concurrency control algo- rithm for implementing power-efficient communications on shared memory multicores. The algorithm provides the throughput offered by a nonblocking implementation and the power efficiency of a blocking protocol. We demon- strate that our algorithm reduces the power consumption of data streaming computations without decreasing their throughput.

    @inproceedings{17:hlpp:powerstream,
    title = {Towards Power-Aware Data Pipelining on Multicores},
    author = {Marco Aldinucci and Marco Danelutto and De Sensi, Daniele and Gabriele Mencagli and Massimo Torquati},
    year = {2017},
    booktitle = {Proceedings of the 10th International Symposium on High-Level Parallel Programming and Applications},
    address = {Valladolid, Spain},
    abstract = {Power consumption management has become a major concern in software development. Continuous streaming computations are usually com- posed by different modules, exchanging data through shared message queues. The selection of the algorithm used to access such queues (i.e., the concurrency control) is a critical aspect for both performance and power consumption. In this paper, we describe the design of an adaptive concurrency control algo- rithm for implementing power-efficient communications on shared memory multicores. The algorithm provides the throughput offered by a nonblocking implementation and the power efficiency of a blocking protocol. We demon- strate that our algorithm reduces the power consumption of data streaming computations without decreasing their throughput.},
    date-added = {2017-07-13 09:02:32 +0000},
    date-modified = {2018-12-27 18:53:27 +0100},
    url = {https://iris.unito.it/retrieve/handle/2318/1644982/351415/17_HLPP_powerstream.pdf},
    bdsk-url-1 = {https://iris.unito.it/retrieve/handle/2318/1644982/351415/17_HLPP_powerstream.pdf},
    keywords = {rephrase, fastflow}
    }

  • C. Misale, M. Drocco, M. Aldinucci, and G. Tremblay, "A Comparison of Big Data Frameworks on a Layered Dataflow Model," Parallel Processing Letters, vol. 27, iss. 01, p. 1–20, 2017. doi:10.1142/S0129626417400035
    [BibTeX] [Abstract] [Download PDF]

    In the world of Big Data analytics, there is a series of tools aiming at simplifying programming applications to be executed on clusters. Although each tool claims to provide better programming, data and execution models, for which only informal (and often confusing) semantics is generally provided, all share a common underlying model, namely, the Dataflow model. The Dataflow model we propose shows how various tools share the same expressiveness at different levels of abstraction. The contribution of this work is twofold: first, we show that the proposed model is (at least) as general as existing batch and streaming frameworks (e.g., Spark, Flink, Storm), thus making it easier to understand high-level data-processing applications written in such frameworks. Second, we provide a layered model that can represent tools and applications following the Dataflow paradigm and we show how the analyzed tools fit in each level.

    @article{17:bigdatasurvey:PPL,
    title = {A Comparison of Big Data Frameworks on a Layered Dataflow Model},
    author = {Misale, Claudia and Drocco, Maurizio and Aldinucci, Marco and Tremblay, Guy},
    year = {2017},
    journal = {Parallel Processing Letters},
    volume = {27},
    pages = {1--20},
    doi = {10.1142/S0129626417400035},
    abstract = {In the world of Big Data analytics, there is a series of tools aiming at simplifying programming applications to be executed on clusters. Although each tool claims to provide better programming, data and execution models, for which only informal (and often confusing) semantics is generally provided, all share a common underlying model, namely, the Dataflow model. The Dataflow model we propose shows how various tools share the same expressiveness at different levels of abstraction. The contribution of this work is twofold: first, we show that the proposed model is (at least) as general as existing batch and streaming frameworks (e.g., Spark, Flink, Storm), thus making it easier to understand high-level data-processing applications written in such frameworks. Second, we provide a layered model that can represent tools and applications following the Dataflow paradigm and we show how the analyzed tools fit in each level.},
    date-modified = {2017-12-12 12:16:32 +0000},
    date-published = {March 2017},
    date-received = {January 2017},
    eprint = {http://www.worldscientific.com/doi/pdf/10.1142/S0129626417400035},
    number = {01},
    url = {https://iris.unito.it/retrieve/handle/2318/1626287/303421/preprintPPL_4aperto.pdf},
    bdsk-url-1 = {https://iris.unito.it/retrieve/handle/2318/1626287/303421/preprintPPL_4aperto.pdf},
    bdsk-url-2 = {http://dx.doi.org/10.1142/S0129626417400035},
    keywords = {toreador, rephrase, IBM}
    }

  • M. Aldinucci, M. Danelutto, P. Kilpatrick, and M. Torquati, "FastFlow: high-level and efficient streaming on multi-core," in Programming Multi-core and Many-core Computing Systems, S. Pllana and F. Xhafa, Eds., John Wiley & Sons, Ltd, 2017, p. 261–280. doi:10.1002/9781119332015.ch13
    [BibTeX] [Abstract] [Download PDF]

    This chapter first outlines FastFlow design and then shows sample use of the FastFlow programming environment together with performance results achieved on various state-of-the-art multicore architectures. The FastFlow framework has been designed according to four foundational principles: layered design; efficiency in base mechanisms; support for stream parallelism; and a programming model based on design pattern/algorithmic skeleton concepts. The core of the FastFlow framework provides an efficient implementation of single-producer-single-consumer (SPSC) first in-first out (FIFO) queues. The next tier up extends from one-to-one queues to one-to-many, many-to-one, and many-to-many synchronizations and data flows, which are implemented using only SPSC queues and arbiter threads, thus providing lock-free and wait-free arbitrary dataflow graphs. When designing and implementing new parallel applications using FastFlow, programmers instantiate patterns provided by FastFlow to adapt them to the specific needs of the application at hand. The chapter demonstrates how the principal FastFlow patterns may be used in a parallel application.

    @incollection{ff:wileybook:17,
    title = {FastFlow: high-level and efficient streaming on multi-core},
    author = {Marco Aldinucci and Marco Danelutto and Peter Kilpatrick and Massimo Torquati},
    year = {2017},
    booktitle = {Programming Multi-core and Many-core Computing Systems},
    publisher = {John Wiley \& Sons, Ltd},
    series = {Parallel and Distributed Computing},
    pages = {261--280},
    doi = {10.1002/9781119332015.ch13},
    isbn = {9781119332015},
    abstract = {This chapter first outlines FastFlow design and then shows sample use of the FastFlow programming environment together with performance results achieved on various state-of-the-art multicore architectures. The FastFlow framework has been designed according to four foundational principles: layered design; efficiency in base mechanisms; support for stream parallelism; and a programming model based on design pattern/algorithmic skeleton concepts. The core of the FastFlow framework provides an efficient implementation of single-producer-single-consumer (SPSC) first in-first out (FIFO) queues. The next tier up extends from one-to-one queues to one-to-many, many-to-one, and many-to-many synchronizations and data flows, which are implemented using only SPSC queues and arbiter threads, thus providing lock-free and wait-free arbitrary dataflow graphs. When designing and implementing new parallel applications using FastFlow, programmers instantiate patterns provided by FastFlow to adapt them to the specific needs of the application at hand. The chapter demonstrates how the principal FastFlow patterns may be used in a parallel application.},
    chapter = {13},
    date-added = {2011-06-18 18:28:00 +0200},
    date-modified = {2014-12-31 14:14:28 +0000},
    editor = {Sabri Pllana and Fatos Xhafa},
    optannote = {ISBN: 0470936908},
    url = {http://calvados.di.unipi.it/storage/paper_files/2011_FF_tutorial-draft.pdf},
    bdsk-url-1 = {http://calvados.di.unipi.it/storage/paper_files/2011_FF_tutorial-draft.pdf},
    bdsk-url-2 = {https://dx.doi.org/10.1002/9781119332015.ch13},
    keywords = {fastflow}
    }

  • F. Tordini, M. Drocco, C. Misale, L. Milanesi, P. Liò, I. Merelli, M. Torquati, and M. Aldinucci, "NuChart-II: the road to a fast and scalable tool for Hi-C data analysis," International Journal of High Performance Computing Applications, vol. 31, iss. 3, p. 196–211, 2017. doi:10.1177/1094342016668567
    [BibTeX] [Abstract] [Download PDF]

    Recent advances in molecular biology and bioinformatics techniques brought to an explosion of the information about the spatial organisation of the DNA in the nucleus of a cell. High-throughput molecular biology techniques provide a genome-wide capture of the spatial organization of chromosomes at unprecedented scales, which permit to identify physical interactions between genetic elements located throughout a genome. Recent results have shown that there is a large correlation between co-localization and co-regulation of genes, but these important information are hampered by the lack of biologists-friendly analysis and visualisation software. In this work we present NuChart-II, an efficient and highly optimized tool for genomic data analysis that provides a gene-centric, graph-based representation of genomic information. While designing NuChart-II we addressed several common issues in the parallelisation of memory bound algorithms for shared-memory systems. With performance and usability in mind, NuChart-II is a R package that embeds a C++ engine: computing capabilities and memory hierarchy of multi-core architectures are fully exploited, while the versatile R environment for statistical analysis and data visualisation rises the level of abstraction and permits to orchestrate analysis and visualisation of genomic data.

    @article{16:ijhpca:nuchart,
    title = {{NuChart-II}: the road to a fast and scalable tool for {Hi-C} data analysis},
    author = {Fabio Tordini and Maurizio Drocco and Claudia Misale and Luciano Milanesi and Pietro Li{\`o} and Ivan Merelli and Massimo Torquati and Marco Aldinucci},
    year = {2017},
    journal = {International Journal of High Performance Computing Applications},
    volume = {31},
    pages = {196--211},
    doi = {10.1177/1094342016668567},
    abstract = {Recent advances in molecular biology and bioinformatics techniques brought to an explosion of the information about the spatial organisation of the DNA in the nucleus of a cell. High-throughput molecular biology techniques provide a genome-wide capture of the spatial organization of chromosomes at unprecedented scales, which permit to identify physical interactions between genetic elements located throughout a genome. Recent results have shown that there is a large correlation between co-localization and co-regulation of genes, but these important information are hampered by the lack of biologists-friendly analysis and visualisation software. In this work we present NuChart-II, an efficient and highly optimized tool for genomic data analysis that provides a gene-centric, graph-based representation of genomic information. While designing NuChart-II we addressed several common issues in the parallelisation of memory bound algorithms for shared-memory systems. With performance and usability in mind, NuChart-II is a R package that embeds a C++ engine: computing capabilities and memory hierarchy of multi-core architectures are fully exploited, while the versatile R environment for statistical analysis and data visualisation rises the level of abstraction and permits to orchestrate analysis and visualisation of genomic data.},
    date-modified = {2018-12-27 19:06:22 +0100},
    number = {3},
    url = {https://iris.unito.it/retrieve/handle/2318/1607126/238747/main.pdf},
    bdsk-url-1 = {http://hdl.handle.net/2318/1607126},
    bdsk-url-2 = {http://dx.doi.org/10.1177/1094342016668567},
    bdsk-url-3 = {https://iris.unito.it/retrieve/handle/2318/1607126/238747/main.pdf},
    keywords = {fastflow, bioinformatics, repara, rephrase, interomics, mimomics}
    }

  • M. Aldinucci, S. Bagnasco, S. Lusso, P. Pasteris, and S. Rabellino, "OCCAM: a flexible, multi-purpose and extendable HPC cluster," in Journal of Physics: Conf. Series (CHEP 2016), San Francisco, USA, 2017, p. 82039. doi:10.1088/1742-6596/898/8/082039
    [BibTeX] [Abstract] [Download PDF]

    Obtaining CPU cycles on an HPC cluster is nowadays relatively simple and sometimes even cheap for academic institutions. However, in most of the cases providers of HPC services would not allow changes on the configuration, implementation of special features or a lower-level control on the computing infrastructure and networks, for example for testing new computing patterns or conducting research on HPC itself. The variety of use cases proposed by several departments of the University of Torino, including ones from solid-state chemistry, high-energy physics, computer science, big data analytics, computational biology, genomics and many others, called for different and sometimes conflicting configurations; furthermore, several R&D activities in the field of scientific computing, with topics ranging from GPU acceleration to Cloud Computing technologies, needed a platform to be carried out on. The Open Computing Cluster for Advanced data Manipulation (OCCAM) is a multi-purpose flexible HPC cluster designed and operated by a collaboration between the University of Torino and the Torino branch of the Istituto Nazionale di Fisica Nucleare. It is aimed at providing a flexible, reconfigurable and extendable infrastructure to cater to a wide range of different scientific computing needs, as well as a platform for R&D activities on computational technologies themselves. Extending it with novel architecture CPU, accelerator or hybrid microarchitecture (such as forthcoming Intel Xeon Phi Knights Landing) should be as simple as plugging a node in a rack.
The initial system counts slightly more than 1100 cpu cores and includes different types of computing nodes (standard dual-socket nodes, large quad-sockets nodes with 768 GB RAM, and multi-GPU nodes) and two separate disk storage subsystems: a smaller high-performance scratch area, based on the Lustre file system, intended for direct computational I/O and a larger one, of the order of 1PB, to archive near-line data for archival purposes. All the components of the system are interconnected through a 10Gb/s Ethernet layer with one-level topology and an InfiniBand FDR 56Gbps layer in fat-tree topology. A system of this kind, heterogeneous and reconfigurable by design, poses a number of challenges related to the frequency at which heterogeneous hardware resources might change their availability and shareability status, which in turn affect methods and means to allocate, manage, optimize, bill, monitor VMs, virtual farms, jobs, interactive bare-metal sessions, etc. This poster describes some of the use cases that prompted the design and construction of the HPC cluster, its architecture and a first characterization of its performance by some synthetic benchmark tools and a few realistic use-case tests.

    @inproceedings{16:occam:chep,
    title = {OCCAM: a flexible, multi-purpose and extendable HPC cluster},
    author = {Marco Aldinucci and Stefano Bagnasco and Stefano Lusso and Paolo Pasteris and Sergio Rabellino},
    year = {2017},
    booktitle = {Journal of Physics: Conf. Series (CHEP 2016)},
    address = {San Francisco, USA},
    volume = {898},
    pages = {082039},
    doi = {10.1088/1742-6596/898/8/082039},
    abstract = {Obtaining CPU cycles on an HPC cluster is nowadays relatively simple and sometimes even cheap for academic institutions. However, in most of the cases providers of HPC services would not allow changes on the configuration, implementation of special features or a lower-level control on the computing infrastructure and networks, for example for testing new computing patterns or conducting research on HPC itself. The variety of use cases proposed by several departments of the University of Torino, including ones from solid-state chemistry, high-energy physics, computer science, big data analytics, computational biology, genomics and many others, called for different and sometimes conflicting configurations; furthermore, several R\&D activities in the field of scientific computing, with topics ranging from GPU acceleration to Cloud Computing technologies, needed a platform to be carried out on. The Open Computing Cluster for Advanced data Manipulation (OCCAM) is a multi-purpose flexible HPC cluster designed and operated by a collaboration between the University of Torino and the Torino branch of the Istituto Nazionale di Fisica Nucleare. It is aimed at providing a flexible, reconfigurable and extendable infrastructure to cater to a wide range of different scientific computing needs, as well as a platform for R\&D activities on computational technologies themselves. Extending it with novel architecture CPU, accelerator or hybrid microarchitecture (such as forthcoming Intel Xeon Phi Knights Landing) should be as simple as plugging a node in a rack.
The initial system counts slightly more than 1100 cpu cores and includes different types of computing nodes (standard dual-socket nodes, large quad-sockets nodes with 768 GB RAM, and multi-GPU nodes) and two separate disk storage subsystems: a smaller high-performance scratch area, based on the Lustre file system, intended for direct computational I/O and a larger one, of the order of 1PB, to archive near-line data for archival purposes. All the components of the system are interconnected through a 10Gb/s Ethernet layer with one-level topology and an InfiniBand FDR 56Gbps layer in fat-tree topology. A system of this kind, heterogeneous and reconfigurable by design, poses a number of challenges related to the frequency at which heterogeneous hardware resources might change their availability and shareability status, which in turn affect methods and means to allocate, manage, optimize, bill, monitor VMs, virtual farms, jobs, interactive bare-metal sessions, etc. This poster describes some of the use cases that prompted the design and construction of the HPC cluster, its architecture and a first characterization of its performance by some synthetic benchmark tools and a few realistic use-case tests.},
    date-modified = {2018-12-27 18:44:37 +0100},
    number = {8},
    optmonth = {oct},
    url = {http://iopscience.iop.org/article/10.1088/1742-6596/898/8/082039/meta},
    bdsk-url-1 = {http://iopscience.iop.org/article/10.1088/1742-6596/898/8/082039/meta},
    bdsk-url-2 = {http://dx.doi.org/10.1088/1742-6596/898/8/082039},
    keywords = {nvidia, c3s}
    }

  • C. Spampinato, S. Palazzo, D. Giordano, M. Aldinucci, and R. Leonardi, "Deep learning for automated skeletal bone age assessment in X-ray images," Medical Image Analysis, vol. 36, p. 41–51, 2017. doi:10.1016/j.media.2016.10.010
    [BibTeX] [Abstract] [Download PDF]

    Skeletal bone age assessment is a common clinical practice to investigate endocrinology, genetic and growth disorders in children. It is generally performed by radiological examination of the left hand by using either the Greulich and Pyle (G&P) method or the Tanner–Whitehouse (TW) one. However, both clinical procedures show several limitations, from the examination effort of radiologists to (most importantly) significant intra- and inter-operator variability. To address these problems, several automated approaches (especially relying on the TW method) have been proposed; nevertheless, none of them has been proved able to generalize to different races, age ranges and genders. In this paper, we propose and test several deep learning approaches to assess skeletal bone age automatically; the results showed an average discrepancy between manual and automatic evaluation of about 0.8 years, which is state-of-the-art performance. Furthermore, this is the first automated skeletal bone age assessment work tested on a public dataset and for all age ranges, races and genders, for which the source code is available, thus representing an exhaustive baseline for future research in the field. Beside the specific application scenario, this paper aims at providing answers to more general questions about deep learning on medical images: from the comparison between deep-learned features and manually-crafted ones, to the usage of deep-learning methods trained on general imagery for medical problems, to how to train a CNN with few images.

    @article{17:deepx:conce,
    title = {Deep learning for automated skeletal bone age assessment in X-ray images},
    author = {Concetto Spampinato and Simone Palazzo and Daniela Giordano and Marco Aldinucci and Rosalia Leonardi},
    year = {2017},
    journal = {Medical Image Analysis},
    volume = {36},
    pages = {41--51},
    doi = {10.1016/j.media.2016.10.010},
    abstract = {Skeletal bone age assessment is a common clinical practice to investigate endocrinology, genetic and growth disorders in children. It is generally performed by radiological examination of the left hand by using either the Greulich and Pyle (G\&P) method or the Tanner--Whitehouse (TW) one. However, both clinical procedures show several limitations, from the examination effort of radiologists to (most importantly) significant intra- and inter-operator variability. To address these problems, several automated approaches (especially relying on the TW method) have been proposed; nevertheless, none of them has been proved able to generalize to different races, age ranges and genders. In this paper, we propose and test several deep learning approaches to assess skeletal bone age automatically; the results showed an average discrepancy between manual and automatic evaluation of about 0.8 years, which is state-of-the-art performance. Furthermore, this is the first automated skeletal bone age assessment work tested on a public dataset and for all age ranges, races and genders, for which the source code is available, thus representing an exhaustive baseline for future research in the field. Beside the specific application scenario, this paper aims at providing answers to more general questions about deep learning on medical images: from the comparison between deep-learned features and manually-crafted ones, to the usage of deep-learning methods trained on general imagery for medical problems, to how to train a CNN with few images.},
    url = {https://iris.unito.it/retrieve/e27ce42b-5743-2581-e053-d805fe0acbaa/main.pdf},
    bdsk-url-1 = {http://dx.doi.org/10.1016/j.media.2016.10.010},
    bdsk-url-2 = {https://iris.unito.it/retrieve/e27ce42b-5743-2581-e053-d805fe0acbaa/main.pdf},
    keywords = {nvidia}
    }

  • M. Coppo, M. Dezani-Ciancaglini, A. Díaz-Caro, I. Margaria, and M. Zacchi, "Retractions in Intersection Types," in ITRS'16, 2017, p. 31–47. doi:10.4204/EPTCS.242.5
    [BibTeX] [Download PDF]
    @inproceedings{CDMZ16,
    title = {Retractions in Intersection Types},
    author = {Mario Coppo and Mariangiola Dezani-Ciancaglini and Alejandro D{\'\i}az-Caro and Ines Margaria and Maddalena Zacchi},
    year = {2017},
    booktitle = {ITRS'16},
    series = {EPTCS},
    volume = {242},
    pages = {31--47},
    doi = {10.4204/EPTCS.242.5},
    editor = {Naoki Kobayashi},
    url = {http://www.di.unito.it/~dezani/papers/cddmz.pdf},
    bdsk-url-1 = {http://www.di.unito.it/~dezani/papers/cddmz.pdf},
    bdsk-url-2 = {http://dx.doi.org/10.4204/EPTCS.242.5},
    keywords = {rephrase, lambda}
    }

  • J. Romero, E. H. Phillips, G. Ruetsch, M. Fatica, F. Spiga, and P. Giannozzi, "A Performance Study of Quantum ESPRESSO's PWscf Code on Multi-core and GPU Systems," in High Performance Computing Systems. Performance Modeling, Benchmarking, and Simulation - 8th International Workshop, PMBS 2017, Denver, CO, USA, November 13, 2017, Proceedings, 2017, p. 67–87. doi:10.1007/978-3-319-72971-8_4
    [BibTeX] [Download PDF]
    @inproceedings{DBLP:conf/sc/RomeroPRFSG17,
    title = {A Performance Study of {Quantum ESPRESSO's} {PWscf} Code on Multi-core and {GPU} Systems},
    author = {Joshua Romero and Everett H. Phillips and Gregory Ruetsch and Massimiliano Fatica and Filippo Spiga and Paolo Giannozzi},
    year = {2017},
    booktitle = {High Performance Computing Systems. Performance Modeling, Benchmarking, and Simulation - 8th International Workshop, {PMBS} 2017, Denver, CO, USA, November 13, 2017, Proceedings},
    pages = {67--87},
    doi = {10.1007/978-3-319-72971-8_4},
    bibsource = {dblp computer science bibliography, http://dblp.org},
    biburl = {http://dblp.org/rec/bib/conf/sc/RomeroPRFSG17},
    optcrossref = {DBLP:conf/sc/2017pmbs},
    timestamp = {Wed, 03 Jan 2018 18:14:11 +0100},
    url = {https://doi.org/10.1007/978-3-319-72971-8_4},
    bdsk-url-1 = {https://doi.org/10.1007/978-3-319-72971-8_4}
    }

  • W. A. Ahmad, A. Bartolini, F. Beneventi, L. Benini, A. Borghesi, M. Cicala, P. Forestieri, C. Gianfreda, D. Gregori, A. Libri, F. Spiga, and S. Tinti, "Design of an Energy Aware Petaflops Class High Performance Cluster Based on Power Architecture," in 2017 IEEE International Parallel and Distributed Processing Symposium Workshops, IPDPS Workshops 2017, Orlando / Buena Vista, FL, USA, May 29 - June 2, 2017, 2017, p. 964–973. doi:10.1109/IPDPSW.2017.22
    [BibTeX] [Download PDF]
    @inproceedings{DBLP:conf/ipps/AhmadBBBBCFGGLS17,
    author = {Wissam Abu Ahmad and Andrea Bartolini and Francesco Beneventi and Luca Benini and Andrea Borghesi and Marco Cicala and Privato Forestieri and Cosimo Gianfreda and Daniele Gregori and Antonio Libri and Filippo Spiga and Simone Tinti},
    title = {Design of an Energy Aware Petaflops Class High Performance Cluster Based on Power Architecture},
    booktitle = {2017 {IEEE} International Parallel and Distributed Processing Symposium Workshops, {IPDPS} Workshops 2017, Orlando / Buena Vista, FL, USA, May 29 - June 2, 2017},
    pages = {964--973},
    year = {2017},
    doi = {10.1109/IPDPSW.2017.22},
    url = {https://doi.org/10.1109/IPDPSW.2017.22},
    biburl = {http://dblp.org/rec/bib/conf/ipps/AhmadBBBBCFGGLS17},
    bibsource = {dblp computer science bibliography, http://dblp.org},
    timestamp = {Mon, 10 Jul 2017 21:06:07 +0200},
    optcrossref = {DBLP:conf/ipps/2017w},
    bdsk-url-1 = {https://doi.org/10.1109/IPDPSW.2017.22}
    }

2016

  • C. Misale, M. Drocco, M. Aldinucci, and G. Tremblay, "A Comparison of Big Data Frameworks on a Layered Dataflow Model," in Proc. of Intl. Workshop on High-Level Parallel Programming (HLPP), Muenster, Germany, 2016, p. 1–19. doi:10.5281/zenodo.321866
    [BibTeX] [Abstract] [Download PDF]

    In the world of Big Data analytics, there is a series of tools aiming at simplifying programming applications to be executed on clusters. Although each tool claims to provide better programming, data and execution models, for which only informal (and often confusing) semantics is generally provided, all share a common underlying model, namely, the Dataflow model. The Dataflow model we propose shows how various tools share the same expressiveness at different levels of abstraction. The contribution of this work is twofold: first, we show that the proposed model is (at least) as general as existing batch and streaming frameworks (e.g., Spark, Flink, Storm), thus making it easier to understand high-level data-processing applications written in such frameworks. Second, we provide a layered model that can represent tools and applications following the Dataflow paradigm and we show how the analyzed tools fit in each level.

    @inproceedings{16:bigdatasurvey:hlpp,
    title = {A Comparison of {Big Data} Frameworks on a Layered {Dataflow} Model},
    author = {Claudia Misale and Maurizio Drocco and Marco Aldinucci and Guy Tremblay},
    year = {2016},
    month = jul,
    booktitle = {Proc. of Intl. Workshop on High-Level Parallel Programming (HLPP)},
    publisher = {arXiv.org},
    address = {Muenster, Germany},
    pages = {1--19},
    doi = {10.5281/zenodo.321866},
    abstract = {In the world of Big Data analytics, there is a series of tools aiming at simplifying programming applications to be executed on clusters. Although each tool claims to provide better programming, data and execution models, for which only informal (and often confusing) semantics is generally provided, all share a common underlying model, namely, the Dataflow model. The Dataflow model we propose shows how various tools share the same expressiveness at different levels of abstraction. The contribution of this work is twofold: first, we show that the proposed model is (at least) as general as existing batch and streaming frameworks (e.g., Spark, Flink, Storm), thus making it easier to understand high-level data-processing applications written in such frameworks. Second, we provide a layered model that can represent tools and applications following the Dataflow paradigm and we show how the analyzed tools fit in each level.},
    date-added = {2016-06-17 22:15:43 +0000},
    date-modified = {2017-12-12 14:49:47 +0000},
    url = {http://arxiv.org/pdf/1606.05293v1.pdf},
    bdsk-url-1 = {http://arxiv.org/pdf/1606.05293v1.pdf},
    bdsk-url-2 = {http://dx.doi.org/10.5281/zenodo.321866},
    keywords = {toreador, rephrase, IBM}
    }

  • P. Viviani, M. Aldinucci, and R. d'Ippolito, "An hybrid linear algebra framework for engineering," in Advanced Computer Architecture and Compilation for High-Performance and Embedded Systems (ACACES) – Poster Abstracts, Fiuggi, Italy, 2016.
    [BibTeX] [Abstract] [Download PDF]

    The aim of this work is to provide developers and domain experts with simple (Matlab-like) interface for performing linear algebra tasks while retaining state-of-the-art computational speed. To achieve this goal we extend Armadillo C++ library is extended in order to support with multiple LAPACK-compliant back-ends targeting different architectures including CUDA GPUs; moreover our approach involves the possibility of dynamically switching between such back-ends in order to select the one which is most convenient based on the specific problem and hardware configuration. This approach is eventually validated within an industrial environment.

    @inproceedings{16:acaces:armadillo,
    title = {An hybrid linear algebra framework for engineering},
    author = {Paolo Viviani and Marco Aldinucci and Roberto d'Ippolito},
    year = {2016},
    month = jul,
    booktitle = {Advanced Computer Architecture and Compilation for High-Performance and Embedded Systems (ACACES) -- Poster Abstracts},
    address = {Fiuggi, Italy},
    abstract = {The aim of this work is to provide developers and domain experts with simple (Matlab-like) interface for performing linear algebra tasks while retaining state-of-the-art computational speed. To achieve this goal we extend Armadillo C++ library is extended in order to support with multiple LAPACK-compliant back-ends targeting different architectures including CUDA GPUs; moreover our approach involves the possibility of dynamically switching between such back-ends in order to select the one which is most convenient based on the specific problem and hardware configuration. This approach is eventually validated within an industrial environment.},
    date-added = {2016-08-20 17:22:51 +0000},
    date-modified = {2020-11-15 15:01:16 +0100},
    url = {https://iris.unito.it/retrieve/handle/2318/1622382/300198/armadillo.pdf},
    bdsk-url-1 = {https://iris.unito.it/retrieve/handle/2318/1622382/300198/armadillo.pdf},
    keywords = {nvidia, gpu, itea2, repara}
    }

  • F. Tordini, "The road towards a Cloud-based High-Performance solution for genomic data analysis," PhD Thesis, 2016.
    [BibTeX] [Abstract] [Download PDF]

    Nowadays, molecular biology laboratories are delivering more and more data about DNA organisation, at increasing resolution and in a large number of samples. So much that genomic research is now facing many of the scale-out issues that high-performance computing has been addressing for years: they require powerful infrastructures with fast computing and storage capabilities, with substantial challenges in terms of data processing, statistical analysis and data representation. With this thesis we propose a high-performance pipeline for the analysis and interpretation of heterogeneous genomic information: beside performance, usability and availability are two essential requirements that novel Bioinformatics tools should satisfy. In this perspective, we propose and discuss our efforts towards a solid infrastructure for data processing and storage, where software that operates over data is exposed as a service, and is accessible by users through the Internet. We begin by presenting NuChart-II, a tool for the analysis and interpretation of spatial genomic information. With NuChart-II we propose a graph-based representation of genomic data, which can provide insights on the disposition of genomic elements in the DNA. We also discuss our approach for the normalisation of biases that affect raw sequenced data. We believe that many currently available tools for genomic data analysis are perceived as tricky and troublesome applications, that require highly specialised skills to obtain the desired outcomes. Concerning usability, we want to rise the level of abstraction perceived by the user, but maintain high performance and correctness while providing an exhaustive solution for data visualisation. We also intend to foster the availability of novel tools: in this work we also discuss a cloud solution that delivers computation and storage as dynamically allocated virtual resources via the Internet, while needed software is provided as a service. 
In this way, the computational demand of genomic research can be satisfied more economically by using lab-scale and enterprise-oriented technologies. Here we discuss our idea of a task farm for the integration of heterogeneous data resulting from different sequencing experiments: we believe that the integration of multi-omic features on a nuclear map can be a valuable mean for studying the interactions among genetic elements. This can reveal insights on biological mechanisms, such as genes regulation, translocations and epigenetic patterns.

    @phdthesis{tordiniThesis16,
    title = {The road towards a {Cloud}-based {High-Performance} solution for genomic data analysis},
    author = {Fabio Tordini},
    year = {2016},
    month = apr,
    abstract = {Nowadays, molecular biology laboratories are delivering more and more data about DNA organisation, at increasing resolution and in a large number of samples. So much that genomic research is now facing many of the scale-out issues that high-performance computing has been addressing for years: they require powerful infrastructures with fast computing and storage capabilities, with substantial challenges in terms of data processing, statistical analysis and data representation. With this thesis we propose a high-performance pipeline for the analysis and interpretation of heterogeneous genomic information: beside performance, usability and availability are two essential requirements that novel Bioinformatics tools should satisfy. In this perspective, we propose and discuss our efforts towards a solid infrastructure for data processing and storage, where software that operates over data is exposed as a service, and is accessible by users through the Internet. We begin by presenting NuChart-II, a tool for the analysis and interpretation of spatial genomic information. With NuChart-II we propose a graph-based representation of genomic data, which can provide insights on the disposition of genomic elements in the DNA. We also discuss our approach for the normalisation of biases that affect raw sequenced data. We believe that many currently available tools for genomic data analysis are perceived as tricky and troublesome applications, that require highly specialised skills to obtain the desired outcomes. Concerning usability, we want to rise the level of abstraction perceived by the user, but maintain high performance and correctness while providing an exhaustive solution for data visualisation. We also intend to foster the availability of novel tools: in this work we also discuss a cloud solution that delivers computation and storage as dynamically allocated virtual resources via the Internet, while needed software is provided as a service. 
In this way, the computational demand of genomic research can be satisfied more economically by using lab-scale and enterprise-oriented technologies. Here we discuss our idea of a task farm for the integration of heterogeneous data resulting from different sequencing experiments: we believe that the integration of multi-omic features on a nuclear map can be a valuable mean for studying the interactions among genetic elements. This can reveal insights on biological mechanisms, such as genes regulation, translocations and epigenetic patterns.},
    school = {Computer Science Department, University of Torino, Italy},
    url = {http://calvados.di.unipi.it/storage/paper_files/2016_tordini_phdthesis.pdf},
    bdsk-url-1 = {http://calvados.di.unipi.it/storage/paper_files/2016_tordini_phdthesis.pdf},
    keywords = {fastflow, bioinformatics}
    }

  • A. Bracciali, M. Aldinucci, M. Patterson, T. Marschall, N. Pisanti, I. Merelli, and M. Torquati, "pWhatsHap: efficient haplotyping for future generation sequencing," BMC Bioinformatics, vol. 17, iss. Suppl 11, p. 342, 2016. doi:10.1186/s12859-016-1170-y
    [BibTeX] [Abstract] [Download PDF]

    Background: Haplotype phasing is an important problem in the analysis of genomics information. Given a set of DNA fragments of an individual, it consists of determining which one of the possible alleles (alternative forms of a gene) each fragment comes from. Haplotype information is relevant to gene regulation, epigenetics, genome-wide association studies, evolutionary and population studies, and the study of mutations. Haplotyping is currently addressed as an optimisation problem aiming at solutions that minimise, for instance, error correction costs, where costs are a measure of the confidence in the accuracy of the information acquired from DNA sequencing. Solutions have typically an exponential computational complexity. WhatsHap is a recent optimal approach which moves computational complexity from DNA fragment length to fragment overlap, i.e., coverage, and is hence of particular interest when considering sequencing technology's current trends that are producing longer fragments. Results: Given the potential relevance of efficient haplotyping in several analysis pipelines, we have designed and engineered pWhatsHap, a parallel, high-performance version of WhatsHap. pWhatsHap is embedded in a toolkit developed in Python and supports genomics datasets in standard file formats. Building on WhatsHap, pWhatsHap exhibits the same complexity exploring a number of possible solutions which is exponential in the coverage of the dataset. The parallel implementation on multi-core architectures allows for a relevant reduction of the execution time for haplotyping, while the provided results enjoy the same high accuracy as that provided by WhatsHap, which increases with coverage. Conclusions: Due to its structure and management of the large datasets, the parallelisation of WhatsHap posed demanding technical challenges, which have been addressed exploiting a high-level parallel programming framework. 
The result, pWhatsHap, is a freely available toolkit that improves the efficiency of the analysis of genomics information.

    @article{16:pwhatshap:bmc,
    title = {{pWhatsHap}: efficient haplotyping for future generation sequencing},
    author = {Andrea Bracciali and Marco Aldinucci and Murray Patterson and Tobias Marschall and Nadia Pisanti and Ivan Merelli and Massimo Torquati},
    year = {2016},
    journal = {BMC Bioinformatics},
    volume = {17},
    pages = {342},
    doi = {10.1186/s12859-016-1170-y},
    abstract = {Background: Haplotype phasing is an important problem in the analysis of genomics information. Given a set of DNA fragments of an individual, it consists of determining which one of the possible alleles (alternative forms of a gene) each fragment comes from. Haplotype information is relevant to gene regulation, epigenetics, genome-wide association studies, evolutionary and population studies, and the study of mutations. Haplotyping is currently addressed as an optimisation problem aiming at solutions that minimise, for instance, error correction costs, where costs are a measure of the confidence in the accuracy of the information acquired from DNA sequencing. Solutions have typically an exponential computational complexity. WhatsHap is a recent optimal approach which moves computational complexity from DNA fragment length to fragment overlap, i.e., coverage, and is hence of particular interest when considering sequencing technology's current trends that are producing longer fragments. Results: Given the potential relevance of efficient haplotyping in several analysis pipelines, we have designed and engineered pWhatsHap, a parallel, high-performance version of WhatsHap. pWhatsHap is embedded in a toolkit developed in Python and supports genomics datasets in standard file formats. Building on WhatsHap, pWhatsHap exhibits the same complexity exploring a number of possible solutions which is exponential in the coverage of the dataset. The parallel implementation on multi-core architectures allows for a relevant reduction of the execution time for haplotyping, while the provided results enjoy the same high accuracy as that provided by WhatsHap, which increases with coverage. Conclusions: Due to its structure and management of the large datasets, the parallelisation of WhatsHap posed demanding technical challenges, which have been addressed exploiting a high-level parallel programming framework. 
The result, pWhatsHap, is a freely available toolkit that improves the efficiency of the analysis of genomics information.},
    date-modified = {2016-10-17 17:28:27 +0000},
    number = {Suppl 11},
    url = {http://bmcbioinformatics.biomedcentral.com/track/pdf/10.1186/s12859-016-1170-y?site=bmcbioinformatics.biomedcentral.com},
    bdsk-url-1 = {http://hdl.handle.net/2318/1607125},
    bdsk-url-2 = {http://bmcbioinformatics.biomedcentral.com/track/pdf/10.1186/s12859-016-1170-y?site=bmcbioinformatics.biomedcentral.com},
    bdsk-url-3 = {http://dx.doi.org/10.1186/s12859-016-1170-y},
    keywords = {fastflow, paraphrase, rephrase}
    }

  • M. Dezani-Ciancaglini, S. Ghilezan, S. Jaksic, J. Pantovic, and N. Yoshida, "Denotational and Operational Preciseness of Subtyping: A Roadmap," in Theory and Practice of Formal Methods, 2016, p. 155–172. doi:10.1007/978-3-319-30734-3_12
    [BibTeX] [Download PDF]
    @inproceedings{DGJPY16,
    title = {Denotational and Operational Preciseness of Subtyping: A Roadmap},
    author = {Mariangiola Dezani-Ciancaglini and Silvia Ghilezan and Svetlana Jaksic and Jovanka Pantovic and Nobuko Yoshida},
    year = {2016},
    booktitle = {Theory and Practice of Formal Methods},
    series = {LNCS},
    volume = {9660},
    pages = {155--172},
    doi = {10.1007/978-3-319-30734-3_12},
    url = {http://www.di.unito.it/~dezani/papers/dgjpy16.pdf},
    bdsk-url-1 = {http://www.di.unito.it/~dezani/papers/dgjpy16.pdf},
    bdsk-url-2 = {http://dx.doi.org/10.1007/978-3-319-30734-3_12},
    keywords = {rephrase, lambda}
    }

  • M. Coppo, M. Dezani-Ciancaglini, and B. Venneri, "Parallel Monitors for Self-adaptive Sessions," in PLACES'16, 2016, p. 25–36.
    [BibTeX] [Download PDF]
    @inproceedings{CDV16,
    author = {Mario Coppo and Mariangiola Dezani-Ciancaglini and Betti Venneri},
    title = {Parallel Monitors for Self-adaptive Sessions},
    booktitle = {PLACES'16},
    series = {EPTCS},
    volume = {211},
    pages = {25--36},
    year = {2016},
    url = {http://www.di.unito.it/~dezani/papers/cdv16.pdf},
    bdsk-url-1 = {http://www.di.unito.it/~dezani/papers/cdv16.pdf},
    keywords = {rephrase, lambda}
    }

  • I. Castellani, M. Dezani-Ciancaglini, and J. A. Pérez, "Self-Adaptation and Secure Information Flow in Multiparty Communications," Formal Aspects of Computing, vol. 28, iss. 4, p. 669–696, 2016.
    [BibTeX] [Download PDF]
    @article{CDP16,
    title = {Self-Adaptation and Secure Information Flow in Multiparty Communications},
    author = {Ilaria Castellani and Mariangiola Dezani-Ciancaglini and Jorge A. P{\'e}rez},
    year = {2016},
    journal = {Formal Aspects of Computing},
    publisher = {Springer},
    volume = {28},
    pages = {669--696},
    number = {4},
    url = {http://www.di.unito.it/~dezani/papers/cdp16.pdf},
    bdsk-url-1 = {http://www.di.unito.it/~dezani/papers/cdp16.pdf},
    keywords = {rephrase, lambda}
    }

  • I. Castellani, M. Dezani-Ciancaglini, and U. de' Liguoro, "Secure Multiparty Sessions with Topics," in PLACES'16, 2016, p. 1–12.
    [BibTeX] [Download PDF]
    @inproceedings{CDL16,
    author = {Ilaria Castellani and Mariangiola Dezani-Ciancaglini and Ugo de' Liguoro},
    title = {Secure Multiparty Sessions with Topics},
    booktitle = {PLACES'16},
    series = {EPTCS},
    volume = {211},
    pages = {1--12},
    year = {2016},
    url = {http://www.di.unito.it/~dezani/papers/cdl16.pdf},
    bdsk-url-1 = {http://www.di.unito.it/~dezani/papers/cdl16.pdf},
    keywords = {rephrase, lambda}
    }

  • B. Nicolae, C. H. A. Costa, C. Misale, K. Katrinis, and Y. Park, "Leveraging Adaptive I/O to Optimize Collective Data Shuffling Patterns for Big Data Analytics," IEEE Transactions on Parallel and Distributed Systems, vol. PP, iss. 99, 2016. doi:10.1109/TPDS.2016.2627558
    [BibTeX] [Abstract] [Download PDF]

    Big data analytics is an indispensable tool in transforming science, engineering, medicine, health-care, finance and ultimately business itself. With the explosion of data sizes and need for shorter time-to-solution, in-memory platforms such as Apache Spark gain increasing popularity. In this context, data shuffling, a particularly difficult transformation pattern, introduces important challenges. Specifically, data shuffling is a key component of complex computations that has a major impact on the overall performance and scalability. Thus, speeding up data shuffling is a critical goal. To this end, state-of-the-art solutions often rely on overlapping the data transfers with the shuffling phase. However, they employ simple mechanisms to decide how much data and where to fetch it from, which leads to sub-optimal performance and excessive auxiliary memory utilization for the purpose of prefetching. The latter aspect is a growing concern, given evidence that memory per computation unit is continuously decreasing while interconnect bandwidth is increasing. This paper contributes a novel shuffle data transfer strategy that addresses the two aforementioned dimensions by dynamically adapting the prefetching to the computation. We implemented this novel strategy in Spark, a popular in-memory data analytics framework. To demonstrate the benefits of our proposal, we run extensive experiments on an HPC cluster with large core count per node. Compared with the default Spark shuffle strategy, our proposal shows: up to 40\% better performance with 50\% less memory utilization for buffering and excellent weak scalability.

    @article{16:shuffle:tpds:misale,
    title = {Leveraging Adaptive {I/O} to Optimize Collective Data Shuffling Patterns for {Big Data} Analytics},
    author = {Bogdan Nicolae and Carlos H. A. Costa and Claudia Misale and Kostas Katrinis and Yoonho Park},
    year = {2016},
    journal = {IEEE Transactions on Parallel and Distributed Systems},
    volume = {PP},
    doi = {10.1109/TPDS.2016.2627558},
    abstract = {Big data analytics is an indispensable tool in transforming science, engineering, medicine, health-care, finance and ultimately business itself. With the explosion of data sizes and need for shorter time-to-solution, in-memory platforms such as Apache Spark gain increasing popularity. In this context, data shuffling, a particularly difficult transformation pattern, introduces important challenges. Specifically, data shuffling is a key component of complex computations that has a major impact on the overall performance and scalability. Thus, speeding up data shuffling is a critical goal. To this end, state-of-the-art solutions often rely on overlapping the data transfers with the shuffling phase. However, they employ simple mechanisms to decide how much data and where to fetch it from, which leads to sub-optimal performance and excessive auxiliary memory utilization for the purpose of prefetching. The latter aspect is a growing concern, given evidence that memory per computation unit is continuously decreasing while interconnect bandwidth is increasing. This paper contributes a novel shuffle data transfer strategy that addresses the two aforementioned dimensions by dynamically adapting the prefetching to the computation. We implemented this novel strategy in Spark, a popular in-memory data analytics framework. To demonstrate the benefits of our proposal, we run extensive experiments on an HPC cluster with large core count per node. Compared with the default Spark shuffle strategy, our proposal shows: up to 40\% better performance with 50\% less memory utilization for buffering and excellent weak scalability.},
    date-modified = {2017-04-01 21:55:16 +0000},
    number = {99},
    url = {https://iris.unito.it/retrieve/handle/2318/1624908/295954/tpds_4aperto.pdf},
    bdsk-url-1 = {http://dx.doi.org/10.1109/TPDS.2016.2627558},
    bdsk-url-2 = {https://iris.unito.it/retrieve/handle/2318/1624908/295954/tpds_4aperto.pdf},
    keywords = {ibm}
    }

  • F. Tordini, M. Aldinucci, L. Milanesi, P. Liò, and I. Merelli, "The Genome Conformation as an Integrator of Multi-Omic Data: The Example of Damage Spreading in Cancer," Frontiers in Genetics, vol. 7, iss. 194, p. 1–17, 2016. doi:10.3389/fgene.2016.00194
    [BibTeX] [Abstract] [Download PDF]

    Publicly available multi-omic databases, in particular if associated with medical annotations, are rich resources with the potential to lead a rapid transition from high-throughput molecular biology experiments to better clinical outcomes for patients. In this work, we propose a model for multi-omic data integration (i.e. genetic variations, gene expression, genome conformation and epigenetic patterns), which exploits a multi-layer network approach to analyse, visualize and obtain insights from such biological information, in order to use achieved results at a macroscopic level. Using this representation, we can describe how driver and passenger mutations accumulate during the development of diseases providing, for example, a tool able to characterise the evolution of cancer. Indeed, our test case concerns the MCF-7 breast cancer cell line, before and after the stimulation with estrogen, since many datasets are available for this case study. In particular, the integration of data about cancer mutations, gene functional annotations, genome conformation, epigenetic patterns, gene expression and metabolic pathways in our multi-layer representation will allow a better interpretation of the mechanisms behind a complex disease such as cancer. Thanks to this multi-layer approach, we focus on the interplay of chromatin conformation and cancer mutations in different pathways, such as metabolic processes, that are very important for tumour development. Working on this model, a variance analysis can be implemented to identify normal variations within each omics and to characterize, by contrast, variations that can be accounted to pathological samples compared to normal ones. This integrative model can be used to identify novel biomarkers and to provide innovative omic-based guidelines for treating many diseases, improving the efficacy of decision trees currently used in clinic.

    @article{2016_omics_fgenetics,
    author = {Tordini, Fabio and Aldinucci, Marco and Milanesi, Luciano and Li{\`o}, Pietro and Merelli, Ivan},
    title = {The Genome Conformation as an Integrator of Multi-Omic Data: The Example of Damage Spreading in Cancer},
    journal = {Frontiers in Genetics},
    volume = {7},
    number = {194},
    pages = {1--17},
    year = {2016},
    doi = {10.3389/fgene.2016.00194},
    abstract = {Publicly available multi-omic databases, in particular if associated with medical annotations, are rich resources with the potential to lead a rapid transition from high-throughput molecular biology experiments to better clinical outcomes for patients. In this work, we propose a model for multi-omic data integration (i.e. genetic variations, gene expression, genome conformation and epigenetic patterns), which exploits a multi-layer network approach to analyse, visualize and obtain insights from such biological information, in order to use achieved results at a macroscopic level. Using this representation, we can describe how driver and passenger mutations accumulate during the development of diseases providing, for example, a tool able to characterise the evolution of cancer. Indeed, our test case concerns the MCF-7 breast cancer cell line, before and after the stimulation with estrogen, since many datasets are available for this case study. In particular, the integration of data about cancer mutations, gene functional annotations, genome conformation, epigenetic patterns, gene expression and metabolic pathways in our multi-layer representation will allow a better interpretation of the mechanisms behind a complex disease such as cancer. Thanks to this multi-layer approach, we focus on the interplay of chromatin conformation and cancer mutations in different pathways, such as metabolic processes, that are very important for tumour development. Working on this model, a variance analysis can be implemented to identify normal variations within each omics and to characterize, by contrast, variations that can be accounted to pathological samples compared to normal ones. This integrative model can be used to identify novel biomarkers and to provide innovative omic-based guidelines for treating many diseases, improving the efficacy of decision trees currently used in clinic.},
    date-modified = {2016-12-22 14:19:14 +0000},
    url = {http://journal.frontiersin.org/article/10.3389/fgene.2016.00194},
    bdsk-url-1 = {http://journal.frontiersin.org/article/10.3389/fgene.2016.00194},
    bdsk-url-2 = {http://dx.doi.org/10.3389/fgene.2016.00194}
    }

  • B. Nicolae, C. H. A. Costa, C. Misale, K. Katrinis, and Y. Park, "Towards Memory-Optimized Data Shuffling Patterns for Big Data Analytics," in IEEE/ACM 16th Intl. Symposium on Cluster, Cloud and Grid Computing, CCGrid 2016, Cartagena, Colombia, 2016. doi:10.1109/CCGrid.2016.85
    [BibTeX] [Abstract] [Download PDF]

    Big data analytics is an indispensable tool in transforming science, engineering, medicine, healthcare, finance and ultimately business itself. With the explosion of data sizes and need for shorter time-to-solution, in-memory platforms such as Apache Spark gain increasing popularity. However, this introduces important challenges, among which data shuffling is particularly difficult: on one hand it is a key part of the computation that has a major impact on the overall performance and scalability so its efficiency is paramount, while on the other hand it needs to operate with scarce memory in order to leave as much memory available for data caching. In this context, efficient scheduling of data transfers such that it addresses both dimensions of the problem simultaneously is non-trivial. State-of-the-art solutions often rely on simple approaches that yield sub optimal performance and resource usage. This paper contributes a novel shuffle data transfer strategy that dynamically adapts to the computation with minimal memory utilization, which we briefly underline as a series of design principles.

    @inproceedings{16:ccgrid:misale,
      author        = {Bogdan Nicolae and Carlos H. A. Costa and Claudia Misale and Kostas Katrinis and Yoonho Park},
      title         = {Towards Memory-Optimized Data Shuffling Patterns for Big Data Analytics},
      booktitle     = {{IEEE/ACM} 16th Intl. Symposium on Cluster, Cloud and Grid Computing, CCGrid 2016},
      year          = {2016},
      publisher     = {IEEE},
      address       = {Cartagena, Colombia},
      doi           = {10.1109/CCGrid.2016.85},
      url           = {http://ieeexplore.ieee.org/document/7515716/},
      abstract      = {Big data analytics is an indispensable tool in transforming science, engineering, medicine, healthcare, finance and ultimately business itself. With the explosion of data sizes and need for shorter time-to-solution, in-memory platforms such as Apache Spark gain increasing popularity. However, this introduces important challenges, among which data shuffling is particularly difficult: on one hand it is a key part of the computation that has a major impact on the overall performance and scalability so its efficiency is paramount, while on the other hand it needs to operate with scarce memory in order to leave as much memory available for data caching. In this context, efficient scheduling of data transfers such that it addresses both dimensions of the problem simultaneously is non-trivial. State-of-the-art solutions often rely on simple approaches that yield sub optimal performance and resource usage. This paper contributes a novel shuffle data transfer strategy that dynamically adapts to the computation with minimal memory utilization, which we briefly underline as a series of design principles.},
      date-modified = {2021-04-24 23:23:27 +0200},
      bdsk-url-1    = {http://ieeexplore.ieee.org/document/7515716/},
      bdsk-url-2    = {http://dx.doi.org/10.1109/CCGrid.2016.85}
    }

  • M. F. Dolz, D. del Rio Astorga, J. Fernández, D. J. García, F. García-Carballeira, M. Danelutto, and M. Torquati, "Embedding Semantics of the Single-Producer/Single-Consumer Lock-Free Queue into a Race Detection Tool," in Proceedings of the 7th International Workshop on Programming Models and Applications for Multicores and Manycores, New York, NY, USA, 2016, p. 20–29. doi:10.1145/2883404.2883406
    [BibTeX] [Download PDF]
    @inproceedings{16:PMAM:SPSC,
      author        = {Dolz, Manuel F. and del Rio Astorga, David and Fern\'{a}ndez, Javier and Garc\'{\i}a, J. Daniel and Garc\'{\i}a-Carballeira, F{\'e}lix and Danelutto, Marco and Torquati, Massimo},
      title         = {Embedding Semantics of the Single-Producer/Single-Consumer Lock-Free Queue into a Race Detection Tool},
      booktitle     = {Proceedings of the 7th International Workshop on Programming Models and Applications for Multicores and Manycores},
      series        = {PMAM'16},
      pages         = {20--29},
      year          = {2016},
      publisher     = {ACM},
      address       = {New York, NY, USA},
      location      = {Barcelona, Spain},
      isbn          = {978-1-4503-4196-7},
      doi           = {10.1145/2883404.2883406},
      url           = {https://doi.acm.org/10.1145/2883404.2883406},
      acmid         = {2883406},
      numpages      = {10},
      date-modified = {2016-04-21 17:33:36 +0000},
      bdsk-url-1    = {https://doi.acm.org/10.1145/2883404.2883406},
      bdsk-url-2    = {http://dx.doi.org/10.1145/2883404.2883406},
      keywords      = {fastflow, repara}
    }

  • F. Tordini, "A cloud solution for multi-omics data integration," in Proceedings of the 16th IEEE International Conference on Scalable Computing and Communication, 2016, p. 559–566. doi:10.1109/UIC-ATC-ScalCom-CBDCom-IoP-SmartWorld.2016.131
    [BibTeX] [Abstract] [Download PDF]

    Recent advances in molecular biology and Bioinformatics techniques have brought to an explosion of the information about the spatial organisation of the DNA inside the nucleus. In particular, 3C-based techniques are revealing the genome folding for many different cell types, and permit to create a more effective representation of the disposition of genes in the three-dimensional space. This information can be used to re-interpret heterogeneous genomic data (multi-omic) relying on 3D maps of the chromosome. The storage and computational requirements needed to accomplish such operations on raw sequenced data have to be fulfilled using HPC solutions, and the Cloud paradigm is a valuable and convenient mean for delivering HPC to Bioinformatics. In this work we describe a data analysis work-flow that allows the integration and the interpretation of multi-omic data on a sort of ``topographical'' nuclear map, capable of representing the effective disposition of genes in a graph-based representation. We propose a cloud-based task farm pattern to orchestrate the services needed to accomplish genomic data analysis, where each service represents a special-purpose tool, playing a part in well known data analysis pipelines.

    @inproceedings{16:scalcom:cloud,
    title = {A cloud solution for multi-omics data integration},
    author = {Fabio Tordini},
    year = {2016},
    booktitle = {Proceedings of the 16th IEEE International Conference on Scalable Computing and Communication},
    publisher = {IEEE Computer Society},
    pages = {559--566},
    doi = {10.1109/UIC-ATC-ScalCom-CBDCom-IoP-SmartWorld.2016.131},
    note = {Best paper award},
    abstract = {Recent advances in molecular biology and Bioinformatics techniques have brought to an explosion of the information about the spatial organisation of the DNA inside the nucleus. In particular, 3C-based techniques are revealing the genome folding for many different cell types, and permit to create a more effective representation of the disposition of genes in the three-dimensional space. This information can be used to re-interpret heterogeneous genomic data (multi-omic) relying on 3D maps of the chromosome. The storage and computational requirements needed to accomplish such operations on raw sequenced data have to be fulfilled using HPC solutions, and the Cloud paradigm is a valuable and convenient mean for delivering HPC to Bioinformatics. In this work we describe a data analysis work-flow that allows the integration and the interpretation of multi-omic data on a sort of ``topographical'' nuclear map, capable of representing the effective disposition of genes in a graph-based representation. We propose a cloud-based task farm pattern to orchestrate the services needed to accomplish genomic data analysis, where each service represents a special-purpose tool, playing a part in well known data analysis pipelines.},
    date-modified = {2016-08-30 10:26:12 +0000},
    url = {http://calvados.di.unipi.it/storage/paper_files/2016_cloudpipeline_scalcom.pdf},
    bdsk-url-1 = {http://calvados.di.unipi.it/storage/paper_files/2016_cloudpipeline_scalcom.pdf},
    bdsk-url-2 = {http://dx.doi.org/10.1109/UIC-ATC-ScalCom-CBDCom-IoP-SmartWorld.2016.131},
    keywords = {fastflow, bioinformatics, rephrase}
    }

  • F. Tordini, I. Merelli, P. Liò, L. Milanesi, and M. Aldinucci, "NuchaRt: embedding high-level parallel computing in R for augmented Hi-C data analysis," in Computational Intelligence Methods for Bioinformatics and Biostatistics, Cham (ZG): Springer International Publishing, 2016, vol. 9874, p. 259–272. doi:10.1007/978-3-319-44332-4
    [BibTeX] [Abstract] [Download PDF]

    Recent advances in molecular biology and Bioinformatics techniques brought to an explosion of the information about the spatial organisation of the DNA in the nucleus. High-throughput chromosome conformation capture techniques provide a genome-wide capture of chromatin contacts at unprecedented scales, which permit to identify physical interactions between genetic elements located throughout the human genome. These important studies are hampered by the lack of biologists-friendly software. In this work we present NuchaRt, an R package that wraps NuChart-II, an efficient and highly optimized C++ tool for the exploration of Hi-C data. By rising the level of abstraction, NuchaRt proposes a high-performance pipeline that allows users to orchestrate analysis and visualisation of multi-omics data, making optimal use of the computing capabilities offered by modern multi-core architectures, combined with the versatile and well known R environment for statistical analysis and data visualisation.

    @incollection{15:lnbi:nuchaRt,
    title = {{NuchaRt}: embedding high-level parallel computing in {R} for augmented {Hi-C} data analysis},
    author = {Fabio Tordini and Ivan Merelli and Pietro Li{\`o} and Luciano Milanesi and Marco Aldinucci},
    year = {2016},
    booktitle = {Computational Intelligence Methods for Bioinformatics and Biostatistics},
    publisher = {Springer International Publishing},
    address = {Cham (ZG)},
    series = {{Lecture Notes in Computer Science}},
    volume = {9874},
    pages = {259--272},
    doi = {10.1007/978-3-319-44332-4},
    isbn = {978-3-319-44331-7},
    abstract = {Recent advances in molecular biology and Bioinformatics techniques brought to an explosion of the information about the spatial organisation of the DNA in the nucleus. High-throughput chromosome conformation capture techniques provide a genome-wide capture of chromatin contacts at unprecedented scales, which permit to identify physical interactions between genetic elements located throughout the human genome. These important studies are hampered by the lack of biologists-friendly software. In this work we present NuchaRt, an R package that wraps NuChart-II, an efficient and highly optimized C++ tool for the exploration of Hi-C data. By rising the level of abstraction, NuchaRt proposes a high-performance pipeline that allows users to orchestrate analysis and visualisation of multi-omics data, making optimal use of the computing capabilities offered by modern multi-core architectures, combined with the versatile and well known R environment for statistical analysis and data visualisation.},
    url = {https://iris.unito.it/retrieve/handle/2318/1608281/253372/rnuchart.pdf},
    bdsk-url-1 = {http://link.springer.com/book/10.1007%2F978-3-319-44332-4},
    bdsk-url-2 = {http://dx.doi.org/10.1007/978-3-319-44332-4},
    keywords = {fastflow, bioinformatics, repara, interomics, mimomics}
    }

  • M. Aldinucci, S. Campa, M. Danelutto, P. Kilpatrick, and M. Torquati, "Pool Evolution: A Parallel Pattern for Evolutionary and Symbolic Computing," International Journal of Parallel Programming, vol. 44, iss. 3, p. 531–551, 2016. doi:10.1007/s10766-015-0358-5
    [BibTeX] [Abstract] [Download PDF]

    We introduce a new parallel pattern derived from a specific application domain and show how it turns out to have application beyond its domain of origin. The pool evolution pattern models the parallel evolution of a population subject to mutations and evolving in such a way that a given fitness function is optimized. The pattern has been demonstrated to be suitable for capturing and modeling the parallel patterns underpinning various evolutionary algorithms, as well as other parallel patterns typical of symbolic computation. In this paper we introduce the pattern, we discuss its implementation on modern multi/many core architectures and finally present experimental results obtained with FastFlow and Erlang implementations to assess its feasibility and scalability.

    @article{pool:ijpp:15,
      author        = {Marco Aldinucci and Sonia Campa and Marco Danelutto and Peter Kilpatrick and Massimo Torquati},
      title         = {Pool Evolution: A Parallel Pattern for Evolutionary and Symbolic Computing},
      journal       = {International Journal of Parallel Programming},
      volume        = {44},
      number        = {3},
      pages         = {531--551},
      year          = {2016},
      publisher     = {Springer US},
      issn          = {0885-7458},
      doi           = {10.1007/s10766-015-0358-5},
      url           = {https://iris.unito.it/retrieve/handle/2318/1522392/42139/2015_ff_pool_ijpp.pdf},
      abstract      = {We introduce a new parallel pattern derived from a specific application domain and show how it turns out to have application beyond its domain of origin. The pool evolution pattern models the parallel evolution of a population subject to mutations and evolving in such a way that a given fitness function is optimized. The pattern has been demonstrated to be suitable for capturing and modeling the parallel patterns underpinning various evolutionary algorithms, as well as other parallel patterns typical of symbolic computation. In this paper we introduce the pattern, we discuss its implementation on modern multi/many core architectures and finally present experimental results obtained with FastFlow and Erlang implementations to assess its feasibility and scalability.},
      date-added    = {2015-03-21 22:15:47 +0000},
      date-modified = {2015-09-24 11:15:53 +0000},
      bdsk-url-1    = {https://iris.unito.it/retrieve/handle/2318/1522392/42139/2015_ff_pool_ijpp.pdf},
      bdsk-url-2    = {https://doi.org/10.1007/s10766-015-0358-5},
      keywords      = {fastflow, paraphrase, repara}
    }

  • V. Janjic, C. Brown, K. MacKenzie, K. Hammond, M. Danelutto, M. Aldinucci, and J. D. Garcia, "RPL: A Domain-Specific Language for Designing and Implementing Parallel C++ Applications," in Proc. of Intl. Euromicro PDP 2016: Parallel Distributed and network-based Processing, Crete, Greece, 2016. doi:10.1109/PDP.2016.122
    [BibTeX] [Abstract] [Download PDF]

    Parallelising sequential applications is usually a very hard job, due to many different ways in which an application can be parallelised and a large number of programming models (each with its own advantages and disadvantages) that can be used. In this paper, we describe a method to semi- automatically generate and evaluate different parallelisations of the same application, allowing programmers to find the best parallelisation without significant manual reengineering of the code. We describe a novel, high-level domain-specific language, Refactoring Pattern Language (RPL), that is used to represent the parallel structure of an application and to capture its extra-functional properties (such as service time). We then describe a set of RPL rewrite rules that can be used to generate alternative, but semantically equivalent, parallel structures (parallelisations) of the same application. We also describe the RPL Shell that can be used to evaluate these parallelisations, in terms of the desired extra-functional properties. Finally, we describe a set of C++ refactorings, targeting OpenMP, Intel TBB and FastFlow parallel programming models, that semi-automatically apply the desired parallelisation to the application's source code, therefore giving a parallel version of the code. We demonstrate how the RPL and the refactoring rules can be used to derive efficient parallelisations of two realistic C++ use cases (Image Convolution and Ant Colony Optimisation).

    @inproceedings{rpl:pdp:16,
    title = {{RPL}: A Domain-Specific Language for Designing and Implementing Parallel {C++} Applications},
    author = {Vladimir Janjic and Christopher Brown and Kenneth MacKenzie and Kevin Hammond and Marco Danelutto and Marco Aldinucci and Jose Daniel Garcia},
    year = {2016},
    booktitle = {Proc. of Intl. Euromicro PDP 2016: Parallel Distributed and network-based Processing},
    publisher = {IEEE},
    address = {Crete, Greece},
    doi = {10.1109/PDP.2016.122},
    abstract = {Parallelising sequential applications is usually a very hard job, due to many different ways in which an application can be parallelised and a large number of programming models (each with its own advantages and disadvantages) that can be used. In this paper, we describe a method to semi- automatically generate and evaluate different parallelisations of the same application, allowing programmers to find the best parallelisation without significant manual reengineering of the code. We describe a novel, high-level domain-specific language, Refactoring Pattern Language (RPL), that is used to represent the parallel structure of an application and to capture its extra-functional properties (such as service time). We then describe a set of RPL rewrite rules that can be used to generate alternative, but semantically equivalent, parallel structures (parallelisations) of the same application. We also describe the RPL Shell that can be used to evaluate these parallelisations, in terms of the desired extra-functional properties. Finally, we describe a set of C++ refactorings, targeting OpenMP, Intel TBB and FastFlow parallel programming models, that semi-automatically apply the desired parallelisation to the application's source code, therefore giving a parallel version of the code. We demonstrate how the RPL and the refactoring rules can be used to derive efficient parallelisations of two realistic C++ use cases (Image Convolution and Ant Colony Optimisation).},
    date-modified = {2017-06-20 08:19:39 +0000},
    url = {https://iris.unito.it/retrieve/handle/2318/1597172/299237/2016_jsupe_stencil_pp_4aperto.pdf},
    bdsk-url-1 = {http://hdl.handle.net/2318/1597172},
    bdsk-url-2 = {https://iris.unito.it/retrieve/handle/2318/1597172/299237/2016_jsupe_stencil_pp_4aperto.pdf},
    bdsk-url-3 = {http://dx.doi.org/10.1109/PDP.2016.122},
    keywords = {rephrase, fastflow}
    }

  • M. Drocco, C. Misale, and M. Aldinucci, "A Cluster-As-Accelerator approach for SPMD-free Data Parallelism," in Proc. of 24th Euromicro Intl. Conference on Parallel Distributed and network-based Processing (PDP), Crete, Greece, 2016, p. 350–353. doi:10.1109/PDP.2016.97
    [BibTeX] [Abstract] [Download PDF]

    In this paper we present a novel approach for functional-style programming of distributed-memory clusters, targeting data-centric applications. The programming model proposed is purely sequential, SPMD-free and based on high- level functional features introduced since C++11 specification. Additionally, we propose a novel cluster-as-accelerator design principle. In this scheme, cluster nodes act as general inter- preters of user-defined functional tasks over node-local portions of distributed data structures. We envision coupling a simple yet powerful programming model with a lightweight, locality- aware distributed runtime as a promising step along the road towards high-performance data analytics, in particular under the perspective of the upcoming exascale era. We implemented the proposed approach in SkeDaTo, a prototyping C++ library of data-parallel skeletons exploiting cluster-as-accelerator at the bottom layer of the runtime software stack.

    @inproceedings{skedato:pdp:16,
      author        = {Maurizio Drocco and Claudia Misale and Marco Aldinucci},
      title         = {A Cluster-As-Accelerator approach for {SPMD}-free Data Parallelism},
      booktitle     = {Proc. of 24th Euromicro Intl. Conference on Parallel Distributed and network-based Processing (PDP)},
      pages         = {350--353},
      year          = {2016},
      publisher     = {IEEE},
      address       = {Crete, Greece},
      doi           = {10.1109/PDP.2016.97},
      url           = {https://iris.unito.it/retrieve/handle/2318/1611858/262689/2016_pdp_skedato.pdf},
      abstract      = {In this paper we present a novel approach for functional-style programming of distributed-memory clusters, targeting data-centric applications. The programming model proposed is purely sequential, SPMD-free and based on high- level functional features introduced since C++11 specification. Additionally, we propose a novel cluster-as-accelerator design principle. In this scheme, cluster nodes act as general inter- preters of user-defined functional tasks over node-local portions of distributed data structures. We envision coupling a simple yet powerful programming model with a lightweight, locality- aware distributed runtime as a promising step along the road towards high-performance data analytics, in particular under the perspective of the upcoming exascale era. We implemented the proposed approach in SkeDaTo, a prototyping C++ library of data-parallel skeletons exploiting cluster-as-accelerator at the bottom layer of the runtime software stack.},
      date-modified = {2017-12-12 14:49:31 +0000},
      bdsk-url-1    = {https://iris.unito.it/retrieve/handle/2318/1611858/262689/2016_pdp_skedato.pdf},
      bdsk-url-2    = {https://doi.org/10.1109/PDP.2016.97},
      keywords      = {rephrase, fastflow}
    }

  • P. Viviani, M. Aldinucci, R. d'Ippolito, J. Lemeire, and D. Vucinic, A flexible numerical framework for engineering - a Response Surface Modelling application, 2016.
    [BibTeX] [Abstract]

    This work presents the innovative approach adopted for the development of a new numerical software framework for accelerating Dense Linear Algebra calculations and its application within an engineering context. In particular, Response Surface Models (RSM) are a key tool to reduce the computational effort involved in engineering design processes like design optimization. However, RSMs may prove to be too expensive to be computed when the dimensionality of the system and/or the size of the dataset to be synthesized is significantly high or when a large number of different Response Surfaces has to be calculated in order to improve the overall accuracy (e.g. like when using Ensemble Modelling techniques). On the other hand, it is a known challenge that the potential of modern hybrid hardware (e.g. multicore, GPUs) is not exploited by current engineering tools, while they can lead to a significant performance improvement. To fill this gap, a software framework is being developed that enables the hybrid and scalable acceleration of the linear algebra core for engineering applications and especially of RSMs calculations with a user-friendly syntax that allows good portability between different hardware architectures, with no need of specific expertise in parallel programming and accelerator technology. The effectiveness of this framework is shown by comparing an accelerated code to a single-core calculation of a Radial Basis Function RSM on some benchmark datasets. This approach is then validated within a real-life engineering application and the achievements are presented and discussed.

    @unpublished{16:acex:armadillo,
    title = {A flexible numerical framework for engineering - a Response Surface Modelling application},
    author = {Paolo Viviani and Marco Aldinucci and Roberto d'Ippolito and Jean Lemeire and Dean Vucinic},
    year = {2016},
    booktitle = {10th Intl. Conference on Advanced Computational Engineering and Experimenting (ACE-X)},
    note = {Presented at the 10th Intl. Conference on Advanced Computational Engineering and Experimenting (ACE-X)},
    abstract = {This work presents the innovative approach adopted for the development of a new numerical software framework for accelerating Dense Linear Algebra calculations and its application within an engineering context. In particular, Response Surface Models (RSM) are a key tool to reduce the computational effort involved in engineering design processes like design optimization. However, RSMs may prove to be too expensive to be computed when the dimensionality of the system and/or the size of the dataset to be synthesized is significantly high or when a large number of different Response Surfaces has to be calculated in order to improve the overall accuracy (e.g. like when using Ensemble Modelling techniques). On the other hand, it is a known challenge that the potential of modern hybrid hardware (e.g. multicore, GPUs) is not exploited by current engineering tools, while they can lead to a significant performance improvement. To fill this gap, a software framework is being developed that enables the hybrid and scalable acceleration of the linear algebra core for engineering applications and especially of RSMs calculations with a user-friendly syntax that allows good portability between different hardware architectures, with no need of specific expertise in parallel programming and accelerator technology. The effectiveness of this framework is shown by comparing an accelerated code to a single-core calculation of a Radial Basis Function RSM on some benchmark datasets. This approach is then validated within a real-life engineering application and the achievements are presented and discussed.},
    date-added = {2016-08-19 21:37:19 +0000},
    date-modified = {2019-08-08 11:27:47 +0200},
    keywords = {repara, rephrase, nvidia, gpu}
    }

  • M. Dezani-Ciancaglini and P. Giannini, "Reversible Multiparty Sessions with Checkpoints," in EXPRESS/SOS'16, 2016, p. 60–74.
    [BibTeX] [Download PDF]
    @inproceedings{DG16,
      author     = {Mariangiola Dezani-Ciancaglini and Paola Giannini},
      title      = {Reversible Multiparty Sessions with Checkpoints},
      booktitle  = {EXPRESS/SOS'16},
      series     = {EPTCS},
      volume     = {222},
      pages      = {60--74},
      year       = {2016},
      url        = {http://www.di.unito.it/~dezani/papers/dg16.pdf},
      bdsk-url-1 = {http://www.di.unito.it/~dezani/papers/dg16.pdf},
      keywords   = {rephrase, lambda}
    }

2015

  • P. Inaudi, "Progettazione e sviluppo di un provider libfabric per la rete ad alte prestazioni Ronniee/A3Cube," Master Thesis, 2015.
    [BibTeX]
    @mastersthesis{tesi:inaudi:15,
      author   = {Paolo Inaudi},
      title    = {Progettazione e sviluppo di un provider libfabric per la rete ad alte prestazioni Ronniee/A3Cube},
      school   = {Computer Science Department, University of Torino},
      year     = {2015},
      month    = oct,
      keywords = {fastflow}
    }

  • M. Aldinucci, M. Danelutto, M. Drocco, P. Kilpatrick, G. Peretti Pezzi, and M. Torquati, "The Loop-of-Stencil-Reduce paradigm," in Proc. of Intl. Workshop on Reengineering for Parallelism in Heterogeneous Parallel Platforms (RePara), Helsinki, Finland, 2015, p. 172–177. doi:10.1109/Trustcom.2015.628
    [BibTeX] [Abstract] [Download PDF]

    In this paper we advocate the Loop-of-stencil-reduce pattern as a way to simplify the parallel programming of heterogeneous platforms (multicore+GPUs). Loop-of-Stencil-reduce is general enough to subsume map, reduce, map-reduce, stencil, stencil-reduce, and, crucially, their usage in a loop. It transparently targets (by using OpenCL) combinations of CPU cores and GPUs, and it makes it possible to simplify the deployment of a single stencil computation kernel on different GPUs. The paper discusses the implementation of Loop-of-stencil-reduce within the FastFlow parallel framework, considering a simple iterative data-parallel application as running example (Game of Life) and a highly effective parallel filter for visual data restoration to assess performance. Thanks to the high-level design of the Loop-of-stencil-reduce, it was possible to run the filter seamlessly on a multicore machine, on multi-GPUs, and on both.

    @inproceedings{opencl:ff:ispa:15,
      author        = {Marco Aldinucci and Marco Danelutto and Maurizio Drocco and Peter Kilpatrick and Guilherme {Peretti Pezzi} and Massimo Torquati},
      title         = {The Loop-of-Stencil-Reduce paradigm},
      booktitle     = {Proc. of Intl. Workshop on Reengineering for Parallelism in Heterogeneous Parallel Platforms (RePara)},
      pages         = {172--177},
      year          = {2015},
      month         = aug,
      publisher     = {IEEE},
      address       = {Helsinki, Finland},
      doi           = {10.1109/Trustcom.2015.628},
      url           = {https://iris.unito.it/retrieve/handle/2318/1523738/52857/15_RePara_ISPA.pdf},
      abstract      = {In this paper we advocate the Loop-of-stencil-reduce pattern as a way to simplify the parallel programming of heterogeneous platforms (multicore+GPUs). Loop-of-Stencil-reduce is general enough to subsume map, reduce, map-reduce, stencil, stencil-reduce, and, crucially, their usage in a loop. It transparently targets (by using OpenCL) combinations of CPU cores and GPUs, and it makes it possible to simplify the deployment of a single stencil computation kernel on different GPUs. The paper discusses the implementation of Loop-of-stencil-reduce within the FastFlow parallel framework, considering a simple iterative data-parallel application as running example (Game of Life) and a highly effective parallel filter for visual data restoration to assess performance. Thanks to the high-level design of the Loop-of-stencil-reduce, it was possible to run the filter seamlessly on a multicore machine, on multi-GPUs, and on both.},
      date-added    = {2015-07-05 09:48:33 +0000},
      date-modified = {2015-09-24 11:14:56 +0000},
      bdsk-url-1    = {https://iris.unito.it/retrieve/handle/2318/1523738/52857/15_RePara_ISPA.pdf},
      bdsk-url-2    = {https://doi.org/10.1109/Trustcom.2015.628},
      keywords      = {fastflow, repara, nvidia}
    }

  • F. Tordini, M. Drocco, I. Merelli, L. Milanesi, P. Liò, and M. Aldinucci, "NuChart-II: a graph-based approach for the analysis and interpretation of Hi-C data," in Proc. of 11th Intl. Meeting on Computational Intelligence Methods for Bioinformatics and Biostatistics (CIBB), Cambridge, UK, 2015, p. 298–311. doi:10.1007/978-3-319-24462-4_25
    [BibTeX] [Abstract] [Download PDF]

    Long-range chromosomal associations between genomic regions, and their repositioning in the 3D space of the nucleus, are now considered to be key contributors to the regulation of gene expressions, and important links have been highlighted with other genomic features involved in DNA rearrangements. Recent Chromosome Conformation Capture (3C) measurements performed with high throughput sequencing (Hi-C) and molecular dynamics studies show that there is a large correlation between co-localization and co-regulation of genes, but these important researches are hampered by the lack of biologists-friendly analysis and visualisation software. In this work we present NuChart-II, a software that allows the user to annotate and visualize a list of input genes with information relying on Hi-C data, integrating knowledge data about genomic features that are involved in the chromosome spatial organization. This software works directly with sequenced reads to identify related Hi-C fragments, with the aim of creating gene-centric neighbourhood graphs on which multi-omics features can be mapped. NuChart-II is a highly optimized implementation of a previous prototype package developed in R, in which the graph-based representation of Hi-C data was tested. The prototype showed inevitable problems of scalability while working genome-wide on large datasets: particular attention has been paid in optimizing the data structures employed while constructing the neighbourhood graph, so as to foster an efficient parallel implementation of the software. The normalization of Hi-C data has been modified and improved, in order to provide a reliable estimation of proximity likelihood for the genes.

    @inproceedings{14:ff:nuchart:cibb,
    title = {{NuChart-II}: a graph-based approach for the analysis and interpretation of {Hi-C} data},
    author = {Fabio Tordini and Maurizio Drocco and Ivan Merelli and Luciano Milanesi and Pietro Li{\`o} and Marco Aldinucci},
    year = {2015},
    month = jun,
    booktitle = {Proc. of 11th Intl. Meeting on Computational Intelligence Methods for Bioinformatics and Biostatistics (CIBB)},
    publisher = {Springer},
    address = {Cambridge, UK},
    series = {{LNCS}},
    volume = {8623},
    pages = {298--311},
    doi = {10.1007/978-3-319-24462-4_25},
    isbn = {978-3-319-24461-7},
    abstract = {Long-range chromosomal associations between genomic regions, and their repositioning in the 3D space of the nucleus, are now considered to be key contributors to the regulation of gene expressions, and important links have been highlighted with other genomic features involved in DNA rearrangements. Recent Chromosome Conformation Capture (3C) measurements performed with high throughput sequencing (Hi-C) and molecular dynamics studies show that there is a large correlation between co-localization and co-regulation of genes, but these important researches are hampered by the lack of biologists-friendly analysis and visualisation software. In this work we present NuChart-II, a software that allows the user to annotate and visualize a list of input genes with information relying on Hi-C data, integrating knowledge data about genomic features that are involved in the chromosome spatial organization. This software works directly with sequenced reads to identify related Hi-C fragments, with the aim of creating gene-centric neighbourhood graphs on which multi-omics features can be mapped. NuChart-II is a highly optimized implementation of a previous prototype package developed in R, in which the graph-based representation of Hi-C data was tested. The prototype showed inevitable problems of scalability while working genome-wide on large datasets: particular attention has been paid in optimizing the data structures employed while constructing the neighbourhood graph, so as to foster an efficient parallel implementation of the software. The normalization of Hi-C data has been modified and improved, in order to provide a reliable estimation of proximity likelihood for the genes.},
    date-modified = {2017-12-12 15:19:25 +0000},
    editor = {Clelia Di Serio and Pietro Li{\`o} and Alessandro Nonis and Roberto Tagliaferri},
    url = {http://calvados.di.unipi.it/storage/paper_files/2014_nuchart_cibb.pdf},
    bdsk-url-1 = {http://calvados.di.unipi.it/storage/paper_files/2014_nuchart_cibb.pdf},
    bdsk-url-2 = {http://dx.doi.org/10.1007/978-3-319-24462-4_25},
    keywords = {fastflow, bioinformatics, paraphrase, repara, interomics, mimomics, hirma}
    }

  • M. Drocco, C. Misale, G. Peretti Pezzi, F. Tordini, and M. Aldinucci, "Memory-Optimised Parallel Processing of Hi-C Data," in Proc. of 23rd Euromicro Intl. Conference on Parallel Distributed and network-based Processing (PDP), 2015, p. 1–8. doi:10.1109/PDP.2015.63
    [BibTeX] [Abstract] [Download PDF]

    This paper presents the optimisation efforts on the creation of a graph-based mapping representation of gene adjacency. The method is based on the Hi-C process, starting from Next Generation Sequencing data, and it analyses a huge amount of static data in order to produce maps for one or more genes. Straightforward parallelisation of this scheme does not yield acceptable performance on multicore architectures since the scalability is rather limited due to the memory bound nature of the problem. This work focuses on the memory optimisations that can be applied to the graph construction algorithm and its (complex) data structures to derive a cache-oblivious algorithm and eventually to improve the memory bandwidth utilisation. We used as running example NuChart-II, a tool for annotation and statistic analysis of Hi-C data that creates a gene-centric neighborhood graph. The proposed approach, which is exemplified for Hi-C, addresses several common issue in the parallelisation of memory bound algorithms for multicore. Results show that the proposed approach is able to increase the parallel speedup from 7x to 22x (on a 32-core platform). Finally, the proposed C++ implementation outperforms the first R NuChart prototype, by which it was not possible to complete the graph generation because of strong memory-saturation problems.

    @inproceedings{nuchart:speedup:15,
    title = {Memory-Optimised Parallel Processing of {Hi-C} Data},
    author = {Maurizio Drocco and Claudia Misale and Guilherme {Peretti Pezzi} and Fabio Tordini and Marco Aldinucci},
    year = {2015},
    month = mar,
    booktitle = {Proc. of 23rd Euromicro Intl. Conference on Parallel Distributed and network-based Processing (PDP)},
    publisher = {IEEE},
    pages = {1--8},
    doi = {10.1109/PDP.2015.63},
    abstract = {This paper presents the optimisation efforts on the creation of a graph-based mapping representation of gene adjacency. The method is based on the Hi-C process, starting from Next Generation Sequencing data, and it analyses a huge amount of static data in order to produce maps for one or more genes. Straightforward parallelisation of this scheme does not yield acceptable performance on multicore architectures since the scalability is rather limited due to the memory bound nature of the problem. This work focuses on the memory optimisations that can be applied to the graph construction algorithm and its (complex) data structures to derive a cache-oblivious algorithm and eventually to improve the memory bandwidth utilisation. We used as running example NuChart-II, a tool for annotation and statistic analysis of Hi-C data that creates a gene-centric neighborhood graph. The proposed approach, which is exemplified for Hi-C, addresses several common issue in the parallelisation of memory bound algorithms for multicore. Results show that the proposed approach is able to increase the parallel speedup from 7x to 22x (on a 32-core platform). Finally, the proposed C++ implementation outperforms the first R NuChart prototype, by which it was not possible to complete the graph generation because of strong memory-saturation problems.},
    date-added = {2014-12-03 13:54:08 +0000},
    date-modified = {2017-12-12 14:45:09 +0000},
    url = {https://iris.unito.it/retrieve/handle/2318/1521910/40615/2015_pdp_memopt.pdf},
    bdsk-url-1 = {https://iris.unito.it/retrieve/handle/2318/1521910/40615/2015_pdp_memopt.pdf},
    bdsk-url-2 = {https://doi.org/10.1109/PDP.2015.63},
    keywords = {fastflow, bioinformatics, paraphrase, repara, impact}
    }

  • F. Tordini, M. Drocco, C. Misale, L. Milanesi, P. Liò, I. Merelli, and M. Aldinucci, "Parallel Exploration of the Nuclear Chromosome Conformation with NuChart-II," in Proc. of 23rd Euromicro Intl. Conference on Parallel Distributed and network-based Processing (PDP), 2015. doi:10.1109/PDP.2015.104
    [BibTeX] [Abstract] [Download PDF]

    High-throughput molecular biology techniques are widely used to identify physical interactions between genetic elements located throughout the human genome. Chromosome Conformation Capture (3C) and other related techniques allow to investigate the spatial organisation of chromosomes in the cell's natural state. Recent results have shown that there is a large correlation between co-localization and co-regulation of genes, but these important information are hampered by the lack of biologists-friendly analysis and visualisation software. In this work we introduce NuChart-II, a tool for Hi-C data analysis that provides a gene-centric view of the chromosomal neighbourhood in a graph-based manner. NuChart-II is an efficient and highly optimized C++ re-implementation of a previous prototype package developed in R. Representing Hi-C data using a graph-based approach overcomes the common view relying on genomic coordinates and permits the use of graph analysis techniques to explore the spatial conformation of a gene neighbourhood.

    @inproceedings{nuchar:tool:15,
    title = {Parallel Exploration of the Nuclear Chromosome Conformation with {NuChart-II}},
    author = {Fabio Tordini and Maurizio Drocco and Claudia Misale and Luciano Milanesi and Pietro Li{\`o} and Ivan Merelli and Marco Aldinucci},
    year = {2015},
    month = mar,
    booktitle = {Proc. of 23rd Euromicro Intl. Conference on Parallel Distributed and network-based Processing (PDP)},
    publisher = {IEEE},
    doi = {10.1109/PDP.2015.104},
    abstract = {High-throughput molecular biology techniques are widely used to identify physical interactions between genetic elements located throughout the human genome. Chromosome Conformation Capture (3C) and other related techniques allow to investigate the spatial organisation of chromosomes in the cell's natural state. Recent results have shown that there is a large correlation between co-localization and co-regulation of genes, but these important information are hampered by the lack of biologists-friendly analysis and visualisation software. In this work we introduce NuChart-II, a tool for Hi-C data analysis that provides a gene-centric view of the chromosomal neighbourhood in a graph-based manner. NuChart-II is an efficient and highly optimized C++ re-implementation of a previous prototype package developed in R. Representing Hi-C data using a graph-based approach overcomes the common view relying on genomic coordinates and permits the use of graph analysis techniques to explore the spatial conformation of a gene neighbourhood.},
    date-added = {2014-12-03 13:51:17 +0000},
    date-modified = {2017-12-12 13:55:10 +0000},
    url = {https://iris.unito.it/retrieve/handle/2318/1522038/40619/2015_pdp_nuchartff.pdf},
    bdsk-url-1 = {https://iris.unito.it/retrieve/handle/2318/1522038/40619/2015_pdp_nuchartff.pdf},
    bdsk-url-2 = {https://doi.org/10.1109/PDP.2015.104},
    keywords = {fastflow, bioinformatics, paraphrase, repara, impact}
    }

  • P. Viviani, "Parallel Computing Techniques for High Energy Physics," Master Thesis, 2015.
    [BibTeX] [Abstract]

    Modern experimental achievements, with LHC results as a prominent but not exclusive representative, have undisclosed a new range of challenges concerning theoretical computations. Tree level QED calculation are no more satisfactory due to the very small experimental uncertainty of precision e+ e- measurements, so Next To Leading and Next to Next to Leading Order calculations are required. At the same time many-legs, high-order QCD processes needed to simulate LHC events are raising even more the bar of computational complexity. The drive for the present work has been the interest in calculating high multiplicity Higgs boson processes with a dedicated software library (RECOLA) currently under development at the University of Torino, as well as the related technological challenges. This thesis undertakes the task of exploring the possibilities offered by present and upcoming computing technologies in order to face these challenges properly. The first two chapters outline the theoretical context and the available technologies. In chapter 3 a case study is examined in full detail, in order to explore the suitability of different parallel computing solutions. In the chapter 4, some of those solutions are implemented in the context of the RECOLA library, allowing it to handle processes at a previously unexplored scale of complexity. Alongside, the potential of new, cost-effective parallel architectures is tested.

    @mastersthesis{tesi:viviani:15,
    title = {Parallel Computing Techniques for High Energy Physics},
    author = {Paolo Viviani},
    year = {2015},
    abstract = {Modern experimental achievements, with LHC results as a prominent but not exclusive representative, have undisclosed a new range of challenges concerning theoretical computations. Tree level QED calculation are no more satisfactory due to the very small experimental uncertainty of precision e+ e- measurements, so Next To Leading and Next to Next to Leading Order calculations are required. At the same time many-legs, high-order QCD processes needed to simulate LHC events are raising even more the bar of computational complexity. The drive for the present work has been the interest in calculating high multiplicity Higgs boson processes with a dedicated software library (RECOLA) currently under development at the University of Torino, as well as the related technological challenges. This thesis undertakes the task of exploring the possibilities offered by present and upcoming computing technologies in order to face these challenges properly. The first two chapters outlines the theoretical context and the available technologies. In chapter 3 a case study is examined in full detail, in order to explore the suitability of different parallel computing solutions. In the chapter 4, some of those solutions are implemented in the context of the RECOLA library, allowing it to handle processes at a previously unexplored scale of complexity. Alongside, the potential of new, cost-effective parallel architectures is tested.},
    date-added = {2015-09-27 12:36:54 +0000},
    date-modified = {2015-09-27 13:28:24 +0000},
    school = {Physics Department, University of Torino},
    keywords = {fastflow, impact}
    }

  • I. Merelli, F. Tordini, M. Drocco, M. Aldinucci, P. Liò, and L. Milanesi, "Integrating Multi-omic features exploiting Chromosome Conformation Capture data," Frontiers in Genetics, vol. 6, iss. 40, 2015. doi:10.3389/fgene.2015.00040
    [BibTeX] [Abstract] [Download PDF]

    The representation, integration and interpretation of omic data is a complex task, in particular considering the huge amount of information that is daily produced in molecular biology laboratories all around the world. The reason is that sequencing data regarding expression profiles, methylation patterns, and chromatin domains is difficult to harmonize in a systems biology view, since genome browsers only allow coordinate-based representations, discarding functional clusters created by the spatial conformation of the DNA in the nucleus. In this context, recent progresses in high throughput molecular biology techniques and bioinformatics have provided insights into chromatin interactions on a larger scale and offer a formidable support for the interpretation of multi-omic data. In particular, a novel sequencing technique called Chromosome Conformation Capture (3C) allows the analysis of the chromosome organization in the cell's natural state. While performed genome wide, this technique is usually called Hi-C. Inspired by service applications such as Google Maps, we developed NuChart, an R package that integrates Hi-C data to describe the chromosomal neighbourhood starting from the information about gene positions, with the possibility of mapping on the achieved graphs genomic features such as methylation patterns and histone modifications, along with expression profiles. In this paper we show the importance of the NuChart application for the integration of multi-omic data in a systems biology fashion, with particular interest in cytogenetic applications of these techniques. Moreover, we demonstrate how the integration of multi-omic data can provide useful information in understanding why genes are in certain specific positions inside the nucleus and how epigenetic patterns correlate with their expression.

    @article{nuchart:frontiers:15,
      author        = {Merelli, Ivan and Tordini, Fabio and Drocco, Maurizio and Aldinucci, Marco and Li{\`o}, Pietro and Milanesi, Luciano},
      title         = {Integrating Multi-omic features exploiting {Chromosome Conformation Capture} data},
      journal       = {Frontiers in Genetics},
      year          = {2015},
      volume        = {6},
      number        = {40},
      issn          = {1664-8021},
      doi           = {10.3389/fgene.2015.00040},
      url           = {http://journal.frontiersin.org/Journal/10.3389/fgene.2015.00040/pdf},
      abstract      = {The representation, integration and interpretation of omic data is a complex task, in particular considering the huge amount of information that is daily produced in molecular biology laboratories all around the world. The reason is that sequencing data regarding expression profiles, methylation patterns, and chromatin domains is difficult to harmonize in a systems biology view, since genome browsers only allow coordinate-based representations, discarding functional clusters created by the spatial conformation of the DNA in the nucleus. In this context, recent progresses in high throughput molecular biology techniques and bioinformatics have provided insights into chromatin interactions on a larger scale and offer a formidable support for the interpretation of multi-omic data. In particular, a novel sequencing technique called Chromosome Conformation Capture (3C) allows the analysis of the chromosome organization in the cell's natural state. While performed genome wide, this technique is usually called Hi-C. Inspired by service applications such as Google Maps, we developed NuChart, an R package that integrates Hi-C data to describe the chromosomal neighbourhood starting from the information about gene positions, with the possibility of mapping on the achieved graphs genomic features such as methylation patterns and histone modifications, along with expression profiles. In this paper we show the importance of the NuChart application for the integration of multi-omic data in a systems biology fashion, with particular interest in cytogenetic applications of these techniques. Moreover, we demonstrate how the integration of multi-omic data can provide useful information in understanding why genes are in certain specific positions inside the nucleus and how epigenetic patterns correlate with their expression.},
      date-added    = {2015-02-01 16:38:47 +0000},
      date-modified = {2015-09-24 11:23:10 +0000},
      bdsk-url-1    = {http://journal.frontiersin.org/Journal/10.3389/fgene.2015.00040/pdf},
      bdsk-url-2    = {http://dx.doi.org/10.3389/fgene.2015.00040},
      keywords      = {bioinformatics, fastflow, interomics, hirma, mimomics}
    }

  • M. Aldinucci, A. Bracciali, T. Marschall, M. Patterson, N. Pisanti, and M. Torquati, "High-Performance Haplotype Assembly," in Computational Intelligence Methods for Bioinformatics and Biostatistics - 11th International Meeting, CIBB 2014, Cambridge, UK, June 26-28, 2014, Revised Selected Papers, Cambridge, UK, 2015, p. 245–258. doi:10.1007/978-3-319-24462-4_21
    [BibTeX] [Abstract] [Download PDF]

    The problem of Haplotype Assembly is an essential step in human genome analysis. It is typically formalised as the Minimum Error Correction (MEC) problem which is NP-hard. MEC has been approached using heuristics, integer linear programming, and fixed-parameter tractability (FPT), including approaches whose runtime is exponential in the length of the DNA fragments obtained by the sequencing process. Technological improvements are currently increasing fragment length, which drastically elevates computational costs for such methods. We present pWhatsHap, a multi-core parallelisation of WhatsHap, a recent FPT optimal approach to MEC. WhatsHap moves complexity from fragment length to fragment overlap and is hence of particular interest when considering sequencing technology's current trends. pWhatsHap further improves the efficiency in solving the MEC problem, as shown by experiments performed on datasets with high coverage.

    @inproceedings{14:ff:whatsapp:cibb,
    title = {High-Performance Haplotype Assembly},
    author = {Marco Aldinucci and Andrea Bracciali and Tobias Marschall and Murray Patterson and Nadia Pisanti and Massimo Torquati},
    year = {2015},
    booktitle = {Computational Intelligence Methods for Bioinformatics and Biostatistics - 11th International Meeting, {CIBB} 2014, Cambridge, UK, June 26-28, 2014, Revised Selected Papers},
    publisher = {Springer},
    address = {Cambridge, UK},
    series = {{LNCS}},
    volume = {8623},
    pages = {245--258},
    doi = {10.1007/978-3-319-24462-4_21},
    isbn = {978-3-319-24461-7},
    abstract = {The problem of Haplotype Assembly is an essential step in human genome analysis. It is typically formalised as the Minimum Error Correction (MEC) problem which is NP-hard. MEC has been approached using heuristics, integer linear programming, and fixed-parameter tractability (FPT), including approaches whose runtime is exponential in the length of the DNA fragments obtained by the sequencing process. Technological improvements are currently increasing fragment length, which drastically elevates computational costs for such methods. We present pWhatsHap, a multi-core parallelisation of WhatsHap, a recent FPT optimal approach to MEC. WhatsHap moves complexity from fragment length to fragment overlap and is hence of particular interest when considering sequencing technology's current trends. pWhatsHap further improves the efficiency in solving the MEC problem, as shown by experiments performed on datasets with high coverage.},
    date-added = {2014-12-01 23:07:21 +0000},
    date-modified = {2016-08-20 14:15:59 +0000},
    editor = {Clelia Di Serio and Pietro Li{\`{o}} and Alessandro Nonis and Roberto Tagliaferri},
    url = {https://iris.unito.it/retrieve/handle/2318/1523292/46714/2014_pHaplo_cibb.pdf},
    bdsk-url-1 = {https://iris.unito.it/retrieve/handle/2318/1523292/46714/2014_pHaplo_cibb.pdf},
    bdsk-url-2 = {https://doi.org/10.1007/978-3-319-24462-4_21},
    keywords = {fastflow, bioinformatics}
    }

  • M. Aldinucci, G. Peretti Pezzi, M. Drocco, C. Spampinato, and M. Torquati, "Parallel Visual Data Restoration on Multi-GPGPUs using Stencil-Reduce Pattern," International Journal of High Performance Computing Applications, vol. 29, iss. 4, p. 461–472, 2015. doi:10.1177/1094342014567907
    [BibTeX] [Abstract] [Download PDF]

    In this paper, a highly effective parallel filter for visual data restoration is presented. The filter is designed following a skeletal approach, using a newly proposed stencil-reduce, and has been implemented by way of the FastFlow parallel programming library. As a result of its high-level design, it is possible to run the filter seamlessly on a multicore machine, on multi-GPGPUs, or on both. The design and implementation of the filter are discussed, and an experimental evaluation is presented.

    @article{ff:denoiser:ijhpca:15,
      author        = {Marco Aldinucci and Guilherme {Peretti Pezzi} and Maurizio Drocco and Concetto Spampinato and Massimo Torquati},
      title         = {Parallel Visual Data Restoration on Multi-{GPGPUs} using Stencil-Reduce Pattern},
      journal       = {International Journal of High Performance Computing Applications},
      year          = {2015},
      volume        = {29},
      number        = {4},
      pages         = {461--472},
      doi           = {10.1177/1094342014567907},
      url           = {https://iris.unito.it/retrieve/handle/2318/1522073/299200/ijhpca_4aperto.pdf},
      abstract      = {In this paper, a highly effective parallel filter for visual data restoration is presented. The filter is designed following a skeletal approach, using a newly proposed stencil-reduce, and has been implemented by way of the FastFlow parallel programming library. As a result of its high-level design, it is possible to run the filter seamlessly on a multicore machine, on multi-GPGPUs, or on both. The design and implementation of the filter are discussed, and an experimental evaluation is presented.},
      date-added    = {2014-08-23 00:06:10 +0000},
      date-modified = {2015-09-24 11:21:20 +0000},
      bdsk-url-1    = {https://iris.unito.it/retrieve/handle/2318/1522073/299200/ijhpca_4aperto.pdf},
      bdsk-url-2    = {https://doi.org/10.1177/1094342014567907},
      keywords      = {fastflow, paraphrase, impact, nvidia}
    }

  • G. P. Pezzi, E. Vaissié, Y. Viala, D. Caromel, and P. Gourbesville, "Parallel profiling of water distribution networks using the Clément formula," Applied Mathematics and Computation, vol. 267, p. 83–95, 2015. doi:10.1016/j.amc.2015.05.084
    [BibTeX] [Abstract] [Download PDF]

    Optimization of water distribution is a crucial issue which has been targeted by many modeling tools. Useful models, implemented several decades ago, need to be updated and implemented in more powerful computing environments. This paper presents the distributed and redesigned version of a legacy hydraulic simulation software written in Fortran (IRMA) that has been used for over 30 years by the Société du Canal de Provence in order to design and to maintain water distribution networks. IRMA was developed aiming mainly at the treatment of irrigation networks – by using the Clément demand model and is now used to manage more than 6000 km of piped networks. The complexity and size of networks have been growing since the creation of IRMA and the legacy software could not handle the simulation of very large networks in terms of performance. This limitation has finally imposed to redesign the code by using modern tools and language (Java), and also to run distributed simulations by using the ProActive Parallel Suite.

    @article{PerettiPezzi201583,
    title = {Parallel profiling of water distribution networks using the Cl{\'e}ment formula},
    author = {Guilherme Peretti Pezzi and Evelyne Vaissi{\'e} and Yann Viala and Denis Caromel and Philippe Gourbesville},
    year = {2015},
    journal = {Applied Mathematics and Computation},
    volume = {267},
    pages = {83--95},
    doi = {10.1016/j.amc.2015.05.084},
    issn = {0096-3003},
    note = {The Fourth European Seminar on Computing (ESCO 2014)},
    abstract = {Optimization of water distribution is a crucial issue which has been targeted by many modeling tools. Useful models, implemented several decades ago, need to be updated and implemented in more powerful computing environments. This paper presents the distributed and redesigned version of a legacy hydraulic simulation software written in Fortran (IRMA) that has been used for over 30 years by the Soci{\'e}t{\'e} du Canal de Provence in order to design and to maintain water distribution networks. IRMA was developed aiming mainly at the treatment of irrigation networks -- by using the Cl{\'e}ment demand model and is now used to manage more than 6000 km of piped networks. The complexity and size of networks have been growing since the creation of IRMA and the legacy software could not handle the simulation of very large networks in terms of performance. This limitation has finally imposed to redesign the code by using modern tools and language (Java), and also to run distributed simulations by using the ProActive Parallel Suite.},
    date-modified = {2021-04-24 23:23:51 +0200},
    url = {http://www.sciencedirect.com/science/article/pii/S0096300315007080},
    bdsk-url-1 = {http://www.sciencedirect.com/science/article/pii/S0096300315007080},
    bdsk-url-2 = {http://dx.doi.org/10.1016/j.amc.2015.05.084},
    keywords = {impact}
    }

  • M. Sourouri, J. Langguth, F. Spiga, S. B. Baden, and X. Cai, "CPU+GPU Programming of Stencil Computations for Resource-Efficient Use of GPU Clusters," in 18th IEEE International Conference on Computational Science and Engineering, CSE 2015, Porto, Portugal, October 21-23, 2015, 2015, p. 17–26. doi:10.1109/CSE.2015.33
    [BibTeX] [Download PDF]
    @inproceedings{DBLP:conf/cse/SourouriLSBC15,
      author      = {Mohammed Sourouri and Johannes Langguth and Filippo Spiga and Scott B. Baden and Xing Cai},
      title       = {{CPU+GPU} Programming of Stencil Computations for Resource-Efficient Use of {GPU} Clusters},
      booktitle   = {18th {IEEE} International Conference on Computational Science and Engineering, {CSE} 2015, Porto, Portugal, October 21-23, 2015},
      year        = {2015},
      pages       = {17--26},
      doi         = {10.1109/CSE.2015.33},
      url         = {https://doi.org/10.1109/CSE.2015.33},
      optcrossref = {DBLP:conf/cse/2015},
      biburl      = {http://dblp.org/rec/bib/conf/cse/SourouriLSBC15},
      bibsource   = {dblp computer science bibliography, http://dblp.org},
      timestamp   = {Thu, 15 Jun 2017 21:45:34 +0200},
      bdsk-url-1  = {https://doi.org/10.1109/CSE.2015.33}
    }

  • J. Glaser, T. D. Nguyen, J. A. Anderson, P. Lui, F. Spiga, J. A. Millan, D. C. Morse, and S. C. Glotzer, "Strong scaling of general-purpose molecular dynamics simulations on GPUs," Computer Physics Communications, vol. 192, p. 97–107, 2015. doi:10.1016/j.cpc.2015.02.028
    [BibTeX] [Download PDF]
    @article{DBLP:journals/cphysics/GlaserNALSMMG15,
    title = {Strong scaling of general-purpose molecular dynamics simulations on {GPUs}},
    author = {Jens Glaser and Trung Dac Nguyen and Joshua A. Anderson and Pak Lui and Filippo Spiga and Jaime A. Millan and David C. Morse and Sharon C. Glotzer},
    year = {2015},
    journal = {Computer Physics Communications},
    volume = {192},
    pages = {97--107},
    doi = {10.1016/j.cpc.2015.02.028},
    bibsource = {dblp computer science bibliography, http://dblp.org},
    biburl = {http://dblp.org/rec/bib/journals/cphysics/GlaserNALSMMG15},
    timestamp = {Wed, 14 Jun 2017 20:36:08 +0200},
    url = {https://doi.org/10.1016/j.cpc.2015.02.028},
    bdsk-url-1 = {https://doi.org/10.1016/j.cpc.2015.02.028}
    }

2014

  • M. G. Epitropakis, A. Bracciali, M. Aldinucci, E. Potts, and E. K. Burke, "Predictive scheduling for optimal cloud configuration," in Proc. of 10th Intl. Conference on the Practice and Theory of Automated Timetabling, York, United Kingdom, 2014. ISBN: 978-0-9929984-0-0
    [BibTeX] [Download PDF]
    @inproceedings{cloud:patat:14,
    title = {Predictive scheduling for optimal cloud configuration},
    author = {Michael G. Epitropakis and Andrea Bracciali and Marco Aldinucci and Emily Potts and Edmund K. Burke},
    year = {2014},
    month = aug,
    booktitle = {Proc. of 10th Intl. Conference on the Practice and Theory of Automated Timetabling},
    publisher = {PATAT},
    address = {York, United Kingdom},
    isbn = {978-0-9929984-0-0},
    date-added = {2015-03-15 14:34:03 +0000},
    date-modified = {2021-03-26 23:55:53 +0100},
    editor = {Ender {\"O}zcan and Edmund K. Burke and Barry McCollum},
    url = {http://www.patatconference.org/patat2014/proceedings/3_12.pdf},
    bdsk-url-1 = {http://www.patatconference.org/patat2014/proceedings/3_12.pdf}
    }

  • M. Aldinucci, S. Campa, M. Danelutto, P. Kilpatrick, and M. Torquati, "Pool evolution: a domain specific parallel pattern," in Proc. of the 7th Intl. Symposium on High-level Parallel Programming and Applications (HLPP), Amsterdam, The Netherlands, 2014.
    [BibTeX] [Abstract] [Download PDF]

    We introduce a new parallel pattern derived from a specific application domain and show how it turns out to have application beyond its domain of origin. The pool evolution pattern models the parallel evolution of a population subject to mutations and evolving in such a way that a given fitness function is optimized. The pattern has been demonstrated to be suitable for capturing and modeling the parallel patterns underpinning various evolutionary algorithms, as well as other parallel patterns typical of symbolic computation. In this paper we introduce the pattern, developed in the framework of the ParaPhrase EU-funded FP7 project, we discuss its implementation on modern multi/many core architectures and finally present experimental results obtained with FastFlow and Erlang implementations to assess its feasibility and scalability.

    @inproceedings{2014:ff:pool:hlpp,
    title = {Pool evolution: a domain specific parallel pattern},
    author = {Marco Aldinucci and Sonia Campa and Marco Danelutto and Peter Kilpatrick and Massimo Torquati},
    year = {2014},
    month = jul,
    booktitle = {Proc. of the 7th Intl. Symposium on High-level Parallel Programming and Applications (HLPP)},
    address = {Amsterdam, The Netherlands},
    abstract = {We introduce a new parallel pattern derived from a specific application domain and show how it turns out to have application beyond its domain of origin. The pool evolution pattern models the parallel evolution of a population subject to mutations and evolving in such a way that a given fitness function is optimized. The pattern has been demonstrated to be suitable for capturing and modeling the parallel patterns underpinning various evolutionary algorithms, as well as other parallel patterns typical of symbolic computation. In this paper we introduce the pattern, developed in the framework of the ParaPhrase EU-funded FP7 project, we discuss its implementation on modern multi/many core architectures and finally present experimental results obtained with FastFlow and Erlang implementations to assess its feasibility and scalability.},
    date-modified = {2015-09-27 12:14:30 +0000},
    url = {http://calvados.di.unipi.it/storage/paper_files/2014_hlpp_pool.pdf},
    bdsk-url-1 = {http://calvados.di.unipi.it/storage/paper_files/2014_hlpp_pool.pdf},
    keywords = {fastflow, paraphrase, repara}
    }

  • M. Aldinucci, M. Torquati, M. Drocco, G. Peretti Pezzi, and C. Spampinato, "FastFlow: Combining Pattern-Level Abstraction and Efficiency in GPGPUs," in GPU Technology Conference (GTC), San Jose, CA, USA, 2014.
    [BibTeX] [Abstract] [Download PDF]

    Learn how FastFlow's parallel patterns can be used to design parallel applications for execution on both CPUs and GPGPUs while avoiding most of the complex low-level detail needed to make them efficient, portable and rapid to prototype. As use case, we will show the design and effectiveness of a novel universal image filtering template based on the variational approach.

    @inproceedings{ff:gtc:2014,
    title = {{FastFlow}: Combining Pattern-Level Abstraction and Efficiency in {GPGPUs}},
    author = {Marco Aldinucci and Massimo Torquati and Maurizio Drocco and Guilherme {Peretti Pezzi} and Concetto Spampinato},
    year = {2014},
    month = mar,
    booktitle = {GPU Technology Conference (GTC)},
    address = {San Jose, CA, USA},
    abstract = {Learn how FastFlow's parallel patterns can be used to design parallel applications for execution on both CPUs and GPGPUs while avoiding most of the complex low-level detail needed to make them efficient, portable and rapid to prototype. As use case, we will show the design and effectiveness of a novel universal image filtering template based on the variational approach.},
    date-added = {2014-04-19 12:52:40 +0000},
    date-modified = {2017-12-12 13:54:25 +0000},
    url = {http://calvados.di.unipi.it/storage/talks/2014_S4729-Marco-Aldinucci.pdf},
    bdsk-url-1 = {http://calvados.di.unipi.it/storage/talks/2014_S4729-Marco-Aldinucci.pdf},
    keywords = {fastflow, gpu, nvidia, impact, paraphrase}
    }

  • M. Aldinucci, M. Torquati, M. Drocco, G. Peretti Pezzi, and C. Spampinato, "An Overview of FastFlow: Combining Pattern-Level Abstraction and Efficiency in GPGPUs," in GPU Technology Conference (GTC), San Jose, CA, USA, 2014.
    [BibTeX] [Abstract] [Download PDF]

    Get an overview of FastFlow's parallel patterns can be used to design parallel applications for execution on both CPUs and GPGPUs while avoiding most of the complex low-level detail needed to make them efficient, portable and rapid to prototype. For a more detailed and technical review of FastFlow's parallel patterns as well as a use case where we will show the design and effectiveness of a novel universal image filtering template based on the variational approach.

    @inproceedings{ff:gtc:2014:short,
    title = {An Overview of {FastFlow}: Combining Pattern-Level Abstraction and Efficiency in {GPGPUs}},
    author = {Marco Aldinucci and Massimo Torquati and Maurizio Drocco and Guilherme {Peretti Pezzi} and Concetto Spampinato},
    year = {2014},
    month = mar,
    booktitle = {GPU Technology Conference (GTC)},
    address = {San Jose, CA, USA},
    abstract = {Get an overview of FastFlow's parallel patterns can be used to design parallel applications for execution on both CPUs and GPGPUs while avoiding most of the complex low-level detail needed to make them efficient, portable and rapid to prototype. For a more detailed and technical review of FastFlow's parallel patterns as well as a use case where we will show the design and effectiveness of a novel universal image filtering template based on the variational approach.},
    date-added = {2014-04-13 23:20:52 +0000},
    date-modified = {2017-12-12 13:54:20 +0000},
    url = {http://calvados.di.unipi.it/storage/talks/2014_S4585-Marco-Aldinucci.pdf},
    bdsk-url-1 = {http://calvados.di.unipi.it/storage/talks/2014_S4585-Marco-Aldinucci.pdf},
    keywords = {fastflow, gpu, nvidia, impact, paraphrase}
    }

  • D. Buono, M. Danelutto, T. De Matteis, G. Mencagli, and M. Torquati, "A Lightweight Run-Time Support For Fast Dense Linear Algebra on Multi-Core," in Proc. of the 12th International Conference on Parallel and Distributed Computing and Networks (PDCN 2014), 2014.
    [BibTeX]
    @inproceedings{ff:ffmdf:pdcn:14,
    author = {Daniele Buono and Marco Danelutto and De Matteis, Tiziano and Gabriele Mencagli and Massimo Torquati},
    title = {A Lightweight Run-Time Support For Fast Dense Linear Algebra on Multi-Core},
    booktitle = {Proc. of the 12th International Conference on Parallel and Distributed Computing and Networks (PDCN 2014)},
    publisher = {IASTED, ACTA press},
    month = feb,
    year = {2014},
    keywords = {fastflow},
    date-modified = {2018-12-27 18:52:39 +0100}
    }

  • C. Spampinato, I. Kavasidis, M. Aldinucci, C. Pino, D. Giordano, and A. Faro, "Discovering Biological Knowledge by Integrating High Throughput Data and Scientific Literature on the Cloud," Concurrency and Computation: Practice and Experience, vol. 26, iss. 10, p. 1771–1786, 2014. doi:10.1002/cpe.3130
    [BibTeX] [Abstract] [Download PDF]

    In this paper, we present a bioinformatics knowledge discovery tool for extracting and validating associations between biological entities. By mining specialised scientific literature, the tool not only generates biological hypotheses in the form of associations between genes, proteins, miRNA and diseases, but also validates the plausibility of such associations against high-throughput biological data (e.g. microarray) and annotated databases (e.g. Gene Ontology). Both the knowledge discovery system and its validation are carried out by exploiting the advantages and the potentialities of the Cloud, which allowed us to derive and check the validity of thousands of biological associations in a reasonable amount of time. The system was tested on a dataset containing more than 1000 gene-disease associations achieving an average recall of about 71\%, outperforming existing approaches. The results also showed that porting a data-intensive application in an IaaS cloud environment boosts significantly the application's efficiency.

    @article{biocloud:ccpe:13,
    author = {Concetto Spampinato and Isaak Kavasidis and Marco Aldinucci and Carmelo Pino and Daniela Giordano and Alberto Faro},
    title = {Discovering Biological Knowledge by Integrating High Throughput Data and Scientific Literature on the Cloud},
    journal = {Concurrency and Computation: Practice and Experience},
    volume = {26},
    number = {10},
    pages = {1771--1786},
    year = {2014},
    doi = {10.1002/cpe.3130},
    url = {https://iris.unito.it/retrieve/handle/2318/139542/22526/2013_biocloud_ccpe.pdf},
    abstract = {In this paper, we present a bioinformatics knowledge discovery tool for extracting and validating associations between biological entities. By mining specialised scientific literature, the tool not only generates biological hypotheses in the form of associations between genes, proteins, miRNA and diseases, but also validates the plausibility of such associations against high-throughput biological data (e.g. microarray) and annotated databases (e.g. Gene Ontology). Both the knowledge discovery system and its validation are carried out by exploiting the advantages and the potentialities of the Cloud, which allowed us to derive and check the validity of thousands of biological associations in a reasonable amount of time. The system was tested on a dataset containing more than 1000 gene-disease associations achieving an average recall of about 71\%, outperforming existing approaches. The results also showed that porting a data-intensive application in an IaaS cloud environment boosts significantly the application's efficiency.},
    date-added = {2014-12-21 17:48:24 +0000},
    date-modified = {2021-04-24 23:24:19 +0200},
    bdsk-url-1 = {https://iris.unito.it/retrieve/handle/2318/139542/22526/2013_biocloud_ccpe.pdf},
    bdsk-url-2 = {https://doi.org/10.1002/cpe.3130}
    }

  • M. Aldinucci, S. Campa, M. Danelutto, P. Kilpatrick, and M. Torquati, "Design patterns percolating to parallel programming framework implementation," International Journal of Parallel Programming, vol. 42, iss. 6, p. 1012–1031, 2014. doi:10.1007/s10766-013-0273-6
    [BibTeX] [Abstract] [Download PDF]

    Structured parallel programming is recognised as a viable and effective means of tackling parallel programming problems. Recently, a set of simple and powerful parallel building blocks (RISC-pb2l) has been proposed to support modelling and implementation of parallel frameworks. In this work we demonstrate how that same parallel building block set may be used to model both general purpose parallel programming abstractions, not usually listed in classical skeleton sets, and more specialized domain specific parallel patterns. We show how an implementation of RISC-pb2l can be realised via the FastFlow framework and present experimental evidence of the feasibility and efficiency of the approach.

    @article{ijpp:patterns:13,
    author = {Marco Aldinucci and Sonia Campa and Marco Danelutto and Peter Kilpatrick and Massimo Torquati},
    title = {Design patterns percolating to parallel programming framework implementation},
    journal = {International Journal of Parallel Programming},
    volume = {42},
    number = {6},
    pages = {1012--1031},
    year = {2014},
    doi = {10.1007/s10766-013-0273-6},
    issn = {0885-7458},
    url = {https://iris.unito.it/retrieve/handle/2318/140069/22527/2013_ijpp_patterns-web_4aperto_1238811.pdf},
    abstract = {Structured parallel programming is recognised as a viable and effective means of tackling parallel programming problems. Recently, a set of simple and powerful parallel building blocks (RISC-pb2l) has been proposed to support modelling and implementation of parallel frameworks. In this work we demonstrate how that same parallel building block set may be used to model both general purpose parallel programming abstractions, not usually listed in classical skeleton sets, and more specialized domain specific parallel patterns. We show how an implementation of RISC-pb2l can be realised via the FastFlow framework and present experimental evidence of the feasibility and efficiency of the approach.},
    keywords = {fastflow, paraphrase},
    date-added = {2014-12-21 17:47:21 +0000},
    date-modified = {2015-09-27 12:32:37 +0000},
    bdsk-url-1 = {https://iris.unito.it/retrieve/handle/2318/140069/22527/2013_ijpp_patterns-web_4aperto_1238811.pdf},
    bdsk-url-2 = {https://doi.org/10.1007/s10766-013-0273-6}
    }

  • M. Aldinucci, S. Ruggieri, and M. Torquati, "Decision Tree Building on Multi-Core using FastFlow," Concurrency and Computation: Practice and Experience, vol. 26, iss. 3, p. 800–820, 2014. doi:10.1002/cpe.3063
    [BibTeX] [Abstract] [Download PDF]

    The whole computer hardware industry embraced multi-core. The extreme optimisation of sequential algorithms is then no longer sufficient to squeeze the real machine power, which can be only exploited via thread-level parallelism. Decision tree algorithms exhibit natural concurrency that makes them suitable to be parallelised. This paper presents an in-depth study of the parallelisation of an implementation of the C4.5 algorithm for multi-core architectures. We characterise elapsed time lower bounds for the forms of parallelisations adopted, and achieve close to optimal performances. Our implementation is based on the FastFlow parallel programming environment and it requires minimal changes to the original sequential code.

    @article{yadtff:ccpe:13,
    title = {Decision Tree Building on Multi-Core using {FastFlow}},
    author = {Marco Aldinucci and Salvatore Ruggieri and Massimo Torquati},
    year = {2014},
    journal = {Concurrency and Computation: Practice and Experience},
    volume = {26},
    pages = {800--820},
    doi = {10.1002/cpe.3063},
    abstract = {The whole computer hardware industry embraced multi-core. The extreme optimisation of sequential algorithms is then no longer sufficient to squeeze the real machine power, which can be only exploited via thread-level parallelism. Decision tree algorithms exhibit natural concurrency that makes them suitable to be parallelised. This paper presents an in-depth study of the parallelisation of an implementation of the C4.5 algorithm for multi-core architectures. We characterise elapsed time lower bounds for the forms of parallelisations adopted, and achieve close to optimal performances. Our implementation is based on the FastFlow parallel programming environment and it requires minimal changes to the original sequential code.},
    date-added = {2014-12-21 17:46:33 +0000},
    date-modified = {2015-09-27 12:17:52 +0000},
    number = {3},
    url = {https://iris.unito.it/retrieve/handle/2318/139522/118602/yadtff-j.pdf},
    bdsk-url-1 = {https://iris.unito.it/retrieve/handle/2318/139522/118602/yadtff-j.pdf},
    bdsk-url-2 = {https://doi.org/10.1002/cpe.3063},
    keywords = {fastflow, paraphrase}
    }

  • G. P. Pezzi, E. Vaissié, Y. Viala, D. Caromel, and P. Gourbesville, "Parallel Profiling of Water Distribution Networks Using the Clément Formula," in 4th European Seminar on Computing, 2014.
    [BibTeX] [Abstract]

    Optimization of water distribution is a crucial issue which has been targeted by many modelling tools. Useful models, implemented several decades ago, need to be updated and implemented in more powerful computing environments. This paper presents the distributed and redesigned version of a legacy hydraulic simulation software written in Fortran (IRMA) that has been used for over 30 years by the Societé du Canal de Provence in order to design and to maintain water distribution networks. IRMA was developed aiming mainly the treatment of irrigation networks – by using the Clément demand model and is now used to manage more than 6.000 km of piped networks. The growing complexity and size of networks requested to redesign the code by using modern tools and language (Java) and also to run distributed simulations by using the ProActive Parallel Suite.

    @inproceedings{pezzi-clement:14,
    author = {Guilherme Peretti Pezzi and Evelyne Vaissi{\'e} and Yann Viala and Denis Caromel and Philippe Gourbesville},
    title = {Parallel Profiling of Water Distribution Networks Using the Cl{\'e}ment Formula},
    booktitle = {4th European Seminar on Computing},
    year = {2014},
    abstract = {Optimization of water distribution is a crucial issue which has been targeted by many modelling tools. Useful models, implemented several decades ago, need to be updated and implemented in more powerful computing environments. This paper presents the distributed and redesigned version of a legacy hydraulic simulation software written in Fortran (IRMA) that has been used for over 30 years by the Societ{\'e} du Canal de Provence in order to design and to maintain water distribution networks. IRMA was developed aiming mainly the treatment of irrigation networks -- by using the Cl{\'e}ment demand model and is now used to manage more than 6.000 km of piped networks. The growing complexity and size of networks requested to redesign the code by using modern tools and language (Java) and also to run distributed simulations by using the ProActive Parallel Suite.},
    keywords = {impact},
    date-added = {2014-12-20 15:54:08 +0000},
    date-modified = {2015-09-27 12:44:01 +0000}
    }

  • M. Aldinucci, C. Calcagno, M. Coppo, F. Damiani, M. Drocco, E. Sciacca, S. Spinella, M. Torquati, and A. Troina, "On designing multicore-aware simulators for systems biology endowed with on-line statistics," BioMed Research International, 2014. doi:10.1155/2014/207041
    [BibTeX] [Abstract] [Download PDF]

    The paper arguments are on enabling methodologies for the design of a fully parallel, online, interactive tool aiming to support the bioinformatics scientists. In particular, the features of these methodologies, supported by the FastFlow parallel programming framework, are shown on a simulation tool to perform the modeling, the tuning, and the sensitivity analysis of stochastic biological models. A stochastic simulation needs thousands of independent simulation trajectories turning into big data that should be analysed by statistic and data mining tools. In the considered approach the two stages are pipelined in such a way that the simulation stage streams out the partial results of all simulation trajectories to the analysis stage that immediately produces a partial result. The simulation-analysis workflow is validated for performance and effectiveness of the online analysis in capturing biological systems behavior on a multicore platform and representative proof-of-concept biological systems. The exploited methodologies include pattern-based parallel programming and data streaming that provide key features to the software designers such as performance portability and efficient in-memory (big) data management and movement. Two paradigmatic classes of biological systems exhibiting multistable and oscillatory behavior are used as a testbed.

    @article{cwcsim:ff:multicore:biomed:14,
    title = {On designing multicore-aware simulators for systems biology endowed with on-line statistics},
    author = {Marco Aldinucci and Cristina Calcagno and Mario Coppo and Ferruccio Damiani and Maurizio Drocco and Eva Sciacca and Salvatore Spinella and Massimo Torquati and Angelo Troina},
    year = {2014},
    journal = {BioMed Research International},
    doi = {10.1155/2014/207041},
    abstract = {The paper arguments are on enabling methodologies for the design of a fully parallel, online, interactive tool aiming to support the bioinformatics scientists. In particular, the features of these methodologies, supported by the FastFlow parallel programming framework, are shown on a simulation tool to perform the modeling, the tuning, and the sensitivity analysis of stochastic biological models. A stochastic simulation needs thousands of independent simulation trajectories turning into big data that should be analysed by statistic and data mining tools. In the considered approach the two stages are pipelined in such a way that the simulation stage streams out the partial results of all simulation trajectories to the analysis stage that immediately produces a partial result. The simulation-analysis workflow is validated for performance and effectiveness of the online analysis in capturing biological systems behavior on a multicore platform and representative proof-of-concept biological systems. The exploited methodologies include pattern-based parallel programming and data streaming that provide key features to the software designers such as performance portability and efficient in-memory (big) data management and movement. Two paradigmatic classes of biological systems exhibiting multistable and oscillatory behavior are used as a testbed.},
    date-added = {2014-06-26 21:30:32 +0000},
    date-modified = {2015-09-27 12:17:05 +0000},
    url = {http://downloads.hindawi.com/journals/bmri/2014/207041.pdf},
    bdsk-url-1 = {http://downloads.hindawi.com/journals/bmri/2014/207041.pdf},
    bdsk-url-2 = {https://doi.org/10.1155/2014/207041},
    keywords = {fastflow, bioinformatics, paraphrase, biobits}
    }

  • M. Aldinucci, M. Torquati, C. Spampinato, M. Drocco, C. Misale, C. Calcagno, and M. Coppo, "Parallel stochastic systems biology in the cloud," Briefings in Bioinformatics, vol. 15, iss. 5, p. 798–813, 2014. doi:10.1093/bib/bbt040
    [BibTeX] [Abstract] [Download PDF]

    The stochastic modelling of biological systems, coupled with Monte Carlo simulation of models, is an increasingly popular technique in bioinformatics. The simulation-analysis workflow may result computationally expensive reducing the interactivity required in the model tuning. In this work, we advocate the high-level software design as a vehicle for building efficient and portable parallel simulators for the cloud. In particular, the Calculus of Wrapped Components (CWC) simulator for systems biology, which is designed according to the FastFlow pattern-based approach, is presented and discussed. Thanks to the FastFlow framework, the CWC simulator is designed as a high-level workflow that can simulate CWC models, merge simulation results and statistically analyse them in a single parallel workflow in the cloud. To improve interactivity, successive phases are pipelined in such a way that the workflow begins to output a stream of analysis results immediately after simulation is started. Performance and effectiveness of the CWC simulator are validated on the Amazon Elastic Compute Cloud.

    @article{cwc:cloud:bib:13,
    author = {Marco Aldinucci and Massimo Torquati and Concetto Spampinato and Maurizio Drocco and Claudia Misale and Cristina Calcagno and Mario Coppo},
    title = {Parallel stochastic systems biology in the cloud},
    journal = {Briefings in Bioinformatics},
    volume = {15},
    number = {5},
    pages = {798--813},
    year = {2014},
    doi = {10.1093/bib/bbt040},
    issn = {1467-5463},
    url = {https://iris.unito.it/retrieve/handle/2318/140080/22528/FF_Cloud_briefings_final_submitted_copy.pdf},
    abstract = {The stochastic modelling of biological systems, coupled with Monte Carlo simulation of models, is an increasingly popular technique in bioinformatics. The simulation-analysis workflow may result computationally expensive reducing the interactivity required in the model tuning. In this work, we advocate the high-level software design as a vehicle for building efficient and portable parallel simulators for the cloud. In particular, the Calculus of Wrapped Components (CWC) simulator for systems biology, which is designed according to the FastFlow pattern-based approach, is presented and discussed. Thanks to the FastFlow framework, the CWC simulator is designed as a high-level workflow that can simulate CWC models, merge simulation results and statistically analyse them in a single parallel workflow in the cloud. To improve interactivity, successive phases are pipelined in such a way that the workflow begins to output a stream of analysis results immediately after simulation is started. Performance and effectiveness of the CWC simulator are validated on the Amazon Elastic Compute Cloud.},
    keywords = {fastflow, bioinformatics, paraphrase, impact, biobits},
    date-added = {2014-12-21 17:49:54 +0000},
    date-modified = {2021-04-24 23:24:25 +0200},
    bdsk-url-1 = {https://iris.unito.it/retrieve/handle/2318/140080/22528/FF_Cloud_briefings_final_submitted_copy.pdf},
    bdsk-url-2 = {https://doi.org/10.1093/bib/bbt040}
    }

  • M. Aldinucci, G. Peretti Pezzi, M. Drocco, F. Tordini, P. Kilpatrick, and M. Torquati, "Parallel video denoising on heterogeneous platforms," in Proc. of Intl. Workshop on High-level Programming for Heterogeneous and Hierarchical Parallel Systems (HLPGPU), 2014.
    [BibTeX] [Abstract] [Download PDF]

    In this paper, a highly-effective parallel filter for video denoising is presented. The filter is designed using a skeletal approach, and has been implemented by way of the FastFlow parallel programming library. As a result of its high-level design, it is possible to run the filter seamlessly on a multi-core machine, on GPGPU(s), or on both. The design and the implementation of the filter are discussed, and an experimental evaluation is presented. Various mappings of the filtering stages are comparatively discussed.

    @inproceedings{ff:video:hlpgpu:14,
    author = {Marco Aldinucci and Guilherme {Peretti Pezzi} and Maurizio Drocco and Fabio Tordini and Peter Kilpatrick and Massimo Torquati},
    title = {Parallel video denoising on heterogeneous platforms},
    booktitle = {Proc. of Intl. Workshop on High-level Programming for Heterogeneous and Hierarchical Parallel Systems (HLPGPU)},
    year = {2014},
    url = {http://calvados.di.unipi.it/storage/paper_files/2014_ff_video_denoiser_hlpgpu.pdf},
    abstract = {In this paper, a highly-effective parallel filter for video denoising is presented. The filter is designed using a skeletal approach, and has been implemented by way of the FastFlow parallel programming library. As a result of its high-level design, it is possible to run the filter seamlessly on a multi-core machine, on GPGPU(s), or on both. The design and the implementation of the filter are discussed, and an experimental evaluation is presented. Various mappings of the filtering stages are comparatively discussed.},
    keywords = {fastflow, paraphrase, impact},
    date-added = {2013-12-07 18:28:32 +0000},
    date-modified = {2015-09-27 12:42:02 +0000},
    bdsk-url-1 = {http://calvados.di.unipi.it/storage/paper_files/2014_ff_video_denoiser_hlpgpu.pdf}
    }

  • C. Misale, "Accelerating Bowtie2 with a lock-less concurrency approach and memory affinity," in Proc. of Intl. Euromicro PDP 2014: Parallel Distributed and network-based Processing, Torino, Italy, 2014. doi:10.1109/PDP.2014.50
    [BibTeX] [Abstract] [Download PDF]

    The implementation of DNA alignment tools for Bioinformatics lead to face different problems that dip into performances. A single alignment takes an amount of time that is not predictable and there are different factors that can affect performances, for instance the length of sequences can determine the computational grain of the task and mismatches or insertion/deletion (indels) increase time needed to complete an alignment. Moreover, an alignment is a strong memory-bound problem because of the irregular memory access patterns and limitations in memory-bandwidth. Over the years, many alignment tools were implemented. A concrete example is Bowtie2, one of the fastest (concurrent, Pthread-based) and state of the art not GPU-based alignment tool. Bowtie2 exploits concurrency by instantiating a pool of threads, which have access to a global input dataset, share the reference genome and have access to different objects for collecting alignment results. In this paper a modified implementation of Bowtie2 is presented, in which the concurrency structure has been changed. The proposed implementation exploits the task-farm skeleton pattern implemented as a Master-Worker. The Master-Worker pattern permits to delegate only to the Master thread dataset reading and to make private to each Worker data structures that are shared in the original version. Only the reference genome is left shared. As a further optimisation, the Master and each Worker were pinned on cores and the reference genome was allocated interleaved among memory nodes. The proposed implementation is able to gain up to 10 speedup points over the original implementation.

    @inproceedings{ff:bowtie2:pdp:14,
    title = {Accelerating {Bowtie2} with a lock-less concurrency approach and memory affinity},
    author = {Claudia Misale},
    year = {2014},
    booktitle = {Proc. of Intl. Euromicro PDP 2014: Parallel Distributed and network-based Processing},
    publisher = {IEEE},
    address = {Torino, Italy},
    doi = {10.1109/PDP.2014.50},
    note = {(Best paper award)},
    abstract = {The implementation of DNA alignment tools for Bioinformatics lead to face different problems that dip into performances. A single alignment takes an amount of time that is not predictable and there are different factors that can affect performances, for instance the length of sequences can determine the computational grain of the task and mismatches or insertion/deletion (indels) increase time needed to complete an alignment. Moreover, an alignment is a strong memory-bound problem because of the irregular memory access patterns and limitations in memory-bandwidth. Over the years, many alignment tools were implemented. A concrete example is Bowtie2, one of the fastest (concurrent, Pthread-based) and state of the art not GPU-based alignment tool. Bowtie2 exploits concurrency by instantiating a pool of threads, which have access to a global input dataset, share the reference genome and have access to different objects for collecting alignment results. In this paper a modified implementation of Bowtie2 is presented, in which the concurrency structure has been changed. The proposed implementation exploits the task-farm skeleton pattern implemented as a Master-Worker. The Master-Worker pattern permits to delegate only to the Master thread dataset reading and to make private to each Worker data structures that are shared in the original version. Only the reference genome is left shared. As a further optimisation, the Master and each Worker were pinned on cores and the reference genome was allocated interleaved among memory nodes. The proposed implementation is able to gain up to 10 speedup points over the original implementation.},
    date-added = {2013-12-07 18:25:55 +0000},
    date-modified = {2015-09-27 12:41:24 +0000},
    editor = {Marco Aldinucci and Daniele D'Agostino and Peter Kilpatrick},
    url = {http://calvados.di.unipi.it/storage/paper_files/2014_pdp_bowtieff.pdf},
    bdsk-url-1 = {http://calvados.di.unipi.it/storage/paper_files/2014_pdp_bowtieff.pdf},
    bdsk-url-2 = {http://dx.doi.org/10.1109/PDP.2014.50},
    keywords = {fastflow, paraphrase}
    }

  • A. Secco, I. Uddin, G. Peretti Pezzi, and M. Torquati, "Message passing on InfiniBand RDMA for parallel run-time supports," in Proc. of Intl. Euromicro PDP 2014: Parallel Distributed and network-based Processing, Torino, Italy, 2014. doi:10.1109/PDP.2014.23
    [BibTeX] [Abstract] [Download PDF]

    InfiniBand networks are commonly used in the high performance computing area. They offer RDMA-based operations that help to improve the performance of communication subsystems. In this paper, we propose a minimal message-passing communication layer providing the programmer with a point-to-point communication channel implemented by way of InfiniBand RDMA features. Differently from other libraries exploiting the InfiniBand features, such as the well-known Message Passing Interface (MPI), the proposed library is a communication layer only rather than a programming model, and can be easily used as building block for high-level parallel programming frameworks. Evaluated on micro-benchmarks, the proposed RDMA-based communication channel implementation achieves a comparable performance with highly optimised MPI/InfiniBand implementations. Eventually, the flexibility of the communication layer is evaluated by integrating it within the FastFlow parallel framework, currently supporting TCP/IP networks (via the ZeroMQ communication library).

    @inproceedings{ff:infiniband:pdp:14,
    title = {Message passing on {InfiniBand} {RDMA} for parallel run-time supports},
    author = {Alessandro Secco and Irfan Uddin and Guilherme {Peretti Pezzi} and Massimo Torquati},
    year = {2014},
    booktitle = {Proc. of Intl. Euromicro PDP 2014: Parallel Distributed and network-based Processing},
    publisher = {IEEE},
    address = {Torino, Italy},
    doi = {10.1109/PDP.2014.23},
    abstract = {InfiniBand networks are commonly used in the high performance computing area. They offer RDMA-based operations that help to improve the performance of communication subsystems. In this paper, we propose a minimal message-passing communication layer providing the programmer with a point-to-point communication channel implemented by way of InfiniBand RDMA features. Differently from other libraries exploiting the InfiniBand features, such as the well-known Message Passing Interface (MPI), the proposed library is a communication layer only rather than a programming model, and can be easily used as building block for high-level parallel programming frameworks. Evaluated on micro-benchmarks, the proposed RDMA-based communication channel implementation achieves a comparable performance with highly optimised MPI/InfiniBand implementations. Eventually, the flexibility of the communication layer is evaluated by integrating it within the FastFlow parallel framework, currently supporting TCP/IP networks (via the ZeroMQ communication library).},
    date-added = {2013-12-07 18:22:35 +0000},
    date-modified = {2015-09-27 12:35:04 +0000},
    editor = {Marco Aldinucci and Daniele D'Agostino and Peter Kilpatrick},
    url = {https://iris.unito.it/retrieve/handle/2318/151178/690885/2014_ff_infiniband_pdp.pdf},
    bdsk-url-1 = {http://calvados.di.unipi.it/storage/paper_files/2014_ff_infiniband_pdp.pdf},
    bdsk-url-2 = {http://dx.doi.org/10.1109/PDP.2014.23},
    keywords = {fastflow, paraphrase, impact}
    }

  • M. Drocco, M. Aldinucci, and M. Torquati, "A Dynamic Memory Allocator for heterogeneous platforms," in Advanced Computer Architecture and Compilation for High-Performance and Embedded Systems (ACACES) – Poster Abstracts, Fiuggi, Italy, 2014.
    [BibTeX] [Abstract] [Download PDF]

    Modern computers are built upon heterogeneous multi-core/many cores architectures (e.g. GPGPU connected to multi-core CPU). Achieving peak performance on these architectures is hard and may require a substantial programming effort. High-level programming patterns, coupled with efficient low-level runtime supports, have been proposed to relieve the programmer from worrying about low-level details such as synchronisation of racing processes as well as those fine tunings needed to improve the overall performance. Among them are (parallel) dynamic memory allocation and effective exploitation of the memory hierarchy. The memory allocator is often a bottleneck that severely limits program scalability, robustness and portability on parallel systems. In this work we introduce a novel memory allocator, based on the FastFlow's allocator and the recently proposed CUDA Unified Memory, which aims to efficiently integrate host and device memories into a unique dynamic-allocable memory space, accessible transparently by both host and device code.

    @inproceedings{ff:acaces:14,
    title = {A Dynamic Memory Allocator for heterogeneous platforms},
    author = {Maurizio Drocco and Marco Aldinucci and Massimo Torquati},
    year = {2014},
    booktitle = {Advanced Computer Architecture and Compilation for High-Performance and Embedded Systems (ACACES) -- Poster Abstracts},
    publisher = {HiPEAC},
    address = {Fiuggi, Italy},
    abstract = {Modern computers are built upon heterogeneous multi-core/many cores architectures (e.g. GPGPU connected to multi-core CPU). Achieving peak performance on these architectures is hard and may require a substantial programming effort. High-level programming patterns, coupled with efficient low-level runtime supports, have been proposed to relieve the programmer from worrying about low-level details such as synchronisation of racing processes as well as those fine tunings needed to improve the overall performance. Among them are (parallel) dynamic memory allocation and effective exploitation of the memory hierarchy. The memory allocator is often a bottleneck that severely limits program scalability, robustness and portability on parallel systems. In this work we introduce a novel memory allocator, based on the FastFlow's allocator and the recently proposed CUDA Unified Memory, which aims to efficiently integrate host and device memories into a unique dynamic-allocable memory space, accessible transparently by both host and device code.},
    date-modified = {2016-08-20 17:29:47 +0000},
    url = {http://calvados.di.unipi.it/storage/paper_files/2014_ACACES_ex-abstract.pdf},
    bdsk-url-1 = {http://calvados.di.unipi.it/storage/paper_files/2014_ACACES_ex-abstract.pdf},
    bdsk-url-2 = {http://calvados.di.unipi.it/storage/paper_files/2014_ACACES_ex-abstract.pdf},
    keywords = {fastflow, nvidia}
    }

  • C. Misale, G. Ferrero, M. Torquati, and M. Aldinucci, "Sequence alignment tools: one parallel pattern to rule them all?," BioMed Research International, 2014. doi:10.1155/2014/539410
    [BibTeX] [Abstract] [Download PDF]

    In this paper we advocate high-level programming methodology for Next Generation Sequencers (NGS) alignment tools for both productivity and absolute performance. We analyse the problem of parallel alignment and review the parallelisation strategies of the most popular alignment tools, which can all be abstracted to a single parallel paradigm. We compare these tools against their porting onto the FastFlow pattern-based programming framework, which provides programmers with high-level parallel patterns. By using a high-level approach, programmers are liberated from all complex aspects of parallel programming, such as synchronisation protocols and task scheduling, gaining more possibility for seamless performance tuning. In this work we show some use case in which, by using a high-level approach for parallelising NGS tools, it is possible to obtain comparable or even better absolute performance for all used datasets.

    @article{bowtie-bwa:ff:multicore:biomed:14,
    title = {Sequence alignment tools: one parallel pattern to rule them all?},
    author = {Claudia Misale and Giulio Ferrero and Massimo Torquati and Marco Aldinucci},
    year = {2014},
    journal = {BioMed Research International},
    volume = {2014},
    doi = {10.1155/2014/539410},
    abstract = {In this paper we advocate high-level programming methodology for Next Generation Sequencers (NGS) alignment tools for both productivity and absolute performance. We analyse the problem of parallel alignment and review the parallelisation strategies of the most popular alignment tools, which can all be abstracted to a single parallel paradigm. We compare these tools against their porting onto the FastFlow pattern-based programming framework, which provides programmers with high-level parallel patterns. By using a high-level approach, programmers are liberated from all complex aspects of parallel programming, such as synchronisation protocols and task scheduling, gaining more possibility for seamless performance tuning. In this work we show some use case in which, by using a high-level approach for parallelising NGS tools, it is possible to obtain comparable or even better absolute performance for all used datasets.},
    date-added = {2013-01-15 15:55:59 +0000},
    date-modified = {2015-09-27 12:16:28 +0000},
    url = {http://downloads.hindawi.com/journals/bmri/2014/539410.pdf},
    bdsk-url-1 = {http://downloads.hindawi.com/journals/bmri/2014/539410.pdf},
    bdsk-url-2 = {http://dx.doi.org/10.1155/2014/539410},
    keywords = {fastflow, bioinformatics, paraphrase, repara}
    }

  • M. Aldinucci, M. Drocco, G. Peretti Pezzi, C. Misale, F. Tordini, and M. Torquati, "Exercising high-level parallel programming on streams: a systems biology use case," in Proc. of 34th IEEE Intl. Conference on Distributed Computing Systems Workshops (ICDCSW), Madrid, Spain, 2014. doi:10.1109/ICDCSW.2014.38
    [BibTeX] [Abstract] [Download PDF]

    The stochastic modelling of biological systems, coupled with Monte Carlo simulation of models, is an increasingly popular technique in Bioinformatics. The simulation-analysis workflow may result into a computationally expensive task reducing the interactivity required in the model tuning. In this work, we advocate high-level software design as a vehicle for building efficient and portable parallel simulators for a variety of platforms, ranging from multi-core platforms to GPGPUs to cloud. In particular, the Calculus of Wrapped Compartments (CWC) parallel simulator for systems biology equipped with on-line mining of results, which is designed according to the FastFlow pattern-based approach, is discussed as a running example. In this work, the CWC simulator is used as a paradigmatic example of a complex C++ application where the quality of results is correlated with both computation and I/O bounds, and where high-quality results might turn into big data. The FastFlow parallel programming framework, which advocates C++ pattern-based parallel programming makes it possible to develop portable parallel code without relinquish neither run-time efficiency nor performance tuning opportunities. Performance and effectiveness of the approach are validated on a variety of platforms, inter-alia cache-coherent multi-cores, cluster of multi-core (Ethernet and Infiniband) and the Amazon Elastic Compute Cloud.

    @inproceedings{cwc:gpu:dcperf:14,
    title = {Exercising high-level parallel programming on streams: a systems biology use case},
    author = {Marco Aldinucci and Maurizio Drocco and Guilherme {Peretti Pezzi} and Claudia Misale and Fabio Tordini and Massimo Torquati},
    year = {2014},
    booktitle = {Proc. of 34th IEEE Intl. Conference on Distributed Computing Systems Workshops (ICDCSW)},
    publisher = {IEEE},
    address = {Madrid, Spain},
    doi = {10.1109/ICDCSW.2014.38},
    abstract = {The stochastic modelling of biological systems, coupled with Monte Carlo simulation of models, is an increasingly popular technique in Bioinformatics. The simulation-analysis workflow may result into a computationally expensive task reducing the interactivity required in the model tuning. In this work, we advocate high-level software design as a vehicle for building efficient and portable parallel simulators for a variety of platforms, ranging from multi-core platforms to GPGPUs to cloud. In particular, the Calculus of Wrapped Compartments (CWC) parallel simulator for systems biology equipped with on-line mining of results, which is designed according to the FastFlow pattern-based approach, is discussed as a running example. In this work, the CWC simulator is used as a paradigmatic example of a complex C++ application where the quality of results is correlated with both computation and I/O bounds, and where high-quality results might turn into big data. The FastFlow parallel programming framework, which advocates C++ pattern-based parallel programming makes it possible to develop portable parallel code without relinquish neither run-time efficiency nor performance tuning opportunities. Performance and effectiveness of the approach are validated on a variety of platforms, inter-alia cache-coherent multi-cores, cluster of multi-core (Ethernet and Infiniband) and the Amazon Elastic Compute Cloud.},
    date-added = {2014-04-19 12:44:39 +0000},
    date-modified = {2021-04-24 23:23:10 +0200},
    url = {https://iris.unito.it/retrieve/handle/2318/154516/26657/2014_dcperf_cwc_gpu.pdf},
    bdsk-url-1 = {https://iris.unito.it/retrieve/handle/2318/154516/26657/2014_dcperf_cwc_gpu.pdf},
    bdsk-url-2 = {https://doi.org/10.1109/ICDCSW.2014.38},
    keywords = {fastflow, bioinformatics, paraphrase, impact}
    }

2013

  • M. Drocco, "Parallel stochastic simulators in systems biology: the evolution of the species," Master Thesis, 2013.
    [BibTeX] [Abstract] [Download PDF]

    The stochastic simulation of biological systems is an increasingly popular technique in bioinformatics. It is often an enlightening technique, especially for multi-stable systems whose dynamics can be hardly captured with ordinary differential equations. To be effective, stochastic simulations should be supported by powerful statistical analysis tools. The simulation/analysis workflow may however result in being computationally expensive, thus compromising the interactivity required especially in model tuning. In this work we discuss the main opportunities to speed up the framework by parallelisation on modern multicore and hybrid multicore and distributed platforms, advocating the high-level design of simulators for stochastic systems as a vehicle for building efficient and portable parallel simulators endowed with on-line statistical analysis. In particular, the Calculus of Wrapped Compartments (CWC) Simulator, which is designed according to the FastFlow's pattern-based approach, is presented and discussed in this work.

    @mastersthesis{tesi:drocco:13,
    author = {Maurizio Drocco},
    title = {Parallel stochastic simulators in systems biology: the evolution of the species},
    school = {Computer Science Department, University of Torino, Italy},
    year = {2013},
    month = jul,
    url = {http://calvados.di.unipi.it/storage/paper_files/2013_tesi_drocco.pdf},
    bdsk-url-1 = {http://calvados.di.unipi.it/storage/paper_files/2013_tesi_drocco.pdf},
    date-modified = {2013-11-24 00:29:54 +0000},
    keywords = {fastflow},
    abstract = {The stochastic simulation of biological systems is an increasingly popular technique in bioinformatics. It is often an enlightening technique, especially for multi-stable systems whose dynamics can be hardly captured with ordinary differential equations. To be effective, stochastic simulations should be supported by powerful statistical analysis tools. The simulation/analysis workflow may however result in being computationally expensive, thus compromising the interactivity required especially in model tuning. In this work we discuss the main opportunities to speed up the framework by parallelisation on modern multicore and hybrid multicore and distributed platforms, advocating the high-level design of simulators for stochastic systems as a vehicle for building efficient and portable parallel simulators endowed with on-line statistical analysis. In particular, the Calculus of Wrapped Compartments (CWC) Simulator, which is designed according to the FastFlow's pattern-based approach, is presented and discussed in this work.}
    }

  • M. Aldinucci, F. Tordini, M. Drocco, M. Torquati, and M. Coppo, "Parallel stochastic simulators in system biology: the evolution of the species," in Proc. of 21st Euromicro Intl. Conference on Parallel Distributed and network-based Processing (PDP), Belfast, Northern Ireland, U.K., 2013. doi:10.1109/PDP.2013.66
    [BibTeX] [Abstract] [Download PDF]

    The stochastic simulation of biological systems is an increasingly popular technique in Bioinformatics. It is often an enlightening technique, especially for multi-stable systems which dynamics can be hardly captured with ordinary differential equations. To be effective, stochastic simulations should be supported by powerful statistical analysis tools. The simulation-analysis workflow may however result in being computationally expensive, thus compromising the interactivity required in model tuning. In this work we advocate the high-level design of simulators for stochastic systems as a vehicle for building efficient and portable parallel simulators. In particular, the Calculus of Wrapped Components (CWC) simulator, which is designed according to the FastFlow's pattern-based approach, is presented and discussed in this work. FastFlow has been extended to support also clusters of multi-cores with minimal coding effort, assessing the portability of the approach.

    @inproceedings{ff_cwc_distr:pdp:13,
    title = {Parallel stochastic simulators in system biology: the evolution of the species},
    author = {Marco Aldinucci and Fabio Tordini and Maurizio Drocco and Massimo Torquati and Mario Coppo},
    year = {2013},
    month = feb,
    booktitle = {Proc. of 21st Euromicro Intl. Conference on Parallel Distributed and network-based Processing (PDP)},
    publisher = {IEEE},
    address = {Belfast, Northern Ireland, U.K.},
    doi = {10.1109/PDP.2013.66},
    abstract = {The stochastic simulation of biological systems is an increasingly popular technique in Bioinformatics. It is often an enlightening technique, especially for multi-stable systems which dynamics can be hardly captured with ordinary differential equations. To be effective, stochastic simulations should be supported by powerful statistical analysis tools. The simulation-analysis workflow may however result in being computationally expensive, thus compromising the interactivity required in model tuning. In this work we advocate the high-level design of simulators for stochastic systems as a vehicle for building efficient and portable parallel simulators. In particular, the Calculus of Wrapped Components (CWC) simulator, which is designed according to the FastFlow's pattern-based approach, is presented and discussed in this work. FastFlow has been extended to support also clusters of multi-cores with minimal coding effort, assessing the portability of the approach.},
    date-added = {2012-01-20 19:22:15 +0100},
    date-modified = {2017-12-12 13:53:10 +0000},
    url = {http://calvados.di.unipi.it/storage/paper_files/2013_cwc_d_PDP.pdf},
    bdsk-url-1 = {http://calvados.di.unipi.it/storage/paper_files/2013_cwc_d_PDP.pdf},
    bdsk-url-2 = {http://dx.doi.org/10.1109/PDP.2013.66},
    keywords = {fastflow, bioinformatics}
    }

  • C. Misale, M. Aldinucci, and M. Torquati, "Memory affinity in multi-threading: the Bowtie2 case study," in Advanced Computer Architecture and Compilation for High-Performance and Embedded Systems (ACACES) – Poster Abstracts, Fiuggi, Italy, 2013.
    [BibTeX] [Abstract] [Download PDF]

    The diffusion of the Next Generation Sequencing (NGS) has increased the amount of data obtainable by genomic experiments. From a DNA sample a NGS run is able to produce millions of short sequences (called reads), which should be mapped into a reference genome. In this paper, we analyse the performance of Bowtie2, a fast and popular DNA mapping tool. Bowtie2 exhibits a multithreading implementation on top of pthreads, spin-locks and SSE2 SIMD extension. From parallel computing viewpoint, is a paradigmatic example of a software requiring to address three fundamental problems in shared-memory programming for cache-coherent multi-core platforms: synchronisation efficiency at very fine grain (due to short reads), load-balancing (due to long reads), and efficient usage of memory subsystem (due to SSE2 memory pressure). We compare the original implementation against an alternative implementation on top of the FastFlow pattern-based programming framework. The proposed design exploits the high-level farm pattern of FastFlow, which is implemented top of nonblocking multi-threading and lock-less (CAS-free) queues, and provides the programmer with high-level mechanism to tune task scheduling to achieve both load-balancing and memory affinity. The proposed design, despite the high-level design, is always faster and more scalable with respect to the original one. The design of both original and alternative version will be presented along with their experimental evaluation on real-world data sets.

    @inproceedings{ff:acaces:13,
    author = {Claudia Misale and Marco Aldinucci and Massimo Torquati},
    title = {Memory affinity in multi-threading: the Bowtie2 case study},
    booktitle = {Advanced Computer Architecture and Compilation for High-Performance and Embedded Systems (ACACES) -- Poster Abstracts},
    publisher = {HiPEAC},
    address = {Fiuggi, Italy},
    year = {2013},
    isbn = {9789038221908},
    url = {https://iris.unito.it/retrieve/handle/2318/143005/23874/2013_ACACES_ex-abstract.pdf},
    bdsk-url-1 = {https://iris.unito.it/retrieve/handle/2318/143005/23874/2013_ACACES_ex-abstract.pdf},
    date-added = {2015-03-21 15:12:59 +0000},
    date-modified = {2015-03-21 15:12:59 +0000},
    keywords = {fastflow},
    abstract = {The diffusion of the Next Generation Sequencing (NGS) has increased the amount of data obtainable by genomic experiments. From a DNA sample a NGS run is able to produce millions of short sequences (called reads), which should be mapped into a reference genome. In this paper, we analyse the performance of Bowtie2, a fast and popular DNA mapping tool. Bowtie2 exhibits a multithreading implementation on top of pthreads, spin-locks and SSE2 SIMD extension. From parallel computing viewpoint, is a paradigmatic example of a software requiring to address three fundamental problems in shared-memory programming for cache-coherent multi-core platforms: synchronisation efficiency at very fine grain (due to short reads), load-balancing (due to long reads), and efficient usage of memory subsystem (due to SSE2 memory pressure). We compare the original implementation against an alternative implementation on top of the FastFlow pattern-based programming framework. The proposed design exploits the high-level farm pattern of FastFlow, which is implemented top of nonblocking multi-threading and lock-less (CAS-free) queues, and provides the programmer with high-level mechanism to tune task scheduling to achieve both load-balancing and memory affinity. The proposed design, despite the high-level design, is always faster and more scalable with respect to the original one. The design of both original and alternative version will be presented along with their experimental evaluation on real-world data sets.}
    }

  • M. Aldinucci, M. Danelutto, P. Kilpatrick, C. Montangero, and L. Semini, "Managing Adaptivity in Parallel Systems," in Formal Methods for Components and Objects: Intl. Symposium, FMCO 2011, Torino, Italy, October 3-5, 2011, Revised Invited Lectures, B. Beckert, F. Damiani, F. S. de Boer, and M. M. Bonsangue, Eds., Springer, 2013, vol. 7542, p. 199–217. doi:10.1007/978-3-642-35887-6_11
    [BibTeX] [Abstract] [Download PDF]

    The management of non-functional features (performance, security, power management, etc.) is traditionally a difficult, error prone task for programmers of parallel applications. To take care of these non-functional features, autonomic managers running policies represented as rules using sensors and actuators to monitor and transform a running parallel application may be used. We discuss an approach aimed at providing formal tool support to the integration of independently developed autonomic managers taking care of different non-functional concerns within the same parallel application. Our approach builds on the Behavioural Skeleton experience (autonomic management of non-functional features in structured parallel applications) and on previous results on conflict detection and resolution in rule-based systems.

    @incollection{adaptivity:fmco:11,
    author = {Marco Aldinucci and Marco Danelutto and Peter Kilpatrick and Carlo Montangero and Laura Semini},
    title = {Managing Adaptivity in Parallel Systems},
    editor = {Bernhard Beckert and Ferruccio Damiani and Frank S. de Boer and Marcello M. Bonsangue},
    booktitle = {Formal Methods for Components and Objects: Intl. Symposium, FMCO 2011, Torino, Italy, October 3-5, 2011, Revised Invited Lectures},
    series = {LNCS},
    volume = {7542},
    pages = {199--217},
    publisher = {Springer},
    year = {2013},
    doi = {10.1007/978-3-642-35887-6_11},
    isbn = {978-3-642-35886-9},
    url = {http://calvados.di.unipi.it/storage/paper_files/2013_fmco11_adaptivity.pdf},
    bdsk-url-1 = {http://calvados.di.unipi.it/storage/paper_files/2013_fmco11_adaptivity.pdf},
    bdsk-url-2 = {http://dx.doi.org/10.1007/978-3-642-35887-6_11},
    date-added = {2012-06-04 19:05:16 +0200},
    date-modified = {2021-04-24 23:22:33 +0200},
    keywords = {paraphrase},
    abstract = {The management of non-functional features (performance, security, power management, etc.) is traditionally a difficult, error prone task for programmers of parallel applications. To take care of these non-functional features, autonomic managers running policies represented as rules using sensors and actuators to monitor and transform a running parallel application may be used. We discuss an approach aimed at providing formal tool support to the integration of independently developed autonomic managers taking care of different non-functional concerns within the same parallel application. Our approach builds on the Behavioural Skeleton experience (autonomic management of non-functional features in structured parallel applications) and on previous results on conflict detection and resolution in rule-based systems.}
    }

  • M. Aldinucci, S. Campa, P. Kilpatrick, and M. Torquati, "Structured Data Access Annotations for Massively Parallel Computations," in Euro-Par 2012 Workshops, Proc. of the ParaPhrase Workshop on Parallel Processing, 2013, p. 381–390. doi:10.1007/978-3-642-36949-0_42
    [BibTeX] [Abstract] [Download PDF]

    We describe an approach aimed at addressing the issue of joint exploitation of control (stream) and data parallelism in a skeleton based parallel programming environment, based on annotations and refactoring. Annotations drive efficient implementation of a parallel computation. Refactoring is used to transform the associated skeleton tree into a more efficient, functionally equivalent skeleton tree. In most cases, cost models are used to drive the refactoring process. We show how sample use case applications/kernels may be optimized and discuss preliminary experiments with FastFlow assessing the theoretical results.

    @inproceedings{annotation:para:12,
    title = {Structured Data Access Annotations for Massively Parallel Computations},
    author = {Marco Aldinucci and Sonia Campa and Peter Kilpatrick and Massimo Torquati},
    year = {2013},
    booktitle = {Euro-Par 2012 Workshops, Proc. of the ParaPhrase Workshop on Parallel Processing},
    publisher = {Springer},
    series = {LNCS},
    volume = {7640},
    pages = {381--390},
    doi = {10.1007/978-3-642-36949-0_42},
    abstract = {We describe an approach aimed at addressing the issue of joint exploitation of control (stream) and data parallelism in a skeleton based parallel programming environment, based on annotations and refactoring. Annotations drive efficient implementation of a parallel computation. Refactoring is used to transform the associated skeleton tree into a more efficient, functionally equivalent skeleton tree. In most cases, cost models are used to drive the refactoring process. We show how sample use case applications/kernels may be optimized and discuss preliminary experiments with FastFlow assessing the theoretical results.},
    date-added = {2012-07-23 21:22:03 +0000},
    date-modified = {2015-09-27 12:49:52 +0000},
    url = {http://calvados.di.unipi.it/storage/paper_files/2013_annot_europar_workshops.pdf},
    bdsk-url-1 = {http://calvados.di.unipi.it/storage/paper_files/2013_annot_europar_workshops.pdf},
    bdsk-url-2 = {https://doi.org/10.1007/978-3-642-36949-0_42},
    keywords = {fastflow, paraphrase}
    }

  • M. Aldinucci, S. Campa, M. Danelutto, P. Kilpatrick, and M. Torquati, "Targeting Distributed Systems in FastFlow," in Euro-Par 2012 Workshops, Proc. of the CoreGrid Workshop on Grids, Clouds and P2P Computing, 2013, p. 47–56. doi:10.1007/978-3-642-36949-0_7
    [BibTeX] [Abstract] [Download PDF]

    FastFlow is a structured parallel programming framework targeting shared memory multi-core architectures. In this paper we introduce a FastFlow extension aimed at supporting a network of multi-core workstation as well. The extension supports the execution of FastFlow programs by coordinating – in a structured way – the fine grain parallel activities running on a single workstation. We discuss the design and the implementation of this extension presenting preliminary experimental results validating it on state-of-the-art networked multi-core nodes.

    @inproceedings{ff:distr:cgs:12,
    author = {Marco Aldinucci and Sonia Campa and Marco Danelutto and Peter Kilpatrick and Massimo Torquati},
    title = {Targeting Distributed Systems in FastFlow},
    booktitle = {Euro-Par 2012 Workshops, Proc. of the CoreGrid Workshop on Grids, Clouds and P2P Computing},
    series = {LNCS},
    volume = {7640},
    pages = {47--56},
    publisher = {Springer},
    year = {2013},
    doi = {10.1007/978-3-642-36949-0_7},
    url = {http://calvados.di.unipi.it/storage/paper_files/2012_distr_ff_cgsymph.pdf},
    bdsk-url-1 = {http://calvados.di.unipi.it/storage/paper_files/2012_distr_ff_cgsymph.pdf},
    bdsk-url-2 = {http://dx.doi.org/10.1007/978-3-642-36949-0_7},
    date-added = {2012-07-23 21:22:03 +0000},
    date-modified = {2015-09-27 12:47:54 +0000},
    keywords = {fastflow, paraphrase},
    abstract = {FastFlow is a structured parallel programming framework targeting shared memory multi-core architectures. In this paper we introduce a FastFlow extension aimed at supporting a network of multi-core workstation as well. The extension supports the execution of FastFlow programs by coordinating -- in a structured way -- the fine grain parallel activities running on a single workstation. We discuss the design and the implementation of this extension presenting preliminary experimental results validating it on state-of-the-art networked multi-core nodes.}
    }

  • K. Hammond, M. Aldinucci, C. Brown, F. Cesarini, M. Danelutto, H. González-Vélez, P. Kilpatrick, R. Keller, M. Rossbory, and G. Shainer, "The ParaPhrase Project: Parallel Patterns for Adaptive Heterogeneous Multicore Systems," in Formal Methods for Components and Objects: Intl. Symposium, FMCO 2011, Torino, Italy, October 3-5, 2011, Revised Invited Lectures, B. Beckert, F. Damiani, F. S. de Boer, and M. M. Bonsangue, Eds., Springer, 2013, vol. 7542, p. 218–236. doi:10.1007/978-3-642-35887-6_12
    [BibTeX] [Abstract] [Download PDF]

    This paper describes the ParaPhrase project, a new 3-year targeted research project funded under EU Framework 7 Objective 3.4 (Computer Systems), starting in October 2011. ParaPhrase aims to follow a new approach to introducing parallelism using advanced refactoring techniques coupled with high-level parallel design patterns. The refactoring approach will use these design patterns to restructure programs defined as networks of software components into other forms that are more suited to parallel execution. The programmer will be aided by high-level cost information that will be integrated into the refactoring tools. The implementation of these patterns will then use a well-understood algorithmic skeleton approach to achieve good parallelism. A key ParaPhrase design goal is that parallel components are intended to match heterogeneous architectures, defined in terms of CPU/GPU combinations, for example. In order to achieve this, the ParaPhrase approach will map components at link time to the available hardware, and will then re-map them during program execution, taking account of multiple applications, changes in hardware resource availability, the desire to reduce communication costs etc. In this way, we aim to develop a new approach to programming that will be able to produce software that can adapt to dynamic changes in the system environment. Moreover, by using a strong component basis for parallelism, we can achieve potentially significant gains in terms of reducing sharing at a high level of abstraction, and so in reducing or even eliminating the costs that are usually associated with cache management, locking, and synchronisation.

    @incollection{paraphrase:fmco:11,
    author = {Kevin Hammond and Marco Aldinucci and Chris Brown and Francesco Cesarini and Marco Danelutto and Horacio Gonz\'alez-V\'elez and Peter Kilpatrick and Rainer Keller and Michael Rossbory and Gilad Shainer},
    title = {The ParaPhrase Project: Parallel Patterns for Adaptive Heterogeneous Multicore Systems},
    editor = {Bernhard Beckert and Ferruccio Damiani and Frank S. de Boer and Marcello M. Bonsangue},
    booktitle = {Formal Methods for Components and Objects: Intl. Symposium, FMCO 2011, Torino, Italy, October 3-5, 2011, Revised Invited Lectures},
    series = {LNCS},
    volume = {7542},
    pages = {218--236},
    publisher = {Springer},
    year = {2013},
    doi = {10.1007/978-3-642-35887-6_12},
    isbn = {978-3-642-35886-9},
    url = {http://calvados.di.unipi.it/storage/paper_files/2013_fmco11_paraphrase.pdf},
    bdsk-url-1 = {http://dx.doi.org/10.1007/978-3-642-35887-6_12},
    bdsk-url-2 = {http://calvados.di.unipi.it/storage/paper_files/2013_fmco11_paraphrase.pdf},
    date-added = {2012-06-04 19:21:18 +0200},
    date-modified = {2013-11-24 00:33:27 +0000},
    keywords = {paraphrase},
    abstract = {This paper describes the ParaPhrase project, a new 3-year targeted research project funded under EU Framework 7 Objective 3.4 (Computer Systems), starting in October 2011. ParaPhrase aims to follow a new approach to introducing parallelism using advanced refactoring techniques coupled with high-level parallel design patterns. The refactoring approach will use these design patterns to restructure programs defined as networks of software components into other forms that are more suited to parallel execution. The programmer will be aided by high-level cost information that will be integrated into the refactoring tools. The implementation of these patterns will then use a well-understood algorithmic skeleton approach to achieve good parallelism. A key ParaPhrase design goal is that parallel components are intended to match heterogeneous architectures, defined in terms of CPU/GPU combinations, for example. In order to achieve this, the ParaPhrase approach will map components at link time to the available hardware, and will then re-map them during program execution, taking account of multiple applications, changes in hardware resource availability, the desire to reduce communication costs etc. In this way, we aim to develop a new approach to programming that will be able to produce software that can adapt to dynamic changes in the system environment. Moreover, by using a strong component basis for parallelism, we can achieve potentially significant gains in terms of reducing sharing at a high level of abstraction, and so in reducing or even eliminating the costs that are usually associated with cache management, locking, and synchronisation.}
    }

  • M. Aldinucci, S. Campa, F. Tordini, M. Torquati, and P. Kilpatrick, "An abstract annotation model for skeletons," in Formal Methods for Components and Objects: Intl. Symposium, FMCO 2011, Torino, Italy, October 3-5, 2011, Revised Invited Lectures, B. Beckert, F. Damiani, F. S. de Boer, and M. M. Bonsangue, Eds., Springer, 2013, vol. 7542, p. 257–276. doi:10.1007/978-3-642-35887-6_14
    [BibTeX] [Abstract] [Download PDF]

    Multi-core and many-core platforms are becoming increasingly heterogeneous and asymmetric. This significantly increases the porting and tuning effort required for parallel codes, which in turn often leads to a growing gap between peak machine power and actual application performance. In this work a first step toward the automated optimization of high level skeleton-based parallel code is discussed. The paper presents an abstract annotation model for skeleton programs aimed at formally describing suitable mapping of parallel activities on a high-level platform representation. The derived mapping and scheduling strategies are used to generate optimized run-time code.

    @incollection{toolchain:fmco:11,
        author        = {Marco Aldinucci and Sonia Campa and Fabio Tordini and Massimo Torquati and Peter Kilpatrick},
        title         = {An abstract annotation model for skeletons},
        booktitle     = {Formal Methods for Components and Objects: Intl. Symposium, FMCO 2011, Torino, Italy, October 3-5, 2011, Revised Invited Lectures},
        editor        = {Bernhard Beckert and Ferruccio Damiani and Frank S. de Boer and Marcello M. Bonsangue},
        year          = {2013},
        publisher     = {Springer},
        series        = {LNCS},
        volume        = {7542},
        pages         = {257--276},
        doi           = {10.1007/978-3-642-35887-6_14},
        isbn          = {978-3-642-35886-9},
        url           = {http://calvados.di.unipi.it/storage/paper_files/2013_fmco11_annotation.pdf},
        abstract      = {Multi-core and many-core platforms are becoming increasingly heterogeneous and asymmetric. This significantly increases the porting and tuning effort required for parallel codes, which in turn often leads to a growing gap between peak machine power and actual application performance. In this work a first step toward the automated optimization of high level skeleton-based parallel code is discussed. The paper presents an abstract annotation model for skeleton programs aimed at formally describing suitable mapping of parallel activities on a high-level platform representation. The derived mapping and scheduling strategies are used to generate optimized run-time code.},
        keywords      = {fastflow, paraphrase},
        date-added    = {2012-06-04 19:23:25 +0200},
        date-modified = {2013-11-24 00:33:41 +0000},
        bdsk-url-1    = {http://calvados.di.unipi.it/storage/paper_files/2013_fmco11_annotation.pdf},
        bdsk-url-2    = {http://dx.doi.org/10.1007/978-3-642-35887-6_14}
    }

2012

  • M. Aldinucci, C. Spampinato, M. Drocco, M. Torquati, and S. Palazzo, "A Parallel Edge Preserving Algorithm for Salt and Pepper Image Denoising," in Proc. of 2nd Intl. Conference on Image Processing Theory Tools and Applications (IPTA), Istanbul, Turkey, 2012, p. 97–102. doi:10.1109/IPTA.2012.6469567
    [BibTeX] [Abstract] [Download PDF]

    In this paper a two-phase filter for removing ``salt and pepper'' noise is proposed. In the first phase, an adaptive median filter is used to identify the set of the noisy pixels; in the second phase, these pixels are restored according to a regularization method, which contains a data-fidelity term reflecting the impulse noise characteristics. The algorithm, which exhibits good performance both in denoising and in restoration, can be easily and effectively parallelized to exploit the full power of multi-core CPUs and GPGPUs; the proposed implementation based on the FastFlow library achieves both close-to-ideal speedup and very good wall-clock execution figures.

    @inproceedings{denoiser:ff:ipta:12,
    title = {A Parallel Edge Preserving Algorithm for Salt and Pepper Image Denoising},
    author = {Marco Aldinucci and Concetto Spampinato and Maurizio Drocco and Massimo Torquati and Simone Palazzo},
    year = {2012},
    month = oct,
    booktitle = {Proc. of 2nd Intl. Conference on Image Processing Theory Tools and Applications (IPTA)},
    publisher = {IEEE},
    address = {Istanbul, Turkey},
    pages = {97--102},
    doi = {10.1109/IPTA.2012.6469567},
    isbn = {978-1-4673-2582-0},
    abstract = {In this paper a two-phase filter for removing ``salt and pepper'' noise is proposed. In the first phase, an adaptive median filter is used to identify the set of the noisy pixels; in the second phase, these pixels are restored according to a regularization method, which contains a data-fidelity term reflecting the impulse noise characteristics. The algorithm, which exhibits good performance both in denoising and in restoration, can be easily and effectively parallelized to exploit the full power of multi-core CPUs and GPGPUs; the proposed implementation based on the FastFlow library achieves both close-to-ideal speedup and very good wall-clock execution figures.},
    date-added = {2012-06-04 18:38:01 +0200},
    date-modified = {2015-09-27 12:53:53 +0000},
    editor = {K. Djemal and M. Deriche and W. Puech and Osman N. Ucan},
    url = {http://calvados.di.unipi.it/storage/paper_files/2012_2phasedenoiser_ff_ipta.pdf},
    bdsk-url-1 = {http://calvados.di.unipi.it/storage/paper_files/2012_2phasedenoiser_ff_ipta.pdf},
    bdsk-url-2 = {http://dx.doi.org/10.1109/IPTA.2012.6469567},
    keywords = {fastflow, impact}
    }

  • M. Aldinucci, M. Danelutto, P. Kilpatrick, M. Meneghin, and M. Torquati, "An Efficient Unbounded Lock-Free Queue for Multi-core Systems," in Proc. of 18th Intl. Euro-Par 2012 Parallel Processing, Rhodes Island, Greece, 2012, p. 662–673. doi:10.1007/978-3-642-32820-6_65
    [BibTeX] [Abstract] [Download PDF]

    The use of efficient synchronization mechanisms is crucial for implementing fine grained parallel programs on modern shared cache multi-core architectures. In this paper we study this problem by considering Single-Producer/Single-Consumer (SPSC) coordination using unbounded queues. A novel unbounded SPSC algorithm capable of reducing the row synchronization latency and speeding up Producer-Consumer coordination is presented. The algorithm has been extensively tested on a shared-cache multi-core platform and a sketch proof of correctness is presented. The queues proposed have been used as basic building blocks to implement the FastFlow parallel framework, which has been demonstrated to offer very good performance for fine-grain parallel applications.

    @inproceedings{ff:spsc:europar:12,
        author        = {Marco Aldinucci and Marco Danelutto and Peter Kilpatrick and Massimiliano Meneghin and Massimo Torquati},
        title         = {An Efficient Unbounded Lock-Free Queue for Multi-core Systems},
        booktitle     = {Proc. of 18th Intl. Euro-Par 2012 Parallel Processing},
        year          = {2012},
        month         = aug,
        publisher     = {Springer},
        address       = {Rhodes Island, Greece},
        series        = {LNCS},
        volume        = {7484},
        pages         = {662--673},
        doi           = {10.1007/978-3-642-32820-6_65},
        url           = {http://calvados.di.unipi.it/storage/paper_files/2012_spsc_europar.pdf},
        abstract      = {The use of efficient synchronization mechanisms is crucial for implementing fine grained parallel programs on modern shared cache multi-core architectures. In this paper we study this problem by considering Single-Producer/Single-Consumer (SPSC) coordination using unbounded queues. A novel unbounded SPSC algorithm capable of reducing the row synchronization latency and speeding up Producer-Consumer coordination is presented. The algorithm has been extensively tested on a shared-cache multi-core platform and a sketch proof of correctness is presented. The queues proposed have been used as basic building blocks to implement the FastFlow parallel framework, which has been demonstrated to offer very good performance for fine-grain parallel applications.},
        keywords      = {fastflow, paraphrase},
        date-added    = {2011-04-19 10:22:00 +0200},
        date-modified = {2015-09-27 12:55:20 +0000},
        bdsk-url-1    = {http://calvados.di.unipi.it/storage/paper_files/2012_spsc_europar.pdf},
        bdsk-url-2    = {http://dx.doi.org/10.1007/978-3-642-32820-6_65}
    }

  • M. Aldinucci, M. Danelutto, P. Kilpatrick, and M. Torquati, "Targeting heterogeneous architectures via macro data flow," Parallel Processing Letters, vol. 22, iss. 2, 2012. doi:10.1142/S0129626412400063
    [BibTeX] [Abstract] [Download PDF]

    We propose a data flow based run time system as an efficient tool for supporting execution of parallel code on heterogeneous architectures hosting both multicore CPUs and GPUs. We discuss how the proposed run time system may be the target of both structured parallel applications developed using algorithmic skeletons/parallel design patterns and also more ``domain specific'' programming models. Experimental results demonstrating the feasibility of the approach are presented.

    @article{mdf:hplgpu:ppl:12,
        author        = {Marco Aldinucci and Marco Danelutto and Peter Kilpatrick and Massimo Torquati},
        title         = {Targeting heterogeneous architectures via macro data flow},
        journal       = {Parallel Processing Letters},
        year          = {2012},
        month         = jun,
        volume        = {22},
        number        = {2},
        doi           = {10.1142/S0129626412400063},
        issn          = {0129-6264},
        url           = {http://calvados.di.unipi.it/storage/paper_files/2012_mdf_PPL-hplgpu.pdf},
        abstract      = {We propose a data flow based run time system as an efficient tool for supporting execution of parallel code on heterogeneous architectures hosting both multicore CPUs and GPUs. We discuss how the proposed run time system may be the target of both structured parallel applications developed using algorithmic skeletons/parallel design patterns and also more ``domain specific'' programming models. Experimental results demonstrating the feasibility of the approach are presented.},
        annote        = {Extended version of Intl. Workshop on High-level Programming for Heterogeneous and Hierarchical Parallel Systems (HLPGPU)},
        keywords      = {fastflow, paraphrase},
        date-added    = {2012-04-25 13:20:40 +0000},
        date-modified = {2015-09-27 12:55:11 +0000},
        bdsk-url-1    = {http://calvados.di.unipi.it/storage/paper_files/2012_mdf_PPL-hplgpu.pdf},
        bdsk-url-2    = {http://dx.doi.org/10.1142/S0129626412400063}
    }

  • M. Aldinucci, M. Danelutto, and M. Torquati, "FastFlow tutorial," Università di Pisa, Dipartimento di Informatica, Italy, TR-12-04, 2012.
    [BibTeX] [Download PDF]
    @techreport{fastflow_tutorial:TR-12-04:12,
        author        = {Marco Aldinucci and Marco Danelutto and Massimo Torquati},
        title         = {FastFlow tutorial},
        institution   = {Universit\`a di Pisa, Dipartimento di Informatica, Italy},
        number        = {TR-12-04},
        year          = {2012},
        month         = mar,
        url           = {http://compass2.di.unipi.it/TR/Files/TR-12-04.pdf.gz},
        keywords      = {fastflow},
        date-added    = {2011-03-17 23:19:05 +0100},
        date-modified = {2013-11-24 00:34:55 +0000},
        bdsk-url-1    = {http://compass2.di.unipi.it/TR/Files/TR-12-04.pdf.gz}
    }

  • M. Torquati, M. Vanneschi, M. Amini, S. Guelton, R. Keryell, V. Lanore, F.-X. Pasquier, M. Barreteau, R. Barrere, T. Petrisor, E. Lenormand, C. Cantini, and F. De Stefani, "An innovative compilation tool-chain for embedded multi-core architectures," in Embedded World Conference, Nuremberg, Germany, 2012.
    [BibTeX]
    @inproceedings{artemis:toolchain:12,
    title = {An innovative compilation tool-chain for embedded multi-core architectures},
    author = {Massimo Torquati and Marco Vanneschi and M. Amini and S. Guelton and R. Keryell and V. Lanore and F.-X. Pasquier and M. Barreteau and R. Barrere and T. Petrisor and E. Lenormand and C. Cantini and F. {De Stefani}},
    year = {2012},
    month = feb,
    booktitle = {Embedded World Conference},
    address = {Nuremberg, Germany},
    date-added = {2012-04-04 12:18:38 +0000},
    date-modified = {2012-04-04 12:21:40 +0000}
    }

  • M. Aldinucci, M. Danelutto, L. Anardu, M. Torquati, and P. Kilpatrick, "Parallel patterns + Macro Data Flow for multi-core programming," in Proc. of Intl. Euromicro PDP 2012: Parallel Distributed and network-based Processing, Garching, Germany, 2012, p. 27–36. doi:10.1109/PDP.2012.44
    [BibTeX] [Abstract] [Download PDF]

    Data flow techniques have been around since the early '70s when they were used in compilers for sequential languages. Shortly after their introduction they were also considered as a possible model for parallel computing, although the impact here was limited. Recently, however, data flow has been identified as a candidate for efficient implementation of various programming models on multi-core architectures. In most cases, however, the burden of determining data flow ``macro'' instructions is left to the programmer, while the compiler/run time system manages only the efficient scheduling of these instructions. We discuss a structured parallel programming approach supporting automatic compilation of programs to macro data flow and we show experimental results demonstrating the feasibility of the approach and the efficiency of the resulting ``object'' code on different classes of state-of-the-art multi-core architectures. The experimental results use different base mechanisms to implement the macro data flow run time support, from plain pthreads with condition variables to more modern and effective lock- and fence-free parallel frameworks. Experimental results comparing efficiency of the proposed approach with those achieved using other, more classical, parallel frameworks are also presented.

    @inproceedings{dataflow:pdp:12,
        author        = {Marco Aldinucci and Marco Danelutto and Lorenzo Anardu and Massimo Torquati and Peter Kilpatrick},
        title         = {Parallel patterns + Macro Data Flow for multi-core programming},
        booktitle     = {Proc. of Intl. Euromicro PDP 2012: Parallel Distributed and network-based Processing},
        year          = {2012},
        month         = feb,
        publisher     = {IEEE},
        address       = {Garching, Germany},
        pages         = {27--36},
        doi           = {10.1109/PDP.2012.44},
        url           = {http://calvados.di.unipi.it/storage/paper_files/2012_mdf_PDP.pdf},
        abstract      = {Data flow techniques have been around since the early '70s when they were used in compilers for sequential languages. Shortly after their introduction they were also considered as a possible model for parallel computing, although the impact here was limited. Recently, however, data flow has been identified as a candidate for efficient implementation of various programming models on multi-core architectures. In most cases, however, the burden of determining data flow ``macro'' instructions is left to the programmer, while the compiler/run time system manages only the efficient scheduling of these instructions. We discuss a structured parallel programming approach supporting automatic compilation of programs to macro data flow and we show experimental results demonstrating the feasibility of the approach and the efficiency of the resulting ``object'' code on different classes of state-of-the-art multi-core architectures. The experimental results use different base mechanisms to implement the macro data flow run time support, from plain pthreads with condition variables to more modern and effective lock- and fence-free parallel frameworks. Experimental results comparing efficiency of the proposed approach with those achieved using other, more classical, parallel frameworks are also presented.},
        keywords      = {fastflow},
        date-added    = {2012-10-24 17:29:14 +0000},
        date-modified = {2013-11-24 00:35:34 +0000},
        bdsk-url-1    = {http://calvados.di.unipi.it/storage/paper_files/2012_mdf_PDP.pdf},
        bdsk-url-2    = {http://dx.doi.org/10.1109/PDP.2012.44}
    }

  • M. Coppo, F. Damiani, M. Drocco, E. Grassi, E. Sciacca, S. Spinella, and A. Troina, "Simulation techniques for the calculus of wrapped compartments," Theoretical Computer Science, vol. 431, p. 75–95, 2012. doi:10.1016/j.tcs.2011.12.063
    [BibTeX] [Abstract]

    The modelling and analysis of biological systems has deep roots in Mathematics, specifically in the field of Ordinary Differential Equations (ODEs). Alternative approaches based on formal calculi, often derived from process algebras or term rewriting systems, provide a quite complementary way to analyse the behaviour of biological systems. These calculi allow to cope in a natural way with notions like compartments and membranes, which are not easy (sometimes impossible) to handle with purely numerical approaches, and are often based on stochastic simulation methods. Recently, it has also become evident that stochastic effects in regulatory networks play a crucial role in the analysis of such systems. Actually, in many situations it is necessary to use stochastic models. For example when the system to be described is based on the interaction of few molecules, when we are at the presence of a chemical instability, or when we want to simulate the functioning of a pool of entities whose compartmentalised structure evolves dynamically. In contrast, stable metabolic networks, involving a large number of reagents, for which the computational cost of a stochastic simulation becomes an insurmountable obstacle, are efficiently modelled with ODEs. In this paper we define a hybrid simulation method, combining the stochastic approach with ODEs, for systems described in the Calculus of Wrapped Compartments (CWC), a calculus on which we can express the compartmentalisation of a biological system whose evolution is defined by a set of rewrite rules.

    @article{DBLP:journals/tcs/CoppoDDGSST12,
        author        = {Mario Coppo and Ferruccio Damiani and Maurizio Drocco and Elena Grassi and Eva Sciacca and Salvatore Spinella and Angelo Troina},
        title         = {Simulation techniques for the calculus of wrapped compartments},
        journal       = {Theoretical Computer Science},
        year          = {2012},
        volume        = {431},
        pages         = {75--95},
        doi           = {10.1016/j.tcs.2011.12.063},
        abstract      = {The modelling and analysis of biological systems has deep roots in Mathematics, specifically in the field of Ordinary Differential Equations (ODEs). Alternative approaches based on formal calculi, often derived from process algebras or term rewriting systems, provide a quite complementary way to analyse the behaviour of biological systems. These calculi allow to cope in a natural way with notions like compartments and membranes, which are not easy (sometimes impossible) to handle with purely numerical approaches, and are often based on stochastic simulation methods. Recently, it has also become evident that stochastic effects in regulatory networks play a crucial role in the analysis of such systems. Actually, in many situations it is necessary to use stochastic models. For example when the system to be described is based on the interaction of few molecules, when we are at the presence of a chemical instability, or when we want to simulate the functioning of a pool of entities whose compartmentalised structure evolves dynamically. In contrast, stable metabolic networks, involving a large number of reagents, for which the computational cost of a stochastic simulation becomes an insurmountable obstacle, are efficiently modelled with ODEs. In this paper we define a hybrid simulation method, combining the stochastic approach with ODEs, for systems described in the Calculus of Wrapped Compartments (CWC), a calculus on which we can express the compartmentalisation of a biological system whose evolution is defined by a set of rewrite rules.},
        bibsource     = {DBLP, http://dblp.uni-trier.de},
        ee            = {http://dx.doi.org/10.1016/j.tcs.2011.12.063},
        date-added    = {2013-12-12 22:28:07 +0000},
        date-modified = {2013-12-13 10:37:47 +0000},
        bdsk-url-1    = {http://dx.doi.org/10.1016/j.tcs.2011.12.063}
    }

  • M. Aldinucci, M. Coppo, F. Damiani, M. Drocco, E. Sciacca, S. Spinella, M. Torquati, and A. Troina, "On Parallelizing On-Line Statistics for Stochastic Biological Simulations," in Proc. of Euro-Par Workshops: 2nd Workshop on High Performance Bioinformatics and Biomedicine (HiBB), Bordeaux, France, 2012, p. 3–12. doi:10.1007/978-3-642-29740-3_2
    [BibTeX] [Abstract] [Download PDF]

    This work concerns a general technique to enrich parallel version of stochastic simulators for biological systems with tools for on-line statistical analysis of the results. In particular, within the FastFlow parallel programming framework, we describe the methodology and the implementation of a parallel Monte Carlo simulation infrastructure extended with user-defined on-line data filtering and mining functions. The simulator and the on-line analysis were validated on large multi-core platforms and representative proof-of-concept biological systems.

    @inproceedings{cwcsim:onlinestats:ff:hibb:11,
        author        = {Marco Aldinucci and Mario Coppo and Ferruccio Damiani and Maurizio Drocco and Eva Sciacca and Salvatore Spinella and Massimo Torquati and Angelo Troina},
        title         = {On Parallelizing On-Line Statistics for Stochastic Biological Simulations},
        booktitle     = {Proc. of Euro-Par Workshops: 2nd Workshop on High Performance Bioinformatics and Biomedicine (HiBB)},
        editor        = {Michael Alexander and Pasqua D'Ambra and Adam Belloum and George Bosilca and Mario Cannataro and Marco Danelutto and Beniamino Di Martino and Michael Gerndt and Emmanuel Jeannot and Raymond Namyst and Jean Roman and Stephen L. Scott and Jesper Larsson Tr{\"a}ff and Geoffroy Vall{\'e}e and Josef Weidendorfer},
        year          = {2012},
        publisher     = {Springer},
        address       = {Bordeaux, France},
        series        = {LNCS},
        volume        = {7156},
        pages         = {3--12},
        doi           = {10.1007/978-3-642-29740-3_2},
        url           = {http://calvados.di.unipi.it/storage/paper_files/2012_onlinestat_HiBB2011.pdf},
        abstract      = {This work concerns a general technique to enrich parallel version of stochastic simulators for biological systems with tools for on-line statistical analysis of the results. In particular, within the FastFlow parallel programming framework, we describe the methodology and the implementation of a parallel Monte Carlo simulation infrastructure extended with user-defined on-line data filtering and mining functions. The simulator and the on-line analysis were validated on large multi-core platforms and representative proof-of-concept biological systems.},
        keywords      = {bioinformatics, fastflow},
        date-added    = {2010-08-15 00:50:09 +0200},
        date-modified = {2017-12-12 14:47:15 +0000},
        bdsk-url-1    = {http://calvados.di.unipi.it/storage/paper_files/2012_onlinestat_HiBB2011.pdf},
        bdsk-url-2    = {http://dx.doi.org/10.1007/978-3-642-29740-3_2}
    }

  • F. Tordini, M. Aldinucci, and M. Torquati, "High-level lock-less programming for multicore," in Advanced Computer Architecture and Compilation for High-Performance and Embedded Systems (ACACES) – Poster Abstracts, Fiuggi, Italy, 2012.
    [BibTeX] [Abstract] [Download PDF]

    Modern computers are built upon multi-core architectures. Achieving peak performance on these architectures is hard and may require a substantial programming effort. The synchronisation of many processes racing to access a common resource (the shared memory) has been a fundamental problem on parallel computing for years, and many solutions have been proposed to address this issue. Non-blocking synchronisation and transactional primitives have been envisioned as a way to reduce memory wall problem. Despite sometimes effective (and exhibiting a great momentum in the research community), they are only one facet of the problem, as their exploitation still requires non-trivial programming skills. With non-blocking philosophy in mind, we propose high-level programming patterns that will relieve the programmer from worrying about low-level details such as synchronisation of racing processes as well as those fine tunings needed to improve the overall performance, like proper (distributed) dynamic memory allocation and effective exploitation of the memory hierarchy.

    @inproceedings{ff:acaces:12,
        author        = {Fabio Tordini and Marco Aldinucci and Massimo Torquati},
        title         = {High-level lock-less programming for multicore},
        booktitle     = {Advanced Computer Architecture and Compilation for High-Performance and Embedded Systems (ACACES) -- Poster Abstracts},
        year          = {2012},
        publisher     = {HiPEAC},
        address       = {Fiuggi, Italy},
        isbn          = {9789038219875},
        url           = {http://calvados.di.unipi.it/storage/paper_files/2012_ACACES_ex-abstract.pdf},
        abstract      = {Modern computers are built upon multi-core architectures. Achieving peak performance on these architectures is hard and may require a substantial programming effort. The synchronisation of many processes racing to access a common resource (the shared memory) has been a fundamental problem on parallel computing for years, and many solutions have been proposed to address this issue. Non-blocking synchronisation and transactional primitives have been envisioned as a way to reduce memory wall problem. Despite sometimes effective (and exhibiting a great momentum in the research community), they are only one facet of the problem, as their exploitation still requires non-trivial programming skills. With non-blocking philosophy in mind, we propose high-level programming patterns that will relieve the programmer from worrying about low-level details such as synchronisation of racing processes as well as those fine tunings needed to improve the overall performance, like proper (distributed) dynamic memory allocation and effective exploitation of the memory hierarchy.},
        keywords      = {fastflow},
        date-added    = {2012-07-17 17:58:06 +0200},
        date-modified = {2013-11-24 00:36:10 +0000},
        bdsk-url-1    = {http://calvados.di.unipi.it/storage/paper_files/2012_ACACES_ex-abstract.pdf}
    }

  • T. Weigold, M. Aldinucci, M. Danelutto, and V. Getov, "Process-Driven Biometric Identification by means of Autonomic Grid Components," Int. J. of Autonomous and Adaptive Communications Systems, vol. 5, iss. 3, p. 274–291, 2012. doi:10.1504/IJAACS.2012.047659
    [BibTeX] [Abstract] [Download PDF]

    Today's business applications are increasingly process driven, meaning that the main application logic is executed by a dedicate process engine. In addition, component-oriented software development has been attracting attention for building complex distributed applications. In this paper we present the experiences gained from building a process-driven biometric identification application that makes use of Grid infrastructures via the Grid Component Model (GCM). GCM, besides guaranteeing access to Grid resources, supports autonomic management of notable parallel composite components. This feature is exploited within our biometric identification application to ensure real time identification of fingerprints. Therefore, we briefly introduce the GCM framework and the process engine used, and we describe the implementation of the application by means of autonomic GCM components. Finally, we summarize the results, experiences, and lessons learned focusing on the integration of autonomic GCM components and the process-driven approach.

    @article{ibm:ijaacs:12,
        author        = {Thomas Weigold and Marco Aldinucci and Marco Danelutto and Vladimir Getov},
        title         = {Process-Driven Biometric Identification by means of Autonomic Grid Components},
        journal       = {Int. J. of Autonomous and Adaptive Communications Systems},
        year          = {2012},
        publisher     = {Inderscience Enterprises Ltd.},
        volume        = {5},
        number        = {3},
        pages         = {274--291},
        doi           = {10.1504/IJAACS.2012.047659},
        issn          = {1754-8632},
        url           = {http://calvados.di.unipi.it/storage/paper_files/2012_JAACS_Weigold.pdf},
        abstract      = {Today's business applications are increasingly process driven, meaning that the main application logic is executed by a dedicate process engine. In addition, component-oriented software development has been attracting attention for building complex distributed applications. In this paper we present the experiences gained from building a process-driven biometric identification application that makes use of Grid infrastructures via the Grid Component Model (GCM). GCM, besides guaranteeing access to Grid resources, supports autonomic management of notable parallel composite components. This feature is exploited within our biometric identification application to ensure real time identification of fingerprints. Therefore, we briefly introduce the GCM framework and the process engine used, and we describe the implementation of the application by means of autonomic GCM components. Finally, we summarize the results, experiences, and lessons learned focusing on the integration of autonomic GCM components and the process-driven approach.},
        date-added    = {2009-08-01 21:01:36 +0200},
        date-modified = {2013-06-17 14:14:36 +0000},
        bdsk-url-1    = {http://www.inderscience.com/info/inarticletoc.php?jcode=ijaacs&year=2012&vol=5&issue=3},
        bdsk-url-2    = {http://calvados.di.unipi.it/storage/paper_files/2012_JAACS_Weigold.pdf},
        bdsk-url-3    = {http://dx.doi.org/10.1504/IJAACS.2012.047659}
    }

  • F. Spiga and I. Girotto, "phiGEMM: A CPU-GPU Library for Porting Quantum ESPRESSO on Hybrid Systems," in Proceedings of the 20th Euromicro International Conference on Parallel, Distributed and Network-Based Processing, PDP 2012, Munich, Germany, February 15-17, 2012, 2012, p. 368–375. doi:10.1109/PDP.2012.72
    [BibTeX] [Download PDF]
    @inproceedings{DBLP:conf/pdp/SpigaG12,
        author        = {Filippo Spiga and Ivan Girotto},
        title         = {phiGEMM: {A} {CPU-GPU} Library for Porting Quantum {ESPRESSO} on Hybrid Systems},
        booktitle     = {Proceedings of the 20th Euromicro International Conference on Parallel, Distributed and Network-Based Processing, {PDP} 2012, Munich, Germany, February 15-17, 2012},
        year          = {2012},
        pages         = {368--375},
        doi           = {10.1109/PDP.2012.72},
        url           = {https://doi.org/10.1109/PDP.2012.72},
        optcrossref   = {DBLP:conf/pdp/2012},
        bibsource     = {dblp computer science bibliography, http://dblp.org},
        biburl        = {http://dblp.org/rec/bib/conf/pdp/SpigaG12},
        timestamp     = {Thu, 25 May 2017 00:43:12 +0200},
        bdsk-url-1    = {https://doi.org/10.1109/PDP.2012.72}
    }

2011

  • C. Grandi, B. Bockelman, D. Bonacorsi, I. Fisk, I. González Caballero, F. Farina, M. Hernández, S. Padhi, S. Sarkar, A. Sciabà, I. Sfiligoi, F. Spiga, M. Úbeda García, D. C. Van Der Ster, and M. Zvada, "CMS Distributed Computing Integration in the LHC sustained operations era," Journal of Physics: Conference Series, vol. 331, p. 62032, 2011.
    [BibTeX]
    @article{11:cms,
    title = {CMS Distributed Computing Integration in the LHC sustained operations era},
    author = {Grandi, C and Bockelman, B and Bonacorsi, D and Fisk, I and Gonz{\'a}lez Caballero, Isidro and Farina, Fabio and Hern{\'a}ndez, M and Padhi, Sanjay and Sarkar, Sabyasachi and Sciab{\`a}, Andrea and Sfiligoi, I and Spiga, F and {\'U}beda Garc{\'\i}a, M and C Van Der Ster, D and Zvada, Marian},
    year = {2011},
    month = dec,
    journal = {Journal of Physics: Conference Series},
    volume = {331},
    pages = {062032}
    }

  • C. Calcagno, M. Coppo, F. Damiani, M. Drocco, E. Sciacca, S. Spinella, and A. Troina, "Modelling Spatial Interactions in the Arbuscular Mycorrhizal Symbiosis using the Calculus of Wrapped Compartments," in Proc. of 3rd Intl. Workshop on Computational Models for Cell Processes (CompMod), Aachen, Germany, 2011, p. 3–18.
    [BibTeX] [Abstract]

    Arbuscular mycorrhiza (AM) is the most wide-spread plant-fungus symbiosis on earth. Investigating this kind of symbiosis is considered one of the most promising ways to develop methods to nurture plants in more natural manners, avoiding the complex chemical productions used nowadays to produce artificial fertilizers. In previous work we used the Calculus of Wrapped Compartments (CWC) to investigate different phases of the AM symbiosis. In this paper, we continue this line of research by modelling the colonisation of the plant root cells by the fungal hyphae spreading in the soil. This study requires the description of some spatial interaction. Although CWC has no explicit feature modelling a spatial geometry, the compartment labelling feature can be effectively exploited to define a discrete surface topology outlining the relevant sectors which determine the spatial properties of the system under consideration. Different situations and interesting spatial properties can be modelled and analysed in such a lightweight framework (which has not an explicit notion of geometry with coordinates and spatial metrics), thus exploiting the existing CWC simulation tool.

    @inproceedings{DBLP:journals/corr/abs-1109-1363,
      author        = {Cristina Calcagno and Mario Coppo and Ferruccio Damiani and Maurizio Drocco and Eva Sciacca and Salvatore Spinella and Angelo Troina},
      title         = {Modelling Spatial Interactions in the Arbuscular Mycorrhizal Symbiosis using the Calculus of Wrapped Compartments},
      booktitle     = {Proc. of 3rd Intl. Workshop on Computational Models for Cell Processes (CompMod)},
      editor        = {Ion Petre and Erik P. de Vink},
      series        = {EPTCS},
      volume        = {67},
      pages         = {3--18},
      address       = {Aachen, Germany},
      year          = {2011},
      month         = sep,
      doi           = {10.4204/EPTCS.67.3},
      abstract      = {Arbuscular mycorrhiza (AM) is the most wide-spread plant-fungus symbiosis on earth. Investigating this kind of symbiosis is considered one of the most promising ways to develop methods to nurture plants in more natural manners, avoiding the complex chemical productions used nowadays to produce artificial fertilizers. In previous work we used the Calculus of Wrapped Compartments (CWC) to investigate different phases of the AM symbiosis. In this paper, we continue this line of research by modelling the colonisation of the plant root cells by the fungal hyphae spreading in the soil. This study requires the description of some spatial interaction. Although CWC has no explicit feature modelling a spatial geometry, the compartment labelling feature can be effectively exploited to define a discrete surface topology outlining the relevant sectors which determine the spatial properties of the system under consideration. Different situations and interesting spatial properties can be modelled and analysed in such a lightweight framework (which has not an explicit notion of geometry with coordinates and spatial metrics), thus exploiting the existing CWC simulation tool.},
      bibsource     = {DBLP, http://dblp.uni-trier.de},
      date-added    = {2013-12-12 22:25:03 +0000},
      date-modified = {2017-12-12 13:51:04 +0000},
      ee            = {http://dx.doi.org/10.4204/EPTCS.67.3},
      internal-note = {NOTE(review): DOI was only in the legacy DBLP ee field, which standard styles ignore; added a proper doi field (bare DOI taken from the ee URL)}
    }

  • M. Aldinucci, M. Danelutto, P. Kilpatrick, and V. Xhagjika, "LIBERO: a framework for autonomic management of multiple non-functional concerns," in Euro-Par 2010 Workshops, Proc. of the CoreGrid Workshop on Grids, Clouds and P2P Computing, Ischia, Italy, 2011, p. 237–245. doi:10.1007/978-3-642-21878-1_30
    [BibTeX] [Abstract] [Download PDF]

    We describe a lightweight prototype framework (LIBERO) designed for experimentation with behavioural skeletons-components implementing a well-known parallelism exploitation pattern and a rule-based autonomic manager taking care of some non-functional feature related to pattern computation. LIBERO supports multiple autonomic managers within the same behavioural skeleton, each taking care of a different non-functional concern. We introduce LIBERO – built on plain Java and JBoss – and discuss how multiple managers may be coordinated to achieve a common goal using a two-phase coordination protocol developed in earlier work. We present experimental results that demonstrate how the prototype may be used to investigate autonomic management of multiple, independent concerns.

    @inproceedings{libero:cgsymph:10,
      author        = {Marco Aldinucci and Marco Danelutto and Peter Kilpatrick and Vamir Xhagjika},
      title         = {LIBERO: a framework for autonomic management of multiple non-functional concerns},
      booktitle     = {Euro-Par 2010 Workshops, Proc. of the CoreGrid Workshop on Grids, Clouds and P2P Computing},
      editor        = {M. R. Guarracino and F. Vivien and J. L. Tr\"aff and M. Cannataro and M. Danelutto and A. Hast and F. Perla and A. Kn\"upfer and B. Di Martino and M. Alexander},
      series        = {LNCS},
      volume        = {6586},
      pages         = {237--245},
      publisher     = {Springer},
      address       = {Ischia, Italy},
      year          = {2011},
      month         = sep,
      doi           = {10.1007/978-3-642-21878-1_30},
      url           = {http://calvados.di.unipi.it/storage/paper_files/2011_libero_coregridworkshop2010.pdf},
      abstract      = {We describe a lightweight prototype framework (LIBERO) designed for experimentation with behavioural skeletons-components implementing a well-known parallelism exploitation pattern and a rule-based autonomic manager taking care of some non-functional feature related to pattern computation. LIBERO supports multiple autonomic managers within the same behavioural skeleton, each taking care of a different non-functional concern. We introduce LIBERO -- built on plain Java and JBoss -- and discuss how multiple managers may be coordinated to achieve a common goal using a two-phase coordination protocol developed in earlier work. We present experimental results that demonstrate how the prototype may be used to investigate autonomic management of multiple, independent concerns.},
      date-added    = {2011-09-12 14:58:27 +0200},
      date-modified = {2012-12-27 14:26:15 +0000},
      bdsk-url-1    = {http://calvados.di.unipi.it/storage/paper_files/2011_libero_coregridworkshop2010.pdf},
      bdsk-url-2    = {http://dx.doi.org/10.1007/978-3-642-21878-1_30}
    }

  • M. Aldinucci, L. Anardu, M. Danelutto, P. Kilpatrick, and M. Torquati, "Targeting multi cores by structured programming and data flow," Università di Pisa, Dipartimento di Informatica, Italy, TR-11-13, 2011.
    [BibTeX] [Download PDF]
    @techreport{TR-11-13,
      author        = {Marco Aldinucci and Lorenzo Anardu and Marco Danelutto and Peter Kilpatrick and Massimo Torquati},
      title         = {Targeting multi cores by structured programming and data flow},
      institution   = {Universit\`a di Pisa, Dipartimento di Informatica, Italy},
      number        = {TR-11-13},
      year          = {2011},
      month         = sep,
      url           = {http://compass2.di.unipi.it/TR/Files/TR-11-13.pdf.gz},
      date-added    = {2012-06-06 22:55:41 +0000},
      date-modified = {2012-06-06 22:57:26 +0000},
      bdsk-url-1    = {http://compass2.di.unipi.it/TR/Files/TR-11-13.pdf.gz},
      internal-note = {NOTE(review): bdsk-url-1 pointed to TR-11-06.pdf.gz (copy-paste from the TR-11-06 entry) while url/number are TR-11-13; corrected and dropped the now-duplicate bdsk-url-2}
    }

  • M. Aldinucci, M. Danelutto, P. Kilpatrick, M. Meneghin, and M. Torquati, "Accelerating code on multi-cores with FastFlow," in Proc. of 17th Intl. Euro-Par 2011 Parallel Processing, Bordeaux, France, 2011, p. 170–181. doi:10.1007/978-3-642-23397-5_17
    [BibTeX] [Abstract] [Download PDF]

    FastFlow is a programming framework specifically targeting cache-coherent shared-memory multicores. It is implemented as a stack of C++ template libraries built on top of lock-free (and memory fence free) synchronization mechanisms. Its philosophy is to combine programmability with performance. In this paper a new FastFlow programming methodology aimed at supporting parallelization of existing sequential code via offloading onto a dynamically created software accelerator is presented. The new methodology has been validated using a set of simple micro-benchmarks and some real applications.

    @inproceedings{ff:acc:europar:11,
      author        = {Marco Aldinucci and Marco Danelutto and Peter Kilpatrick and Massimiliano Meneghin and Massimo Torquati},
      title         = {Accelerating code on multi-cores with FastFlow},
      booktitle     = {Proc. of 17th Intl. Euro-Par 2011 Parallel Processing},
      editor        = {E. Jeannot and R. Namyst and J. Roman},
      series        = {LNCS},
      volume        = {6853},
      pages         = {170--181},
      publisher     = {Springer},
      address       = {Bordeaux, France},
      year          = {2011},
      month         = aug,
      doi           = {10.1007/978-3-642-23397-5_17},
      url           = {http://calvados.di.unipi.it/storage/paper_files/2011_fastflow_acc_europar.pdf},
      abstract      = {FastFlow is a programming framework specifically targeting cache-coherent shared-memory multicores. It is implemented as a stack of C++ template libraries built on top of lock-free (and memory fence free) synchronization mechanisms. Its philosophy is to combine programmability with performance. In this paper a new FastFlow programming methodology aimed at supporting parallelization of existing sequential code via offloading onto a dynamically created software accelerator is presented. The new methodology has been validated using a set of simple micro-benchmarks and some real applications.},
      keywords      = {fastflow},
      date-added    = {2012-06-04 18:35:57 +0200},
      date-modified = {2013-12-12 00:46:59 +0000},
      bdsk-url-1    = {http://calvados.di.unipi.it/storage/paper_files/2011_fastflow_acc_europar.pdf},
      bdsk-url-2    = {http://dx.doi.org/10.1007/978-3-642-23397-5_17}
    }

  • M. Aldinucci, A. Bracciali, P. Liò, A. Sorathiya, and M. Torquati, "StochKit-FF: Efficient Systems Biology on Multicore Architectures," in Euro-Par 2010 Workshops, Proc. of the 1st Workshop on High Performance Bioinformatics and Biomedicine (HiBB), Ischia, Italy, 2011, p. 167–175. doi:10.1007/978-3-642-21878-1_21
    [BibTeX] [Abstract] [Download PDF]

    The stochastic modelling of biological systems is an informative, and in some cases, very adequate technique, which may however result in being more expensive than other modelling approaches, such as differential equations. We present StochKit-FF, a parallel version of StochKit, a reference toolkit for stochastic simulations. StochKit-FF is based on the FastFlow programming toolkit for multicores and exploits the novel concept of selective memory. We experiment StochKit-FF on a model of HIV infection dynamics, with the aim of extracting information from efficiently run experiments, here in terms of average and variance and, on a longer term, of more structured data.

    @inproceedings{stochkit-ff:hibb:10,
      author        = {Marco Aldinucci and Andrea Bracciali and Pietro Li\`o and Anil Sorathiya and Massimo Torquati},
      title         = {{StochKit-FF}: Efficient Systems Biology on Multicore Architectures},
      booktitle     = {Euro-Par 2010 Workshops, Proc. of the 1st Workshop on High Performance Bioinformatics and Biomedicine (HiBB)},
      editor        = {M. R. Guarracino and F. Vivien and J. L. Tr\"aff and M. Cannataro and M. Danelutto and A. Hast and F. Perla and A. Kn\"upfer and B. Di Martino and M. Alexander},
      series        = {LNCS},
      volume        = {6586},
      pages         = {167--175},
      publisher     = {Springer},
      address       = {Ischia, Italy},
      year          = {2011},
      month         = aug,
      doi           = {10.1007/978-3-642-21878-1_21},
      url           = {http://calvados.di.unipi.it/storage/paper_files/2010_stochkit-ff_hibb.pdf},
      abstract      = {The stochastic modelling of biological systems is an informative, and in some cases, very adequate technique, which may however result in being more expensive than other modelling approaches, such as differential equations. We present StochKit-FF, a parallel version of StochKit, a reference toolkit for stochastic simulations. StochKit-FF is based on the FastFlow programming toolkit for multicores and exploits the novel concept of selective memory. We experiment StochKit-FF on a model of HIV infection dynamics, with the aim of extracting information from efficiently run experiments, here in terms of average and variance and, on a longer term, of more structured data.},
      keywords      = {bioinformatics},
      date-added    = {2012-04-12 11:23:46 +0000},
      date-modified = {2013-11-24 00:36:38 +0000},
      bdsk-url-1    = {http://calvados.di.unipi.it/storage/paper_files/2010_stochkit-ff_hibb.pdf},
      bdsk-url-2    = {http://dx.doi.org/10.1007/978-3-642-21878-1_21},
      internal-note = {NOTE(review): series was double-braced as {{LNCS}}; normalised to {LNCS} to match the sibling entries in this file}
    }

  • M. Aldinucci, M. Drocco, D. Giordano, C. Spampinato, and M. Torquati, "A Parallel Edge Preserving Algorithm for Salt and Pepper Image Denoising," Università degli Studi di Torino, Dip. di Informatica, Italy, 138/2011, 2011.
    [BibTeX] [Download PDF]
    @techreport{ff:denoiser:tr138-2011,
      author        = {Marco Aldinucci and Maurizio Drocco and Daniela Giordano and Concetto Spampinato and Massimo Torquati},
      title         = {A Parallel Edge Preserving Algorithm for Salt and Pepper Image Denoising},
      institution   = {Universit\`a degli Studi di Torino, Dip. di Informatica, Italy},
      number        = {138/2011},
      year          = {2011},
      month         = may,
      url           = {http://calvados.di.unipi.it/storage/paper_files/2012_2phasedenoiser_ff_ipta.pdf},
      keywords      = {fastflow},
      date-added    = {2010-12-08 19:31:00 +0100},
      date-modified = {2013-11-24 00:36:56 +0000},
      bdsk-url-1    = {http://calvados.di.unipi.it/storage/paper_files/2012_2phasedenoiser_ff_ipta.pdf}
    }

  • M. Aldinucci, S. Ruggieri, and M. Torquati, "Porting Decision Tree Building and Pruning Algorithms to Multicore using FastFlow," Università di Pisa, Dipartimento di Informatica, Italy, TR-11-06, 2011.
    [BibTeX] [Download PDF]
    @techreport{TR-11-06,
      author        = {Marco Aldinucci and Salvatore Ruggieri and Massimo Torquati},
      title         = {Porting Decision Tree Building and Pruning Algorithms to Multicore using FastFlow},
      institution   = {Universit\`a di Pisa, Dipartimento di Informatica, Italy},
      number        = {TR-11-06},
      year          = {2011},
      month         = mar,
      url           = {http://compass2.di.unipi.it/TR/Files/TR-11-06.pdf.gz},
      keywords      = {fastflow},
      date-added    = {2012-04-15 18:40:07 +0000},
      date-modified = {2013-11-24 00:37:04 +0000},
      bdsk-url-1    = {http://compass2.di.unipi.it/TR/Files/TR-11-06.pdf.gz}
    }

  • M. Aldinucci, M. Coppo, F. Damiani, M. Drocco, M. Torquati, and A. Troina, "On Designing Multicore-Aware Simulators for Biological Systems," in Proc. of 19th Euromicro Intl. Conference on Parallel Distributed and network-based Processing (PDP), Ayia Napa, Cyprus, 2011, p. 318–325. doi:10.1109/PDP.2011.81
    [BibTeX] [Abstract] [Download PDF]

    The stochastic simulation of biological systems is an increasingly popular technique in bioinformatics. It often is an enlightening technique, which may however result in being computational expensive. We discuss the main opportunities to speed it up on multi-core platforms, which pose new challenges for parallelisation techniques. These opportunities are developed in two general families of solutions involving both the single simulation and a bulk of independent simulations (either replicas of derived from parameter sweep). Proposed solutions are tested on the parallelisation of the CWC simulator (Calculus of Wrapped Compartments) that is carried out according to proposed solutions by way of the FastFlow programming framework making possible fast development and efficient execution on multi-cores.

    @inproceedings{ff:cwc:pdp:11,
      author        = {Marco Aldinucci and Mario Coppo and Ferruccio Damiani and Maurizio Drocco and Massimo Torquati and Angelo Troina},
      title         = {On Designing Multicore-Aware Simulators for Biological Systems},
      booktitle     = {Proc. of 19th Euromicro Intl. Conference on Parallel Distributed and network-based Processing (PDP)},
      editor        = {Yiannis Cotronis and Marco Danelutto and George Angelos Papadopoulos},
      pages         = {318--325},
      publisher     = {IEEE},
      address       = {Ayia Napa, Cyprus},
      year          = {2011},
      month         = feb,
      doi           = {10.1109/PDP.2011.81},
      url           = {http://calvados.di.unipi.it/storage/paper_files/2011_ff_cwc_sim_PDP.pdf},
      abstract      = {The stochastic simulation of biological systems is an increasingly popular technique in bioinformatics. It often is an enlightening technique, which may however result in being computational expensive. We discuss the main opportunities to speed it up on multi-core platforms, which pose new challenges for parallelisation techniques. These opportunities are developed in two general families of solutions involving both the single simulation and a bulk of independent simulations (either replicas of derived from parameter sweep). Proposed solutions are tested on the parallelisation of the CWC simulator (Calculus of Wrapped Compartments) that is carried out according to proposed solutions by way of the FastFlow programming framework making possible fast development and efficient execution on multi-cores.},
      keywords      = {fastflow},
      date-added    = {2012-02-25 01:21:25 +0000},
      date-modified = {2017-12-12 13:51:21 +0000},
      bdsk-url-1    = {http://arxiv.org/pdf/1010.2438v2},
      bdsk-url-2    = {http://calvados.di.unipi.it/storage/paper_files/2011_ff_cwc_sim_PDP.pdf},
      bdsk-url-3    = {http://dx.doi.org/10.1109/PDP.2011.81}
    }

  • M. Coppo, F. Damiani, M. Drocco, E. Grassi, M. Guether, and A. Troina, "Modelling Ammonium Transporters in Arbuscular Mycorrhiza Symbiosis," Transactions on Computational Systems Biology, vol. 6575, iss. 13, p. 85–109, 2011. doi:10.1007/978-3-642-19748-2_5
    [BibTeX] [Abstract]

    The Stochastic Calculus of Wrapped Compartments (SCWC) is a recently proposed variant of the Stochastic Calculus of Looping Sequences (SCLS), a language for the representation and simulation of biological systems. In this work we apply SCWC to model a newly discovered ammonium transporter. This transporter is believed to play a fundamental role for plant mineral acquisition, which takes place in the arbuscular mycorrhiza, the most wide-spread plant-fungus symbiosis on earth. Investigating this kind of symbiosis is considered one of the most promising ways to develop methods to nurture plants in more natural manners, avoiding the complex chemical productions used nowadays to produce artificial fertilizers. In our experiments the passage of NH3/NH4+ from the fungus to the plant has been dissected in known and hypothetical mechanisms; with the model so far we have been able to simulate the behavior of the system under different conditions. Our simulations confirmed some of the latest experimental results about the LjAMT2;2 transporter. Moreover, by comparing the behaviour of LjAMT2;2 with the behaviour of another ammonium transporter which exists in plants, viz. LjAMT1;1, our simulations support an hypothesis about why LjAMT2;2 is so selectively expressed in arbusculated cells.

    @article{DBLP:journals/tcsb/Coppo/DDGGT11,
      author        = {Mario Coppo and Ferruccio Damiani and Maurizio Drocco and Elena Grassi and Mike Guether and Angelo Troina},
      title         = {Modelling Ammonium Transporters in Arbuscular Mycorrhiza Symbiosis},
      journal       = {Transactions on Computational Systems Biology},
      volume        = {6575},
      number        = {13},
      pages         = {85--109},
      year          = {2011},
      doi           = {10.1007/978-3-642-19748-2_5},
      abstract      = {The Stochastic Calculus of Wrapped Compartments (SCWC) is a recently proposed variant of the Stochastic Calculus of Looping Sequences (SCLS), a language for the representation and simulation of biological systems. In this work we apply SCWC to model a newly discovered ammonium transporter. This transporter is believed to play a fundamental role for plant mineral acquisition, which takes place in the arbuscular mycorrhiza, the most wide-spread plant-fungus symbiosis on earth. Investigating this kind of symbiosis is considered one of the most promising ways to develop methods to nurture plants in more natural manners, avoiding the complex chemical productions used nowadays to produce artificial fertilizers. In our experiments the passage of NH3/NH4+ from the fungus to the plant has been dissected in known and hypothetical mechanisms; with the model so far we have been able to simulate the behavior of the system under different conditions. Our simulations confirmed some of the latest experimental results about the LjAMT2;2 transporter. Moreover, by comparing the behaviour of LjAMT2;2 with the behaviour of another ammonium transporter which exists in plants, viz. LjAMT1;1, our simulations support an hypothesis about why LjAMT2;2 is so selectively expressed in arbusculated cells.},
      date-added    = {2013-12-12 22:25:24 +0000},
      date-modified = {2017-12-12 13:50:01 +0000},
      bdsk-url-1    = {http://dx.doi.org/10.1007/978-3-642-19748-2_5}
    }

2010

  • M. Aldinucci, M. Coppo, F. Damiani, M. Drocco, M. Torquati, and A. Troina, "On Designing Multicore-Aware Simulators for Biological Systems," Università degli Studi di Torino, Dipartimento di Informatica, Italy, 131/2010, 2010.
    [BibTeX]
    @techreport{ff:cwc:pdp:11-tr,
      author        = {Marco Aldinucci and Mario Coppo and Ferruccio Damiani and Maurizio Drocco and Massimo Torquati and Angelo Troina},
      title         = {On Designing Multicore-Aware Simulators for Biological Systems},
      institution   = {Universit\`a degli Studi di Torino, Dipartimento di Informatica, Italy},
      number        = {131/2010},
      year          = {2010},
      month         = oct,
      keywords      = {fastflow},
      date-added    = {2011-05-19 19:07:36 +0200},
      date-modified = {2013-11-24 00:38:00 +0000}
    }

  • M. Aldinucci, S. Ruggieri, and M. Torquati, "Porting Decision Tree Algorithms to Multicore using FastFlow," in Proc. of European Conference in Machine Learning and Knowledge Discovery in Databases (ECML PKDD), Barcelona, Spain, 2010, p. 7–23. doi:10.1007/978-3-642-15880-3_7
    [BibTeX] [Abstract] [Download PDF]

    The whole computer hardware industry embraced multicores. For these machines, the extreme optimisation of sequential algorithms is no longer sufficient to squeeze the real machine power, which can be only exploited via thread-level parallelism. Decision tree algorithms exhibit natural concurrency that makes them suitable to be parallelised. This paper presents an approach for easy-yet-efficient porting of an implementation of the C4.5 algorithm on multicores. The parallel porting requires minimal changes to the original sequential code, and it is able to exploit up to 7X speedup on an Intel dual-quad core machine.

    @inproceedings{fastflow_c45:emclpkdd,
      author        = {Marco Aldinucci and Salvatore Ruggieri and Massimo Torquati},
      title         = {Porting Decision Tree Algorithms to Multicore using {FastFlow}},
      booktitle     = {Proc. of European Conference in Machine Learning and Knowledge Discovery in Databases (ECML PKDD)},
      editor        = {Jos{\'e} L. Balc{\'a}zar and Francesco Bonchi and Aristides Gionis and Mich{\`e}le Sebag},
      series        = {LNCS},
      volume        = {6321},
      pages         = {7--23},
      publisher     = {Springer},
      address       = {Barcelona, Spain},
      year          = {2010},
      month         = sep,
      doi           = {10.1007/978-3-642-15880-3_7},
      url           = {http://calvados.di.unipi.it/storage/paper_files/2010_c45FF_ECMLPKDD.pdf},
      abstract      = {The whole computer hardware industry embraced multicores. For these machines, the extreme optimisation of sequential algorithms is no longer sufficient to squeeze the real machine power, which can be only exploited via thread-level parallelism. Decision tree algorithms exhibit natural concurrency that makes them suitable to be parallelised. This paper presents an approach for easy-yet-efficient porting of an implementation of the C4.5 algorithm on multicores. The parallel porting requires minimal changes to the original sequential code, and it is able to exploit up to 7X speedup on an Intel dual-quad core machine.},
      keywords      = {fastflow},
      date-added    = {2010-06-15 21:03:56 +0200},
      date-modified = {2013-11-24 00:38:07 +0000},
      bdsk-url-1    = {http://calvados.di.unipi.it/storage/paper_files/2010_c45FF_ECMLPKDD.pdf},
      bdsk-url-2    = {http://dx.doi.org/10.1007/978-3-642-15880-3_7}
    }

  • M. Coppo, F. Damiani, M. Drocco, E. Grassi, E. Sciacca, S. Spinella, and A. Troina, "Hybrid Calculus of Wrapped Compartments," in Proc. of 4th Workshop on Membrane Computing and Biologically Inspired Process Calculi (MeCBIC), Jena, Germany, 2010, p. 102–120.
    [BibTeX] [Abstract]

    The modelling and analysis of biological systems has deep roots in Mathematics, specifically in the field of ordinary differential equations (ODEs). Alternative approaches based on formal calculi, often derived from process algebras or term rewriting systems, provide a quite complementary way to analyze the behaviour of biological systems. These calculi allow to cope in a natural way with notions like compartments and membranes, which are not easy (sometimes impossible) to handle with purely numerical approaches, and are often based on stochastic simulation methods. Recently, it has also become evident that stochastic effects in regulatory networks play a crucial role in the analysis of such systems. Actually, in many situations it is necessary to use stochastic models. For example when the system to be described is based on the interaction of few molecules, when we are at the presence of a chemical instability, or when we want to simulate the functioning of a pool of entities whose compartmentalised structure evolves dynamically. In contrast, stable metabolic networks, involving a large number of reagents, for which the computational cost of a stochastic simulation becomes an insurmountable obstacle, are efficiently modelled with ODEs. In this paper we define a hybrid simulation method, combining the stochastic approach with ODEs, for systems described in CWC, a calculus on which we can express the compartmentalisation of a biological system whose evolution is defined by a set of rewrite rules.

    @inproceedings{DBLP:journals/corr/abs-1011-0494,
      author        = {Mario Coppo and Ferruccio Damiani and Maurizio Drocco and Elena Grassi and Eva Sciacca and Salvatore Spinella and Angelo Troina},
      title         = {Hybrid Calculus of Wrapped Compartments},
      booktitle     = {Proc. of 4th Workshop on Membrane Computing and Biologically Inspired Process Calculi (MeCBIC)},
      editor        = {Gabriel Ciobanu and Maciej Koutny},
      series        = {EPTCS},
      volume        = {40},
      pages         = {102--120},
      address       = {Jena, Germany},
      year          = {2010},
      month         = aug,
      doi           = {10.4204/EPTCS.40.8},
      abstract      = {The modelling and analysis of biological systems has deep roots in Mathematics, specifically in the field of ordinary differential equations (ODEs). Alternative approaches based on formal calculi, often derived from process algebras or term rewriting systems, provide a quite complementary way to analyze the behaviour of biological systems. These calculi allow to cope in a natural way with notions like compartments and membranes, which are not easy (sometimes impossible) to handle with purely numerical approaches, and are often based on stochastic simulation methods. Recently, it has also become evident that stochastic effects in regulatory networks play a crucial role in the analysis of such systems. Actually, in many situations it is necessary to use stochastic models. For example when the system to be described is based on the interaction of few molecules, when we are at the presence of a chemical instability, or when we want to simulate the functioning of a pool of entities whose compartmentalised structure evolves dynamically. In contrast, stable metabolic networks, involving a large number of reagents, for which the computational cost of a stochastic simulation becomes an insurmountable obstacle, are efficiently modelled with ODEs. In this paper we define a hybrid simulation method, combining the stochastic approach with ODEs, for systems described in CWC, a calculus on which we can express the compartmentalisation of a biological system whose evolution is defined by a set of rewrite rules.},
      bibsource     = {DBLP, http://dblp.uni-trier.de},
      date-added    = {2013-12-12 22:24:23 +0000},
      date-modified = {2013-12-13 10:30:02 +0000},
      ee            = {http://dx.doi.org/10.4204/EPTCS.40.8},
      internal-note = {NOTE(review): DOI was only in the legacy DBLP ee field, which standard styles ignore; added a proper doi field (bare DOI taken from the ee URL)}
    }

  • M. Aldinucci, M. Danelutto, and P. Kilpatrick, "Autonomic Management of Multiple Non-Functional Concerns in Behavioural Skeletons," in Grids, P2P and Services Computing, F. Desprez, V. Getov, T. Priol, and R. Yahyapour, Eds., Springer, 2010, p. 89–103. doi:10.1007/978-1-4419-6794-7_8
    [BibTeX] [Abstract] [Download PDF]

    We introduce and address the problem of concurrent autonomic management of different non-functional concerns in parallel applications build as a hierarchical composition of behavioural skeletons. We first define the problems arising when multiple concerns are dealt with by independent managers, then we propose a methodology supporting coordinated management, and finally we discuss how autonomic management of multiple concerns may be implemented in a typical use case. Being based on the behavioural skeleton concept proposed in the CoreGRID GCM, it is anticipated that the methodology will be readily integrated into the current reference implementation of GCM based on Java ProActive and running on top of major grid middleware systems.

    @incollection{multiple-nf-concern:cgsymph:09:book,
      author        = {Marco Aldinucci and Marco Danelutto and Peter Kilpatrick},
      title         = {Autonomic Management of Multiple Non-Functional Concerns in Behavioural Skeletons},
      booktitle     = {Grids, P2P and Services Computing},
      editor        = {Fr\'ed\'eric Desprez and Vladimir Getov and Thierry Priol and Ramin Yahyapour},
      series        = {CoreGRID},
      pages         = {89--103},
      publisher     = {Springer},
      year          = {2010},
      month         = aug,
      doi           = {10.1007/978-1-4419-6794-7_8},
      url           = {http://calvados.di.unipi.it/storage/paper_files/2009_CGSymph_Autonomic_BeSke.pdf},
      abstract      = {We introduce and address the problem of concurrent autonomic management of different non-functional concerns in parallel applications build as a hierarchical composition of behavioural skeletons. We first define the problems arising when multiple concerns are dealt with by independent managers, then we propose a methodology supporting coordinated management, and finally we discuss how autonomic management of multiple concerns may be implemented in a typical use case. Being based on the behavioural skeleton concept proposed in the CoreGRID GCM, it is anticipated that the methodology will be readily integrated into the current reference implementation of GCM based on Java ProActive and running on top of major grid middleware systems.},
      annote        = {ISBN: 978-1-4419-6793-0(Proc. of the CoreGRID Symposium 2009)},
      date-added    = {2009-06-30 12:24:06 +0200},
      date-modified = {2012-02-25 00:39:47 +0000},
      bdsk-url-1    = {http://calvados.di.unipi.it/storage/paper_files/2009_CGSymph_Autonomic_BeSke.pdf},
      bdsk-url-2    = {http://dx.doi.org/10.1007/978-1-4419-6794-7_8}
    }

  • M. Aldinucci, A. Bracciali, P. Liò, A. Sorathiya, and M. Torquati, "StochKit-FF: Efficient Systems Biology on Multicore Architectures," Università di Pisa, Dipartimento di Informatica, Italy, TR-10-12, 2010. doi:10.1007/978-3-642-21878-1_21
    [BibTeX] [Abstract] [Download PDF]

    The stochastic modelling of biological systems is an informative, and in some cases, very adequate technique, which may however result in being more expensive than other modelling approaches, such as differential equations. We present StochKit-FF, a parallel version of StochKit, a reference toolkit for stochastic simulations. StochKit-FF is based on the FastFlow programming toolkit for multicores and exploits the novel concept of selective memory. We experiment StochKit-FF on a model of HIV infection dynamics, with the aim of extracting information from efficiently run experiments, here in terms of average and variance and, on a longer term, of more structured data.

    @techreport{stochkit-ff:tr-10-12,
      author        = {Marco Aldinucci and Andrea Bracciali and Pietro Li\`o and Anil Sorathiya and Massimo Torquati},
      title         = {{StochKit-FF}: Efficient Systems Biology on Multicore Architectures},
      institution   = {Universit{\`a} di Pisa, Dipartimento di Informatica, Italy},
      number        = {TR-10-12},
      year          = {2010},
      month         = jul,
      doi           = {10.1007/978-3-642-21878-1_21},
      url           = {http://calvados.di.unipi.it/storage/paper_files/TR-10-12.pdf},
      abstract      = {The stochastic modelling of biological systems is an informative, and in some cases, very adequate technique, which may however result in being more expensive than other modelling approaches, such as differential equations. We present StochKit-FF, a parallel version of StochKit, a reference toolkit for stochastic simulations. StochKit-FF is based on the FastFlow programming toolkit for multicores and exploits the novel concept of selective memory. We experiment StochKit-FF on a model of HIV infection dynamics, with the aim of extracting information from efficiently run experiments, here in terms of average and variance and, on a longer term, of more structured data.},
      keywords      = {fastflow},
      date-added    = {2010-06-27 16:39:46 +0200},
      date-modified = {2013-11-24 00:38:32 +0000},
      bdsk-url-1    = {http://calvados.di.unipi.it/storage/paper_files/TR-10-12.pdf},
      bdsk-url-2    = {http://dx.doi.org/10.1007/978-3-642-21878-1_21}
    }

  • M. Aldinucci, A. Bracciali, and P. Liò, "Formal Synthetic Immunology," ERCIM News, vol. 82, p. 40–41, 2010.
    [BibTeX] [Abstract] [Download PDF]

    The human immune system fights pathogens using an articulated set of strategies whose function is to maintain in health the organism. A large effort to formally model such a complex system using a computational approach is currently underway, with the goal of developing a discipline for engineering "synthetic" immune responses. This requires the integration of a range of analysis techniques developed for formally reasoning about the behaviour of complex dynamical systems. Furthermore, a novel class of software tools has to be developed, capable of efficiently analysing these systems on widely accessible computing platforms, such as commodity multi-core architectures.

    @article{stochkitff:ercimnews:10,
    title = {Formal Synthetic Immunology},
    author = {Marco Aldinucci and Andrea Bracciali and Pietro Li\`o},
    year = {2010},
    month = jul,
    journal = {ERCIM News},
    volume = {82},
    pages = {40--41},
    issn = {0926-4981},
    abstract = {The human immune system fights pathogens using an articulated set of strategies whose function is to maintain in health the organism. A large effort to formally model such a complex system using a computational approach is currently underway, with the goal of developing a discipline for engineering "synthetic" immune responses. This requires the integration of a range of analysis techniques developed for formally reasoning about the behaviour of complex dynamical systems. Furthermore, a novel class of software tools has to be developed, capable of efficiently analysing these systems on widely accessible computing platforms, such as commodity multi-core architectures.},
    date-added = {2010-07-02 20:32:31 +0200},
    date-modified = {2013-11-24 00:38:19 +0000},
    url = {http://ercim-news.ercim.eu/images/stories/EN82/EN82-web.pdf},
    bdsk-url-1 = {http://ercim-news.ercim.eu/images/stories/EN82/EN82-web.pdf},
    keywords = {bioinformatics, fastflow}
    }

  • M. Aldinucci, S. Ruggieri, and M. Torquati, "Porting Decision Tree Algorithms to Multicore using FastFlow," Università di Pisa, Dipartimento di Informatica, Italy, TR-10-11, 2010.
    [BibTeX] [Abstract] [Download PDF]

    The whole computer hardware industry embraced multicores. For these machines, the extreme optimisation of sequential algorithms is no longer sufficient to squeeze the real machine power, which can be only exploited via thread-level parallelism. Decision tree algorithms exhibit natural concurrency that makes them suitable to be parallelised. This paper presents an approach for easy-yet-efficient porting of an implementation of the C4.5 algorithm on multicores. The parallel porting requires minimal changes to the original sequential code, and it is able to exploit up to 7X speedup on an Intel dual-quad core machine.

    @techreport{fastflow_c45:tr-10-11,
    title = {Porting Decision Tree Algorithms to Multicore using {FastFlow}},
    author = {Marco Aldinucci and Salvatore Ruggieri and Massimo Torquati},
    year = {2010},
    month = may,
    abstract = {The whole computer hardware industry embraced multicores. For these machines, the extreme optimisation of sequential algorithms is no longer sufficient to squeeze the real machine power, which can be only exploited via thread-level parallelism. Decision tree algorithms exhibit natural concurrency that makes them suitable to be parallelised. This paper presents an approach for easy-yet-efficient porting of an implementation of the C4.5 algorithm on multicores. The parallel porting requires minimal changes to the original sequential code, and it is able to exploit up to 7X speedup on an Intel dual-quad core machine.},
    date-added = {2010-07-11 16:54:09 +0200},
    date-modified = {2013-11-24 00:38:41 +0000},
    institution = {Universit{\`a} di Pisa, Dipartimento di Informatica, Italy},
    number = {TR-10-11},
    url = {http://calvados.di.unipi.it/storage/paper_files/TR-10-11.pdf},
    bdsk-url-1 = {http://calvados.di.unipi.it/storage/paper_files/TR-10-11.pdf},
    keywords = {fastflow}
    }

  • M. Coppo, F. Damiani, M. Drocco, E. Grassi, and A. Troina, "Stochastic Calculus of Wrapped Compartments," in Proc. of 8th Workshop on Quantitative Aspects of Programming Languages (QAPL), Paphos, Cyprus, 2010, p. 82–98.
    [BibTeX] [Abstract]

    The Calculus of Wrapped Compartments (CWC) is a variant of the Calculus of Looping Sequences (CLS). While keeping the same expressiveness, CWC strongly simplifies the development of automatic tools for the analysis of biological systems. The main simplification consists in the removal of the sequencing operator, thus lightening the formal treatment of the patterns to be matched in a term (whose complexity in CLS is strongly affected by the variables matching in the sequences). We define a stochastic semantics for this new calculus. As an application we model the interaction between macrophages and apoptotic neutrophils and a mechanism of gene regulation in E.Coli.

    @inproceedings{DBLP:journals/corr/abs-1006-5099,
    title = {Stochastic Calculus of Wrapped Compartments},
    author = {Mario Coppo and Ferruccio Damiani and Maurizio Drocco and Elena Grassi and Angelo Troina},
    year = {2010},
    month = mar,
    booktitle = {Proc. of 8th Workshop on Quantitative Aspects of Programming Languages (QAPL)},
    address = {Paphos, Cyprus},
    series = {EPTCS},
    volume = {28},
    pages = {82--98},
    doi = {10.4204/EPTCS.28.6},
    abstract = {The Calculus of Wrapped Compartments (CWC) is a variant of the Calculus of Looping Sequences (CLS). While keeping the same expressiveness, CWC strongly simplifies the development of automatic tools for the analysis of biological systems. The main simplification consists in the removal of the sequencing operator, thus lightening the formal treatment of the patterns to be matched in a term (whose complexity in CLS is strongly affected by the variables matching in the sequences). We define a stochastic semantics for this new calculus. As an application we model the interaction between macrophages and apoptotic neutrophils and a mechanism of gene regulation in E.Coli.},
    bibsource = {DBLP, http://dblp.uni-trier.de},
    date-added = {2013-12-12 22:24:44 +0000},
    date-modified = {2017-12-12 13:49:43 +0000},
    editor = {Alessandra Di Pierro and Gethin Norman},
    ee = {http://dx.doi.org/10.4204/EPTCS.28.6}
    }

  • M. Aldinucci, M. Meneghin, and M. Torquati, "Efficient Smith-Waterman on multi-core with FastFlow," in Proc. of Intl. Euromicro PDP 2010: Parallel Distributed and network-based Processing, Pisa, Italy, 2010, p. 195–199. doi:10.1109/PDP.2010.93
    [BibTeX] [Abstract] [Download PDF]

    Shared memory multiprocessors have returned to popularity thanks to rapid spreading of commodity multi-core architectures. However, little attention has been paid to supporting effective streaming applications on these architectures. In this paper we describe FastFlow, a low-level programming framework based on lock-free queues explicitly designed to support high-level languages for streaming applications. We compare FastFlow with state-of-the-art programming frameworks such as Cilk, OpenMP, and Intel TBB. We experimentally demonstrate that FastFlow is always more efficient than them on a given real world application: the speedup of FastFlow over other solutions may be substantial for fine grain tasks, for example +35% over OpenMP, +226% over Cilk, +96% over TBB for the alignment of protein P01111 against UniProt DB using the Smith-Waterman algorithm.

    @inproceedings{fastflow:pdp:10,
    title = {Efficient {Smith-Waterman} on multi-core with {FastFlow}},
    author = {Marco Aldinucci and Massimiliano Meneghin and Massimo Torquati},
    year = {2010},
    month = feb,
    booktitle = {Proc. of Intl. Euromicro PDP 2010: Parallel Distributed and network-based Processing},
    publisher = {IEEE},
    address = {Pisa, Italy},
    pages = {195--199},
    doi = {10.1109/PDP.2010.93},
    abstract = {Shared memory multiprocessors have returned to popularity thanks to rapid spreading of commodity multi-core architectures. However, little attention has been paid to supporting effective streaming applications on these architectures. In this paper we describe FastFlow, a low-level programming framework based on lock-free queues explicitly designed to support high-level languages for streaming applications. We compare FastFlow with state-of-the-art programming frameworks such as Cilk, OpenMP, and Intel TBB. We experimentally demonstrate that FastFlow is always more efficient than them on a given real world application: the speedup of FastFlow over other solutions may be substantial for fine grain tasks, for example +35% over OpenMP, +226% over Cilk, +96% over TBB for the alignment of protein P01111 against UniProt DB using the Smith-Waterman algorithm.},
    date-added = {2007-10-26 01:02:32 +0200},
    date-modified = {2013-11-24 00:38:51 +0000},
    editor = {Marco Danelutto and Tom Gross and Julien Bourgeois},
    url = {http://calvados.di.unipi.it/storage/paper_files/2010_fastflow_SW_PDP.pdf},
    bdsk-url-1 = {http://calvados.di.unipi.it/storage/paper_files/2010_fastflow_SW_PDP.pdf},
    bdsk-url-2 = {http://dx.doi.org/10.1109/PDP.2010.93},
    keywords = {fastflow}
    }

  • M. Aldinucci, M. Danelutto, P. Kilpatrick, M. Meneghin, and M. Torquati, "Accelerating sequential programs using FastFlow and self-offloading," Università di Pisa, Dipartimento di Informatica, Italy, TR-10-03, 2010.
    [BibTeX] [Abstract]

    Shared memory multiprocessors come back to popularity thanks to rapid spreading of commodity multi-core architectures. As ever, shared memory programs are fairly easy to write and quite hard to optimise; providing multi-core programmers with optimising tools and programming frameworks is a nowadays challenge. Few efforts have been done to support effective streaming applications on these architectures. In this paper we introduce FastFlow, a low-level programming framework based on lock-free queues explicitly designed to support high-level languages for streaming applications. We compare FastFlow with state-of-the-art programming frameworks such as Cilk, OpenMP, and Intel TBB. We experimentally demonstrate that FastFlow is always more efficient than all of them in a set of micro-benchmarks and on a real world application; the speedup edge of FastFlow over other solutions might be bold for fine grain tasks, as an example +35% on OpenMP, +226% on Cilk, +96% on TBB for the alignment of protein P01111 against UniProt DB using Smith-Waterman algorithm.

    @techreport{fastflow_acc:tr-10-03,
    title = {Accelerating sequential programs using {FastFlow} and self-offloading},
    author = {Marco Aldinucci and Marco Danelutto and Peter Kilpatrick and Massimiliano Meneghin and Massimo Torquati},
    year = {2010},
    month = feb,
    abstract = {Shared memory multiprocessors come back to popularity thanks to rapid spreading of commodity multi-core architectures. As ever, shared memory programs are fairly easy to write and quite hard to optimise; providing multi-core programmers with optimising tools and programming frameworks is a nowadays challenge. Few efforts have been done to support effective streaming applications on these architectures. In this paper we introduce FastFlow, a low-level programming framework based on lock-free queues explicitly designed to support high-level languages for streaming applications. We compare FastFlow with state-of-the-art programming frameworks such as Cilk, OpenMP, and Intel TBB. We experimentally demonstrate that FastFlow is always more efficient than all of them in a set of micro-benchmarks and on a real world application; the speedup edge of FastFlow over other solutions might be bold for fine grain tasks, as an example +35% on OpenMP, +226% on Cilk, +96% on TBB for the alignment of protein P01111 against UniProt DB using Smith-Waterman algorithm.},
    date-added = {2009-09-08 16:14:34 +0200},
    date-modified = {2013-11-24 00:39:01 +0000},
    institution = {Universit{\`a} di Pisa, Dipartimento di Informatica, Italy},
    number = {TR-10-03},
    bdsk-url-1 = {http://calvados.di.unipi.it/storage/paper_files/TR-10-03.pdf},
    keywords = {fastflow}
    }

  • "CWC Simulator project," 2010.
    [BibTeX]
    @manual{cwc:web,
    title = {CWC Simulator project},
    year = {2010},
    note = {\url{http://sourceforge.net/projects/cwcsimulator/}},
    url = {http://sourceforge.net/projects/cwcsimulator/},
    optauthor = {Marco Aldinucci and Massimo Torquati},
    organization = {Sourceforge website}
    }

  • M. Aldinucci, "Efficient Parallel MonteCarlo with FastFlow," in HPC-Europa2: Science and Supercomputing in Europe, research highlights 2010, Cineca, 2010.
    [BibTeX] [Abstract] [Download PDF]

    The stochastic simulation of natural systems is very informative but happens to be computationally expensive. We present StochKit-FF, a parallel version of StochKit, a reference toolkit for stochastic simulations that substantially improves StochKit performance on multi-core platforms.

    @incollection{ff:hpc-europa:10,
    title = {Efficient Parallel {MonteCarlo} with {FastFlow}},
    author = {Marco Aldinucci},
    year = {2010},
    booktitle = {HPC-Europa2: Science and Supercomputing in Europe, research highlights 2010},
    publisher = {Cineca},
    abstract = {The stochastic simulation of natural systems is very informative but happens to be computationally expensive. We present StochKit-FF, a parallel version of StochKit, a reference toolkit for stochastic simulations that substantially improves StochKit performance on multi-core platforms.},
    date-added = {2011-06-18 18:43:19 +0200},
    date-modified = {2013-11-24 00:40:04 +0000},
    url = {http://calvados.di.unipi.it/storage/paper_files/2010-ff_hpceuropa2_092-inform-Aldinucci.pdf},
    bdsk-url-1 = {http://calvados.di.unipi.it/storage/paper_files/2010-ff_hpceuropa2_092-inform-Aldinucci.pdf},
    keywords = {bioinformatics, fastflow}
    }

  • T. Weigold, M. Aldinucci, M. Danelutto, and V. Getov, "Integrating Autonomic Grid Components and Process-Driven Business Applications," in Autonomic Computing and Communications Systems Third International ICST Conference, Autonomics 2009, Limassol, Cyprus, September 9-11, 2009, Revised Selected Papers, Limassol, Cyprus, 2010, p. 98–113. doi:10.1007/978-3-642-11482-3_7
    [BibTeX] [Abstract] [Download PDF]

    Today's business applications are increasingly process driven, meaning that the main application logic is executed by a dedicate process engine. In addition, component-oriented software development has been attracting attention for building complex distributed applications. In this paper we present the experiences gained from building a process-driven biometric identification application which makes use of Grid infrastructures via the Grid Component Model (GCM). GCM, besides guaranteeing access to Grid resources, supports autonomic management of notable parallel composite components. This feature is exploited within our biometric identification application to ensure real time identification of fingerprints. Therefore, we briefly introduce the GCM framework and the process engine used, and we describe the implementation of the application using autonomic GCM components. Finally, we summarize the results, experiences, and lessons learned focusing on the integration of autonomic GCM components and the process-driven approach.

    @inproceedings{ibm:autonomics:09,
      title         = {Integrating Autonomic Grid Components and Process-Driven Business Applications},
      author        = {Thomas Weigold and Marco Aldinucci and Marco Danelutto and Vladimir Getov},
      editor        = {Athanasios V. Vasilakos and Roberto Beraldi and Roy Friedman and Marco Mamei},
      year          = {2010},
      booktitle     = {Autonomic Computing and Communications Systems Third International ICST Conference, Autonomics 2009, Limassol, Cyprus, September 9-11, 2009, Revised Selected Papers},
      series        = {{Lecture Notes of the Institute for Computer Sciences, Social-Informatics and Telecommunications Engineering (LNICST)}},
      volume        = {23},
      pages         = {98--113},
      publisher     = {Springer},
      address       = {Limassol, Cyprus},
      doi           = {10.1007/978-3-642-11482-3_7},
      issn          = {1867-8211},
      url           = {http://calvados.di.unipi.it/storage/paper_files/2010_BS_autonomics09.pdf},
      abstract      = {Today's business applications are increasingly process driven, meaning that the main application logic is executed by a dedicate process engine. In addition, component-oriented software development has been attracting attention for building complex distributed applications. In this paper we present the experiences gained from building a process-driven biometric identification application which makes use of Grid infrastructures via the Grid Component Model (GCM). GCM, besides guaranteeing access to Grid resources, supports autonomic management of notable parallel composite components. This feature is exploited within our biometric identification application to ensure real time identification of fingerprints. Therefore, we briefly introduce the GCM framework and the process engine used, and we describe the implementation of the application using autonomic GCM components. Finally, we summarize the results, experiences, and lessons learned focusing on the integration of autonomic GCM components and the process-driven approach.},
      annote        = {ISBN: 978-3-642-11481-6},
      date-added    = {2010-02-13 16:13:10 +0100},
      date-modified = {2012-11-24 09:44:22 +0000},
      bdsk-url-1    = {http://dx.doi.org/10.1007/978-3-642-11482-3_7},
      bdsk-url-2    = {http://calvados.di.unipi.it/storage/paper_files/2010_BS_autonomics09.pdf}
    }

  • M. Aldinucci, M. Danelutto, and P. Kilpatrick, "Skeletons for multi/many-core systems," in Parallel Computing: From Multicores and GPU's to Petascale (Proc. of PARCO 2009, Lyon, France), Lyon, France, 2010, p. 265–272. doi:10.3233/978-1-60750-530-3-265
    [BibTeX] [Abstract] [Download PDF]

    We discuss how algorithmic skeletons (and structured parallel programming models in general) can be used to efficiently and seamlessly program multi-core as well as many-core systems. We introduce a new version of the muskel skeleton library that can be used to target multi/many-core systems and we present experimental results that demonstrate the feasibility of the approach. The experimental results presented also give an idea of the computational grains that can be exploited on current, state-of-the-art multi-core systems.

    @inproceedings{multicoreske:parco:09,
      title         = {Skeletons for multi/many-core systems},
      author        = {Marco Aldinucci and Marco Danelutto and Peter Kilpatrick},
      editor        = {Barbara Chapman and Fr{\'e}d{\'e}ric Desprez and Gerhard R. Joubert and Alain Lichnewsky and Frans Peters and Thierry Priol},
      year          = {2010},
      booktitle     = {Parallel Computing: From Multicores and GPU's to Petascale (Proc. of {PARCO 2009}, Lyon, France)},
      series        = {Advances in Parallel Computing},
      volume        = {19},
      pages         = {265--272},
      publisher     = {IOS press},
      address       = {Lyon, France},
      doi           = {10.3233/978-1-60750-530-3-265},
      url           = {http://calvados.di.unipi.it/storage/paper_files/2010_muskel_multicore_parco.pdf},
      abstract      = {We discuss how algorithmic skeletons (and structured parallel programming models in general) can be used to efficiently and seamlessly program multi-core as well as many-core systems. We introduce a new version of the muskel skeleton library that can be used to target multi/many-core systems and we present experimental results that demonstrate the feasibility of the approach. The experimental results presented also give an idea of the computational grains that can be exploited on current, state-of-the-art multi-core systems.},
      annote        = {ISBN: 978-1-60750-529-7},
      date-added    = {2009-06-03 17:56:19 +0200},
      date-modified = {2012-11-24 09:43:35 +0000},
      bdsk-url-1    = {http://calvados.di.unipi.it/storage/paper_files/2010_muskel_multicore_parco.pdf},
      bdsk-url-2    = {http://dx.doi.org/10.3233/978-1-60750-530-3-265}
    }

  • M. Aldinucci, M. Danelutto, M. Meneghin, M. Torquati, and P. Kilpatrick, "Efficient streaming applications on multi-core with FastFlow: The biosequence alignment test-bed." Elsevier, 2010, vol. 19, p. 273–280. doi:10.3233/978-1-60750-530-3-273
    [BibTeX] [Abstract] [Download PDF]

    Shared-memory multi-core architectures are becoming increasingly popular. While their parallelism and peak performance is ever increasing, their efficiency is often disappointing due to memory fence overheads. In this paper we present FastFlow, a programming methodology based on lock-free queues explicitly designed for programming streaming applications on multi-cores. The potential of FastFlow is evaluated on micro-benchmarks and on the Smith-Waterman sequence alignment application, which exhibits a substantial speedup against the state-of-the-art multi-threaded implementation (SWPS3 x86/SSE2).

    @inbook{fastflow:parco:09,
    title = {Efficient streaming applications on multi-core with {FastFlow}: The biosequence alignment test-bed},
    author = {Marco Aldinucci and Marco Danelutto and Massimiliano Meneghin and Massimo Torquati and Peter Kilpatrick},
    year = {2010},
    publisher = {Elsevier},
    series = {Advances in Parallel Computing},
    volume = {19},
    pages = {273--280},
    doi = {10.3233/978-1-60750-530-3-273},
    abstract = {Shared-memory multi-core architectures are becoming increasingly popular. While their parallelism and peak performance is ever increasing, their efficiency is often disappointing due to memory fence overheads. In this paper we present FastFlow, a programming methodology based on lock-free queues explicitly designed for programming streaming applications on multi-cores. The potential of FastFlow is evaluated on micro-benchmarks and on the Smith-Waterman sequence alignment application, which exhibits a substantial speedup against the state-of-the-art multi-threaded implementation (SWPS3 x86/SSE2).},
    date-modified = {2019-03-25 23:17:15 +0100},
    language = {English},
    opteditor = {Barbara Chapman and Fr{\'e}d{\'e}ric Desprez and Gerhard R. Joubert and Alain Lichnewsky and Frans Peters and Thierry Priol},
    url = {https://iris.unito.it/retrieve/handle/2318/67254/690859/2009_fastflow_parco.pdf},
    bdsk-url-1 = {https://iris.unito.it/retrieve/handle/2318/67254/690859/2009_fastflow_parco.pdf},
    bdsk-url-2 = {https://doi.org/10.3233/978-1-60750-530-3-273},
    keywords = {fastflow}
    }

2009

  • M. Aldinucci, M. Torquati, and M. Meneghin, "FastFlow: Efficient Parallel Streaming Applications on Multi-core," Università di Pisa, Dipartimento di Informatica, Italy, TR-09-12, 2009.
    [BibTeX] [Abstract] [Download PDF]

    Shared memory multiprocessors come back to popularity thanks to rapid spreading of commodity multi-core architectures. As ever, shared memory programs are fairly easy to write and quite hard to optimise; providing multi-core programmers with optimising tools and programming frameworks is a nowadays challenge. Few efforts have been done to support effective streaming applications on these architectures. In this paper we introduce FastFlow, a low-level programming framework based on lock-free queues explicitly designed to support high-level languages for streaming applications. We compare FastFlow with state-of-the-art programming frameworks such as Cilk, OpenMP, and Intel TBB. We experimentally demonstrate that FastFlow is always more efficient than all of them in a set of micro-benchmarks and on a real world application; the speedup edge of FastFlow over other solutions might be bold for fine grain tasks, as an example +35% on OpenMP, +226% on Cilk, +96% on TBB for the alignment of protein P01111 against UniProt DB using Smith-Waterman algorithm.

    @techreport{fastflow:tr-09-12,
      title         = {{FastFlow}: Efficient Parallel Streaming Applications on Multi-core},
      author        = {Marco Aldinucci and Massimo Torquati and Massimiliano Meneghin},
      year          = {2009},
      month         = sep,
      institution   = {Universit{\`a} di Pisa, Dipartimento di Informatica, Italy},
      number        = {TR-09-12},
      url           = {http://arxiv.org/abs/0909.1187},
      abstract      = {Shared memory multiprocessors come back to popularity thanks to rapid spreading of commodity multi-core architectures. As ever, shared memory programs are fairly easy to write and quite hard to optimise; providing multi-core programmers with optimising tools and programming frameworks is a nowadays challenge. Few efforts have been done to support effective streaming applications on these architectures. In this paper we introduce FastFlow, a low-level programming framework based on lock-free queues explicitly designed to support high-level languages for streaming applications. We compare FastFlow with state-of-the-art programming frameworks such as Cilk, OpenMP, and Intel TBB. We experimentally demonstrate that FastFlow is always more efficient than all of them in a set of micro-benchmarks and on a real world application; the speedup edge of FastFlow over other solutions might be bold for fine grain tasks, as an example +35% on OpenMP, +226% on Cilk, +96% on TBB for the alignment of protein P01111 against UniProt DB using Smith-Waterman algorithm.},
      keywords      = {fastflow},
      date-added    = {2010-02-13 16:20:18 +0100},
      date-modified = {2013-11-24 00:39:38 +0000},
      bdsk-url-1    = {http://calvados.di.unipi.it/storage/paper_files/TR-09-12.pdf},
      bdsk-url-2    = {http://arxiv.org/abs/0909.1187}
    }

  • M. Aldinucci, H. L. Bouziane, M. Danelutto, and C. Pérez, "STKM on SCA: a Unified Framework with Components, Workflows and Algorithmic Skeletons," in Proc. of 15th Intl. Euro-Par 2009 Parallel Processing, Delft, The Netherlands, 2009, p. 678–690. doi:10.1007/978-3-642-03869-3
    [BibTeX] [Abstract] [Download PDF]

    This paper investigates an implementation of STKM, a Spatio-Temporal sKeleton Model. STKM expands the Grid Component Model (GCM) with an innovative programmable approach that allows programmers to compose an application by combining component, workflow and skeleton concepts. The paper deals with a projection of the STKM model on top of SCA and it evaluates its implementation using Tuscany Java SCA. Experimental results show the need and the benefits of the high level of abstraction offered by STKM.

    @inproceedings{stkm:europar:09,
      title         = {{STKM} on {SCA}: a Unified Framework with Components, Workflows and Algorithmic Skeletons},
      author        = {Marco Aldinucci and Hinde Lilia Bouziane and Marco Danelutto and Christian P{\'e}rez},
      year          = {2009},
      month         = aug,
      booktitle     = {Proc. of 15th Intl. Euro-Par 2009 Parallel Processing},
      series        = {LNCS},
      volume        = {5704},
      pages         = {678--690},
      publisher     = {Springer},
      address       = {Delft, The Netherlands},
      doi           = {10.1007/978-3-642-03869-3},
      url           = {http://calvados.di.unipi.it/storage/paper_files/2009_STKM_Europar.pdf},
      abstract      = {This paper investigates an implementation of STKM, a Spatio-Temporal sKeleton Model. STKM expands the Grid Component Model (GCM) with an innovative programmable approach that allows programmers to compose an application by combining component, workflow and skeleton concepts. The paper deals with a projection of the STKM model on top of SCA and it evaluates its implementation using Tuscany Java SCA. Experimental results show the need and the benefits of the high level of abstraction offered by STKM.},
      date-modified = {2009-12-03 00:58:56 +0100},
      bdsk-url-1    = {http://calvados.di.unipi.it/storage/paper_files/2009_STKM_Europar.pdf},
      bdsk-url-2    = {http://dx.doi.org/10.1007/978-3-642-03869-3}
    }

  • M. Aldinucci, M. Danelutto, and P. Kilpatrick, "Autonomic management of multiple non-functional concerns in behavioural skeletons," Università di Pisa, Dipartimento di Informatica, Italy, TR-09-10, 2009.
    [BibTeX] [Download PDF]
    @techreport{nf-concerns:tr-09-10,
      title         = {Autonomic management of multiple non-functional concerns in behavioural skeletons},
      author        = {Marco Aldinucci and Marco Danelutto and Peter Kilpatrick},
      year          = {2009},
      month         = jul,
      institution   = {Universit{\`a} di Pisa, Dipartimento di Informatica, Italy},
      number        = {TR-09-10},
      url           = {http://arxiv.org/abs/0909.1517},
      date-added    = {2009-09-25 22:49:07 +0200},
      date-modified = {2013-12-08 14:58:33 +0000},
      bdsk-url-1    = {http://compass2.di.unipi.it/TR/Files/TR-09-10.pdf.gz},
      bdsk-url-2    = {http://arxiv.org/abs/0909.1517}
    }

  • M. Aldinucci, M. Danelutto, and P. Kilpatrick, "Autonomic management of non-functional concerns in distributed and parallel application programming," in Proc. of Intl. Parallel & Distributed Processing Symposium (IPDPS), Rome, Italy, 2009, p. 1–12. doi:10.1109/IPDPS.2009.5161034
    [BibTeX] [Abstract] [Download PDF]

    An approach to the management of non-functional concerns in massively parallel and/or distributed architectures that marries parallel programming patterns with autonomic computing is presented. The necessity and suitability of the adoption of autonomic techniques are evidenced. Issues arising in the implementation of autonomic managers taking care of multiple concerns and of coordination among hierarchies of such autonomic managers are discussed. Experimental results are presented that demonstrate the feasibility of the approach.

    @inproceedings{beske:ipdps:09,
      title         = {Autonomic management of non-functional concerns in distributed and parallel application programming},
      author        = {Marco Aldinucci and Marco Danelutto and Peter Kilpatrick},
      year          = {2009},
      month         = may,
      booktitle     = {Proc. of Intl. Parallel \& Distributed Processing Symposium (IPDPS)},
      pages         = {1--12},
      publisher     = {IEEE},
      address       = {Rome, Italy},
      doi           = {10.1109/IPDPS.2009.5161034},
      url           = {http://calvados.di.unipi.it/storage/paper_files/2009_f_nf_IPDPS.pdf},
      abstract      = {An approach to the management of non-functional concerns in massively parallel and/or distributed architectures that marries parallel programming patterns with autonomic computing is presented. The necessity and suitability of the adoption of autonomic techniques are evidenced. Issues arising in the implementation of autonomic managers taking care of multiple concerns and of coordination among hierarchies of such autonomic managers are discussed. Experimental results are presented that demonstrate the feasibility of the approach.},
      date-added    = {2008-12-09 18:58:37 +0100},
      date-modified = {2009-06-07 22:30:35 +0200},
      bdsk-url-1    = {http://calvados.di.unipi.it/storage/paper_files/2009_f_nf_IPDPS.pdf},
      bdsk-url-2    = {http://dx.doi.org/10.1109/IPDPS.2009.5161034}
    }

  • M. Aldinucci, M. Danelutto, and P. Kilpatrick, "Co-design of distributed systems using skeletons and autonomic management abstractions," in Euro-Par 2008 Workshops - Parallel Processing, Selected Papers, Las Palmas, Spain, 2009, p. 403–414. doi:10.1007/978-3-642-00955-6_46
    [BibTeX] [Abstract] [Download PDF]

    We discuss how common problems arising with multi/many-core distributed architectures can be effectively handled through co-design of parallel/distributed programming abstractions and of autonomic management of non-functional concerns. In particular, we demonstrate how restricted parallel/distributed patterns (or skeletons) may be efficiently managed by rule-based autonomic managers. We discuss the basic principles underlying pattern+manager co-design, current implementations inspired by this approach and some results achieved with a proof-of-concept prototype.

    @inproceedings{abstraction:europarworkshop:09,
      author        = {Marco Aldinucci and Marco Danelutto and Peter Kilpatrick},
      title         = {Co-design of distributed systems using skeletons and autonomic management abstractions},
      booktitle     = {Euro-Par 2008 Workshops - Parallel Processing, Selected Papers},
      editor        = {E. C{\'e}sar and M. Alexander and A. Streit and J.L. Tr{\"a}ff and C. C{\'e}rin and A. Kn{\"u}pfer and D. Kranzlm{\"u}ller and S. Jha},
      publisher     = {Springer},
      address       = {Las Palmas, Spain},
      series        = {LNCS},
      volume        = {5415},
      pages         = {403--414},
      month         = apr,
      year          = {2009},
      doi           = {10.1007/978-3-642-00955-6_46},
      isbn          = {978-3-642-00954-9},
      url           = {http://calvados.di.unipi.it/storage/paper_files/2009_abstraction_workshopeuropar.pdf},
      abstract      = {We discuss how common problems arising with multi/many-core distributed architectures can be effectively handled through co-design of parallel/distributed programming abstractions and of autonomic management of non-functional concerns. In particular, we demonstrate how restricted parallel/distributed patterns (or skeletons) may be efficiently managed by rule-based autonomic managers. We discuss the basic principles underlying pattern+manager co-design, current implementations inspired by this approach and some results achieved with a proof-of-concept prototype.},
      date-added    = {2009-01-09 17:57:45 +0100},
      date-modified = {2009-06-26 16:12:56 +0200},
      bdsk-url-1    = {http://calvados.di.unipi.it/storage/paper_files/2009_abstraction_workshopeuropar.pdf},
      bdsk-url-2    = {http://dx.doi.org/10.1007/978-3-642-00955-6_46},
    }

  • M. Aldinucci, M. Danelutto, and P. Kilpatrick, "Towards hierarchical management of autonomic components: a case study," in Proc. of Intl. Euromicro PDP 2009: Parallel Distributed and network-based Processing, Weimar, Germany, 2009, p. 3–10. doi:10.1109/PDP.2009.48
    [BibTeX] [Abstract] [Download PDF]

    We address the issue of autonomic management in hierarchical component-based distributed systems. The long term aim is to provide a modeling framework for autonomic management in which QoS goals can be defined, plans for system adaptation described and proofs of achievement of goals by (sequences of) adaptations furnished. Here we present an early step on this path. We restrict our focus to skeleton-based systems in order to exploit their well-defined structure. The autonomic cycle is described using the Orc system orchestration language while the plans are presented as structural modifications together with associated costs and benefits. A case study is presented to illustrate the interaction of managers to maintain QoS goals for throughput under varying conditions of resource availability.

    @inproceedings{beske:pdp:09,
      author        = {Marco Aldinucci and Marco Danelutto and Peter Kilpatrick},
      title         = {Towards hierarchical management of autonomic components: a case study},
      booktitle     = {Proc. of Intl. Euromicro PDP 2009: Parallel Distributed and network-based Processing},
      editor        = {Didier El Baz and Tom Gross and Francois Spies},
      publisher     = {IEEE},
      address       = {Weimar, Germany},
      pages         = {3--10},
      month         = feb,
      year          = {2009},
      doi           = {10.1109/PDP.2009.48},
      url           = {http://calvados.di.unipi.it/storage/paper_files/2009_hier_man_PDP.pdf},
      abstract      = {We address the issue of autonomic management in hierarchical component-based distributed systems. The long term aim is to provide a modeling framework for autonomic management in which QoS goals can be defined, plans for system adaptation described and proofs of achievement of goals by (sequences of) adaptations furnished. Here we present an early step on this path. We restrict our focus to skeleton-based systems in order to exploit their well-defined structure. The autonomic cycle is described using the Orc system orchestration language while the plans are presented as structural modifications together with associated costs and benefits. A case study is presented to illustrate the interaction of managers to maintain QoS goals for throughput under varying conditions of resource availability.},
      date-added    = {2008-10-15 22:43:41 +0200},
      date-modified = {2009-05-20 10:26:13 +0200},
      bdsk-url-1    = {http://calvados.di.unipi.it/storage/paper_files/2009_hier_man_PDP.pdf},
      bdsk-url-2    = {http://dx.doi.org/10.1109/PDP.2009.48},
    }

  • M. Aldinucci, S. Campa, P. Dazzi, N. Tonellotto, and G. Zoppi, D.NFCF.05 – NFCF tuned prototype and final documentation, 2009.
    [BibTeX]
    @misc{gridcomp:D.NFCF.05,
      title         = {{D.NFCF.05} -- {NFCF} tuned prototype and final documentation},
      author        = {Marco Aldinucci and Sonia Campa and Patrizio Dazzi and Nicola Tonellotto and Giorgio Zoppi},
      year          = {2009},
      month         = jan,
      date-added    = {2008-09-19 15:49:43 +0200},
      date-modified = {2009-01-25 23:42:02 +0100},
      howpublished  = {\url{http://gridcomp.ercim.org/}},
    }

  • M. Aldinucci and M. Torquati, "FastFlow website," 2009.
    [BibTeX]
    @manual{fastflow:web,
      author        = {Marco Aldinucci and Massimo Torquati},
      title         = {FastFlow website},
      year          = {2009},
      note          = {\url{http://mc-fastflow.sourceforge.net/}},
      date-added    = {2009-10-22 17:57:29 +0200},
      date-modified = {2010-07-07 01:26:23 +0200},
    }

  • M. Aldinucci, S. Campa, P. Dazzi, and N. Tonellotto, "GridComp website," 2009.
    [BibTeX] [Download PDF]
    @manual{gridcomp-web,
      author        = {Marco Aldinucci and Sonia Campa and Patrizio Dazzi and Nicola Tonellotto},
      title         = {GridComp website},
      year          = {2009},
      note          = {\url{http://gridcomp.ercim.org/}},
      url           = {http://gridcomp.ercim.org/},
      date-added    = {2009-10-26 00:50:30 +0100},
      date-modified = {2009-10-26 01:00:49 +0100},
      bdsk-url-1    = {http://gridcomp.ercim.org/},
    }

  • M. Aldinucci, M. Danelutto, and P. Kilpatrick, "Semi-formal models to support program development: autonomic management within component based parallel and distributed programming," in Formal Methods for Components and Objects: 7th Intl. Symposium, FMCO 2008, Sophia-Antipolis, France, October 20 - 24, 2008, Revised Lectures, 2009, p. 204–225. doi:10.1007/978-3-642-04167-9
    [BibTeX] [Abstract] [Download PDF]

    Functional and non-functional concerns require different programming effort, different techniques and different methodologies when attempting to program efficient parallel/distributed applications. In this work we present a ``programmer oriented'' methodology based on formal tools that permits reasoning about parallel/distributed program development and refinement. The proposed methodology is semi-formal in that it does not require the exploitation of highly formal tools and techniques, while providing a palatable and effective support to programmers developing parallel/distributed applications, in particular when handling non-functional concerns.

    @inproceedings{semi-formal:fmco:09,
      author        = {Marco Aldinucci and Marco Danelutto and Peter Kilpatrick},
      title         = {Semi-formal models to support program development: autonomic management within component based parallel and distributed programming},
      booktitle     = {Formal Methods for Components and Objects: 7th Intl. Symposium, FMCO 2008, Sophia-Antipolis, France, October 20 - 24, 2008, Revised Lectures},
      editor        = {Frank S. de Boer and Marcello M. Bonsangue and Eric Madelaine},
      publisher     = {Springer},
      series        = {LNCS},
      volume        = {5751},
      pages         = {204--225},
      year          = {2009},
      doi           = {10.1007/978-3-642-04167-9},
      url           = {http://calvados.di.unipi.it/storage/paper_files/2009_semiformal_FMCO08.pdf},
      abstract      = {Functional and non-functional concerns require different programming effort, different techniques and different methodologies when attempting to program efficient parallel/distributed applications. In this work we present a ``programmer oriented'' methodology based on formal tools that permits reasoning about parallel/distributed program development and refinement. The proposed methodology is semi-formal in that it does not require the exploitation of highly formal tools and techniques, while providing a palatable and effective support to programmers developing parallel/distributed applications, in particular when handling non-functional concerns.},
      date-added    = {2009-06-07 16:05:13 +0200},
      date-modified = {2009-08-30 17:11:01 +0200},
      bdsk-url-1    = {http://calvados.di.unipi.it/storage/paper_files/2009_semiformal_FMCO08.pdf},
      bdsk-url-2    = {http://dx.doi.org/10.1007/978-3-642-04167-9},
    }

2008

  • M. Aldinucci, G. Antoniu, M. Danelutto, and M. Jan, "Fault-Tolerant Data Sharing for High-level Grid Programming: A Hierarchical Storage Architecture," in Achievements in European Research on Grid Systems, M. Bubak, S. Gorlatch, and T. Priol, Eds., Kraków, Poland: Springer, 2008, p. 67–81. doi:10.1007/978-0-387-72812-4_6
    [BibTeX] [Abstract] [Download PDF]

    Enabling high-level programming models on grids is today a major challenge. A way to achieve this goal relies on the use of environments able to transparently and automatically provide adequate support for low-level, grid-specific issues (fault-tolerance, scalability, etc.). This paper discusses the above approach when applied to grid data management. As a case study, we propose a 2-tier software architecture that supports transparent, fault-tolerant, grid-level data sharing in the ASSIST programming environment (University of Pisa), based on the JuxMem grid data sharing service (INRIA Rennes).

    @incollection{assist:juxmem:IW_book:07,
      author        = {Marco Aldinucci and Gabriel Antoniu and Marco Danelutto and Mathieu Jan},
      title         = {Fault-Tolerant Data Sharing for High-level Grid Programming: A Hierarchical Storage Architecture},
      booktitle     = {Achievements in European Research on Grid Systems},
      editor        = {Marian Bubak and Sergei Gorlatch and Thierry Priol},
      publisher     = {Springer},
      address       = {Krak{\'o}w, Poland},
      series        = {CoreGRID},
      pages         = {67--81},
      month         = nov,
      year          = {2008},
      doi           = {10.1007/978-0-387-72812-4_6},
      isbn          = {978-0-387-72811-7},
      url           = {http://calvados.di.unipi.it/storage/paper_files/2007_IW06_book_juxadhocmem.pdf},
      abstract      = {Enabling high-level programming models on grids is today a major challenge. A way to achieve this goal relies on the use of environments able to transparently and automatically provide adequate support for low-level, grid-specific issues (fault-tolerance, scalability, etc.). This paper discusses the above approach when applied to grid data management. As a case study, we propose a 2-tier software architecture that supports transparent, fault-tolerant, grid-level data sharing in the ASSIST programming environment (University of Pisa), based on the JuxMem grid data sharing service (INRIA Rennes).},
      date-added    = {2007-06-26 01:31:31 +0200},
      date-modified = {2012-11-18 17:45:08 +0000},
      bdsk-url-1    = {http://calvados.di.unipi.it/storage/paper_files/2007_IW06_book_juxadhocmem.pdf},
      bdsk-url-2    = {http://dx.doi.org/10.1007/978-0-387-72812-4_6},
    }

  • M. Aldinucci, M. Danelutto, H. L. Bouziane, and C. Pérez, "Towards Software Component Assembly Language Enhanced with Workflows and Skeletons," in Proc. of the ACM SIGPLAN Component-Based High Performance Computing (CBHPC), New York, NY, USA, 2008, p. 1–11. doi:10.1145/1456190.1456194
    [BibTeX] [Abstract] [Download PDF]

    We explore the possibilities offered by a programming model supporting components, workflows and skeletons. In particular we describe how Stcm (Spatio-Temporal Component Model), an already existing programming model supporting components and workflows, can be extended to also provide algorithmic skeleton concepts. Programmers are therefore enabled to assembly applications specifying both temporal and spatial relations among components and instantiating predefined skeleton composite components to implement all those application parts that can be easily modeled with the available skeletons. We discuss preliminary results as well as the benefits deriving from Stkm (Spatio-Temporal sKeleton Model) adoption in a couple of real applications.

    @inproceedings{stkm:CBHPC:08,
      author        = {Aldinucci, Marco and Danelutto, Marco and Bouziane, Hinde Lilia and P{\'e}rez, Christian},
      title         = {Towards Software Component Assembly Language Enhanced with Workflows and Skeletons},
      booktitle     = {Proc. of the ACM SIGPLAN Component-Based High Performance Computing (CBHPC)},
      publisher     = {ACM},
      address       = {New York, NY, USA},
      location      = {Karlsruhe, Germany},
      pages         = {1--11},
      month         = oct,
      year          = {2008},
      doi           = {10.1145/1456190.1456194},
      isbn          = {978-1-60558-311-2},
      url           = {http://calvados.di.unipi.it/storage/paper_files/2008_CBHPC.pdf},
      abstract      = {We explore the possibilities offered by a programming model supporting components, workflows and skeletons. In particular we describe how Stcm (Spatio-Temporal Component Model), an already existing programming model supporting components and workflows, can be extended to also provide algorithmic skeleton concepts. Programmers are therefore enabled to assembly applications specifying both temporal and spatial relations among components and instantiating predefined skeleton composite components to implement all those application parts that can be easily modeled with the available skeletons. We discuss preliminary results as well as the benefits deriving from Stkm (Spatio-Temporal sKeleton Model) adoption in a couple of real applications.},
      date-modified = {2008-11-17 18:33:20 +0100},
      bdsk-url-1    = {http://calvados.di.unipi.it/storage/paper_files/2008_CBHPC.pdf},
      bdsk-url-2    = {http://dx.doi.org/10.1145/1456190.1456194},
    }

  • M. Aldinucci, M. Danelutto, H. L. Bouziane, and C. Pérez, "Towards a Spatio-Temporal sKeleton Model implementation on top of SCA," Institute on Programming Model, CoreGRID - Network of Excellence, TR-0171, 2008.
    [BibTeX] [Download PDF]
    @techreport{coregrid:tr0171,
      author        = {Aldinucci, Marco and Danelutto, Marco and Bouziane, Hinde Lilia and P{\'e}rez, Christian},
      title         = {Towards a {S}patio-{T}emporal s{K}eleton {M}odel implementation on top of {SCA}},
      institution   = {Institute on Programming Model, CoreGRID - Network of Excellence},
      number        = {TR-0171},
      month         = sep,
      year          = {2008},
      url           = {http://calvados.di.unipi.it/storage/paper_files/tr-0171.pdf},
      date-added    = {2009-09-08 16:12:10 +0200},
      date-modified = {2014-06-21 22:18:57 +0000},
      bdsk-url-1    = {http://calvados.di.unipi.it/storage/paper_files/tr-0171.pdf},
    }

  • M. Aldinucci and M. Danelutto, "Securing skeletal systems with limited performance penalty: the Muskel experience," Journal of Systems Architecture, vol. 54, iss. 9, p. 868–876, 2008. doi:10.1016/j.sysarc.2008.02.008
    [BibTeX] [Abstract] [Download PDF]

    Algorithmic skeletons have been exploited to implement several parallel programming environments, targeting workstation clusters as well as workstation networks and computational grids. When targeting non-dedicated clusters, workstation networks and grids, security has to be taken adequately into account in order to guarantee both code and data confidentiality and integrity. However, introducing security is usually an expensive activity, both in terms of the effort required to managed security mechanisms and in terms of the time spent performing security related activities at run time.We discuss the cost of security introduction as well as how some features typical of skeleton technology can be exploited to improve the efficiency code and data securing in a typical skeleton based parallel programming environment and we evaluate the performance cost of security mechanisms implemented exploiting state of the art tools. In particular, we take into account the cost of security introduction in muskel, a Java based skeletal system exploiting macro data flow implementation technology. We consider the adoption of mechanisms that allow securing all the communications involving remote, unreliable nodes and we evaluate the cost of such mechanisms. Also, we consider the implications on the computational grains needed to scale secure and insecure skeletal computations.

    @article{security:jsa:07,
      author        = {Marco Aldinucci and Marco Danelutto},
      title         = {Securing skeletal systems with limited performance penalty: the {Muskel} experience},
      journal       = {Journal of Systems Architecture},
      publisher     = {Elsevier},
      volume        = {54},
      number        = {9},
      pages         = {868--876},
      month         = sep,
      year          = {2008},
      doi           = {10.1016/j.sysarc.2008.02.008},
      url           = {http://calvados.di.unipi.it/storage/paper_files/2008_security_JSA.pdf},
      abstract      = {Algorithmic skeletons have been exploited to implement several parallel programming environments, targeting workstation clusters as well as workstation networks and computational grids. When targeting non-dedicated clusters, workstation networks and grids, security has to be taken adequately into account in order to guarantee both code and data confidentiality and integrity. However, introducing security is usually an expensive activity, both in terms of the effort required to managed security mechanisms and in terms of the time spent performing security related activities at run time.We discuss the cost of security introduction as well as how some features typical of skeleton technology can be exploited to improve the efficiency code and data securing in a typical skeleton based parallel programming environment and we evaluate the performance cost of security mechanisms implemented exploiting state of the art tools. In particular, we take into account the cost of security introduction in muskel, a Java based skeletal system exploiting macro data flow implementation technology. We consider the adoption of mechanisms that allow securing all the communications involving remote, unreliable nodes and we evaluate the cost of such mechanisms. Also, we consider the implications on the computational grains needed to scale secure and insecure skeletal computations.},
      date-added    = {2007-10-31 19:23:37 +0100},
      date-modified = {2014-08-24 22:18:21 +0000},
      bdsk-url-1    = {http://calvados.di.unipi.it/storage/paper_files/2008_security_JSA.pdf},
      bdsk-url-2    = {http://dx.doi.org/10.1016/j.sysarc.2008.02.008},
    }

  • M. Aldinucci, M. Danelutto, G. Zoppi, and P. Kilpatrick, "Advances in Autonomic Components & Services," in From Grids To Service and Pervasive Computing (Proc. of the CoreGRID Symposium 2008), Las Palmas, Spain, 2008, p. 3–18. doi:10.1007/978-0-387-09455-7_1
    [BibTeX] [Abstract] [Download PDF]

    Hierarchical autonomic management of structured grid applications can be efficiently implemented using production rule engines. Rules of the form "precondition-to-action" can be used to model the behaviour of autonomic managers in such a way that the autonomic control and the application management strategy are kept separate. This simplifies the manager design as well as user customization of autonomic manager policies. We briefly introduce rule-based autonomic managers. Then we discuss an implementation of a GCM-like behavioural skeleton – a composite component modelling a standard parallelism exploitation pattern with its own autonomic controller – in SCA/Tuscany. The implementation uses the JBoss rules engine to provide an autonomic behavioural skeleton component and services to expose the component functionality to the standard service framework. Performance results are discussed and finally similarities and differences with respect to the ProActive-based reference GCM implementation are discussed briefly.

    @inproceedings{sca:cgsymph:08,
      author        = {Marco Aldinucci and Marco Danelutto and Giorgio Zoppi and Peter Kilpatrick},
      title         = {Advances in Autonomic Components {\&} Services},
      booktitle     = {From Grids To Service and Pervasive Computing (Proc. of the CoreGRID Symposium 2008)},
      editor        = {Thierry Priol and Marco Vanneschi},
      publisher     = {Springer},
      address       = {Las Palmas, Spain},
      series        = {CoreGRID},
      pages         = {3--18},
      month         = aug,
      year          = {2008},
      doi           = {10.1007/978-0-387-09455-7_1},
      isbn          = {978-0-387-09454-0},
      url           = {http://calvados.di.unipi.it/storage/paper_files/2008_SCA_cgsymph.pdf},
      abstract      = {Hierarchical autonomic management of structured grid applications can be efficiently implemented using production rule engines. Rules of the form "precondition-to-action" can be used to model the behaviour of autonomic managers in such a way that the autonomic control and the application management strategy are kept separate. This simplifies the manager design as well as user customization of autonomic manager policies. We briefly introduce rule-based autonomic managers. Then we discuss an implementation of a GCM-like behavioural skeleton -- a composite component modelling a standard parallelism exploitation pattern with its own autonomic controller -- in SCA/Tuscany. The implementation uses the JBoss rules engine to provide an autonomic behavioural skeleton component and services to expose the component functionality to the standard service framework. Performance results are discussed and finally similarities and differences with respect to the ProActive-based reference GCM implementation are discussed briefly.},
      date-added    = {2008-05-11 18:42:40 +0200},
      date-modified = {2012-11-17 16:11:44 +0000},
      bdsk-url-1    = {http://calvados.di.unipi.it/storage/paper_files/2008_SCA_cgsymph.pdf},
      bdsk-url-2    = {http://dx.doi.org/10.1007/978-0-387-09455-7_1},
    }

  • M. Aldinucci, S. Campa, M. Danelutto, P. Dazzi, P. Kilpatrick, D. Laforenza, and N. Tonellotto, "Behavioural skeletons for component autonomic management on grids," in Making Grids Work, M. Danelutto, P. Frangopoulou, and V. Getov, Eds., Springer, 2008, p. 3–16. doi:10.1007/978-0-387-78448-9_1
    [BibTeX] [Abstract] [Download PDF]

    Autonomic management can improve the QoS provided by parallel/distributed applications. Within the CoreGRID Component Model, the autonomic management is tailored to the automatic – monitoring-driven – alteration of the component assembly and, therefore, is defined as the effect of (distributed)management code. This work yields a semantics based on hypergraph rewriting suitable tomodel the dynamic evolution and non-functional aspects of Service Oriented Architectures and component-based autonomic applications. In this regard, our main goal is to provide a formal description of adaptation operations that are typically only informally specified. We advocate that our approach makes easier to raise the level of abstraction of management code in autonomic and adaptive applications.

    @incollection{beske:cg_book:08,
      author        = {Marco Aldinucci and Sonia Campa and Marco Danelutto and Patrizio Dazzi and Peter Kilpatrick and Domenico Laforenza and Nicola Tonellotto},
      title         = {Behavioural skeletons for component autonomic management on grids},
      booktitle     = {Making Grids Work},
      editor        = {Marco Danelutto and Paraskevi Frangopoulou and Vladimir Getov},
      publisher     = {Springer},
      series        = {CoreGRID},
      chapter       = {Component Programming Models},
      pages         = {3--16},
      month         = aug,
      year          = {2008},
      doi           = {10.1007/978-0-387-78448-9_1},
      isbn          = {978-0-387-78447-2},
      url           = {http://calvados.di.unipi.it/storage/paper_files/2007_beske_cg_crete_book.pdf},
      abstract      = {Autonomic management can improve the QoS provided by parallel/distributed applications. Within the CoreGRID Component Model, the autonomic management is tailored to the automatic -- monitoring-driven -- alteration of the component assembly and, therefore, is defined as the effect of (distributed)management code. This work yields a semantics based on hypergraph rewriting suitable tomodel the dynamic evolution and non-functional aspects of Service Oriented Architectures and component-based autonomic applications. In this regard, our main goal is to provide a formal description of adaptation operations that are typically only informally specified. We advocate that our approach makes easier to raise the level of abstraction of management code in autonomic and adaptive applications.},
      date-added    = {2007-12-09 22:26:46 +0100},
      date-modified = {2008-11-17 20:07:48 +0100},
      bdsk-url-1    = {http://calvados.di.unipi.it/storage/paper_files/2007_beske_cg_crete_book.pdf},
      bdsk-url-2    = {http://dx.doi.org/10.1007/978-0-387-78448-9_1},
    }

  • M. Aldinucci and E. Tuosto, "Towards a Formal Semantics for Autonomic Components," in From Grids To Service and Pervasive Computing (Proc. of the CoreGRID Symposium 2008), Las Palmas, Spain, 2008, p. 31–45. doi:10.1007/978-0-387-09455-7_3
    [BibTeX] [Abstract] [Download PDF]

    Autonomic management can improve the QoS provided by parallel/distributed applications. Within the CoreGRID Component Model, the autonomic management is tailored to the automatic – monitoring-driven – alteration of the component assembly and, therefore, is defined as the effect of (distributed)management code. This work yields a semantics based on hypergraph rewriting suitable tomodel the dynamic evolution and non-functional aspects of Service Oriented Architectures and component-based autonomic applications. In this regard, our main goal is to provide a formal description of adaptation operations that are typically only informally specified. We advocate that our approach makes easier to raise the level of abstraction of management code in autonomic and adaptive applications.

    @inproceedings{sem:cgsymph:08,
      author        = {Marco Aldinucci and Emilio Tuosto},
      title         = {Towards a Formal Semantics for Autonomic Components},
      booktitle     = {From Grids To Service and Pervasive Computing (Proc. of the CoreGRID Symposium 2008)},
      editor        = {Thierry Priol and Marco Vanneschi},
      publisher     = {Springer},
      address       = {Las Palmas, Spain},
      series        = {CoreGRID},
      pages         = {31--45},
      month         = aug,
      year          = {2008},
      doi           = {10.1007/978-0-387-09455-7_3},
      isbn          = {978-0-387-09454-0},
      url           = {http://calvados.di.unipi.it/storage/paper_files/2008_sem_cgsymph.pdf},
      abstract      = {Autonomic management can improve the QoS provided by parallel/distributed applications. Within the CoreGRID Component Model, the autonomic management is tailored to the automatic -- monitoring-driven -- alteration of the component assembly and, therefore, is defined as the effect of (distributed)management code. This work yields a semantics based on hypergraph rewriting suitable tomodel the dynamic evolution and non-functional aspects of Service Oriented Architectures and component-based autonomic applications. In this regard, our main goal is to provide a formal description of adaptation operations that are typically only informally specified. We advocate that our approach makes easier to raise the level of abstraction of management code in autonomic and adaptive applications.},
      date-added    = {2008-05-11 18:46:45 +0200},
      date-modified = {2010-02-13 19:32:53 +0100},
      bdsk-url-1    = {http://calvados.di.unipi.it/storage/paper_files/2008_sem_cgsymph.pdf},
      bdsk-url-2    = {http://dx.doi.org/10.1007/978-0-387-09455-7_3},
    }

  • A. Oprescu, T. Kielmann, M. Danelutto, and M. Aldinucci, "Autonomic Behavior of Grid Applications using Component Platforms," Institute on Programming Model, CoreGRID - Network of Excellence, TR-0156, 2008.
    [BibTeX] [Download PDF]
    @techreport{coregrid:tr0156,
      title         = {Autonomic Behavior of Grid Applications using Component Platforms},
      author        = {Ana-Maria Oprescu and Thilo Kielmann and Marco Danelutto and Marco Aldinucci},
      year          = {2008},
      month         = jul,
      date-modified = {2014-06-21 15:24:31 +0000},
      institution   = {Institute on Programming Model, CoreGRID - Network of Excellence},
      number        = {TR-0156},
      url           = {http://www.coregrid.net/mambo/images/stories/TechnicalReports/tr-0156.pdf},
      bdsk-url-1    = {http://www.coregrid.net/mambo/images/stories/TechnicalReports/tr-0156.pdf},
    }

  • M. Aldinucci, M. Danelutto, H. L. Bouziane, and C. Pérez, "Towards Software Component Assembly Language Enhanced with Workflows and Skeletons," Institute on Programming Model, CoreGRID - Network of Excellence, TR-0153, 2008.
    [BibTeX] [Download PDF]
    @techreport{coregrid:tr0153,
      author      = {Aldinucci, Marco and Danelutto, Marco and Bouziane, Hinde Lilia and P{\'e}rez, Christian},
      title       = {Towards Software Component Assembly Language Enhanced with Workflows and Skeletons},
      institution = {Institute on Programming Model, CoreGRID - Network of Excellence},
      number      = {TR-0153},
      month       = jul,
      year        = {2008},
      url         = {http://www.coregrid.net/mambo/images/stories/TechnicalReports/tr-0153.pdf},
      bdsk-url-1  = {http://www.coregrid.net/mambo/images/stories/TechnicalReports/tr-0153.pdf},
    }

  • M. Aldinucci, S. Campa, P. Dazzi, N. Tonellotto, and G. Zoppi, D.NFCF.03 – Methodology to derive performance models for component and composite components, 2008.
    [BibTeX]
    @misc{gridcomp:D.NFCF.03,
      title         = {{D.NFCF.03} -- Methodology to derive performance models for component and composite components},
      author        = {Marco Aldinucci and Sonia Campa and Patrizio Dazzi and Nicola Tonellotto and Giorgio Zoppi},
      year          = {2008},
      month         = jun,
      date-added    = {2008-09-19 15:52:00 +0200},
      date-modified = {2008-09-19 15:54:19 +0200},
      howpublished  = {\url{http://gridcomp.ercim.org/}},
    }

  • M. Aldinucci, S. Campa, P. Dazzi, N. Tonellotto, and G. Zoppi, D.NFCF.04 – NFCF prototype and early documentation, 2008.
    [BibTeX]
    @misc{gridcomp:D.NFCF.04,
      title         = {{D.NFCF.04} -- {NFCF} prototype and early documentation},
      author        = {Marco Aldinucci and Sonia Campa and Patrizio Dazzi and Nicola Tonellotto and Giorgio Zoppi},
      year          = {2008},
      month         = jun,
      date-added    = {2009-01-25 23:49:16 +0100},
      date-modified = {2009-01-25 23:50:44 +0100},
      howpublished  = {\url{http://gridcomp.ercim.org/}},
    }

  • M. Aldinucci, CoreGRID Institute on Programming Model, Barcelona, Spain, 2008.
    [BibTeX] [Download PDF]
    @misc{ogf:cg:poster:08,
      author        = {Marco Aldinucci},
      title         = {CoreGRID Institute on Programming Model},
      howpublished  = {Open Grid Forum (OGF), CoreGRID Industrial Showcase},
      address       = {Barcelona, Spain},
      note          = {Poster},
      month         = jun,
      year          = {2008},
      url           = {http://calvados.di.unipi.it/storage/paper_files/2008_CG_PM_OGF.pdf},
      date-added    = {2008-06-15 15:40:37 +0200},
      date-modified = {2014-06-21 15:28:48 +0000},
      bdsk-url-1    = {http://calvados.di.unipi.it/storage/paper_files/2008_CG_PM_OGF.pdf},
    }

  • M. Aldinucci, S. Campa, M. Coppola, M. Danelutto, G. Zoppi, A. Basso, A. Bolotov, F. Baude, H. L. Bouziane, D. Caromel, L. Henrio, C. Pérez, J. Cunha, C. Michael, P. Classen, C. Lengauer, J. Cohen, S. Mc Gough, N. Currle-Linde, P. Dazzi, N. Tonellotto, J. Dünnweber, S. Gorlatch, P. Kilpatrick, N. Ranaldo, and E. Zimeo, "Proceedings of the Programming Model Institute Technical meeting 2008," Institute of Programming Model, CoreGRID - Network of Excellence, TR-0138, 2008.
    [BibTeX] [Download PDF]
    @techreport{coregrid:tr0138,
    title = {Proceedings of the Programming Model Institute Technical meeting 2008},
    author = {Aldinucci, Marco and Campa, Sonia and Coppola, Massimo and Danelutto, Marco and Zoppi, G. and Basso, Alessandro and Bolotov, Alexander and Baude, Francoise and Bouziane, Hinde Lilia and Caromel, Denis and Henrio, Ludovic and P{\'e}rez, Christian and Cunha, Jose and Classen, Michael and Classen, Philipp and Lengauer, Christian and Cohen, J. and Mc Gough, S. and Currle-Linde, Natalia and Dazzi, Patrizio and Tonellotto, Nicola and D{\"u}nnweber, Jan and Gorlatch, Sergei and Kilpatrick, Peter and Ranaldo, Nadia and Zimeo, Eugenio},
    year = {2008},
    month = may,
    institution = {Institute on Programming Model, CoreGRID - Network of Excellence},
    number = {TR-0138},
    url = {http://www.coregrid.net/mambo/images/stories/TechnicalReports/tr-0138.pdf},
    bdsk-url-1 = {http://www.coregrid.net/mambo/images/stories/TechnicalReports/tr-0138.pdf}
    }

  • M. Aldinucci and E. Tuosto, "Toward a Formal Semantics for Autonomic Components," Università di Pisa, Dipartimento di Informatica, Italy, TR-08-08, 2008.
    [BibTeX] [Download PDF]
    @techreport{sem:tr-08-08,
    title = {Toward a Formal Semantics for Autonomic Components},
    author = {Marco Aldinucci and Emilio Tuosto},
    year = {2008},
    month = apr,
    date-added = {2008-05-01 01:24:46 +0200},
    date-modified = {2008-05-01 01:27:17 +0200},
    institution = {Universit{\`a} di Pisa, Dipartimento di Informatica, Italy},
    number = {TR-08-08},
    url = {http://compass2.di.unipi.it/TR/Files/TR-08-08.pdf.gz},
    bdsk-url-1 = {http://compass2.di.unipi.it/TR/Files/TR-08-08.pdf.gz}
    }

  • M. Aldinucci, M. Danelutto, P. Kilpatrick, and P. Dazzi, "From Orc Models to Distributed Grid Java code," in Proc. of the Integrated Research in Grid Computing Workshop, Hersonissos, Crete, Greece, 2008, p. 2–13.
    [BibTeX] [Abstract] [Download PDF]

    We present O2J, a Java library that allows implementation of Orc programs on distributed architectures including grids and clusters/networks of workstations. With minimal programming effort the grid programmer may implement Orc programs, as he/she is not required to write any low level code relating to distributed orchestration of the computation but only that required to implement Orc expressions. Using the prototype O2J implementation, grid application developers can reason about abstract grid orchestration code described in Orc. Once the required orchestration has been determined and its properties analysed, a grid application prototype can be simply, efficiently and quickly implemented by taking the Orc code, rewriting it into corresponding Java/O2J syntax and finally providing the functional code implementing the sites and processes involved. The proposed modus operandi brings a Model Driven Engineering approach to grid application development.

    @inproceedings{orc:IW:08,
    title = {From {Orc} Models to Distributed Grid {Java} code},
    author = {Marco Aldinucci and Marco Danelutto and Peter Kilpatrick and Patrizio Dazzi},
    year = {2008},
    month = apr,
    booktitle = {Proc. of the Integrated Research in Grid Computing Workshop},
    editor = {Sergei Gorlatch and Paraskevi Fragopoulou and Thierry Priol},
    series = {CoreGRID},
    address = {Hersonissos, Crete, Greece},
    pages = {2--13},
    abstract = {We present O2J, a Java library that allows implementation of Orc programs on distributed architectures including grids and clusters/networks of workstations. With minimal programming effort the grid programmer may implement Orc programs, as he/she is not required to write any low level code relating to distributed orchestration of the computation but only that required to implement Orc expressions. Using the prototype O2J implementation, grid application developers can reason about abstract grid orchestration code described in Orc. Once the required orchestration has been determined and its properties analysed, a grid application prototype can be simply, efficiently and quickly implemented by taking the Orc code, rewriting it into corresponding Java/O2J syntax and finally providing the functional code implementing the sites and processes involved. The proposed modus operandi brings a Model Driven Engineering approach to grid application development.},
    keywords = {Duplicate},
    url = {http://calvados.di.unipi.it/storage/paper_files/2008_IW_O2J.pdf},
    bdsk-url-1 = {http://calvados.di.unipi.it/storage/paper_files/2008_IW_O2J.pdf},
    date-added = {2008-02-09 16:59:20 +0100},
    date-modified = {2012-11-18 18:07:06 +0000}
    }

  • M. Aldinucci and A. Benoit, "Automatic mapping of ASSIST applications using process algebra," Parallel Processing Letters, vol. 18, iss. 1, p. 175–188, 2008. doi:10.1142/S0129626408003302
    [BibTeX] [Abstract] [Download PDF]

    Grid technologies aim to harness the computational capabilities of widely distributed collections of computers. Due to the heterogeneous and dynamic nature of the set of grid resources, the programming and optimisation burden of a low level approach to grid computing is clearly unacceptable for large scale, complex applications. The development of grid applications can be simplified by using high-level programming environments. In the present work, we address the problem of the mapping of a high-level grid application onto the computational resources. In order to optimise the mapping of the application, we propose to automatically generate performance models from the application using the process algebra PEPA. We target applications written with the high-level environment ASSIST, since the use of such a structured environment allows us to automate the study of the application more effectively.

    @article{assist:pepa:ppl:08,
    title = {Automatic mapping of {ASSIST} applications using process algebra},
    author = {Marco Aldinucci and Anne Benoit},
    year = {2008},
    month = mar,
    journal = {Parallel Processing Letters},
    volume = {18},
    number = {1},
    pages = {175--188},
    doi = {10.1142/S0129626408003302},
    issn = {0129-6264},
    abstract = {Grid technologies aim to harness the computational capabilities of widely distributed collections of computers. Due to the heterogeneous and dynamic nature of the set of grid resources, the programming and optimisation burden of a low level approach to grid computing is clearly unacceptable for large scale, complex applications. The development of grid applications can be simplified by using high-level programming environments. In the present work, we address the problem of the mapping of a high-level grid application onto the computational resources. In order to optimise the mapping of the application, we propose to automatically generate performance models from the application using the process algebra PEPA. We target applications written with the high-level environment ASSIST, since the use of such a structured environment allows us to automate the study of the application more effectively.},
    annote = {ISSN: 0129-6264},
    url = {http://calvados.di.unipi.it/storage/paper_files/2008_pepa_ppl.pdf},
    bdsk-url-1 = {http://calvados.di.unipi.it/storage/paper_files/2008_pepa_ppl.pdf},
    bdsk-url-2 = {http://dx.doi.org/10.1142/S0129626408003302},
    date-modified = {2013-06-17 14:09:49 +0000}
    }

  • M. Aldinucci, S. Campa, M. Danelutto, M. Vanneschi, P. Dazzi, D. Laforenza, N. Tonellotto, and P. Kilpatrick, "Behavioural skeletons in GCM: autonomic management of grid components," in Proc. of Intl. Euromicro PDP 2008: Parallel Distributed and network-based Processing, Toulouse, France, 2008, p. 54–63. doi:10.1109/PDP.2008.46
    [BibTeX] [Abstract] [Download PDF]

    Autonomic management can be used to improve the QoS provided by parallel/distributed applications. We discuss behavioural skeletons introduced in earlier work: rather than relying on programmer ability to design "from scratch" efficient autonomic policies, we encapsulate general autonomic controller features into algorithmic skeletons. Then we leave to the programmer the duty of specifying the parameters needed to specialise the skeletons to the needs of the particular application at hand. This results in the programmer having the ability to fast prototype and tune distributed/parallel applications with non-trivial autonomic management capabilities. We discuss how behavioural skeletons have been implemented in the framework of GCM (the grid component model developed within the CoreGRID NoE and currently being implemented within the GridCOMP STREP project). We present results evaluating the overhead introduced by autonomic management activities as well as the overall behaviour of the skeletons. We also present results achieved with a long running application subject to autonomic management and dynamically adapting to changing features of the target architecture. Overall the results demonstrate both the feasibility of implementing autonomic control via behavioural skeletons and the effectiveness of our sample behavioural skeletons in managing the "functional replication" pattern(s).

    @inproceedings{orc:pdp:08,
    title = {Behavioural skeletons in {GCM}: autonomic management of grid components},
    author = {Marco Aldinucci and Sonia Campa and Marco Danelutto and Marco Vanneschi and Patrizio Dazzi and Domenico Laforenza and Nicola Tonellotto and Peter Kilpatrick},
    year = {2008},
    month = feb,
    booktitle = {Proc. of Intl. Euromicro PDP 2008: Parallel Distributed and network-based Processing},
    editor = {Didier El Baz and Julien Bourgeois and Francois Spies},
    publisher = {IEEE},
    address = {Toulouse, France},
    pages = {54--63},
    doi = {10.1109/PDP.2008.46},
    abstract = {Autonomic management can be used to improve the QoS provided by parallel/distributed applications. We discuss behavioural skeletons introduced in earlier work: rather than relying on programmer ability to design "from scratch" efficient autonomic policies, we encapsulate general autonomic controller features into algorithmic skeletons. Then we leave to the programmer the duty of specifying the parameters needed to specialise the skeletons to the needs of the particular application at hand. This results in the programmer having the ability to fast prototype and tune distributed/parallel applications with non-trivial autonomic management capabilities. We discuss how behavioural skeletons have been implemented in the framework of GCM (the grid component model developed within the CoreGRID NoE and currently being implemented within the GridCOMP STREP project). We present results evaluating the overhead introduced by autonomic management activities as well as the overall behaviour of the skeletons. We also present results achieved with a long running application subject to autonomic management and dynamically adapting to changing features of the target architecture. Overall the results demonstrate both the feasibility of implementing autonomic control via behavioural skeletons and the effectiveness of our sample behavioural skeletons in managing the "functional replication" pattern(s).},
    url = {http://calvados.di.unipi.it/storage/paper_files/2008_orc_PDP.pdf},
    bdsk-url-1 = {http://calvados.di.unipi.it/storage/paper_files/2008_orc_PDP.pdf},
    bdsk-url-2 = {http://dx.doi.org/10.1109/PDP.2008.46},
    date-added = {2007-10-09 12:13:13 +0200},
    date-modified = {2009-02-05 23:55:55 +0100}
    }

  • M. Aldinucci, M. Danelutto, and P. Kilpatrick, "Hierarchical autonomic management: a case study with skeletal systems," Institute on Programming Model, CoreGRID - Network of Excellence, TR-0127, 2008.
    [BibTeX] [Download PDF]
    @techreport{coregrid:tr0127,
    title = {Hierarchical autonomic management: a case study with skeletal systems},
    author = {Marco Aldinucci and Marco Danelutto and Peter Kilpatrick},
    year = {2008},
    month = feb,
    institution = {Institute on Programming Model, CoreGRID - Network of Excellence},
    number = {TR-0127},
    url = {http://calvados.di.unipi.it/storage/paper_files/tr-0127.pdf},
    bdsk-url-1 = {http://calvados.di.unipi.it/storage/paper_files/tr-0127.pdf},
    date-added = {2008-05-15 23:44:59 +0200},
    date-modified = {2014-06-21 22:13:51 +0000}
    }

  • M. Aldinucci, M. Torquati, M. Vanneschi, and P. Zuccato, "The VirtuaLinux Storage Abstraction Layer for Efficient Virtual Clustering," in Proc. of Intl. Euromicro PDP 2008: Parallel Distributed and network-based Processing, Toulouse, France, 2008, p. 619–627. doi:10.1109/PDP.2008.86
    [BibTeX] [Abstract] [Download PDF]

    VirtuaLinux is a meta-distribution that enables a standard Linux distribution to support robust physical and virtualized clusters. VirtuaLinux helps in avoiding the "single point of failure" effect by means of a combination of architectural strategies, including the transparent support for disk-less and master-less cluster configuration. VirtuaLinux supports the creation and management of Virtual Clusters in seamless way: VirtuaLinux Virtual Cluster Manager enables the system administrator to create, save, restore Xen-based Virtual Clusters, and to map and dynamically re-map them onto the nodes of the physical cluster. In this paper we introduce and discuss VirtuaLinux virtualization architecture, features, and tools, and in particular, the novel disk abstraction layer, which permits the fast and space-efficient creation of Virtual Clusters.

    @inproceedings{vlinux:pdp:08,
    title = {The {VirtuaLinux} Storage Abstraction Layer for Efficient Virtual Clustering},
    author = {Marco Aldinucci and Massimo Torquati and Marco Vanneschi and Pierfrancesco Zuccato},
    year = {2008},
    month = feb,
    booktitle = {Proc. of Intl. Euromicro PDP 2008: Parallel Distributed and network-based Processing},
    publisher = {IEEE},
    address = {Toulouse, France},
    pages = {619--627},
    doi = {10.1109/PDP.2008.86},
    abstract = {VirtuaLinux is a meta-distribution that enables a standard Linux distribution to support robust physical and virtualized clusters. VirtuaLinux helps in avoiding the "single point of failure" effect by means of a combination of architectural strategies, including the transparent support for disk-less and master-less cluster configuration. VirtuaLinux supports the creation and management of Virtual Clusters in seamless way: VirtuaLinux Virtual Cluster Manager enables the system administrator to create, save, restore Xen-based Virtual Clusters, and to map and dynamically re-map them onto the nodes of the physical cluster. In this paper we introduce and discuss VirtuaLinux virtualization architecture, features, and tools, and in particular, the novel disk abstraction layer, which permits the fast and space-efficient creation of Virtual Clusters.},
    date-added = {2009-11-10 01:29:09 +0100},
    date-modified = {2009-11-10 01:29:09 +0100},
    editor = {Didier El Baz and Julien Bourgeois and Francois Spies},
    url = {http://calvados.di.unipi.it/storage/paper_files/2008_VirtuaLinux_PDP.pdf},
    bdsk-url-1 = {http://dx.doi.org/10.1109/PDP.2008.86},
    bdsk-url-2 = {http://calvados.di.unipi.it/storage/paper_files/2008_VirtuaLinux_PDP.pdf}
    }

  • C. Bertolli, R. Fantacci, G. Mencagli, D. Tarchi, and M. Vanneschi, "Next generation grids and wireless communication networks: towards a novel integrated approach," Wireless Communications and Mobile Computing, 2008.
    [BibTeX]
    @article{position:insyeme:09,
    author = {Carlo Bertolli and Romano Fantacci and Gabriele Mencagli and Daniele Tarchi and Marco Vanneschi},
    title = {Next generation grids and wireless communication networks: towards a novel integrated approach},
    journal = {Wireless Communications and Mobile Computing},
    year = {2008},
    note = {To appear},
    date-added = {2008-10-15 23:08:23 +0200},
    date-modified = {2008-11-16 18:18:07 +0100}
    }

  • M. Aldinucci, M. Danelutto, and P. Kilpatrick, "A framework for prototyping and reasoning about grid systems," in Parallel Computing: Architectures, Algorithms and Applications, Germany, 2008, p. 235–242.
    [BibTeX] [Abstract] [Download PDF]

    A framework supporting fast prototyping as well as tuning of distributed applications is presented. The approach is based on the adoption of a formal model that is used to describe the orchestration of distributed applications. The formal model (Orc by Misra and Cook) can be used to support semi-formal reasoning about the applications at hand. The paper describes how the framework can be used to derive and evaluate alternative orchestrations of a well know parallel/distributed computation pattern; and shows how the same formal model can be used to support generation of prototypes of distributed applications skeletons directly from the application description.

    @inproceedings{orc:parco:07,
    title = {A framework for prototyping and reasoning about grid systems},
    author = {Marco Aldinucci and Marco Danelutto and Peter Kilpatrick},
    year = {2008},
    booktitle = {Parallel Computing: Architectures, Algorithms and Applications},
    editor = {C. Bischof and M. B{\"u}cker and P. Gibbon and G. R. Joubert and T. Lippert and B. Mohr and F. J. Peters},
    publisher = {IOS press},
    address = {Germany},
    series = {ADVANCES IN PARALLEL COMPUTING},
    volume = {15},
    pages = {235--242},
    isbn = {9781586037963},
    abstract = {A framework supporting fast prototyping as well as tuning of distributed applications is presented. The approach is based on the adoption of a formal model that is used to describe the orchestration of distributed applications. The formal model (Orc by Misra and Cook) can be used to support semi-formal reasoning about the applications at hand. The paper describes how the framework can be used to derive and evaluate alternative orchestrations of a well know parallel/distributed computation pattern; and shows how the same formal model can be used to support generation of prototypes of distributed applications skeletons directly from the application description.},
    annote = {Parco 2007},
    url = {http://calvados.di.unipi.it/storage/paper_files/2007_orc_parco.pdf},
    bdsk-url-1 = {http://calvados.di.unipi.it/storage/paper_files/2007_orc_parco.pdf},
    date-added = {2007-06-26 01:48:06 +0200},
    date-modified = {2012-11-18 17:48:22 +0000}
    }

  • M. Aldinucci, M. Danelutto, P. Kilpatrick, and P. Dazzi, "From Orc Models to Distributed Grid Java code," in Grid Computing: Achievements and Prospects, S. Gorlatch, P. Fragopoulou, and T. Priol, Eds., Springer, 2008, p. 13–24. doi:10.1007/978-0-387-09457-1_2
    [BibTeX] [Abstract] [Download PDF]

    We present O2J, a Java library that allows implementation of Orc programs on distributed architectures including grids and clusters/networks of workstations. With minimal programming effort the grid programmer may implement Orc programs, as he/she is not required to write any low level code relating to distributed orchestration of the computation but only that required to implement Orc expressions. Using the prototype O2J implementation, grid application developers can reason about abstract grid orchestration code described inOrc. Once the required orchestration has been determined and its properties analysed, a grid application prototype can be simply, efficiently and quickly implemented by taking the Orc code, rewriting it into corresponding Java/O2J syntax and finally providing the functional code implementing the sites and processes involved. The proposed modus operandi brings aModel Driven Engineering approach to grid application development.

    @incollection{orc:IW_book:08,
    title = {From {Orc} Models to Distributed Grid {Java} code},
    author = {Marco Aldinucci and Marco Danelutto and Peter Kilpatrick and Patrizio Dazzi},
    year = {2008},
    booktitle = {Grid Computing: Achievements and Prospects},
    editor = {Sergei Gorlatch and Paraskevi Fragopoulou and Thierry Priol},
    publisher = {Springer},
    series = {CoreGRID},
    pages = {13--24},
    doi = {10.1007/978-0-387-09457-1_2},
    isbn = {978-0-387-09456-4},
    abstract = {We present O2J, a Java library that allows implementation of Orc programs on distributed architectures including grids and clusters/networks of workstations. With minimal programming effort the grid programmer may implement Orc programs, as he/she is not required to write any low level code relating to distributed orchestration of the computation but only that required to implement Orc expressions. Using the prototype O2J implementation, grid application developers can reason about abstract grid orchestration code described inOrc. Once the required orchestration has been determined and its properties analysed, a grid application prototype can be simply, efficiently and quickly implemented by taking the Orc code, rewriting it into corresponding Java/O2J syntax and finally providing the functional code implementing the sites and processes involved. The proposed modus operandi brings aModel Driven Engineering approach to grid application development.},
    url = {http://calvados.di.unipi.it/storage/paper_files/2008_IW_book_O2J.pdf},
    bdsk-url-1 = {http://calvados.di.unipi.it/storage/paper_files/2008_IW_book_O2J.pdf},
    bdsk-url-2 = {http://dx.doi.org/10.1007/978-0-387-09457-1},
    bdsk-url-3 = {http://dx.doi.org/10.1007/978-0-387-09457-1_2},
    date-added = {2008-11-16 16:26:47 +0100},
    date-modified = {2015-02-21 14:30:35 +0000}
    }

  • M. Aldinucci, M. Danelutto, M. Torquati, F. Polzella, G. Spinatelli, M. Vanneschi, A. Gervaso, M. Cacitti, and P. Zuccato, "VirtuaLinux: virtualized high-density clusters with no single point of failure," in Parallel Computing: Architectures, Algorithms and Applications, The Netherlands, 2008, p. 355–362.
    [BibTeX] [Abstract] [Download PDF]

    VirtuaLinux is a Linux meta-distribution that allows the creation, deployment and administration of both physical and virtualized clusters with no single point of failure. VirtuaLinux supports the creation and management of virtual clusters in seamless way: VirtuaLinux Virtual Cluster Manager enables the system administrator to create, save, restore Xen-based virtual clusters, and to map and dynamically remap them onto the nodes of the physical cluster. We introduces and discuss VirtuaLinux virtualization architecture, features, and tools. These rely on a novel disk abstraction layer, which enables the fast, space-efficient, dynamic creation of virtual clusters composed of fully independent complete virtual machines.

    @inproceedings{virtualinux:parco:07,
    title = {{VirtuaLinux}: virtualized high-density clusters with no single point of failure},
    author = {Marco Aldinucci and Marco Danelutto and Massimo Torquati and Francesco Polzella and Gianmarco Spinatelli and Marco Vanneschi and Alessandro Gervaso and Manuel Cacitti and Pierfrancesco Zuccato},
    year = {2008},
    booktitle = {Parallel Computing: Architectures, Algorithms and Applications},
    editor = {C. Bischof and M. B{\"u}cker and P. Gibbon and G. R. Joubert and T. Lippert and B. Mohr and F. J. Peters},
    publisher = {IOS press},
    address = {The Netherlands},
    series = {ADVANCES IN PARALLEL COMPUTING},
    volume = {15},
    pages = {355--362},
    abstract = {VirtuaLinux is a Linux meta-distribution that allows the creation, deployment and administration of both physical and virtualized clusters with no single point of failure. VirtuaLinux supports the creation and management of virtual clusters in seamless way: VirtuaLinux Virtual Cluster Manager enables the system administrator to create, save, restore Xen-based virtual clusters, and to map and dynamically remap them onto the nodes of the physical cluster. We introduces and discuss VirtuaLinux virtualization architecture, features, and tools. These rely on a novel disk abstraction layer, which enables the fast, space-efficient, dynamic creation of virtual clusters composed of fully independent complete virtual machines.},
    annote = {Parco 2007},
    url = {http://calvados.di.unipi.it/storage/paper_files/2007_vlinux_parco.pdf},
    bdsk-url-1 = {http://calvados.di.unipi.it/storage/paper_files/2007_vlinux_parco.pdf},
    date-added = {2007-06-26 01:43:08 +0200},
    date-modified = {2012-11-18 17:56:09 +0000}
    }

2007

  • M. Aldinucci, M. Danelutto, and P. Dazzi, "MUSKEL: an expandable skeleton environment," Scalable Computing: Practice and Experience, vol. 8, iss. 4, p. 325–341, 2007.
    [BibTeX] [Abstract] [Download PDF]

    Programming models based on algorithmic skeletons promise to raise the level of abstraction perceived by programmers when implementing parallel applications, while guaranteeing good performance figures. At the same time, however, they restrict the freedom of programmers to implement arbitrary parallelism exploitation patterns. In fact, efficiency is achieved by restricting the parallelism exploitation patterns provided to the programmer to the useful ones for which efficient implementations, as well as useful and efficient compositions, are known. In this work we introduce muskel, a full Java library targeting workstation clusters, networks and grids and providing the programmers with a skeleton based parallel programming environment. muskel is implemented exploiting (macro) data flow technology, rather than the more usual skeleton technology relying on the use of implementation templates. Using data flow, muskel easily and efficiently implements both classical, predefined skeletons, and user-defined parallelism exploitation patterns. This provides a means to overcome some of the problems that Cole identified in his skeleton ``manifesto'' as the issues impairing skeleton success in the parallel programming arena. We discuss fully how user-defined skeletons are supported by exploiting a data flow implementation, experimental results and we also discuss extensions supporting the further characterization of skeletons with non-functional properties, such as security, through the use of Aspect Oriented Programming and annotations.

    @article{muskel:SCPE:07,
    title = {{MUSKEL}: an expandable skeleton environment},
    author = {Marco Aldinucci and Marco Danelutto and Patrizio Dazzi},
    year = {2007},
    month = dec,
    journal = {Scalable Computing: Practice and Experience},
    volume = {8},
    pages = {325--341},
    abstract = {Programming models based on algorithmic skeletons promise to raise the level of abstraction perceived by programmers when implementing parallel applications, while guaranteeing good performance figures. At the same time, however, they restrict the freedom of programmers to implement arbitrary parallelism exploitation patterns. In fact, efficiency is achieved by restricting the parallelism exploitation patterns provided to the programmer to the useful ones for which efficient implementations, as well as useful and efficient compositions, are known. In this work we introduce muskel, a full Java library targeting workstation clusters, networks and grids and providing the programmers with a skeleton based parallel programming environment. muskel is implemented exploiting (macro) data flow technology, rather than the more usual skeleton technology relying on the use of implementation templates. Using data flow, muskel easily and efficiently implements both classical, predefined skeletons, and user-defined parallelism exploitation patterns. This provides a means to overcome some of the problems that Cole identified in his skeleton ``manifesto'' as the issues impairing skeleton success in the parallel programming arena. We discuss fully how user-defined skeletons are supported by exploiting a data flow implementation, experimental results and we also discuss extensions supporting the further characterization of skeletons with non-functional properties, such as security, through the use of Aspect Oriented Programming and annotations.},
    date-added = {2007-06-26 01:27:03 +0200},
    date-modified = {2014-08-24 22:17:35 +0000},
    number = {4},
    url = {http://www.scpe.org/index.php/scpe/article/view/429},
    bdsk-url-1 = {http://www.scpe.org/vols/vol08/no4/SCPE_8_4_01.pdf},
    bdsk-url-2 = {http://calvados.di.unipi.it/storage/paper_files/2007_SCPE_muskel.pdf},
    bdsk-url-3 = {http://www.scpe.org/index.php/scpe/article/view/429}
    }

  • M. Danelutto, M. Pasin, M. Vanneschi, P. Dazzi, L. Presti, and D. Laforenza, "PAL: Exploiting Java Annotations for Parallelism," in Achievements in European Research on Grid Systems, M. Bubak, S. Gorlatch, and T. Priol, Eds., Kraków, Poland: Springer, 2007, p. 83–96.
    [BibTeX]
    @incollection{pal:IW_book:07,
    title = {{PAL}: Exploiting {Java} Annotations for Parallelism},
    author = {Marco Danelutto and Marcelo Pasin and Marco Vanneschi and Patrizio Dazzi and Luigi Presti and Domenico Laforenza},
    year = {2007},
    month = nov,
    booktitle = {Achievements in European Research on Grid Systems},
    publisher = {Springer},
    address = {Krak{\'o}w, Poland},
    series = {CoreGRID},
    pages = {83--96},
    date-modified = {2011-02-12 15:55:56 +0200},
    editor = {Marian Bubak and Sergei Gorlatch and Thierry Priol},
    read = {Yes}
    }

  • M. Aldinucci and M. Danelutto, "Skeleton based parallel programming: functional and parallel semantic in a single shot," Computer Languages, Systems and Structures, vol. 33, iss. 3-4, p. 179–192, 2007. doi:10.1016/j.cl.2006.07.004
    [BibTeX] [Abstract] [Download PDF]

    Semantics of skeleton-based parallel programming languages comes usually as two distinct items: a functional semantics, modeling the function computed by the skeleton program, and a parallel semantics describing the ways used to exploit parallelism during the execution of the skeleton program. The former is usually expressed using some kind of semantic formalism, while the latter is almost always given in an informal way. Such a separation of functional and parallel semantics seriously impairs the possibility of programmers to use the semantic tools to prove properties of programs. In this work, we show how a formal semantic framework can be set up that handles both functional and parallel aspects of skeleton-based parallel programs. The framework is based on a labeled transition system. We show how different properties related to skeleton programs can be proved using such a system. We use Lithium, a skeleton-based full Java parallel programming environment, as the case study.

    @article{lithium:sem:CLSS,
    title = {Skeleton based parallel programming: functional and parallel semantic in a single shot},
    author = {Marco Aldinucci and Marco Danelutto},
    year = {2007},
    month = oct,
    journal = {Computer Languages, Systems and Structures},
    volume = {33},
    number = {3-4},
    pages = {179--192},
    doi = {10.1016/j.cl.2006.07.004},
    abstract = {Semantics of skeleton-based parallel programming languages comes usually as two distinct items: a functional semantics, modeling the function computed by the skeleton program, and a parallel semantics describing the ways used to exploit parallelism during the execution of the skeleton program. The former is usually expressed using some kind of semantic formalism, while the latter is almost always given in an informal way. Such a separation of functional and parallel semantics seriously impairs the possibility of programmers to use the semantic tools to prove properties of programs. In this work, we show how a formal semantic framework can be set up that handles both functional and parallel aspects of skeleton-based parallel programs. The framework is based on a labeled transition system. We show how different properties related to skeleton programs can be proved using such a system. We use Lithium, a skeleton-based full Java parallel programming environment, as the case study.},
    annote = {ISSN: 1477-8424},
    url = {http://calvados.di.unipi.it/storage/paper_files/2005_semantics_CLSS.pdf},
    bdsk-url-1 = {http://calvados.di.unipi.it/storage/paper_files/2005_semantics_CLSS.pdf},
    bdsk-url-2 = {http://dx.doi.org/10.1016/j.cl.2006.07.004},
    date-modified = {2014-08-24 22:17:22 +0000}
    }

  • C. Bertolli, M. Coppola, and C. Zoccolo, "The co-replication methodology and its application to structured parallel programs," in CompFrame '07: Proc. of the 2007 symposium on Component and framework technology in high-performance and scientific computing, New York, NY, USA, 2007, p. 39–48. doi:https://doi.acm.org/10.1145/1297385.1297393
    [BibTeX] [Download PDF]
    @inproceedings{corep:bertolli:07,
    title = {The co-replication methodology and its application to structured parallel programs},
    author = {Carlo Bertolli and Massimo Coppola and Corrado Zoccolo},
    year = {2007},
    month = oct,
    booktitle = {CompFrame '07: Proc. of the 2007 symposium on Component and framework technology in high-performance and scientific computing},
    publisher = {ACM},
    address = {New York, NY, USA},
    pages = {39--48},
    doi = {10.1145/1297385.1297393},
    isbn = {978-1-59593-867-1},
    location = {Montreal, Quebec, Canada},
    url = {https://doi.acm.org/10.1145/1297385.1297393},
    bdsk-url-1 = {https://doi.acm.org/10.1145/1297385.1297393}
    }

  • Towards Next Generation Grids, Proc. of the CoreGRID Symposium 2007, Rennes, France: Springer, 2007.
    [BibTeX]
    @proceedings{cgs:proc:07,
    title = {Towards Next Generation Grids, Proc. of the {CoreGRID} Symposium 2007},
    year = {2007},
    month = sep,
    publisher = {Springer},
    address = {Rennes, France},
    date-added = {2007-09-25 11:58:05 +0200},
    date-modified = {2007-09-25 12:01:08 +0200},
    editor = {Thierry Priol and Marco Vanneschi}
    }

  • M. Coppola, D. Laforenza, N. Tonellotto, M. Danelutto, M. Vanneschi, and C. Zoccolo, "Managing User Expectation with Component Performance Contracts," in Proc. of the Workshop on Usage of Service Level Agreements in Grids, Austin, TX, USA, 2007.
    [BibTeX]
    @inproceedings{cop:usla:07,
    author = {Coppola, Massimo and Laforenza, Domenico and Tonellotto, Nicola and Danelutto, Marco and Vanneschi, Marco and Zoccolo, Corrado},
    title = {Managing User Expectation with Component Performance Contracts},
    booktitle = {Proc. of the Workshop on Usage of Service Level Agreements in Grids},
    series = {CoreGRID},
    publisher = {Springer},
    address = {Austin, TX, USA},
    month = sep,
    year = {2007},
    date-added = {2007-10-09 10:25:36 +0200},
    date-modified = {2008-10-15 23:00:30 +0200}
    }

  • M. Aldinucci, M. Danelutto, and P. Kilpatrick, "Adding metadata to Orc to support reasoning about grid programming," in Towards Next Generation Grids (Proc. of the CoreGRID Symposium 2007), Rennes, France, 2007, p. 205–214. doi:10.1007/978-0-387-72498-0_19
    [BibTeX] [Abstract] [Download PDF]

    Following earlier work demonstrating the utility of Orc as a means of specifying and reasoning about grid applications we propose the enhancement of such specifications with metadata that provide a means to extend an Orc specification with implementation oriented information. We argue that such specifications provide a useful refinement step in allowing reasoning about implementation related issues ahead of actual implementation or even prototyping. As examples, we demonstrate how such extended specifications can be used for investigating security related issues and for evaluating the cost of handling grid resource faults. The approach emphasises a semi-formal style of reasoning that makes maximum use of programmer domain knowledge and experience.

    @inproceedings{orc:metadata:cgs:07,
    title = {Adding metadata to {Orc} to support reasoning about grid programming},
    author = {Marco Aldinucci and Marco Danelutto and Peter Kilpatrick},
    year = {2007},
    month = sep,
    booktitle = {Towards Next Generation Grids (Proc. of the CoreGRID Symposium 2007)},
    publisher = {Springer},
    address = {Rennes, France},
    series = {CoreGRID},
    pages = {205--214},
    doi = {10.1007/978-0-387-72498-0_19},
    isbn = {978-0-387-72497-3},
    abstract = {Following earlier work demonstrating the utility of Orc as a means of specifying and reasoning about grid applications we propose the enhancement of such specifications with metadata that provide a means to extend an Orc specification with implementation oriented information. We argue that such specifications provide a useful refinement step in allowing reasoning about implementation related issues ahead of actual implementation or even prototyping. As examples, we demonstrate how such extended specifications can be used for investigating security related issues and for evaluating the cost of handling grid resource faults. The approach emphasises a semi-formal style of reasoning that makes maximum use of programmer domain knowledge and experience.},
    date-added = {2007-06-26 01:55:01 +0200},
    date-modified = {2009-02-04 18:57:20 +0100},
    editor = {Thierry Priol and Marco Vanneschi},
    url = {http://calvados.di.unipi.it/storage/paper_files/2007_orc_CGSymph.pdf},
    bdsk-url-1 = {http://calvados.di.unipi.it/storage/paper_files/2007_orc_CGSymph.pdf},
    bdsk-url-2 = {http://dx.doi.org/10.1007/978-0-387-72498-0_19}
    }

  • M. Danelutto, M. Aldinucci, and P. Kilpatrick, "Prototyping and reasoning about distributed systems: an Orc based framework," Institute on Programming Model, CoreGRID - Network of Excellence, TR-0102, 2007.
    [BibTeX] [Download PDF]
    @techreport{coregrid:tr0102,
    title = {Prototyping and reasoning about distributed systems: an {Orc} based framework},
    author = {Marco Danelutto and Marco Aldinucci and Peter Kilpatrick},
    year = {2007},
    month = aug,
    date-added = {2008-02-09 17:07:09 +0100},
    date-modified = {2008-02-09 17:07:09 +0100},
    institution = {Institute on Programming Model, CoreGRID - Network of Excellence},
    number = {TR-0102},
    url = {http://www.coregrid.net/mambo/images/stories/TechnicalReports/tr-0102.pdf},
    bdsk-url-1 = {http://www.coregrid.net/mambo/images/stories/TechnicalReports/tr-0102.pdf}
    }

  • M. Aldinucci, M. Danelutto, and P. Kilpatrick, "Management in distributed systems: a semi-formal approach," in Proc. of 13th Intl. Euro-Par 2007 Parallel Processing, Rennes, France, 2007, p. 651–661. doi:10.1007/978-3-540-74466-5
    [BibTeX] [Abstract] [Download PDF]

    The reverse engineering of a skeleton based programming environment and redesign to distribute management activities of the system and thereby remove a potential single point of failure is considered. The Orc notation is used to facilitate abstraction of the design and analysis of its properties. It is argued that Orc is particularly suited to this role as this type of management is essentially an orchestration activity. The Orc specification of the original version of the system is modified via a series of semi-formally justified derivation steps to obtain a specification of the decentralized management version which is then used as a basis for its implementation. Analysis of the two specifications allows qualitative prediction of the expected performance of the derived version with respect to the original, and this prediction is borne out in practice.

    @inproceedings{orc:europar:07,
    title = {Management in distributed systems: a semi-formal approach},
    author = {Marco Aldinucci and Marco Danelutto and Peter Kilpatrick},
    year = {2007},
    month = aug,
    booktitle = {Proc. of 13th Intl. Euro-Par 2007 Parallel Processing},
    publisher = {Springer},
    address = {Rennes, France},
    series = {LNCS},
    volume = {4641},
    pages = {651--661},
    doi = {10.1007/978-3-540-74466-5_69},
    isbn = {978-3-540-74465-8},
    abstract = {The reverse engineering of a skeleton based programming environment and redesign to distribute management activities of the system and thereby remove a potential single point of failure is considered. The Orc notation is used to facilitate abstraction of the design and analysis of its properties. It is argued that Orc is particularly suited to this role as this type of management is essentially an orchestration activity. The Orc specification of the original version of the system is modified via a series of semi-formally justified derivation steps to obtain a specification of the decentralized management version which is then used as a basis for its implementation. Analysis of the two specifications allows qualitative prediction of the expected performance of the derived version with respect to the original, and this prediction is borne out in practice.},
    date-added = {2009-05-01 23:33:34 +0200},
    date-modified = {2009-05-01 23:33:34 +0200},
    editor = {A.-M. Kermarrec and L. Boug{\'e} and T. Priol},
    url = {http://calvados.di.unipi.it/storage/paper_files/2007_orc_europar.pdf},
    bdsk-url-1 = {http://calvados.di.unipi.it/storage/paper_files/2007_orc_europar.pdf},
    bdsk-url-2 = {http://dx.doi.org/10.1007/978-3-540-74466-5_69}
    }

  • M. Aldinucci, S. Campa, P. Dazzi, and N. Tonellotto, D.NFCF.02 – Non functional component subsystem architectural design (code), 2007.
    [BibTeX]
    @misc{gridcomp:D.NFCF.02,
    title = {{D.NFCF.02} -- Non functional component subsystem architectural design (code)},
    author = {Marco Aldinucci and Sonia Campa and Patrizio Dazzi and Nicola Tonellotto},
    year = {2007},
    month = jun,
    date-added = {2008-09-19 15:47:46 +0200},
    date-modified = {2008-09-19 15:54:36 +0200},
    howpublished = {\url{http://gridcomp.ercim.org/}}
    }

  • M. Aldinucci, M. Torquati, M. Vanneschi, M. Cacitti, A. Gervaso, and P. Zuccato, "VirtuaLinux Design Principles," Università di Pisa, Dipartimento di Informatica, Italy, TR-07-13, 2007.
    [BibTeX] [Download PDF]
    @techreport{virtualinux:tr:07,
    author = {Aldinucci, Marco and Torquati, Massimo and Vanneschi, Marco and Cacitti, Manuel and Gervaso, Alessandro and Zuccato, Pierfrancesco},
    title = {{VirtuaLinux} Design Principles},
    institution = {Universit{\`a} di Pisa, Dipartimento di Informatica, Italy},
    number = {TR-07-13},
    month = jun,
    year = {2007},
    url = {http://calvados.di.unipi.it/storage/paper_files/2007_VirtuaLinux_TR-07-13.pdf},
    date-added = {2007-07-30 20:59:58 +0200},
    date-modified = {2007-09-16 18:47:11 +0200},
    bdsk-url-1 = {http://calvados.di.unipi.it/storage/paper_files/2007_VirtuaLinux_TR-07-13.pdf}
    }

  • M. Aldinucci, S. Campa, M. Danelutto, P. Dazzi, P. Kilpatrick, D. Laforenza, and N. Tonellotto, "Behavioural skeletons for component autonomic management on grids," in CoreGRID Workshop on Grid Programming Model, Grid and P2P Systems Architecture, Grid Systems, Tools and Environments, Heraklion, Crete, Greece, 2007.
    [BibTeX] [Abstract] [Download PDF]

    We present behavioural skeletons for the CoreGRID Component Model, which are an abstraction aimed at simplifying the development of GCM-based self-management applications. Behavioural skeletons abstract component self-management in component-based design as design patterns abstract class design in classic OO development. As here we just wish to introduce the behavioural skeleton framework, emphasis is placed on general skeleton structure rather than on their autonomic management policies.

    @inproceedings{beske:cg:heraklion:07,
    title = {Behavioural skeletons for component autonomic management on grids},
    author = {Marco Aldinucci and Sonia Campa and Marco Danelutto and Patrizio Dazzi and Peter Kilpatrick and Domenico Laforenza and Nicola Tonellotto},
    year = {2007},
    month = jun,
    booktitle = {CoreGRID Workshop on Grid Programming Model, Grid and P2P Systems Architecture, Grid Systems, Tools and Environments},
    address = {Heraklion, Crete, Greece},
    abstract = {We present behavioural skeletons for the CoreGRID Component Model, which are an abstraction aimed at simplifying the development of GCM-based self-management applications. Behavioural skeletons abstract component self-management in component-based design as design patterns abstract class design in classic OO development. As here we just wish to introduce the behavioural skeleton framework, emphasis is placed on general skeleton structure rather than on their autonomic management policies.},
    date-added = {2007-06-26 01:50:37 +0200},
    date-modified = {2007-12-16 23:32:27 +0100},
    url = {http://compass2.di.unipi.it/TR/Files/TR-07-12.pdf.gz},
    bdsk-url-1 = {http://compass2.di.unipi.it/TR/Files/TR-07-12.pdf.gz}
    }

  • M. Aldinucci and P. Zuccato, "Virtual clusters with no single point of failure," in Intl. Supercomputing Conference (ISC2007), Poster session, Dresden, Germany, 2007.
    [BibTeX] [Abstract] [Download PDF]

    VirtuaLinux is a Linux meta-distribution that allows the creation, deployment and administration of virtualized clusters with no single point of failure. VirtuaLinux architecture supports diskless configurations and provides an efficient, iSCSI based abstraction of the SAN. Clusters running VirtuaLinux exhibit no master node, thus boosting resilience and flexibility.

    @inproceedings{virtualinux:poster:ics:07,
    author = {Aldinucci, Marco and Zuccato, Pierfrancesco},
    title = {Virtual clusters with no single point of failure},
    booktitle = {Intl. Supercomputing Conference (ISC2007), Poster session},
    address = {Dresden, Germany},
    month = jun,
    year = {2007},
    abstract = {VirtuaLinux is a Linux meta-distribution that allows the creation, deployment and administration of virtualized clusters with no single point of failure. VirtuaLinux architecture supports diskless configurations and provides an efficient, iSCSI based abstraction of the SAN. Clusters running VirtuaLinux exhibit no master node, thus boosting resilience and flexibility.},
    url = {http://calvados.di.unipi.it/storage/paper_files/2007_ICS_VirtuaLinux.pdf},
    date-added = {2007-06-26 01:37:15 +0200},
    date-modified = {2007-11-03 14:28:15 +0100},
    bdsk-url-1 = {http://calvados.di.unipi.it/storage/paper_files/2007_ICS_VirtuaLinux.pdf}
    }

  • M. Aldinucci, S. Campa, P. Dazzi, and N. Tonellotto, D.NFCF.01 – Non functional component subsystem architectural design, 2007.
    [BibTeX]
    @misc{gridcomp:D.NFCF.01,
    title = {{D.NFCF.01} -- Non functional component subsystem architectural design},
    author = {Marco Aldinucci and Sonia Campa and Patrizio Dazzi and Nicola Tonellotto},
    year = {2007},
    month = jun,
    date-added = {2008-09-19 15:45:08 +0200},
    date-modified = {2008-09-19 15:54:12 +0200},
    howpublished = {\url{http://gridcomp.ercim.org/}}
    }

  • M. Aldinucci, M. Danelutto, and P. Kilpatrick, "Orc + metadata supporting grid programming," Università di Pisa, Dipartimento di Informatica, TR-07-10, 2007.
    [BibTeX] [Download PDF]
    @techreport{orcmetadata:TR-07-10,
    title = {{Orc} + metadata supporting grid programming},
    author = {Marco Aldinucci and Marco Danelutto and Peter Kilpatrick},
    year = {2007},
    month = may,
    date-added = {2007-10-15 20:04:45 +0200},
    date-modified = {2007-10-15 20:05:26 +0200},
    institution = {Universit{\`a} di Pisa, Dipartimento di Informatica},
    number = {TR-07-10},
    url = {http://compass2.di.unipi.it/TR/Files/TR-07-10.pdf.gz},
    bdsk-url-1 = {http://compass2.di.unipi.it/TR/Files/TR-07-10.pdf.gz}
    }

  • C. Dittamo, A. Cisternino, and M. Danelutto, "Parallelization of C\# Programs Through Annotations," in Proc. of Practical Aspects of High-Level Parallel Programming Workshop (PAPP, co-located with ICCS 2007), Beijing, China, 2007, p. 585–592. doi:10.1007/978-3-540-72586-2_86
    [BibTeX] [Download PDF]
    @inproceedings{07:dcd:csharp,
    author = {Dittamo, Cristian and Cisternino, Antonio and Danelutto, Marco},
    title = {Parallelization of C\# Programs Through Annotations},
    booktitle = {Proc. of Practical Aspects of High-Level Parallel Programming Workshop (PAPP, co-located with ICCS 2007)},
    series = {LNCS},
    volume = {4488},
    publisher = {Springer},
    address = {Beijing, China},
    pages = {585--592},
    month = may,
    year = {2007},
    doi = {10.1007/978-3-540-72586-2_86},
    url = {https://dx.doi.org/10.1007/978-3-540-72586-2_86},
    date-added = {2008-02-19 16:51:28 +0100},
    date-modified = {2008-09-14 14:13:29 +0200},
    bdsk-url-1 = {https://dx.doi.org/10.1007/978-3-540-72586-2_86}
    }

  • M. Aldinucci, S. Campa, M. Danelutto, P. Kilpatrick, P. Dazzi, D. Laforenza, and N. Tonellotto, "Behavioural skeletons for component autonomic management on grids," Università di Pisa, Dipartimento di Informatica, TR-07-12, 2007. doi:10.1007/978-0-387-78448-9_1
    [BibTeX] [Download PDF]
    @techreport{beske:TR-07-12,
    author = {Aldinucci, Marco and Campa, Sonia and Danelutto, Marco and Kilpatrick, Peter and Dazzi, Patrizio and Laforenza, Domenico and Tonellotto, Nicola},
    title = {Behavioural skeletons for component autonomic management on grids},
    institution = {Universit{\`a} di Pisa, Dipartimento di Informatica},
    number = {TR-07-12},
    month = may,
    year = {2007},
    doi = {10.1007/978-0-387-78448-9_1},
    url = {http://compass2.di.unipi.it/TR/Files/TR-07-12.pdf.gz},
    date-added = {2007-10-15 20:03:59 +0200},
    date-modified = {2007-10-15 20:04:09 +0200},
    bdsk-url-1 = {http://compass2.di.unipi.it/TR/Files/TR-07-12.pdf.gz},
    bdsk-url-2 = {http://dx.doi.org/10.1007/978-0-387-78448-9_1}
    }

  • P. Kilpatrick, M. Danelutto, and M. Aldinucci, "Deriving Grid Applications from Abstract Models," Institute on Programming Model, CoreGRID - Network of Excellence, TR-0085, 2007.
    [BibTeX] [Download PDF]
    @techreport{coregrid:tr0085,
    author = {Kilpatrick, Peter and Danelutto, Marco and Aldinucci, Marco},
    title = {Deriving Grid Applications from Abstract Models},
    institution = {Institute on Programming Model, CoreGRID - Network of Excellence},
    number = {TR-0085},
    month = apr,
    year = {2007},
    url = {http://www.coregrid.net/mambo/images/stories/TechnicalReports/tr-0085.pdf},
    date-added = {2007-09-25 13:17:20 +0200},
    date-modified = {2007-09-25 13:17:20 +0200},
    bdsk-url-1 = {http://www.coregrid.net/mambo/images/stories/TechnicalReports/tr-0085.pdf}
    }

  • M. Aldinucci, M. Danelutto, and P. Kilpatrick, "Management in distributed systems: a semi-formal approach," Università di Pisa, Dipartimento di Informatica, TR-07-05, 2007. doi:10.1007/978-3-540-74466-5_69
    [BibTeX] [Download PDF]
    @techreport{orc_muskel:TR-07-05,
    author = {Aldinucci, Marco and Danelutto, Marco and Kilpatrick, Peter},
    title = {Management in distributed systems: a semi-formal approach},
    institution = {Universit{\`a} di Pisa, Dipartimento di Informatica},
    number = {TR-07-05},
    month = feb,
    year = {2007},
    doi = {10.1007/978-3-540-74466-5_69},
    url = {http://compass2.di.unipi.it/TR/Files/TR-07-05.pdf.gz},
    date-modified = {2007-10-15 20:03:29 +0200},
    bdsk-url-1 = {http://compass2.di.unipi.it/TR/Files/TR-07-05.pdf.gz},
    bdsk-url-2 = {http://dx.doi.org/10.1007/978-3-540-74466-5_69}
    }

  • M. Aldinucci and M. Danelutto, "The cost of security in skeletal systems," in Proc. of Intl. Euromicro PDP 2007: Parallel Distributed and network-based Processing, Napoli, Italia, 2007, p. 213–220. doi:10.1109/PDP.2007.79
    [BibTeX] [Abstract] [Download PDF]

    Skeletal systems exploit algorithmical skeletons technology to provide the user very high level, efficient parallel programming environments. They have been recently demonstrated to be suitable for highly distributed architectures, such as workstation clusters, networks and grids. However, when using skeletal system for grid programming care must be taken to secure data and code transfers across non-dedicated, non-secure network links. In this work we take into account the cost of security introduction in muskel, a Java based skeletal system exploiting macro data flow implementation technology. We consider the adoption of mechanisms that allow securing all the communications taking place between remote, unreliable nodes and we evaluate the cost of such mechanisms. In particular, we consider the implications on the computational grains needed to scale secure and insecure skeletal computations.

    @inproceedings{security:euromicro:07,
    author = {Aldinucci, Marco and Danelutto, Marco},
    title = {The cost of security in skeletal systems},
    booktitle = {Proc. of Intl. Euromicro PDP 2007: Parallel Distributed and network-based Processing},
    editor = {D'Ambra, Pasqua and Guarracino, Mario Rosario},
    publisher = {IEEE},
    address = {Napoli, Italia},
    pages = {213--220},
    month = feb,
    year = {2007},
    doi = {10.1109/PDP.2007.79},
    abstract = {Skeletal systems exploit algorithmical skeletons technology to provide the user very high level, efficient parallel programming environments. They have been recently demonstrated to be suitable for highly distributed architectures, such as workstation clusters, networks and grids. However, when using skeletal system for grid programming care must be taken to secure data and code transfers across non-dedicated, non-secure network links. In this work we take into account the cost of security introduction in muskel, a Java based skeletal system exploiting macro data flow implementation technology. We consider the adoption of mechanisms that allow securing all the communications taking place between remote, unreliable nodes and we evaluate the cost of such mechanisms. In particular, we consider the implications on the computational grains needed to scale secure and insecure skeletal computations.},
    url = {http://calvados.di.unipi.it/storage/paper_files/2007_security_PDP.pdf},
    date-added = {2007-03-08 15:44:26 +0100},
    date-modified = {2008-02-18 12:49:23 +0100},
    bdsk-url-1 = {http://calvados.di.unipi.it/storage/paper_files/2007_security_PDP.pdf},
    bdsk-url-2 = {http://dx.doi.org/10.1109/PDP.2007.79}
    }

  • N. Tonellotto, D. Laforenza, M. Danelutto, M. Vanneschi, and C. Zoccolo, "A Performance Model for Stream-Based Computations," in Proc. of Intl. Euromicro PDP 2007: Parallel Distributed and network-based Processing, Napoli, Italia, 2007, p. 91–96.
    [BibTeX]
    @inproceedings{zoccolo:streammodel:euromicro:07,
    author = {Tonellotto, Nicola and Laforenza, Domenico and Danelutto, Marco and Vanneschi, Marco and Zoccolo, Corrado},
    title = {A Performance Model for Stream-Based Computations},
    booktitle = {Proc. of Intl. Euromicro PDP 2007: Parallel Distributed and network-based Processing},
    editor = {D'Ambra, Pasqua and Guarracino, Mario Rosario},
    publisher = {IEEE},
    address = {Napoli, Italia},
    pages = {91--96},
    month = feb,
    year = {2007},
    date-modified = {2007-03-08 15:47:19 +0100}
    }

  • M. Aldinucci, S. Campa, M. Coppola, M. Danelutto, C. Zoccolo, F. André, and J. Buisson, "An abstract schema modeling adaptivity management," in Integrated Research in Grid Computing, S. Gorlatch and M. Danelutto, Eds., Springer, 2007, p. 89–102. doi:10.1007/978-0-387-47658-2_7
    [BibTeX] [Abstract] [Download PDF]

    Nowadays, component application adaptivity in Grid environments has been afforded in different ways, such those provided by the Dynaco/AFPAC framework and by the ASSIST environment. We propose an abstract schema that catches all the designing aspects a model for parallel component applications on Grid should define in order to uniformly handle the dynamic behavior of computing resources within complex parallel applications. The abstraction is validated by demonstrating how two different approaches to adaptivity, ASSIST and Dynaco/AFPAC, easily map to such schema.

    @incollection{adapt_rennes:IW_book:06,
    title = {An abstract schema modeling adaptivity management},
    author = {Marco Aldinucci and Sonia Campa and Massimo Coppola and Marco Danelutto and Corrado Zoccolo and Fran{\c{c}}oise Andr{\'e} and J{\'e}r{\'e}my Buisson},
    year = {2007},
    booktitle = {Integrated Research in Grid Computing},
    publisher = {Springer},
    series = {CoreGRID},
    pages = {89--102},
    doi = {10.1007/978-0-387-47658-2_7},
    isbn = {978-0-387-47656-8},
    abstract = {Nowadays, component application adaptivity in Grid environments has been afforded in different ways, such those provided by the Dynaco/AFPAC framework and by the ASSIST environment. We propose an abstract schema that catches all the designing aspects a model for parallel component applications on Grid should define in order to uniformly handle the dynamic behavior of computing resources within complex parallel applications. The abstraction is validated by demonstrating how two different approaches to adaptivity, ASSIST and Dynaco/AFPAC, easily map to such schema.},
    annote = {ISBN: 0-387-47656-3},
    date-modified = {2012-03-18 00:36:49 +0000},
    editor = {Sergei Gorlatch and Marco Danelutto},
    owner = {aldinuc},
    timestamp = {2006.06.28},
    url = {http://calvados.di.unipi.it/storage/paper_files/2006_IW_book_adapt.pdf},
    bdsk-url-1 = {http://calvados.di.unipi.it/storage/paper_files/2006_IW_book_adapt.pdf},
    bdsk-url-2 = {http://dx.doi.org/10.1007/978-0-387-47658-2_7}
    }

  • M. Vanneschi and L. Veraldi, "Dynamicity in distributed applications: issues, problems and the ASSIST approach," Parallel Computing, vol. 33, iss. 12, p. 822–845, 2007. doi:http://dx.doi.org/10.1016/j.parco.2007.08.001
    [BibTeX] [Download PDF]
    @article{dyn:van:pc:07,
    title = {Dynamicity in distributed applications: issues, problems and the {ASSIST} approach},
    author = {Marco Vanneschi and Luca Veraldi},
    year = {2007},
    journal = {Parallel Computing},
    publisher = {Elsevier},
    address = {Amsterdam, The Netherlands},
    volume = {33},
    pages = {822--845},
    doi = {10.1016/j.parco.2007.08.001},
    issn = {0167-8191},
    date-modified = {2008-02-23 15:45:51 +0100},
    number = {12},
    url = {http://dx.doi.org/10.1016/j.parco.2007.08.001},
    bdsk-url-1 = {http://dx.doi.org/10.1016/j.parco.2007.08.001}
    }

  • M. Coppola, M. Danelutto, N. Tonellotto, M. Vanneschi, and C. Zoccolo, "Execution Support of High Performance Heterogeneous Component-Based Applications on the Grid," in Proc. of Euro-Par 2006 Workshops: Parallel Processing, CoreGRID 2006, UNICORE Summit 2006, Petascale Computational Biology and Bioinformatics, Dresden, Germany, August 29-September 1, 2006, Revised Selected Papers, 2007, p. 171–185.
    [BibTeX]
    @inproceedings{DBLP:conf/europar/CoppolaDTVZ06,
    title = {Execution Support of High Performance Heterogeneous Component-Based Applications on the Grid},
    author = {Massimo Coppola and Marco Danelutto and Nicola Tonellotto and Marco Vanneschi and Corrado Zoccolo},
    year = {2007},
    booktitle = {Proc. of Euro-Par 2006 Workshops: Parallel Processing, CoreGRID 2006, UNICORE Summit 2006, Petascale Computational Biology and Bioinformatics, Dresden, Germany, August 29-September 1, 2006, Revised Selected Papers},
    publisher = {Springer},
    series = {LNCS},
    volume = {4375},
    pages = {171--185},
    doi = {10.1007/978-3-540-72337-0_16},
    annote = {Euro-Par Workshops},
    bibsource = {DBLP, http://dblp.uni-trier.de},
    editor = {Wolfgang Lehner and Norbert Meyer and Achim Streit and Craig Stewart},
    ee = {http://dx.doi.org/10.1007/978-3-540-72337-0_16}
    }

  • J. Dünnweber, S. Gorlatch, S. Campa, M. Aldinucci, and M. Danelutto, "Adaptable Parallel Components for Grid Programming," in Integrated Research in Grid Computing, S. Gorlatch and M. Danelutto, Eds., Springer, 2007, p. 43–57. doi:10.1007/978-0-387-47658-2_4
    [BibTeX] [Abstract] [Download PDF]

    We suggest that parallel software components used for grid computing should be adaptable to application-specific requirements, instead of developing new components from scratch for each particular application. As an example, we take a parallel farm component which is "embarrassingly parallel", i. e. , free of dependencies, and adapt it to the wavefront processing pattern with dependencies that impact its behavior. We describe our approach in the context of Higher-Order Components (HOCs), with the Java-based system Lithium as our implementation framework. The adaptation process relies on HOCs' mobile code parameters that are shipped over the network of the grid. We describe our implementation of the proposed component adaptation method and report first experimental results for a particular grid application – the alignment of DNA sequence pairs, a popular, time-critical problem in computational molecular biology.

    @incollection{codeadapt:IW_book:06,
    author = {D{\"u}nnweber, Jan and Gorlatch, Sergei and Campa, Sonia and Aldinucci, Marco and Danelutto, Marco},
    title = {Adaptable Parallel Components for Grid Programming},
    booktitle = {Integrated Research in Grid Computing},
    editor = {Gorlatch, Sergei and Danelutto, Marco},
    series = {CoreGRID},
    publisher = {Springer},
    pages = {43--57},
    year = {2007},
    doi = {10.1007/978-0-387-47658-2_4},
    isbn = {978-0-387-47656-8},
    abstract = {We suggest that parallel software components used for grid computing should be adaptable to application-specific requirements, instead of developing new components from scratch for each particular application. As an example, we take a parallel farm component which is "embarrassingly parallel", i. e. , free of dependencies, and adapt it to the wavefront processing pattern with dependencies that impact its behavior. We describe our approach in the context of Higher-Order Components (HOCs), with the Java-based system Lithium as our implementation framework. The adaptation process relies on HOCs' mobile code parameters that are shipped over the network of the grid. We describe our implementation of the proposed component adaptation method and report first experimental results for a particular grid application -- the alignment of DNA sequence pairs, a popular, time-critical problem in computational molecular biology.},
    url = {http://calvados.di.unipi.it/storage/paper_files/2006_IW_book_muester.pdf},
    date-modified = {2009-02-01 17:56:57 +0100},
    timestamp = {2006.06.28},
    bdsk-url-1 = {http://calvados.di.unipi.it/storage/paper_files/2006_IW_book_muester.pdf},
    bdsk-url-2 = {http://dx.doi.org/10.1007/978-0-387-47658-2_4}
    }

  • M. Pasin, P. Kuonen, M. Danelutto, and M. Aldinucci, "Skeleton Parallel Programming and Parallel Objects," in Integrated Research in Grid Computing, S. Gorlatch and M. Danelutto, Eds., Springer, 2007, p. 59–71. doi:10.1007/978-0-387-47658-2_5
    [BibTeX] [Abstract] [Download PDF]

    This paper describes the ongoing work aimed at integrating the POP-C++ parallel object programming environment with the ASSIST component based parallel programming environment. Both these programming environments are shortly outlined, then several possibilities of integration are considered. For each one of these integration opportunities, the advantages and synergies that can be possibly achieved are outlined and discussed. The text explains how GEA, the ASSIST deployer can be considered as the basis for the integration of such different systems. An architecture is proposed, extending the existing tools to work together. The current status of integration of the two environments is discussed, along with the expected results and fallouts on the two programming environments.

    @incollection{pasin:IW_book:06,
    author = {Pasin, Marcelo and Kuonen, Pierre and Danelutto, Marco and Aldinucci, Marco},
    title = {Skeleton Parallel Programming and Parallel Objects},
    booktitle = {Integrated Research in Grid Computing},
    editor = {Gorlatch, Sergei and Danelutto, Marco},
    series = {CoreGRID},
    publisher = {Springer},
    pages = {59--71},
    year = {2007},
    doi = {10.1007/978-0-387-47658-2_5},
    isbn = {978-0-387-47656-8},
    abstract = {This paper describes the ongoing work aimed at integrating the POP-C++ parallel object programming environment with the ASSIST component based parallel programming environment. Both these programming environments are shortly outlined, then several possibilities of integration are considered. For each one of these integration opportunities, the advantages and synergies that can be possibly achieved are outlined and discussed. The text explains how GEA, the ASSIST deployer can be considered as the basis for the integration of such different systems. An architecture is proposed, extending the existing tools to work together. The current status of integration of the two environments is discussed, along with the expected results and fallouts on the two programming environments.},
    annote = {ISBN: 978-0-387-47656-8},
    url = {http://calvados.di.unipi.it/storage/paper_files/2006_IW_book_popc.pdf},
    date-modified = {2009-02-01 17:51:38 +0100},
    owner = {aldinuc},
    timestamp = {2006.06.28},
    bdsk-url-1 = {http://calvados.di.unipi.it/storage/paper_files/2006_IW_book_popc.pdf},
    bdsk-url-2 = {http://dx.doi.org/10.1007/978-0-387-47658-2_5}
    }

  • M. Aldinucci, M. Torquati, and P. Zuccato, "Virtualinux website," 2007.
    [BibTeX]
    @manual{virtualinux-web,
    title = {Virtualinux website},
    author = {Marco Aldinucci and Massimo Torquati and Pierfrancesco Zuccato},
    year = {2007},
    url = {http://virtualinux.sourceforge.net/},
    note = {\url{http://virtualinux.sourceforge.net/}}
    }

  • M. Aldinucci and A. Benoit, "Towards the Automatic Mapping of ASSIST Applications for the Grid," in Integrated Research in Grid Computing, S. Gorlatch and M. Danelutto, Eds., Springer, 2007, p. 73–87. doi:10.1007/978-0-387-47658-2_6
    [BibTeX] [Abstract] [Download PDF]

    One of the most promising technical innovations in present-day computing is the invention of grid technologies which harness the computational power of widely distributed collections of computers. However, the programming and optimisation burden of a low level approach to grid computing is clearly unacceptable for large scale, complex applications. The development of grid applications can be simplified by using high-level programming environments. In the present work, we address the problem of the mapping of a high-level grid application onto the computational resources. In order to optimise the mapping of the application, we propose to automatically generate performance models from the application using the process algebra PEPA. We target in this work applications written with the high-level environment ASSIST, since the use of such a structured environment allows us to automate the study of the application more effectively.

    @incollection{assist:pepa:IW_book:06,
      author        = {Marco Aldinucci and Anne Benoit},
      title         = {Towards the Automatic Mapping of {ASSIST} Applications for the Grid},
      booktitle     = {Integrated Research in Grid Computing},
      editor        = {Sergei Gorlatch and Marco Danelutto},
      series        = {CoreGRID},
      pages         = {73--87},
      publisher     = {Springer},
      year          = {2007},
      doi           = {10.1007/978-0-387-47658-2_6},
      isbn          = {978-0-387-47656-8},
      url           = {http://calvados.di.unipi.it/storage/paper_files/2006_IW_book_pepa.pdf},
      abstract      = {One of the most promising technical innovations in present-day computing is the invention of grid technologies which harness the computational power of widely distributed collections of computers. However, the programming and optimisation burden of a low level approach to grid computing is clearly unacceptable for large scale, complex applications. The development of grid applications can be simplified by using high-level programming environments. In the present work, we address the problem of the mapping of a high-level grid application onto the computational resources. In order to optimise the mapping of the application, we propose to automatically generate performance models from the application using the process algebra PEPA. We target in this work applications written with the high-level environment ASSIST, since the use of such a structured environment allows us to automate the study of the application more effectively.},
      date-modified = {2009-02-01 17:26:53 +0100},
      owner         = {aldinuc},
      timestamp     = {2006.06.28},
      bdsk-url-1    = {http://calvados.di.unipi.it/storage/paper_files/2006_IW_book_pepa.pdf},
      bdsk-url-2    = {http://dx.doi.org/10.1007/978-0-387-47658-2_6}
    }

2006

  • M. Aldinucci, F. André, J. Buisson, S. Campa, M. Coppola, M. Danelutto, and C. Zoccolo, "Parallel program/component adaptivity management," in Parallel Computing: Current & Future Issues of High-End Computing (Proc. of PARCO 2005, Malaga, Spain), Germany, 2006, p. 89–96.
    [BibTeX] [Abstract] [Download PDF]

    Grid computing platforms require to handle dynamic behaviour of computing resources within complex parallel applications. We introduce a formalization of adaptive behaviour that separates the abstract model of the application from the implementation design. We exemplify the abstract adaptation schema on two applications, and we show how two quite different approaches to adaptivity, the ASSIST environment and the AFPAC framework, easily map to this common schema.

    @inproceedings{adaptivity:parco:05,
    title = {Parallel program/component adaptivity management},
    author = {Marco Aldinucci and Fran{\c{c}}oise Andr{\'e} and J{\'e}r{\'e}my Buisson and Sonia Campa and Massimo Coppola and Marco Danelutto and Corrado Zoccolo},
    year = {2006},
    month = dec,
    booktitle = {Parallel Computing: Current \& Future Issues of High-End Computing (Proc. of {PARCO 2005}, Malaga, Spain)},
    publisher = {John von Neumann Institute for Computing},
    address = {Germany},
    series = {NIC},
    volume = {33},
    pages = {89--96},
    abstract = {Grid computing platforms require to handle dynamic behaviour of computing resources within complex parallel applications. We introduce a formalization of adaptive behaviour that separates the abstract model of the application from the implementation design. We exemplify the abstract adaptation schema on two applications, and we show how two quite different approaches to adaptivity, the ASSIST environment and the AFPAC framework, easily map to this common schema.},
    date-modified = {2012-11-18 17:08:30 +0000},
    editor = {G. R. Joubert and W. E. Nagel and F. J. Peters and O. Plata and P. Tirado and E. Zapata},
    optannote = {ISBN: 3-00-017352-8},
    url = {http://calvados.di.unipi.it/storage/paper_files/2005_adaptivity_parco.pdf},
    bdsk-url-1 = {http://calvados.di.unipi.it/storage/paper_files/2005_adaptivity_parco.pdf}
    }

  • M. Aldinucci, M. Danelutto, G. Giaccherini, M. Torquati, and M. Vanneschi, "Towards a distributed scalable data service for the grid," in Parallel Computing: Current & Future Issues of High-End Computing (Proc. of PARCO 2005, Malaga, Spain), Germany, 2006, p. 73–80.
    [BibTeX] [Abstract] [Download PDF]

    ADHOC (Adaptive Distributed Herd of Object Caches) is a Grid-enabled, fast, scalable object repository providing programmers with a general storage module. We present three different software tools based on ADHOC: A parallel cache for Apache, a DSM, and a main memory parallel file system. We also show that these tools exhibit a considerable performance and speedup both in absolute figures and w.r.t. other software tools exploiting the same features.

    @inproceedings{adhoc:parco:05,
      author        = {Marco Aldinucci and Marco Danelutto and Gianni Giaccherini and Massimo Torquati and Marco Vanneschi},
      title         = {Towards a distributed scalable data service for the grid},
      booktitle     = {Parallel Computing: Current \& Future Issues of High-End Computing (Proc. of {PARCO 2005}, Malaga, Spain)},
      editor        = {G. R. Joubert and W. E. Nagel and F. J. Peters and O. Plata and P. Tirado and E. Zapata},
      series        = {NIC},
      volume        = {33},
      pages         = {73--80},
      publisher     = {John von Neumann Institute for Computing},
      address       = {Germany},
      year          = {2006},
      month         = dec,
      url           = {https://iris.unito.it/retrieve/handle/2318/60375/711125/2005_adhoc_parco.pdf},
      abstract      = {ADHOC (Adaptive Distributed Herd of Object Caches) is a Grid-enabled, fast, scalable object repository providing programmers with a general storage module. We present three different software tools based on ADHOC: A parallel cache for Apache, a DSM, and a main memory parallel file system. We also show that these tools exhibit a considerable performance and speedup both in absolute figures and w.r.t. other software tools exploiting the same features.},
      optannote     = {ISBN: 3-00-017352-8},
      date-modified = {2012-11-18 17:07:26 +0000},
      bdsk-url-1    = {https://iris.unito.it/retrieve/handle/2318/60375/711125/2005_adhoc_parco.pdf}
    }

  • M. Aldinucci, M. Danelutto, A. Paternesi, R. Ravazzolo, and M. Vanneschi, "Building interoperable grid-aware ASSIST applications via WebServices," in Parallel Computing: Current & Future Issues of High-End Computing (Proc. of PARCO 2005, Malaga, Spain), Germany, 2006, p. 145–152.
    [BibTeX] [Abstract] [Download PDF]

    The ASSIST environment provides a high-level programming toolkit for the grid. ASSIST applications are described by means of a coordination language, which can express arbitrary graphs of modules. These modules (or a graph of them) may be enclosed in components specifically designed for the grid (GRID.it components). In this paper we describe how ASSIST modules can be wired through standard Web Services, and how GRID.it components may be made available as standard Web Services.

    @inproceedings{assist:webs:parco:05,
    title = {Building interoperable grid-aware {ASSIST} applications via {WebServices}},
    author = {Marco Aldinucci and Marco Danelutto and Andrea Paternesi and Roberto Ravazzolo and Marco Vanneschi},
    year = {2006},
    month = dec,
    booktitle = {Parallel Computing: Current \& Future Issues of High-End Computing (Proc. of {PARCO 2005}, Malaga, Spain)},
    publisher = {John von Neumann Institute for Computing},
    address = {Germany},
    series = {NIC},
    volume = {33},
    pages = {145--152},
    isbn = {3-00-017352-8},
    abstract = {The ASSIST environment provides a high-level programming toolkit for the grid. ASSIST applications are described by means of a coordination language, which can express arbitrary graphs of modules. These modules (or a graph of them) may be enclosed in components specifically designed for the grid (GRID.it components). In this paper we describe how ASSIST modules can be wired through standard Web Services, and how GRID.it components may be made available as standard Web Services.},
    date-modified = {2012-11-18 17:06:42 +0000},
    editor = {G. R. Joubert and W. E. Nagel and F. J. Peters and O. Plata and P. Tirado and E. Zapata},
    url = {http://calvados.di.unipi.it/storage/paper_files/2005_ws_parco.pdf},
    bdsk-url-1 = {http://calvados.di.unipi.it/storage/paper_files/2005_ws_parco.pdf}
    }

  • M. Aldinucci, G. Antoniu, M. Danelutto, and M. Jan, "Fault-Tolerant Data Sharing for High-level Grid Programming: A Hierarchical Storage Architecture," in Proc. of the Integrated Research in Grid Computing Workshop, Kraków, Poland, 2006, p. 177–188.
    [BibTeX] [Abstract] [Download PDF]

    Enabling high-level programming models on grids is today a major challenge. A way to achieve this goal relies on the use of environments able to transparently and automatically provide adequate support for low-level, grid-specific issues (fault-tolerance, scalability, etc.). This paper discusses the above approach when applied to grid data management. As a case study, we propose a 2-tier software architecture that supports transparent, fault-tolerant, grid-level data sharing in the ASSIST programming environment (University of Pisa), based on the JuxMem grid data sharing service (INRIA Rennes).

    @inproceedings{assist:juxmem:IW:06,
      author        = {Marco Aldinucci and Gabriel Antoniu and Marco Danelutto and Mathieu Jan},
      title         = {Fault-Tolerant Data Sharing for High-level Grid Programming: A Hierarchical Storage Architecture},
      booktitle     = {Proc. of the Integrated Research in Grid Computing Workshop},
      editor        = {Marian Bubak and Sergei Gorlatch and Thierry Priol},
      series        = {CoreGRID},
      pages         = {177--188},
      publisher     = {Academic Computing Centre {CYFRONET AGH}},
      address       = {Krak{\'o}w, Poland},
      year          = {2006},
      month         = oct,
      url           = {http://calvados.di.unipi.it/storage/paper_files/2006_IW_juxadhocmem.pdf},
      abstract      = {Enabling high-level programming models on grids is today a major challenge. A way to achieve this goal relies on the use of environments able to transparently and automatically provide adequate support for low-level, grid-specific issues (fault-tolerance, scalability, etc.). This paper discusses the above approach when applied to grid data management. As a case study, we propose a 2-tier software architecture that supports transparent, fault-tolerant, grid-level data sharing in the ASSIST programming environment (University of Pisa), based on the JuxMem grid data sharing service (INRIA Rennes).},
      optannote     = {ISBN: 83-9115141-6-1},
      date-modified = {2012-11-18 17:23:11 +0000},
      keywords      = {Duplicate},
      bdsk-url-1    = {http://calvados.di.unipi.it/storage/paper_files/2006_IW_juxadhocmem.pdf}
    }

  • M. Aldinucci, G. Antoniu, M. Danelutto, and M. Jan, "Fault-Tolerant Data Sharing for High-level Grid Programming: A Hierarchical Storage Architecture," Institute on Programming Model, CoreGRID - Network of Excellence, TR-0058, 2006.
    [BibTeX] [Download PDF]
    @techreport{coregrid:tr0058,
      author        = {Marco Aldinucci and Gabriel Antoniu and Marco Danelutto and Mathieu Jan},
      title         = {Fault-Tolerant Data Sharing for High-level Grid Programming: A Hierarchical Storage Architecture},
      institution   = {Institute on Programming Model, CoreGRID - Network of Excellence},
      number        = {TR-0058},
      year          = {2006},
      month         = aug,
      url           = {http://www.coregrid.net/mambo/images/stories/TechnicalReports/tr-0058.pdf},
      date-added    = {2007-09-25 13:15:55 +0200},
      date-modified = {2007-09-25 13:16:55 +0200},
      bdsk-url-1    = {http://www.coregrid.net/mambo/images/stories/TechnicalReports/tr-0058.pdf}
    }

  • M. Aldinucci, C. Bertolli, S. Campa, M. Coppola, M. Vanneschi, L. Veraldi, and C. Zoccolo, "Self-Configuring and Self-Optimising Grid Components in the GCM model and their ASSIST Implementation," Università di Pisa, Dipartimento di Informatica, Italy, TR-06-13, 2006.
    [BibTeX] [Download PDF]
    @techreport{self:tr:06-13,
    title = {Self-Configuring and Self-Optimising Grid Components in the {GCM} model and their {ASSIST} Implementation},
    author = {Marco Aldinucci and Carlo Bertolli and Sonia Campa and Massimo Coppola and Marco Vanneschi and Luca Veraldi and Corrado Zoccolo},
    year = {2006},
    month = aug,
    date-added = {2007-10-23 22:47:47 +0200},
    date-modified = {2007-10-23 22:52:01 +0200},
    institution = {Universit{\`a} di Pisa, Dipartimento di Informatica, Italy},
    number = {TR-06-13},
    url = {http://compass2.di.unipi.it/TR/Files/TR-06-13.pdf.gz},
    bdsk-url-1 = {http://compass2.di.unipi.it/TR/Files/TR-06-13.pdf.gz}
    }

  • M. Aldinucci, C. Bertolli, S. Campa, M. Coppola, M. Vanneschi, L. Veraldi, and C. Zoccolo, "Self-configuring and self-optimizing grid components in the GCM model and their ASSIST implementation," in Proc. of HPC-GECO/Compframe (held in conjunction with HPDC-15), Paris, France, 2006, p. 45–52.
    [BibTeX] [Abstract] [Download PDF]

    We present the concept of autonomic super-component as a building block for Grid-aware applications. Super-components are parametric, higher-order components exhibiting a well-known parallel behaviour. The proposal of a super-component feature is part of the experience we gained in the implementation of the ASSIST environment, which allows the development of self-configuring and optimising component-based applications following a structured and hierarchical approach. We discuss how such approach to Grid programming influenced the design of the Grid Component Model (GCM).

    @inproceedings{selfadapt:hpcgeco:06,
    title = {Self-configuring and self-optimizing grid components in the {GCM} model and their {ASSIST} implementation},
    author = {Marco Aldinucci and Carlo Bertolli and Sonia Campa and Massimo Coppola and Marco Vanneschi and Luca Veraldi and Corrado Zoccolo},
    year = {2006},
    month = jun,
    booktitle = {Proc. of HPC-GECO/Compframe (held in conjunction with HPDC-15)},
    publisher = {IEEE},
    address = {Paris, France},
    pages = {45--52},
    abstract = {We present the concept of autonomic super-component as a building block for Grid-aware applications. Super-components are parametric, higher-order components exhibiting a well-known parallel behaviour. The proposal of a super-component feature is part of the experience we gained in the implementation of the ASSIST environment, which allows the development of self-configuring and optimising component-based applications following a structured and hierarchical approach. We discuss how such approach to Grid programming influenced the design of the Grid Component Model (GCM).},
    date-modified = {2014-08-25 15:06:03 +0000},
    owner = {aldinuc},
    timestamp = {2006.06.28},
    url = {http://calvados.di.unipi.it/storage/paper_files/2006_self_HPC-GECO.pdf},
    bdsk-url-1 = {http://calvados.di.unipi.it/storage/paper_files/2006_self_HPC-GECO.pdf}
    }

  • M. Aldinucci, M. Danelutto, and M. Vanneschi, "Autonomic QoS in ASSIST Grid-aware components," in Proc. of Intl. Euromicro PDP 2006: Parallel Distributed and network-based Processing, Montbéliard, France, 2006, p. 221–230. doi:10.1109/PDP.2006.25
    [BibTeX] [Abstract] [Download PDF]

    Current Grid-aware applications are developed on existing software infrastructures, such as Globus, by developers who are experts on Grid software implementation. Although many useful applications have been produced this way, this approach may hardly support the additional complexity to Quality of Service (QoS) control in real application. We describe the ASSIST programming environment, the prototype of parallel programming environment currently under development at our group, as a suitable basis to capture all the desired features for QoS control for the Grid. Grid applications, built as compositions of ASSIST components, are supported by an innovative Grid Abstract Machine, which includes essential abstractions of standard middleware services and a hierarchical Application Manager, which may be considered as an early prototype of Autonomic Manager.

    @inproceedings{assist:qos:euromicro:06,
      author        = {Marco Aldinucci and Marco Danelutto and Marco Vanneschi},
      title         = {Autonomic {QoS} in {ASSIST} Grid-aware components},
      booktitle     = {Proc. of Intl. Euromicro PDP 2006: Parallel Distributed and network-based Processing},
      pages         = {221--230},
      publisher     = {IEEE},
      address       = {Montb{\'e}liard, France},
      year          = {2006},
      month         = feb,
      doi           = {10.1109/PDP.2006.25},
      url           = {http://calvados.di.unipi.it/storage/paper_files/2006_QoS_PDP.pdf},
      abstract      = {Current Grid-aware applications are developed on existing software infrastructures, such as Globus, by developers who are experts on Grid software implementation. Although many useful applications have been produced this way, this approach may hardly support the additional complexity to Quality of Service (QoS) control in real application. We describe the ASSIST programming environment, the prototype of parallel programming environment currently under development at our group, as a suitable basis to capture all the desired features for QoS control for the Grid. Grid applications, built as compositions of ASSIST components, are supported by an innovative Grid Abstract Machine, which includes essential abstractions of standard middleware services and a hierarchical Application Manager, which may be considered as an early prototype of Autonomic Manager.},
      date-modified = {2012-11-18 16:14:35 +0000},
      bdsk-url-1    = {http://calvados.di.unipi.it/storage/paper_files/2006_QoS_PDP.pdf},
      bdsk-url-2    = {http://dx.doi.org/10.1109/PDP.2006.25}
    }

  • M. Aldinucci and M. Danelutto, "The cost of security in skeletal systems," Università di Pisa, Dipartimento di Informatica, Italy, TR-06-03, 2006.
    [BibTeX] [Download PDF]
    @techreport{self:tr:06-03,
      author        = {Marco Aldinucci and Marco Danelutto},
      title         = {The cost of security in skeletal systems},
      institution   = {Universit{\`a} di Pisa, Dipartimento di Informatica, Italy},
      number        = {TR-06-03},
      year          = {2006},
      month         = feb,
      url           = {http://compass2.di.unipi.it/TR/Files/TR-06-03.pdf.gz},
      date-added    = {2007-10-23 22:54:04 +0200},
      date-modified = {2007-10-23 22:54:58 +0200},
      bdsk-url-1    = {http://compass2.di.unipi.it/TR/Files/TR-06-03.pdf.gz}
    }

  • M. Aldinucci, M. Coppola, M. Danelutto, M. Vanneschi, and C. Zoccolo, "ASSIST as a research framework for high-performance Grid programming environments," in Grid Computing: Software environments and Tools, J. C. Cunha and O. F. Rana, Eds., Springer, 2006, p. 230–256. doi:10.1007/1-84628-339-6_10
    [BibTeX] [Abstract] [Download PDF]

    ASSIST is a programming environment supporting the development of parallel and distributed high-performance applications on a wide range of target architectures including massively parallel clusters/networks of workstations and Grids. We discuss how ASSIST can act as a valid research vehicle to study, experiment and realize Grid-aware programming environments for high-performance applications. Special emphasis is put on the innovative methodologies, strategies and tools for dynamically adaptive applications that represent the necessary step for the success of Grid platforms. We start considering which are the fundamental features of Grid-aware programming environments, based upon structured parallel programming and components technology. Then we show how ASSIST evolved from its very first version, only targeting workstation clusters, to the current version, targeting Grids and solving many critical problems related to expressive power, flexibility, interoperability and efficiency. We also discuss how ASSIST deals with interoperability issues. Eventually we discuss how an ASSIST-based model for supporting dynamically adaptive applications can be derived.

    @incollection{assist:cunhabook:05,
      author        = {Marco Aldinucci and Massimo Coppola and Marco Danelutto and Marco Vanneschi and Corrado Zoccolo},
      title         = {{ASSIST} as a research framework for high-performance Grid programming environments},
      booktitle     = {Grid Computing: Software environments and Tools},
      editor        = {J. C. Cunha and O. F. Rana},
      chapter       = {10},
      pages         = {230--256},
      publisher     = {Springer},
      year          = {2006},
      month         = jan,
      doi           = {10.1007/1-84628-339-6_10},
      isbn          = {978-1-85233-998-2},
      url           = {http://calvados.di.unipi.it/storage/paper_files/2005_assist_CuhnaBook.pdf},
      abstract      = {ASSIST is a programming environment supporting the development of parallel and distributed high-performance applications on a wide range of target architectures including massively parallel clusters/networks of workstations and Grids. We discuss how ASSIST can act as a valid research vehicle to study, experiment and realize Grid-aware programming environments for high-performance applications. Special emphasis is put on the innovative methodologies, strategies and tools for dynamically adaptive applications that represent the necessary step for the success of Grid platforms. We start considering which are the fundamental features of Grid-aware programming environments, based upon structured parallel programming and components technology. Then we show how ASSIST evolved from its very first version, only targeting workstation clusters, to the current version, targeting Grids and solving many critical problems related to expressive power, flexibility, interoperability and efficiency. We also discuss how ASSIST deals with interoperability issues. Eventually we discuss how an ASSIST-based model for supporting dynamically adaptive applications can be derived.},
      date-modified = {2014-06-22 10:12:07 +0000},
      bdsk-url-1    = {http://calvados.di.unipi.it/storage/paper_files/2005_assist_CuhnaBook.pdf},
      bdsk-url-2    = {https://doi.org/10.1007/1-84628-339-6_10}
    }

  • M. Aldinucci and M. Danelutto, "Algorithmic skeletons meeting grids," Parallel Computing, vol. 32, iss. 7, p. 449–462, 2006. doi:10.1016/j.parco.2006.04.001
    [BibTeX] [Abstract] [Download PDF]

    In this work, we discuss an extension of the set of principles that should guide the future design and development of skeletal programming systems, as defined by Cole in his "pragmatic manifesto'" paper. The three further principles introduced are related to the ability to exploit existing sequential code as well as to the ability to target typical modern architectures, those made out of heterogeneous processing elements with dynamically varying availability, processing power and connectivity features such as grids or heterogeneous, non-dedicated clusters. We outline two skeleton based programming environments currently developed at our university and we discuss how these environments adhere to the proposed set of principles. Eventually, we outline how some other relevant, well-known skeleton environments conform to the same set of principles.

    @article{advske:pc:06,
      author        = {Marco Aldinucci and Marco Danelutto},
      title         = {Algorithmic skeletons meeting grids},
      journal       = {Parallel Computing},
      volume        = {32},
      number        = {7},
      pages         = {449--462},
      year          = {2006},
      doi           = {10.1016/j.parco.2006.04.001},
      url           = {http://calvados.di.unipi.it/storage/paper_files/2006_advske_PC.pdf},
      abstract      = {In this work, we discuss an extension of the set of principles that should guide the future design and development of skeletal programming systems, as defined by Cole in his "pragmatic manifesto'" paper. The three further principles introduced are related to the ability to exploit existing sequential code as well as to the ability to target typical modern architectures, those made out of heterogeneous processing elements with dynamically varying availability, processing power and connectivity features such as grids or heterogeneous, non-dedicated clusters. We outline two skeleton based programming environments currently developed at our university and we discuss how these environments adhere to the proposed set of principles. Eventually, we outline how some other relevant, well-known skeleton environments conform to the same set of principles.},
      date-modified = {2008-02-07 03:38:19 +0100},
      bdsk-url-1    = {http://calvados.di.unipi.it/storage/paper_files/2006_advske_PC.pdf},
      bdsk-url-2    = {https://doi.org/10.1016/j.parco.2006.04.001}
    }

  • M. Aldinucci, M. Coppola, M. Danelutto, N. Tonellotto, M. Vanneschi, and C. Zoccolo, "High level grid programming with ASSIST," Computational Methods in Science and Technology, vol. 12, iss. 1, p. 21–32, 2006.
    [BibTeX] [Abstract] [Download PDF]

    The development of efficient Grid applications usually requires writing huge portions of code directly at the level of abstraction provided by the underlying Grid middleware. In this work we discuss an alternative approach, raising the level of abstraction used when programming Grid applications. Our approach requires programmers just to describe in a qualitative way the kind of parallelism they want to express. Then, compiler tools, loader tools and run time system take complete care of running the application on a Grid target architecture. This allows to move most of the cumbersome tasks related to Grid targeting and management from programmer responsibility to tools. This paper introduces the structured parallel programming environment ASSIST, whose design is aimed at raising the level of abstraction in Grid programming and discusses how it can support transparent Grid programming while implementing Grid adaptivity.

    @article{assist:CMST:06,
    title = {High level grid programming with {ASSIST}},
    author = {Marco Aldinucci and Massimo Coppola and Marco Danelutto and Nicola Tonellotto and Marco Vanneschi and Corrado Zoccolo},
    year = {2006},
    journal = {Computational Methods in Science and Technology},
    volume = {12},
    pages = {21--32},
    abstract = {The development of efficient Grid applications usually requires writing huge portions of code directly at the level of abstraction provided by the underlying Grid middleware. In this work we discuss an alternative approach, raising the level of abstraction used when programming Grid applications. Our approach requires programmers just to describe in a qualitative way the kind of parallelism they want to express. Then, compiler tools, loader tools