Papers | Parallel Computing
2020
Jose Daniel Garcia, Jose Daniel Rio, Marco Aldinucci, Fabio Tordini, Marco Danelutto, Gabriele Mencagli, Massimo Torquati
Challenging the abstraction penalty in parallel patterns libraries: Adding FastFlow support to GrPPI Journal Article
In: The Journal of Supercomputing, vol. 76, no. 7, pp. 5139–5159, 2020.
Abstract | Links | BibTeX | Tags: fastflow, rephrase
@article{19:jsupe:grppi,
  title     = {Challenging the abstraction penalty in parallel patterns libraries: Adding {FastFlow} support to {GrPPI}},
  author    = {Garcia, Jose Daniel and Rio, Jose Daniel and Aldinucci, Marco and Tordini, Fabio and Danelutto, Marco and Mencagli, Gabriele and Torquati, Massimo},
  url       = {https://iris.unito.it/retrieve/handle/2318/1762686/744894/2020-js-grppi-postprint.pdf},
  doi       = {10.1007/s11227-019-02826-5},
  year      = {2020},
  date      = {2020-01-01},
  journal   = {The Journal of Supercomputing},
  volume    = {76},
  number    = {7},
  pages     = {5139--5159},
  abstract  = {In the last years, pattern-based programming has been recognized as a good practice for efficiently exploiting parallel hardware resources. Following this approach, multiple libraries have been designed for providing such high-level abstractions to ease the parallel programming. However, those libraries do not share a common interface. To pave the way, GrPPI has been designed for providing an intermediate abstraction layer between application developers and existing parallel programming frameworks like OpenMP, Intel TBB or ISO C++ threads. On the other hand, FastFlow has been adopted as an efficient object-based programming framework that may benefit from being supported as an additional GrPPI backend. However, the object-based approach presents some major challenges to be incorporated under the GrPPI type safe functional programming style. In this paper, we present the integration of FastFlow as a new GrPPI backend to demonstrate that structured parallel programming frameworks perfectly fit the GrPPI design. Additionally, we also demonstrate that GrPPI does not incur in additional overheads for providing its abstraction layer, and we study the programmability in terms of lines of code and cyclomatic complexity. In general, the presented work acts as reciprocal validation of both FastFlow (as an efficient, native structured parallel programming framework) and GrPPI (as an efficient abstraction layer on top of existing parallel programming frameworks).},
  keywords  = {fastflow, rephrase},
  pubstate  = {published},
  tppubtype = {article}
}
2019
Massimo Torquati, Daniele De Sensi, Gabriele Mencagli, Marco Aldinucci, Marco Danelutto
Power-Aware Pipelining with Automatic Concurrency Control Journal Article
In: Concurrency and Computation: Practice and Experience, vol. 31, no. 5, 2019.
Abstract | Links | BibTeX | Tags: rephrase
@article{18:dynqueue:ccpe,
  title     = {Power-Aware Pipelining with Automatic Concurrency Control},
  author    = {Torquati, Massimo and De Sensi, Daniele and Mencagli, Gabriele and Aldinucci, Marco and Danelutto, Marco},
  url       = {https://iris.unito.it/retrieve/handle/2318/1668445/414282/2018_CCPE.pdf},
  doi       = {10.1002/cpe.4652},
  year      = {2019},
  date      = {2019-01-01},
  journal   = {Concurrency and Computation: Practice and Experience},
  volume    = {31},
  number    = {5},
  abstract  = {Continuous streaming computations are usually composed of different modules, exchanging data through shared message queues. The selection of the algorithm used to access such queues (i.e. the concurrency control) is a critical aspect both for performance and power consumption. In this paper we describe the design of automatic concurrency control algorithm for implementing power-efficient communications on shared-memory multicores. The algorithm automatically switches between nonblocking and blocking concurrency protocols, getting the best from the two worlds, i.e. obtaining the same throughput offered by the nonblocking implementation and the same power efficiency of the blocking concurrency protocol. We demonstrate the effectiveness of our approach using two micro-benchmarks and two real streaming applications.},
  keywords  = {rephrase},
  pubstate  = {published},
  tppubtype = {article}
}
Marco Danelutto, Tiziano De Matteis, Daniele De Sensi, Gabriele Mencagli, Massimo Torquati, Marco Aldinucci, Peter Kilpatrick
The RePhrase Extended Pattern Set for Data Intensive Parallel Computing Journal Article
In: International Journal of Parallel Programming, vol. 47, no. 1, pp. 74–93, 2019.
Abstract | Links | BibTeX | Tags: fastflow, rephrase
@article{17:rephrasepatterns:ijpp,
  title     = {The {RePhrase} Extended Pattern Set for Data Intensive Parallel Computing},
  author    = {Danelutto, Marco and De Matteis, Tiziano and De Sensi, Daniele and Mencagli, Gabriele and Torquati, Massimo and Aldinucci, Marco and Kilpatrick, Peter},
  url       = {https://iris.unito.it/retrieve/handle/2318/1659336/387667/2017_ijpp_rephrase.pdf},
  doi       = {10.1007/s10766-017-0540-z},
  year      = {2019},
  date      = {2019-01-01},
  journal   = {International Journal of Parallel Programming},
  volume    = {47},
  number    = {1},
  pages     = {74--93},
  abstract  = {We discuss the extended parallel pattern set identified within the EU-funded project RePhrase as a candidate pattern set to support data intensive applications targeting heterogeneous architectures. The set has been designed to include three classes of pattern, namely i) core patterns, modelling common, not necessarily data intensive parallelism exploitation patterns, usually to be used in composition; ii) high level patterns, modelling common, complex and complete parallelism exploitation patterns; and iii) building block patterns, modelling the single components of data intensive applications, suitable for use–in composition–to implement patterns not covered by the core and high level patterns. We discuss the expressive power of the RePhrase extended pattern set and results illustrating the performances that may be achieved with the FastFlow implementation of the high level patterns.},
  keywords  = {fastflow, rephrase},
  pubstate  = {published},
  tppubtype = {article}
}
Massimo Torquati, Gabriele Mencagli, Maurizio Drocco, Marco Aldinucci, Tiziano De Matteis, Marco Danelutto
On Dynamic Memory Allocation in Sliding-Window Parallel Patterns for Streaming Analytics Journal Article
In: The Journal of Supercomputing, vol. 75, no. 8, pp. 4114–4131, 2019.
Abstract | Links | BibTeX | Tags: fastflow, rephrase
@article{17:dmadasp:jsupe,
  title     = {On Dynamic Memory Allocation in Sliding-Window Parallel Patterns for Streaming Analytics},
  author    = {Torquati, Massimo and Mencagli, Gabriele and Drocco, Maurizio and Aldinucci, Marco and De Matteis, Tiziano and Danelutto, Marco},
  url       = {https://iris.unito.it/retrieve/handle/2318/1648626/362381/17_torquati_jsc.pdf},
  doi       = {10.1007/s11227-017-2152-1},
  year      = {2019},
  date      = {2019-01-01},
  journal   = {The Journal of Supercomputing},
  volume    = {75},
  number    = {8},
  pages     = {4114--4131},
  abstract  = {This work studies the issues related to dynamic memory management in Data Stream Processing, an emerging paradigm enabling the real-time processing of live data streams. In this paper we consider two streaming parallel patterns and we discuss different implementation variants related on how dynamic memory is managed. The results show that the standard mechanisms provided by modern C++ are not entirely adequate for maximizing the performance. Instead, the combined use of an efficient general-purpose memory allocator, a custom allocator optimized for the pattern considered and a custom variant of the C++ shared pointer mechanism, provides a performance improvement up to 16% on the best case.},
  keywords  = {fastflow, rephrase},
  pubstate  = {published},
  tppubtype = {article}
}
2018
Claudia Misale, Maurizio Drocco, Guy Tremblay, Marco Aldinucci
PiCo: a Novel Approach to Stream Data Analytics Proceedings Article
In: Proc. of Euro-Par Workshops: 1st Intl. Workshop on Autonomic Solutions for Parallel and Distributed Data Stream Processing (Auto-DaSP 2017), Springer, Santiago de Compostela, Spain, 2018.
Abstract | Links | BibTeX | Tags: rephrase, toreador
@inproceedings{pico:autodasp:17,
  title     = {{PiCo}: a Novel Approach to Stream Data Analytics},
  author    = {Misale, Claudia and Drocco, Maurizio and Tremblay, Guy and Aldinucci, Marco},
  url       = {https://iris.unito.it/retrieve/handle/2318/1659344/409520/autodasp.pdf},
  doi       = {10.1007/978-3-319-75178-8_10},
  year      = {2018},
  date      = {2018-08-01},
  booktitle = {Proc. of Euro-Par Workshops: 1st Intl. Workshop on Autonomic Solutions for Parallel and Distributed Data Stream Processing (Auto-DaSP 2017)},
  volume    = {10659},
  publisher = {Springer},
  address   = {Santiago de Compostela, Spain},
  series    = {LNCS},
  abstract  = {In this paper, we present a new C++ API with a fluent interface called PiCo (Pipeline Composition). PiCo's programming model aims at making easier the programming of data analytics applications while preserving or enhancing their performance. This is attained through three key design choices: 1) unifying batch and stream data access models, 2) decoupling processing from data layout, and 3) exploiting a stream-oriented, scalable, efficient C++11 runtime system. PiCo proposes a programming model based on pipelines and operators that are polymorphic with respect to data types in the sense that it is possible to re-use the same algorithms and pipelines on different data models (e.g., streams, lists, sets, etc.). Preliminary results show that PiCo can attain better performances in terms of execution times and hugely improve memory utilization when compared to Spark and Flink in both batch and stream processing.},
  keywords  = {rephrase, toreador},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Gabriele Mencagli, Massimo Torquati, Fabio Lucattini, Salvatore Cuomo, Marco Aldinucci
Harnessing sliding-window execution semantics for parallel stream processing Journal Article
In: Journal of Parallel and Distributed Computing, vol. 116, pp. 74–88, 2018, ISSN: 0743-7315.
Abstract | Links | BibTeX | Tags: fastflow, rephrase
@article{17:slidingwindows:jpdc,
  title     = {Harnessing sliding-window execution semantics for parallel stream processing},
  author    = {Mencagli, Gabriele and Torquati, Massimo and Lucattini, Fabio and Cuomo, Salvatore and Aldinucci, Marco},
  url       = {https://iris.unito.it/retrieve/e27ce42c-1381-2581-e053-d805fe0acbaa/preprint-jpdc-2017.pdf},
  doi       = {10.1016/j.jpdc.2017.10.021},
  issn      = {0743-7315},
  year      = {2018},
  date      = {2018-06-01},
  journal   = {Journal of Parallel and Distributed Computing},
  volume    = {116},
  pages     = {74--88},
  abstract  = {According to the recent trend in data acquisition and processing technology, big data are increasingly available in the form of unbounded streams of elementary data items to be processed in real-time. In this paper we study in detail the paradigm of sliding windows, a well-known technique for approximated queries that update their results continuously as new fresh data arrive from the stream. In this work we focus on the relationship between the various existing sliding window semantics and the way the query processing is performed from the parallelism perspective. From this study two alternative parallel models are identified, each covering semantics with very precise properties. Each model is described in terms of its pros and cons, and parallel implementations in the FastFlow framework are analyzed by discussing the layout of the concurrent data structures used for the efficient windows representation in each model.},
  keywords  = {fastflow, rephrase},
  pubstate  = {published},
  tppubtype = {article}
}
Marco Aldinucci, Sergio Rabellino, Marco Pironti, Filippo Spiga, Paolo Viviani, Maurizio Drocco, Marco Guerzoni, Guido Boella, Marco Mellia, Paolo Margara, Idillio Drago, Roberto Marturano, Guido Marchetto, Elio Piccolo, Stefano Bagnasco, Stefano Lusso, Sara Vallero, Giuseppe Attardi, Alex Barchiesi, Alberto Colla, Fulvio Galeazzi
HPC4AI, an AI-on-demand federated platform endeavour Proceedings Article
In: ACM Computing Frontiers, Ischia, Italy, 2018.
Abstract | Links | BibTeX | Tags: hpc4ai, rephrase, toreador
@inproceedings{18:hpc4ai_acm_CF,
  title     = {{HPC4AI}, an {AI}-on-demand federated platform endeavour},
  author    = {Aldinucci, Marco and Rabellino, Sergio and Pironti, Marco and Spiga, Filippo and Viviani, Paolo and Drocco, Maurizio and Guerzoni, Marco and Boella, Guido and Mellia, Marco and Margara, Paolo and Drago, Idillio and Marturano, Roberto and Marchetto, Guido and Piccolo, Elio and Bagnasco, Stefano and Lusso, Stefano and Vallero, Sara and Attardi, Giuseppe and Barchiesi, Alex and Colla, Alberto and Galeazzi, Fulvio},
  url       = {https://iris.unito.it/retrieve/handle/2318/1765596/689772/2018_hpc4ai_ACM_CF.pdf},
  doi       = {10.1145/3203217.3205340},
  year      = {2018},
  date      = {2018-05-01},
  booktitle = {ACM Computing Frontiers},
  address   = {Ischia, Italy},
  abstract  = {In April 2018, under the auspices of the POR-FESR 2014-2020 program of Italian Piedmont Region, the Turin's Centre on High-Performance Computing for Artificial Intelligence (HPC4AI) was funded with a capital investment of 4.5M€ and it began its deployment. HPC4AI aims to facilitate scientific research and engineering in the areas of Artificial Intelligence and Big Data Analytics. HPC4AI will specifically focus on methods for the on-demand provisioning of AI and BDA Cloud services to the regional and national industrial community, which includes the large regional ecosystem of Small-Medium Enterprises (SMEs) active in many different sectors such as automotive, aerospace, mechatronics, manufacturing, health and agrifood.},
  keywords  = {hpc4ai, rephrase, toreador},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Fabio Tordini, Marco Aldinucci, Paolo Viviani, Ivan Merelli, Pietro Liò
Scientific Workflows on Clouds with Heterogeneous and Preemptible Instances Proceedings Article
In: Proc. of the Intl. Conference on Parallel Computing, ParCo 2017, 12-15 September 2017, Bologna, Italy, IOS Press, 2018.
Abstract | Links | BibTeX | Tags: rephrase
@inproceedings{18:parco:workflow,
  title     = {Scientific Workflows on Clouds with Heterogeneous and Preemptible Instances},
  author    = {Tordini, Fabio and Aldinucci, Marco and Viviani, Paolo and Merelli, Ivan and Liò, Pietro},
  url       = {https://iris.unito.it/retrieve/handle/2318/1658510/385411/main.pdf},
  doi       = {10.3233/978-1-61499-843-3-605},
  year      = {2018},
  date      = {2018-01-01},
  booktitle = {Proc. of the Intl. Conference on Parallel Computing, ParCo 2017, 12-15 September 2017, Bologna, Italy},
  publisher = {IOS Press},
  series    = {Advances in Parallel Computing},
  abstract  = {The cloud environment is increasingly appealing for the HPC community, which has always dealt with scientific applications. However, there is still some skepticism about moving from traditional physical infrastructures to virtual HPC clusters. This mistrusting probably originates from some well known factors, including the effective economy of using cloud services, data and software availability, and the longstanding matter of data stewardship. In this work we discuss the design of a framework (based on Mesos) aimed at achieving a cost-effective and efficient usage of heterogeneous Processing Elements (PEs) for workflow execution, which supports hybrid cloud bursting over preemptible cloud Virtual Machines.},
  keywords  = {rephrase},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Paolo Viviani, Marco Aldinucci, Roberto d'Ippolito, Jan Lemeire, Dean Vucinic
A Flexible Numerical Framework for Engineering—A Response Surface Modelling Application Book Chapter
In: Improved Performance of Materials: Design and Experimental Approaches, pp. 93–106, Springer International Publishing, Cham, 2018, ISBN: 978-3-319-59590-0.
Abstract | Links | BibTeX | Tags: repara, rephrase
@inbook{17:viviani:advstruct,
  title     = {A Flexible Numerical Framework for Engineering—A Response Surface Modelling Application},
  author    = {Viviani, Paolo and Aldinucci, Marco and d'Ippolito, Roberto and Lemeire, Jan and Vucinic, Dean},
  doi       = {10.1007/978-3-319-59590-0_9},
  isbn      = {978-3-319-59590-0},
  year      = {2018},
  date      = {2018-01-01},
  booktitle = {Improved Performance of Materials: Design and Experimental Approaches},
  pages     = {93--106},
  publisher = {Springer International Publishing},
  address   = {Cham},
  abstract  = {This work presents an innovative approach adopted for the development of a new numerical software framework for accelerating dense linear algebra calculations and its application within an engineering context. In particular, response surface models (RSM) are a key tool to reduce the computational effort involved in engineering design processes like design optimization. However, RSMs may prove to be too expensive to be computed when the dimensionality of the system and/or the size of the dataset to be synthesized is significantly high or when a large number of different response surfaces has to be calculated in order to improve the overall accuracy (e.g. like when using ensemble modelling techniques). On the other hand, the potential of modern hybrid hardware (e.g. multicore, GPUs) is not exploited by current engineering tools, while they can lead to a significant performance improvement. To fill this gap, a software framework is being developed that enables the hybrid and scalable acceleration of the linear algebra core for engineering applications and especially of RSMs calculations with a user-friendly syntax that allows good portability between different hardware architectures, with no need of specific expertise in parallel programming and accelerator technology. The effectiveness of this framework is shown by comparing an accelerated code to a single-core calculation of a radial basis function RSM on some benchmark datasets. This approach is then validated within a real-life engineering application and the achievements are presented and discussed.},
  keywords  = {repara, rephrase},
  pubstate  = {published},
  tppubtype = {inbook}
}
Marco Aldinucci, Marco Danelutto, Maurizio Drocco, Peter Kilpatrick, Claudia Misale, Guilherme Peretti Pezzi, Massimo Torquati
A Parallel Pattern for Iterative Stencil + Reduce Journal Article
In: Journal of Supercomputing, vol. 74, no. 11, pp. 5690–5705, 2018.
Abstract | Links | BibTeX | Tags: HPC, repara, rephrase
@article{16:stencilreduce:jsupe,
  title     = {A Parallel Pattern for Iterative Stencil + Reduce},
  author    = {Aldinucci, Marco and Danelutto, Marco and Drocco, Maurizio and Kilpatrick, Peter and Misale, Claudia and Peretti Pezzi, Guilherme and Torquati, Massimo},
  url       = {https://iris.unito.it/retrieve/0716fc42-53d7-48c0-9469-697aabfe7759/jspaper.pdf},
  doi       = {10.1007/s11227-016-1871-z},
  year      = {2018},
  date      = {2018-01-01},
  journal   = {Journal of Supercomputing},
  volume    = {74},
  number    = {11},
  pages     = {5690--5705},
  abstract  = {We advocate the Loop-of-stencil-reduce pattern as a means of simplifying the implementation of data-parallel programs on heterogeneous multi-core platforms. Loop-of-stencil-reduce is general enough to subsume map, reduce, map-reduce, stencil, stencil-reduce, and, crucially, their usage in a loop in both data-parallel and streaming applications, or a combination of both. The pattern makes it possible to deploy a single stencil computation kernel on different GPUs. We discuss the implementation of Loop-of-stencil-reduce in FastFlow, a framework for the implementation of applications based on the parallel patterns. Experiments are presented to illustrate the use of Loop-of-stencil-reduce in developing data-parallel kernels running on heterogeneous systems.},
  keywords  = {HPC, repara, rephrase},
  pubstate  = {published},
  tppubtype = {article}
}
2017
Maurizio Drocco
Parallel Programming with Global Asynchronous Memory: Models, C++ APIs and Implementations PhD Thesis
Computer Science Department, University of Torino, 2017.
Abstract | Links | BibTeX | Tags: fastflow, paraphrase, repara, rephrase, toreador
@phdthesis{17:gam:drocco:thesis,
  title     = {Parallel Programming with Global Asynchronous Memory: Models, C++ APIs and Implementations},
  author    = {Drocco, Maurizio},
  url       = {https://zenodo.org/record/1037585/files/Drocco_phd_thesis.pdf},
  doi       = {10.5281/zenodo.1037585},
  year      = {2017},
  date      = {2017-10-01},
  school    = {Computer Science Department, University of Torino},
  abstract  = {In the realm of High Performance Computing (HPC), message passing has been the programming paradigm of choice for over twenty years. The durable MPI (Message Passing Interface) standard, with send/receive communication, broadcast, gather/scatter, and reduction collectives is still used to construct parallel programs where each communication is orchestrated by the developer, based on precise knowledge of data distribution and overheads; collective communications simplify the orchestration but might induce excessive synchronization. Early attempts to bring shared-memory programming model—with its programming advantages—to distributed computing, referred as the Distributed Shared Memory (DSM) model, faded away; one of the main issue was to combine performance and programmability with the memory consistency model. The recently proposed Partitioned Global Address Space (PGAS) model is a modern revamp of DSM that exposes data placement to enable optimizations based on locality, but it still addresses (simple) data-parallelism only and it relies on expensive sharing protocols. We advocate an alternative programming model for distributed computing based on a Global Asynchronous Memory (GAM), aiming to avoid coherency and consistency problems rather than solving them. We materialize GAM by designing and implementing a distributed smart pointers library, inspired by C++ smart pointers. In this model, public and private pointers (resembling C++ shared and unique pointers, respectively) are moved around instead of messages (i.e., data), thus alleviating the user from the burden of minimizing transfers. On top of smart pointers, we propose a high-level C++ template library for writing applications in terms of dataflow-like networks, namely GAM nets, consisting of stateful processors exchanging pointers in fully asynchronous fashion.
We demonstrate the validity of the proposed approach, from the expressiveness perspective, by showing how GAM nets can be exploited to implement higher-level parallel programming models, such as data and task parallelism. As for the performance perspective, the execution of two non-toy benchmarks on a number of different small-scale HPC clusters exhibits both close-to-ideal scalability and negligible overhead with respect to state-of-the-art benchmark implementations. For instance, the GAM implementation of a high-quality video restoration filter sustains a 100 fps throughput over 70%-noisy high-quality video streams on a 4-node cluster of Graphics Processing Units (GPUs), with minimal programming effort.},
  keywords  = {fastflow, paraphrase, repara, rephrase, toreador},
  pubstate  = {published},
  tppubtype = {phdthesis}
}
Paula Severi, Luca Padovani, Emilio Tuosto, Mariangiola Dezani-Ciancaglini
On Sessions and Infinite Data Journal Article
In: Logical Methods in Computer Science, vol. Volume 13, Issue 2, 2017.
Links | BibTeX | Tags: rephrase, semantics
@article{lmcs:3725,
  title     = {On Sessions and Infinite Data},
  author    = {Severi, Paula and Padovani, Luca and Tuosto, Emilio and Dezani-Ciancaglini, Mariangiola},
  url       = {http://lmcs.episciences.org/3725},
  doi       = {10.23638/LMCS-13(2:9)2017},
  year      = {2017},
  date      = {2017-06-01},
  journal   = {Logical Methods in Computer Science},
  volume    = {13},
  number    = {2},
  keywords  = {rephrase, semantics},
  pubstate  = {published},
  tppubtype = {article}
}
Maurizio Drocco, Claudia Misale, Guy Tremblay, Marco Aldinucci
A Formal Semantics for Data Analytics Pipelines Technical Report
Computer Science Department, University of Torino, 2017, (https://arxiv.org/abs/1705.01629).
Links | BibTeX | Tags: rephrase, toreador
@techreport{17:drocco:techreport,
  title       = {A Formal Semantics for Data Analytics Pipelines},
  author      = {Drocco, Maurizio and Misale, Claudia and Tremblay, Guy and Aldinucci, Marco},
  url         = {https://doi.org/10.5281/zenodo.571802},
  doi         = {10.5281/zenodo.571802},
  year        = {2017},
  date        = {2017-05-01},
  institution = {Computer Science Department, University of Torino},
  note        = {https://arxiv.org/abs/1705.01629},
  keywords    = {rephrase, toreador},
  pubstate    = {published},
  tppubtype   = {techreport}
}
Claudia Misale
PiCo: A Domain-Specific Language for Data Analytics Pipelines PhD Thesis
Computer Science Department, University of Torino, 2017.
Abstract | Links | BibTeX | Tags: fastflow, paraphrase, repara, rephrase, toreador
@phdthesis{17:pico:misale:thesis,
  title     = {{PiCo}: A Domain-Specific Language for Data Analytics Pipelines},
  author    = {Misale, Claudia},
  url       = {https://iris.unito.it/retrieve/handle/2318/1633743/320170/Misale_thesis.pdf},
  doi       = {10.5281/zenodo.579753},
  year      = {2017},
  date      = {2017-05-01},
  school    = {Computer Science Department, University of Torino},
  abstract  = {In the world of Big Data analytics, there is a series of tools aiming at simplifying programming applications to be executed on clusters. Although each tool claims to provide better programming, data and execution models—for which only informal (and often confusing) semantics is generally provided—all share a common under- lying model, namely, the Dataflow model. Using this model as a starting point, it is possible to categorize and analyze almost all aspects about Big Data analytics tools from a high level perspective. This analysis can be considered as a first step toward a formal model to be exploited in the design of a (new) framework for Big Data analytics. By putting clear separations between all levels of abstraction (i.e., from the runtime to the user API), it is easier for a programmer or software designer to avoid mixing low level with high level aspects, as we are often used to see in state-of-the-art Big Data analytics frameworks.
From the user-level perspective, we think that a clearer and simple semantics is preferable, together with a strong separation of concerns. For this reason, we use the Dataflow model as a starting point to build a programming environment with a simplified programming model implemented as a Domain-Specific Language, that is on top of a stack of layers that build a prototypical framework for Big Data analytics.
The contribution of this thesis is twofold: first, we show that the proposed model is (at least) as general as existing batch and streaming frameworks (e.g., Spark, Flink, Storm, Google Dataflow), thus making it easier to understand high-level data-processing applications written in such frameworks. As result of this analysis, we provide a layered model that can represent tools and applications following the Dataflow paradigm and we show how the analyzed tools fit in each level.
Second, we propose a programming environment based on such layered model in the form of a Domain-Specific Language (DSL) for processing data collections, called PiCo (Pipeline Composition). The main entity of this programming model is the Pipeline, basically a DAG-composition of processing elements. This model is intended to give the user an unique interface for both stream and batch processing, hiding completely data management and focusing only on operations, which are represented by Pipeline stages. Our DSL will be built on top of the FastFlow library, exploiting both shared and distributed parallelism, and implemented in C++11/14 with the aim of porting C++ into the Big Data world.},
  keywords  = {fastflow, paraphrase, repara, rephrase, toreador},
  pubstate  = {published},
  tppubtype = {phdthesis}
}
From the user-level perspective, we think that a clearer and simple semantics is preferable, together with a strong separation of concerns. For this reason, we use the Dataflow model as a starting point to build a programming environment with a simplified programming model implemented as a Domain-Specific Language, that is on top of a stack of layers that build a prototypical framework for Big Data analytics.
The contribution of this thesis is twofold: first, we show that the proposed model is (at least) as general as existing batch and streaming frameworks (e.g., Spark, Flink, Storm, Google Dataflow), thus making it easier to understand high-level data-processing applications written in such frameworks. As result of this analysis, we provide a layered model that can represent tools and applications following the Dataflow paradigm and we show how the analyzed tools fit in each level.
Second, we propose a programming environment based on such layered model in the form of a Domain-Specific Language (DSL) for processing data collections, called PiCo (Pipeline Composition). The main entity of this programming model is the Pipeline, basically a DAG-composition of processing elements. This model is intended to give the user an unique interface for both stream and batch processing, hiding completely data management and focusing only on operations, which are represented by Pipeline stages. Our DSL will be built on top of the FastFlow library, exploiting both shared and distributed parallelism, and implemented in C++11/14 with the aim of porting C++ into the Big Data world.
Paolo Viviani, Massimo Torquati, Marco Aldinucci, Roberto d'Ippolito
Multiple back-end support for the Armadillo linear algebra interface Proceedings Article
In: In proc. of the 32nd ACM Symposium on Applied Computing (SAC), pp. 1566–1573, Marrakesh, Morocco, 2017.
Abstract | Links | BibTeX | Tags: HPC, repara, rephrase
@inproceedings{17:sac:armadillo,
  title     = {Multiple back-end support for the {Armadillo} linear algebra interface},
  author    = {Viviani, Paolo and Torquati, Massimo and Aldinucci, Marco and d'Ippolito, Roberto},
  url       = {https://iris.unito.it/retrieve/handle/2318/1626229/299089/armadillo_4aperto.pdf},
  year      = {2017},
  date      = {2017-04-01},
  booktitle = {Proc. of the 32nd {ACM} Symposium on Applied Computing (SAC)},
  pages     = {1566--1573},
  address   = {Marrakesh, Morocco},
  abstract  = {The Armadillo C++ library provides programmers with a high-level Matlab-like syntax for linear algebra. Its design aims at providing a good balance between speed and ease of use. It can be linked with different back-ends, i.e. different LAPACK-compliant libraries. In this work we present a novel run-time support of Armadillo, which gracefully extends mainstream implementation to enable back-end switching without recompilation and multiple back-end support. The extension is specifically designed to not affect Armadillo class template prototypes, thus to be easily interoperable with future evolutions of the Armadillo library itself. The proposed software stack is then tested for functionality and performance against a kernel code extracted from an industrial application.},
  keywords  = {HPC, repara, rephrase},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Marco Aldinucci, Marco Danelutto, Daniele De Sensi, Gabriele Mencagli, Massimo Torquati
Towards Power-Aware Data Pipelining on Multicores Proceedings Article
In: Proceedings of the 10th International Symposium on High-Level Parallel Programming and Applications, Valladolid, Spain, 2017.
Abstract | Links | BibTeX | Tags: fastflow, rephrase
@inproceedings{17:hlpp:powerstream,
  title     = {Towards Power-Aware Data Pipelining on Multicores},
  author    = {Aldinucci, Marco and Danelutto, Marco and De Sensi, Daniele and Mencagli, Gabriele and Torquati, Massimo},
  url       = {https://iris.unito.it/retrieve/handle/2318/1644982/351415/17_HLPP_powerstream.pdf},
  year      = {2017},
  date      = {2017-01-01},
  booktitle = {Proceedings of the 10th International Symposium on High-Level Parallel Programming and Applications},
  address   = {Valladolid, Spain},
  abstract  = {Power consumption management has become a major concern in software development. Continuous streaming computations are usually composed by different modules, exchanging data through shared message queues. The selection of the algorithm used to access such queues (i.e., the concurrency control) is a critical aspect for both performance and power consumption. In this paper, we describe the design of an adaptive concurrency control algorithm for implementing power-efficient communications on shared memory multicores. The algorithm provides the throughput offered by a nonblocking implementation and the power efficiency of a blocking protocol. We demonstrate that our algorithm reduces the power consumption of data streaming computations without decreasing their throughput.},
  keywords  = {fastflow, rephrase},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Claudia Misale, Maurizio Drocco, Marco Aldinucci, Guy Tremblay
A Comparison of Big Data Frameworks on a Layered Dataflow Model Journal Article
In: Parallel Processing Letters, vol. 27, no. 01, pp. 1–20, 2017.
Abstract | Links | BibTeX | Tags: rephrase, toreador
@article{17:bigdatasurvey:PPL,
title = {A Comparison of {Big Data} Frameworks on a Layered {Dataflow} Model},
author = {Claudia Misale and Maurizio Drocco and Marco Aldinucci and Guy Tremblay},
url = {https://iris.unito.it/retrieve/handle/2318/1626287/303421/preprintPPL_4aperto.pdf},
doi = {10.1142/S0129626417400035},
year = {2017},
date = {2017-01-01},
journal = {Parallel Processing Letters},
volume = {27},
number = {1},
pages = {1--20},
abstract = {In the world of Big Data analytics, there is a series of tools aiming at simplifying programming applications to be executed on clusters. Although each tool claims to provide better programming, data and execution models, for which only informal (and often confusing) semantics is generally provided, all share a common underlying model, namely, the Dataflow model. The Dataflow model we propose shows how various tools share the same expressiveness at different levels of abstraction. The contribution of this work is twofold: first, we show that the proposed model is (at least) as general as existing batch and streaming frameworks (e.g., Spark, Flink, Storm), thus making it easier to understand high-level data-processing applications written in such frameworks. Second, we provide a layered model that can represent tools and applications following the Dataflow paradigm and we show how the analyzed tools fit in each level.},
keywords = {rephrase, toreador},
pubstate = {published},
tppubtype = {article}
}
Fabio Tordini, Maurizio Drocco, Claudia Misale, Luciano Milanesi, Pietro Liò, Ivan Merelli, Massimo Torquati, Marco Aldinucci
NuChart-II: the road to a fast and scalable tool for Hi-C data analysis Journal Article
In: International Journal of High Performance Computing Applications, vol. 31, no. 3, pp. 196–211, 2017.
Abstract | Links | BibTeX | Tags: bioinformatics, fastflow, repara, rephrase
@article{16:ijhpca:nuchart,
title = {{NuChart-II}: the road to a fast and scalable tool for {Hi-C} data analysis},
author = {Fabio Tordini and Maurizio Drocco and Claudia Misale and Luciano Milanesi and Pietro Liò and Ivan Merelli and Massimo Torquati and Marco Aldinucci},
url = {https://iris.unito.it/retrieve/handle/2318/1607126/238747/main.pdf},
doi = {10.1177/1094342016668567},
year = {2017},
date = {2017-01-01},
journal = {International Journal of High Performance Computing Applications},
volume = {31},
number = {3},
pages = {196--211},
abstract = {Recent advances in molecular biology and bioinformatics techniques brought to an explosion of the information about the spatial organisation of the DNA in the nucleus of a cell. High-throughput molecular biology techniques provide a genome-wide capture of the spatial organization of chromosomes at unprecedented scales, which permit to identify physical interactions between genetic elements located throughout a genome. Recent results have shown that there is a large correlation between co-localization and co-regulation of genes, but these important information are hampered by the lack of biologists-friendly analysis and visualisation software. In this work we present NuChart-II, an efficient and highly optimized tool for genomic data analysis that provides a gene-centric, graph-based representation of genomic information. While designing NuChart-II we addressed several common issues in the parallelisation of memory bound algorithms for shared-memory systems. With performance and usability in mind, NuChart-II is a R package that embeds a C++ engine: computing capabilities and memory hierarchy of multi-core architectures are fully exploited, while the versatile R environment for statistical analysis and data visualisation rises the level of abstraction and permits to orchestrate analysis and visualisation of genomic data.},
keywords = {bioinformatics, fastflow, repara, rephrase},
pubstate = {published},
tppubtype = {article}
}
Mario Coppo, Mariangiola Dezani-Ciancaglini, Alejandro Díaz-Caro, Ines Margaria, Maddalena Zacchi
Retractions in Intersection Types Proceedings Article
In: Kobayashi, Naoki (Ed.): ITRS'16, pp. 31–47, 2017.
Links | BibTeX | Tags: rephrase, semantics
@inproceedings{CDMZ16,
title = {Retractions in Intersection Types},
author = {Mario Coppo and Mariangiola Dezani-Ciancaglini and Alejandro Díaz-Caro and Ines Margaria and Maddalena Zacchi},
editor = {Naoki Kobayashi},
url = {http://www.di.unito.it/~dezani/papers/cddmz.pdf},
doi = {10.4204/EPTCS.242.5},
year = {2017},
date = {2017-01-01},
booktitle = {ITRS'16},
volume = {242},
pages = {31--47},
series = {EPTCS},
keywords = {rephrase, semantics},
pubstate = {published},
tppubtype = {inproceedings}
}
2016
Claudia Misale, Maurizio Drocco, Marco Aldinucci, Guy Tremblay
A Comparison of Big Data Frameworks on a Layered Dataflow Model Proceedings Article
In: Proc. of Intl. Workshop on High-Level Parallel Programming (HLPP), pp. 1–19, arXiv.org, Muenster, Germany, 2016.
Abstract | Links | BibTeX | Tags: rephrase, toreador
@inproceedings{16:bigdatasurvey:hlpp,
title = {A Comparison of {Big Data} Frameworks on a Layered {Dataflow} Model},
author = {Claudia Misale and Maurizio Drocco and Marco Aldinucci and Guy Tremblay},
url = {http://arxiv.org/pdf/1606.05293v1.pdf},
doi = {10.5281/zenodo.321866},
year = {2016},
date = {2016-07-01},
booktitle = {Proc. of Intl. Workshop on High-Level Parallel Programming (HLPP)},
pages = {1--19},
publisher = {arXiv.org},
address = {Muenster, Germany},
abstract = {In the world of Big Data analytics, there is a series of tools aiming at simplifying programming applications to be executed on clusters. Although each tool claims to provide better programming, data and execution models, for which only informal (and often confusing) semantics is generally provided, all share a common underlying model, namely, the Dataflow model. The Dataflow model we propose shows how various tools share the same expressiveness at different levels of abstraction. The contribution of this work is twofold: first, we show that the proposed model is (at least) as general as existing batch and streaming frameworks (e.g., Spark, Flink, Storm), thus making it easier to understand high-level data-processing applications written in such frameworks. Second, we provide a layered model that can represent tools and applications following the Dataflow paradigm and we show how the analyzed tools fit in each level.},
keywords = {rephrase, toreador},
pubstate = {published},
tppubtype = {inproceedings}
}
Paolo Viviani, Marco Aldinucci, Roberto d'Ippolito, Jean Lemeire, Dean Vucinic
A flexible numerical framework for engineering - a Response Surface Modelling application Unpublished
2016.
Abstract | BibTeX | Tags: HPC, repara, rephrase
@unpublished{16:acex:armadillo,
title = {A flexible numerical framework for engineering - a Response Surface Modelling application},
author = {Paolo Viviani and Marco Aldinucci and Roberto d'Ippolito and Jean Lemeire and Dean Vucinic},
year = {2016},
date = {2016-01-01},
booktitle = {10th Intl. Conference on Advanced Computational Engineering and Experimenting (ACE-X)},
note = {10th Intl. Conference on Advanced Computational Engineering and Experimenting (ACE-X)},
abstract = {This work presents the innovative approach adopted for the development of a new numerical software framework for accelerating Dense Linear Algebra calculations and its application within an engineering context. In particular, Response Surface Models (RSM) are a key tool to reduce the computational effort involved in engineering design processes like design optimization. However, RSMs may prove to be too expensive to be computed when the dimensionality of the system and/or the size of the dataset to be synthesized is significantly high or when a large number of different Response Surfaces has to be calculated in order to improve the overall accuracy (e.g. like when using Ensemble Modelling techniques). On the other hand, it is a known challenge that the potential of modern hybrid hardware (e.g. multicore, GPUs) is not exploited by current engineering tools, while they can lead to a significant performance improvement. To fill this gap, a software framework is being developed that enables the hybrid and scalable acceleration of the linear algebra core for engineering applications and especially of RSMs calculations with a user-friendly syntax that allows good portability between different hardware architectures, with no need of specific expertise in parallel programming and accelerator technology. The effectiveness of this framework is shown by comparing an accelerated code to a single-core calculation of a Radial Basis Function RSM on some benchmark datasets. This approach is then validated within a real-life engineering application and the achievements are presented and discussed.},
keywords = {HPC, repara, rephrase},
pubstate = {published},
tppubtype = {unpublished}
}
Maurizio Drocco, Claudia Misale, Marco Aldinucci
A Cluster-As-Accelerator approach for SPMD-free Data Parallelism Proceedings Article
In: Proc. of 24th Euromicro Intl. Conference on Parallel Distributed and network-based Processing (PDP), pp. 350–353, IEEE, Crete, Greece, 2016.
Abstract | Links | BibTeX | Tags: fastflow, rephrase
@inproceedings{skedato:pdp:16,
title = {A Cluster-As-Accelerator approach for {SPMD}-free Data Parallelism},
author = {Maurizio Drocco and Claudia Misale and Marco Aldinucci},
url = {https://iris.unito.it/retrieve/handle/2318/1611858/262689/2016_pdp_skedato.pdf},
doi = {10.1109/PDP.2016.97},
year = {2016},
date = {2016-01-01},
booktitle = {Proc. of 24th Euromicro Intl. Conference on Parallel Distributed and network-based Processing (PDP)},
pages = {350--353},
publisher = {IEEE},
address = {Crete, Greece},
abstract = {In this paper we present a novel approach for functional-style programming of distributed-memory clusters, targeting data-centric applications. The programming model proposed is purely sequential, SPMD-free and based on high-level functional features introduced since C++11 specification. Additionally, we propose a novel cluster-as-accelerator design principle. In this scheme, cluster nodes act as general interpreters of user-defined functional tasks over node-local portions of distributed data structures. We envision coupling a simple yet powerful programming model with a lightweight, locality-aware distributed runtime as a promising step along the road towards high-performance data analytics, in particular under the perspective of the upcoming exascale era. We implemented the proposed approach in SkeDaTo, a prototyping C++ library of data-parallel skeletons exploiting cluster-as-accelerator at the bottom layer of the runtime software stack.},
keywords = {fastflow, rephrase},
pubstate = {published},
tppubtype = {inproceedings}
}
Vladimir Janjic, Christopher Brown, Kenneth MacKenzie, Kevin Hammond, Marco Danelutto, Marco Aldinucci, Jose Daniel Garcia
RPL: A Domain-Specific Language for Designing and Implementing Parallel C++ Applications Proceedings Article
In: Proc. of Intl. Euromicro PDP 2016: Parallel Distributed and network-based Processing, IEEE, Crete, Greece, 2016.
Abstract | Links | BibTeX | Tags: fastflow, rephrase
@inproceedings{rpl:pdp:16,
title = {{RPL}: A Domain-Specific Language for Designing and Implementing Parallel {C++} Applications},
author = {Vladimir Janjic and Christopher Brown and Kenneth MacKenzie and Kevin Hammond and Marco Danelutto and Marco Aldinucci and Jose Daniel Garcia},
url = {https://iris.unito.it/retrieve/handle/2318/1597172/299237/2016_jsupe_stencil_pp_4aperto.pdf},
doi = {10.1109/PDP.2016.122},
year = {2016},
date = {2016-01-01},
booktitle = {Proc. of Intl. Euromicro PDP 2016: Parallel Distributed and network-based Processing},
publisher = {IEEE},
address = {Crete, Greece},
abstract = {Parallelising sequential applications is usually a very hard job, due to many different ways in which an application can be parallelised and a large number of programming models (each with its own advantages and disadvantages) that can be used. In this paper, we describe a method to semi-automatically generate and evaluate different parallelisations of the same application, allowing programmers to find the best parallelisation without significant manual reengineering of the code. We describe a novel, high-level domain-specific language, Refactoring Pattern Language (RPL), that is used to represent the parallel structure of an application and to capture its extra-functional properties (such as service time). We then describe a set of RPL rewrite rules that can be used to generate alternative, but semantically equivalent, parallel structures (parallelisations) of the same application. We also describe the RPL Shell that can be used to evaluate these parallelisations, in terms of the desired extra-functional properties. Finally, we describe a set of C++ refactorings, targeting OpenMP, Intel TBB and FastFlow parallel programming models, that semi-automatically apply the desired parallelisation to the application's source code, therefore giving a parallel version of the code. We demonstrate how the RPL and the refactoring rules can be used to derive efficient parallelisations of two realistic C++ use cases (Image Convolution and Ant Colony Optimisation).},
keywords = {fastflow, rephrase},
pubstate = {published},
tppubtype = {inproceedings}
}
Fabio Tordini
A cloud solution for multi-omics data integration Proceedings Article
In: Proceedings of the 16th IEEE International Conference on Scalable Computing and Communication, pp. 559–566, IEEE Computer Society, 2016, (Best paper award).
Abstract | Links | BibTeX | Tags: bioinformatics, fastflow, rephrase
@inproceedings{16:scalcom:cloud,
title = {A cloud solution for multi-omics data integration},
author = {Fabio Tordini},
url = {http://calvados.di.unipi.it/storage/paper_files/2016_cloudpipeline_scalcom.pdf},
doi = {10.1109/UIC-ATC-ScalCom-CBDCom-IoP-SmartWorld.2016.131},
year = {2016},
date = {2016-01-01},
booktitle = {Proceedings of the 16th IEEE International Conference on Scalable Computing and Communication},
pages = {559--566},
publisher = {IEEE Computer Society},
abstract = {Recent advances in molecular biology and Bioinformatics techniques have brought to an explosion of the information about the spatial organisation of the DNA inside the nucleus. In particular, 3C-based techniques are revealing the genome folding for many different cell types, and permit to create a more effective representation of the disposition of genes in the three-dimensional space. This information can be used to re-interpret heterogeneous genomic data (multi-omic) relying on 3D maps of the chromosome. The storage and computational requirements needed to accomplish such operations on raw sequenced data have to be fulfilled using HPC solutions, and the Cloud paradigm is a valuable and convenient mean for delivering HPC to Bioinformatics. In this work we describe a data analysis work-flow that allows the integration and the interpretation of multi-omic data on a sort of ``topographical'' nuclear map, capable of representing the effective disposition of genes in a graph-based representation. We propose a cloud-based task farm pattern to orchestrate the services needed to accomplish genomic data analysis, where each service represents a special-purpose tool, playing a part in well known data analysis pipelines.},
note = {Best paper award},
keywords = {bioinformatics, fastflow, rephrase},
pubstate = {published},
tppubtype = {inproceedings}
}
Andrea Bracciali, Marco Aldinucci, Murray Patterson, Tobias Marschall, Nadia Pisanti, Ivan Merelli, Massimo Torquati
pWhatsHap: efficient haplotyping for future generation sequencing Journal Article
In: BMC Bioinformatics, vol. 17, no. Suppl 11, pp. 342, 2016.
Abstract | Links | BibTeX | Tags: fastflow, paraphrase, rephrase
@article{16:pwhatshap:bmc,
title = {{pWhatsHap}: efficient haplotyping for future generation sequencing},
author = {Andrea Bracciali and Marco Aldinucci and Murray Patterson and Tobias Marschall and Nadia Pisanti and Ivan Merelli and Massimo Torquati},
url = {http://bmcbioinformatics.biomedcentral.com/track/pdf/10.1186/s12859-016-1170-y?site=bmcbioinformatics.biomedcentral.com},
doi = {10.1186/s12859-016-1170-y},
year = {2016},
date = {2016-01-01},
journal = {BMC Bioinformatics},
volume = {17},
number = {Suppl 11},
pages = {342},
abstract = {Background: Haplotype phasing is an important problem in the analysis of genomics information. Given a set of DNA fragments of an individual, it consists of determining which one of the possible alleles (alternative forms of a gene) each fragment comes from. Haplotype information is relevant to gene regulation, epigenetics, genome-wide association studies, evolutionary and population studies, and the study of mutations. Haplotyping is currently addressed as an optimisation problem aiming at solutions that minimise, for instance, error correction costs, where costs are a measure of the confidence in the accuracy of the information acquired from DNA sequencing. Solutions have typically an exponential computational complexity. WhatsHap is a recent optimal approach which moves computational complexity from DNA fragment length to fragment overlap, i.e., coverage, and is hence of particular interest when considering sequencing technology's current trends that are producing longer fragments. Results: Given the potential relevance of efficient haplotyping in several analysis pipelines, we have designed and engineered pWhatsHap, a parallel, high-performance version of WhatsHap. pWhatsHap is embedded in a toolkit developed in Python and supports genomics datasets in standard file formats. Building on WhatsHap, pWhatsHap exhibits the same complexity exploring a number of possible solutions which is exponential in the coverage of the dataset. The parallel implementation on multi-core architectures allows for a relevant reduction of the execution time for haplotyping, while the provided results enjoy the same high accuracy as that provided by WhatsHap, which increases with coverage. Conclusions: Due to its structure and management of the large datasets, the parallelisation of WhatsHap posed demanding technical challenges, which have been addressed exploiting a high-level parallel programming framework. The result, pWhatsHap, is a freely available toolkit that improves the efficiency of the analysis of genomics information.},
keywords = {fastflow, paraphrase, rephrase},
pubstate = {published},
tppubtype = {article}
}
Ilaria Castellani, Mariangiola Dezani-Ciancaglini, Ugo Liguoro
Secure Multiparty Sessions with Topics Proceedings Article
In: PLACES'16, pp. 1–12, 2016.
Links | BibTeX | Tags: rephrase, semantics
@inproceedings{CDL16,
title = {Secure Multiparty Sessions with Topics},
author = {Ilaria Castellani and Mariangiola Dezani-Ciancaglini and Ugo Liguoro},
url = {http://www.di.unito.it/~dezani/papers/cdl16.pdf},
year = {2016},
date = {2016-01-01},
booktitle = {PLACES'16},
volume = {211},
pages = {1--12},
series = {EPTCS},
keywords = {rephrase, semantics},
pubstate = {published},
tppubtype = {inproceedings}
}
Ilaria Castellani, Mariangiola Dezani-Ciancaglini, Jorge A. Pérez
Self-Adaptation and Secure Information Flow in Multiparty Communications Journal Article
In: Formal Aspects of Computing, vol. 28, no. 4, pp. 669–696, 2016.
Links | BibTeX | Tags: rephrase, semantics
@article{CDP16,
title = {Self-Adaptation and Secure Information Flow in Multiparty Communications},
author = {Ilaria Castellani and Mariangiola Dezani-Ciancaglini and Jorge A. Pérez},
url = {http://www.di.unito.it/~dezani/papers/cdp16.pdf},
year = {2016},
date = {2016-01-01},
journal = {Formal Aspects of Computing},
volume = {28},
number = {4},
pages = {669--696},
publisher = {Springer},
keywords = {rephrase, semantics},
pubstate = {published},
tppubtype = {article}
}
Mario Coppo, Mariangiola Dezani-Ciancaglini, Betti Venneri
Parallel Monitors for Self-adaptive Sessions Proceedings Article
In: PLACES'16, pp. 25–36, 2016.
Links | BibTeX | Tags: rephrase, semantics
@inproceedings{CDV16,
title = {Parallel Monitors for Self-adaptive Sessions},
author = {Mario Coppo and Mariangiola Dezani-Ciancaglini and Betti Venneri},
url = {http://www.di.unito.it/~dezani/papers/cdv16.pdf},
year = {2016},
date = {2016-01-01},
booktitle = {PLACES'16},
volume = {211},
pages = {25--36},
series = {EPTCS},
keywords = {rephrase, semantics},
pubstate = {published},
tppubtype = {inproceedings}
}
Mariangiola Dezani-Ciancaglini, Silvia Ghilezan, Svetlana Jaksic, Jovanka Pantovic, Nobuko Yoshida
Denotational and Operational Preciseness of Subtyping: A Roadmap Proceedings Article
In: Theory and Practice of Formal Methods, pp. 155–172, 2016.
Links | BibTeX | Tags: rephrase, semantics
@inproceedings{DGJPY16,
title = {Denotational and Operational Preciseness of Subtyping: A Roadmap},
author = {Mariangiola Dezani-Ciancaglini and Silvia Ghilezan and Svetlana Jaksic and Jovanka Pantovic and Nobuko Yoshida},
url = {http://www.di.unito.it/~dezani/papers/dgjpy16.pdf},
doi = {10.1007/978-3-319-30734-3_12},
year = {2016},
date = {2016-01-01},
booktitle = {Theory and Practice of Formal Methods},
volume = {9660},
pages = {155--172},
series = {LNCS},
keywords = {rephrase, semantics},
pubstate = {published},
tppubtype = {inproceedings}
}
Mariangiola Dezani-Ciancaglini, Paola Giannini
Reversible Multiparty Sessions with Checkpoints Proceedings Article
In: EXPRESS/SOS'16, pp. 60–74, 2016.
Links | BibTeX | Tags: rephrase, semantics
@inproceedings{DG16,
title = {Reversible Multiparty Sessions with Checkpoints},
author = {Mariangiola Dezani-Ciancaglini and Paola Giannini},
url = {http://www.di.unito.it/~dezani/papers/dg16.pdf},
year = {2016},
date = {2016-01-01},
booktitle = {EXPRESS/SOS'16},
volume = {222},
pages = {60--74},
series = {EPTCS},
keywords = {rephrase, semantics},
pubstate = {published},
tppubtype = {inproceedings}
}